diff --git a/.circle/tests.sh b/.circle/tests.sh deleted file mode 100644 index 202f9c5918..0000000000 --- a/.circle/tests.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -# -# Balance nipype testing workflows across CircleCI build nodes -# - -# Setting # $ help set -set -e # Exit immediately if a command exits with a non-zero status. -set -u # Treat unset variables as an error when substituting. -set -x # Print command traces before executing command. - -if [ "${CIRCLE_NODE_TOTAL:-}" != "4" ]; then - echo "These tests were designed to be run at 4x parallelism." - exit 1 -fi - -# These tests are manually balanced based on previous build timings. -# They may need to be rebalanced in the future. -case ${CIRCLE_NODE_INDEX} in - 0) - docker run --rm=false -it -e FSL_COURSE_DATA="/data/examples/nipype-fsl_course_data" -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_pytests.sh && \ - docker run --rm=false -it -e FSL_COURSE_DATA="/data/examples/nipype-fsl_course_data" -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py27 /usr/bin/run_pytests.sh && \ - docker run --rm=false -it -v $WORKDIR:/work -w /src/nipype/doc --entrypoint=/usr/bin/run_builddocs.sh nipype/nipype:py36 /usr/bin/run_builddocs.sh && \ - docker run --rm=false -it -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow3d && \ - docker run --rm=false -it -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow4d - exitcode=$? 
- ;; - 1) - docker run --rm=false -it -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh fmri_spm_dartel Linear /data/examples/ level1 && \ - docker run --rm=false -it -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh fmri_spm_dartel Linear /data/examples/ l2pipeline - exitcode=$? - ;; - 2) - docker run --rm=false -it -e NIPYPE_NUMBER_OF_CPUS=4 -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py27 /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ level1 && \ - docker run --rm=false -it -e NIPYPE_NUMBER_OF_CPUS=4 -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ l2pipeline - exitcode=$? - ;; - 3) - docker run --rm=false -it -e NIPYPE_NUMBER_OF_CPUS=4 -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ level1 && \ - docker run --rm=false -it -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh fmri_fsl_feeds Linear /data/examples/ l1pipeline && \ - docker run --rm=false -it -v $HOME/examples:/data/examples:ro -v $WORKDIR:/work -w /work nipype/nipype:py36 /usr/bin/run_examples.sh fmri_fsl_reuse Linear /data/examples/ level1_workflow - exitcode=$? 
- ;; -esac - -cp ${WORKDIR}/tests/*.xml ${CIRCLE_TEST_REPORTS}/tests/ - -# Exit with error if any of the tests failed -if [ "$exitcode" != "0" ]; then exit 1; fi -codecov -f "coverage*.xml" -s "${WORKDIR}/tests/" -R "${HOME}/nipype/" -F unittests -e CIRCLE_NODE_INDEX -codecov -f "smoketest*.xml" -s "${WORKDIR}/tests/" -R "${HOME}/nipype/" -F smoketests -e CIRCLE_NODE_INDEX - diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..e922b37520 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,194 @@ +version: 2 +jobs: + + compare_base_dockerfiles: + docker: + - image: docker:17.10.0-ce-git + steps: + - checkout: + path: /home/circleci/nipype + - setup_remote_docker + - run: + name: Generate and prune base Dockerfile in preparation for cache check + working_directory: /home/circleci/nipype/docker + command: | + mkdir -p /tmp/docker + ash ./generate_dockerfiles.sh -b + + # Use the sha256 sum of the pruned Dockerfile as the cache key. + ash prune_dockerfile.sh Dockerfile.base > /tmp/docker/Dockerfile.base-pruned + - restore_cache: + key: dockerfile-cache-v1-master-{{ checksum "/tmp/docker/Dockerfile.base-pruned" }} + - run: + name: Determine how to get base image + command: | + if [ -f /tmp/docker/cache/Dockerfile.base-pruned ]; then + echo "Cache found. Will pull base image." + echo 'export GET_BASE=PULL' > /tmp/docker/get_base_image.sh + else + echo "Cache not found. Will build base image." 
+ echo 'export GET_BASE=BUILD' > /tmp/docker/get_base_image.sh + fi + - persist_to_workspace: + root: /tmp + paths: + - docker/Dockerfile.base-pruned + - docker/get_base_image.sh + + + build_and_test: + parallelism: 4 + machine: + # Ubuntu 14.04 with Docker 17.10.0-ce + image: circleci/classic:201710-02 + working_directory: /home/circleci/nipype + steps: + - checkout: + path: /home/circleci/nipype + - attach_workspace: + at: /tmp + - run: + name: Get test dependencies and generate Dockerfiles + command: | + pip install --no-cache-dir codecov + make gen-dockerfiles + - run: + name: Modify Nipype version if necessary + command: | + if [ "$CIRCLE_TAG" != "" ]; then + sed -i -E "s/(__version__ = )'[A-Za-z0-9.-]+'/\1'$CIRCLE_TAG'/" nipype/info.py + fi + - run: + name: Get base image (pull or build) + no_output_timeout: 60m + command: | + source /tmp/docker/get_base_image.sh + if [ "$GET_BASE" == "PULL" ]; then + echo "Pulling base image ..." + docker pull nipype/nipype:base + elif [ "$GET_BASE" == "BUILD" ]; then + e=1 && for i in {1..5}; do + docker build -t nipype/nipype:base - < docker/Dockerfile.base && e=0 && break || sleep 15 + done && [ "$e" -eq "0" ] + else + echo "Error: method to get base image not understood" + exit 1 + fi + - run: + name: Build main image (py36) + no_output_timeout: 60m + command: | + e=1 && for i in {1..5}; do + docker build \ + --rm=false \ + --tag nipype/nipype:latest \ + --tag nipype/nipype:py36 \ + --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ + --build-arg VCS_REF="$(git rev-parse --short HEAD)" \ + --build-arg VERSION="${CIRCLE_TAG}" /home/circleci/nipype \ + && e=0 && break || sleep 15 + done && [ "$e" -eq "0" ] + - run: + name: Build main image (py27) + no_output_timeout: 60m + command: | + e=1 && for i in {1..5}; do + docker build \ + --rm=false \ + --tag nipype/nipype:py27 \ + --build-arg PYTHON_VERSION_MAJOR=2 \ + --build-arg PYTHON_VERSION_MINOR=7 \ + --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ + 
--build-arg VCS_REF="$(git rev-parse --short HEAD)" \ + --build-arg VERSION="${CIRCLE_TAG}-py27" /home/circleci/nipype \ + && e=0 && break || sleep 15 + done && [ "$e" -eq "0" ] + - run: + name: Download test data + no_output_timeout: 20m + working_directory: /home/circleci/examples + environment: + OSF_NIPYPE_URL: "https://files.osf.io/v1/resources/nefdp/providers/osfstorage" + command: | + export DATA_NIPYPE_TUTORIAL_URL="${OSF_NIPYPE_URL}/57f4739cb83f6901ed94bf21" + curl -sSL --retry 5 --connect-timeout 15 "$DATA_NIPYPE_TUTORIAL_URL" | tar xj + + export DATA_NIPYPE_FSL_COURSE="${OSF_NIPYPE_URL}/57f472cf9ad5a101f977ecfe" + curl -sSL --retry 5 --connect-timeout 15 "$DATA_NIPYPE_FSL_COURSE" | tar xz + + export DATA_NIPYPE_FSL_FEEDS="${OSF_NIPYPE_URL}/57f473066c613b01f113e7af" + curl -sSL --retry 5 --connect-timeout 15 "$DATA_NIPYPE_FSL_FEEDS" | tar xz + - run: + name: Run tests + no_output_timeout: 4h + environment: + WORKDIR: /home/circleci/work + command: | + mkdir -p "$WORKDIR" + chmod -R 777 "$WORKDIR" + bash /home/circleci/nipype/.circleci/tests.sh + - store_artifacts: + path: /home/circleci/work/tests + - run: + name: Save Docker images to workspace + no_output_timeout: 60m + command: | + if [ "$CIRCLE_NODE_INDEX" -eq "0" ] && [ "$CIRCLE_BRANCH" == "master" ]; then + docker save nipype/nipype:base \ + nipype/nipype:latest \ + nipype/nipype:py36 \ + nipype/nipype:py27 | gzip -1 > /tmp/docker/nipype-base-latest-py36-py27.tar.gz + du -h /tmp/docker/nipype-base-latest-py36-py27.tar.gz + fi + - persist_to_workspace: + root: /tmp + paths: + - docker/* + + + deploy: + docker: + - image: docker:17.10.0-ce-git + steps: + - setup_remote_docker + - attach_workspace: + at: /tmp + - run: + name: Load saved Docker images. 
+ no_output_timeout: 60m + command: | + docker load < /tmp/docker/nipype-base-latest-py36-py27.tar.gz + - run: + name: Push to DockerHub + no_output_timeout: 120m + command: | + echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin + docker push nipype/nipype:base + docker push nipype/nipype:latest + docker push nipype/nipype:py36 + docker push nipype/nipype:py27 + - run: + name: Move pruned Dockerfile to /tmp/docker/cache directory + command: | + mkdir -p /tmp/docker/cache/ + mv /tmp/docker/Dockerfile.base-pruned /tmp/docker/cache/Dockerfile.base-pruned + - save_cache: + paths: + - /tmp/docker/cache/Dockerfile.base-pruned + key: dockerfile-cache-v1-{{ .Branch }}-{{ checksum "/tmp/docker/cache/Dockerfile.base-pruned" }} + + +workflows: + version: 2 + build_test_deploy: + jobs: + - compare_base_dockerfiles + - build_and_test: + requires: + - compare_base_dockerfiles + - deploy: + filters: + branches: + only: master + requires: + - build_and_test diff --git a/.circleci/tests.sh b/.circleci/tests.sh new file mode 100644 index 0000000000..f55a3249d7 --- /dev/null +++ b/.circleci/tests.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Balance nipype testing workflows across CircleCI build nodes +# + +# Setting # $ help set +set -e # Exit immediately if a command exits with a non-zero status. +set -u # Treat unset variables as an error when substituting. +set -x # Print command traces before executing command. + +if [ "${CIRCLE_NODE_TOTAL:-}" != "4" ]; then + echo "These tests were designed to be run at 4x parallelism." + exit 1 +fi + +DOCKER_IMAGE="nipype/nipype" + +# These tests are manually balanced based on previous build timings. +# They may need to be rebalanced in the future. 
+case ${CIRCLE_NODE_INDEX} in + 0) + docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e CI_SKIP_TEST=1 -e NIPYPE_RESOURCE_MONITOR=1 -e FSL_COURSE_DATA="/data/examples/nipype-fsl_course_data" "${DOCKER_IMAGE}:py36" /usr/bin/run_pytests.sh \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e CI_SKIP_TEST=1 -e NIPYPE_RESOURCE_MONITOR=1 -e FSL_COURSE_DATA="/data/examples/nipype-fsl_course_data" "${DOCKER_IMAGE}:py27" /usr/bin/run_pytests.sh \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /src/nipype/doc "${DOCKER_IMAGE}:py36" /usr/bin/run_builddocs.sh \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow3d \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow4d + exitcode=$? + ;; + 1) + docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_dartel Linear /data/examples/ level1 \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_dartel Linear /data/examples/ l2pipeline + exitcode=$? + ;; + 2) + docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ level1 \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py27" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ l2pipeline + exitcode=$? 
+ ;; + 3) + docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ level1 \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_fsl_feeds Linear /data/examples/ l1pipeline \ + && docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_fsl_reuse Linear /data/examples/ level1_workflow + exitcode=$? + ;; +esac + +# Exit with error if any of the tests failed +if [ "$exitcode" != "0" ]; then exit 1; fi + +codecov --file "${WORKDIR}/tests/coverage*.xml" \ + --root "${HOME}/nipype/" --flags unittests -e CIRCLE_NODE_INDEX + +codecov --file "${WORKDIR}/tests/smoketest*.xml" \ + --root "${HOME}/nipype/" --flags smoketests -e CIRCLE_NODE_INDEX diff --git a/.dockerignore b/.dockerignore index c44710d14e..fb4be03ec9 100644 --- a/.dockerignore +++ b/.dockerignore @@ -34,4 +34,4 @@ Vagrantfile .mailmap # Previous coverage results -.coverage \ No newline at end of file +.coverage diff --git a/.mailmap b/.mailmap index af5a39bd66..0a982fb8a9 100644 --- a/.mailmap +++ b/.mailmap @@ -1,5 +1,6 @@ Aimi Watanabe stymy Aimi Watanabe stymy +Alejandro de la Vega adelavega Alexander Schaefer Alexander Schaefer Alexander Schaefer alexschaefer83 Alexander Schaefer aschaefer @@ -56,11 +57,13 @@ Franz Liem fliem Gael Varoquaux GaelVaroquaux Gael Varoquaux GaelVaroquaux Gavin Cooper gjcooper +Gilles de Hollander Gilles86 Hans Johnson Hans Johnson Hans Johnson hjmjohnson Horea Christian Horea Christian Isaac Schwabacher ischwabacher James Kent jdkent +Jakub Kaczmarzyk kaczmarj Jason Wong Jason Jason Wong jason Jason Wong Jason W @@ -72,7 +75,9 @@ Joerg Stadler Jörg Stadler jokedurnez Josh Warner Josh Warner (Mac) Kai Schlamp medihack Jessica Forbes jessicaforbes +Kevin 
Sitek sitek Leonie Lampe Leonie Lmape +Lukas Snoek Lukas Snoek Mathias Goncalves mathiasg Michael Dayan Michael Michael Dayan Michael @@ -83,6 +88,7 @@ Michael Waskom Michael Waskom Michael Waskom Michael Waskom Michael Waskom mwaskom Michael Waskom mwaskom +Michael Waskom mwaskom Oscar Esteban Oscar Esteban Oscar Esteban oesteban Russell Poldrack Russ Poldrack @@ -101,5 +107,6 @@ Steven Giavasis sgiavasis Tristan Glatard Tristan Glatard Victor Saase vsaase William Triplett William Triplett +Wolfgang Pauli Wolfgang Pauli Yaroslav Halchenko Yaroslav Halchenko pipolose pipolose diff --git a/.travis.yml b/.travis.yml index f97f48dddb..08d9234675 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,9 +8,10 @@ python: - 3.5 - 3.6 env: -- INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,fmri,profiler" -- INSTALL_DEB_DEPENDECIES=false NIPYPE_EXTRAS="doc,tests,fmri,profiler" -- INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,fmri,profiler,duecredit" +- INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,fmri,profiler" CI_SKIP_TEST=1 +- INSTALL_DEB_DEPENDECIES=false NIPYPE_EXTRAS="doc,tests,fmri,profiler" CI_SKIP_TEST=1 +- INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,fmri,profiler,duecredit" CI_SKIP_TEST=1 +- INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,fmri,profiler" PIP_FLAGS="--pre" CI_SKIP_TEST=1 before_install: - function apt_inst { if $INSTALL_DEB_DEPENDECIES; then sudo rm -rf /dev/shm; fi && @@ -33,15 +34,20 @@ before_install: hash -r && conda config --set always_yes yes --set changeps1 no && conda update -q conda && - conda install python=${TRAVIS_PYTHON_VERSION} && conda config --add channels conda-forge && - conda install -y nipype icu && - rm -r ${CONDA_HOME}/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/nipype*; } + conda install python=${TRAVIS_PYTHON_VERSION} && + conda install -y icu && + pip install -r requirements.txt && + pushd $HOME; + git clone https://github.com/INCF/pybids.git; + cd pybids; + pip install -e .; + popd; } # 
Add install of vtk and mayavi to test mesh (disabled): conda install -y vtk mayavi - travis_retry apt_inst - travis_retry conda_inst install: -- travis_retry pip install -e .[$NIPYPE_EXTRAS] +- travis_retry pip install $PIP_FLAGS -e .[$NIPYPE_EXTRAS] script: - py.test -v --doctest-modules nipype deploy: @@ -53,4 +59,4 @@ deploy: tags: true repo: nipy/nipype branch: master - distributions: "sdist" + distributions: "sdist bdist_wheel" diff --git a/.zenodo.json b/.zenodo.json index bdded23dfd..509f8342cc 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -37,6 +37,11 @@ { "name": "Yvernault, Benjamin" }, + { + "affiliation": "Stanford University", + "name": "Markiewicz, Christopher J.", + "orcid": "0000-0002-6533-164X" + }, { "name": "Burns, Christopher" }, @@ -50,31 +55,25 @@ "name": "Jarecka, Dorota", "orcid": "0000-0003-1857-8129" }, - { - "affiliation": "Stanford University", - "name": "Markiewicz, Christopher J.", - "orcid": "0000-0002-6533-164X" - }, { "affiliation": "Florida International University", "name": "Salo, Taylor", "orcid": "0000-0001-9813-3167" }, { - "affiliation": "Developer", - "name": "Clark, Daniel", - "orcid": "0000-0002-8121-8954" + "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", + "name": "Wong, Jason" }, { "affiliation": "Department of Psychology, Stanford University", "name": "Waskom, Michael" }, { - "affiliation": "Shattuck Lab, UCLA Brain Mapping Center", - "name": "Wong, Jason" + "name": "Modat, Marc" }, { - "name": "Modat, Marc" + "affiliation": "National Institutes of Health", + "name": "Clark, Michael G. " }, { "affiliation": "Department of Electrical and Computer Engineering, Johns Hopkins University", @@ -82,17 +81,27 @@ "orcid": "0000-0003-4554-5058" }, { - "affiliation": "National Institutes of Health", - "name": "Clark, Michael G. 
" + "affiliation": "Developer", + "name": "Clark, Daniel", + "orcid": "0000-0002-8121-8954" }, { "affiliation": "Mayo Clinic, Neurology, Rochester, MN, USA", "name": "Dayan, Michael", "orcid": "0000-0002-2666-0969" }, + { + "affiliation": "MIT", + "name": "Goncalves, Mathias" + }, { "name": "Loney, Fred" }, + { + "affiliation": "National Institute of Mental Health", + "name": "Nielson, Dylan M.", + "orcid": "0000-0003-4613-6643" + }, { "name": "Madison, Cindee" }, @@ -110,11 +119,12 @@ "name": "Berleant, Shoshana" }, { - "name": "Pinsard, Basile" + "affiliation": "Montreal Neurological Institute and Hospital", + "name": "Markello, Ross", + "orcid": "0000-0003-1057-1336" }, { - "affiliation": "MIT", - "name": "Goncalves, Mathias" + "name": "Pinsard, Basile" }, { "affiliation": "UC Berkeley", @@ -126,6 +136,15 @@ "name": "Cipollini, Ben", "orcid": "0000-0002-7782-0790" }, + { + "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", + "name": "Horea, Christian", + "orcid": "0000-0001-7037-2449" + }, + { + "affiliation": "Molecular Imaging Research Center, CEA, France", + "name": "Bougacha, Salma" + }, { "affiliation": "INRIA", "name": "Varoquaux, Gael", @@ -146,9 +165,6 @@ "name": "Halchenko, Yaroslav O.", "orcid": "0000-0003-3456-2493" }, - { - "name": "Forbes, Jessica" - }, { "name": "Moloney, Brendan" }, @@ -157,11 +173,21 @@ "name": "Malone, Ian B.", "orcid": "0000-0001-7512-7856" }, + { + "affiliation": "MIT", + "name": "Kaczmarzyk, Jakub", + "orcid": "0000-0002-5544-7577" + }, { "affiliation": "Otto-von-Guericke-University Magdeburg, Germany", "name": "Hanke, Michael", "orcid": "0000-0001-6398-6370" }, + { + "affiliation": "Vrije Universiteit, Amsterdam", + "name": "Gilles de Hollander", + "orcid": "0000-0003-1988-5091" + }, { "name": "Mordom, David" }, @@ -179,9 +205,12 @@ "orcid": "0000-0003-0579-9811" }, { - "affiliation": "Institute for Biomedical Engineering, ETH and University of Zurich", - "name": "Horea, Christian", - 
"orcid": "0000-0001-7037-2449" + "name": "Forbes, Jessica" + }, + { + "affiliation": "Montreal Neurological Institute and Hospital", + "name": "DuPre, Elizabeth", + "orcid": "0000-0003-1358-196X" }, { "name": "Schwartz, Yannick" @@ -195,22 +224,18 @@ "orcid": "0000-0003-2766-8425" }, { - "affiliation": "UniversityHospital Heidelberg, Germany", - "name": "Kleesiek, Jens" - }, - { - "affiliation": "Nathan s Kline institute for psychiatric research", - "name": "Sikka, Sharad" + "name": "Kent, James" }, { - "affiliation": "Child Mind Institute", - "name": "Frohlich, Caroline" + "name": "Perez-Guevara, Martin" }, { - "name": "Kent, James" + "affiliation": "UniversityHospital Heidelberg, Germany", + "name": "Kleesiek, Jens" }, { - "name": "Perez-Guevara, Martin" + "affiliation": "Nathan s Kline institute for psychiatric research", + "name": "Sikka, Sharad" }, { "name": "Watanabe, Aimi" @@ -219,6 +244,10 @@ "affiliation": "University of Iowa", "name": "Welch, David" }, + { + "affiliation": "Child Mind Institute", + "name": "Frohlich, Caroline" + }, { "name": "Cumba, Chad" }, @@ -230,15 +259,16 @@ "name": "Eshaghi, Arman", "orcid": "0000-0002-6652-3512" }, + { + "affiliation": "University of Texas at Austin", + "name": "De La Vega, Alejandro", + "orcid": "0000-0001-9062-3778" + }, { "affiliation": "Harvard University - Psychology", "name": "Kastman, Erik", "orcid": "0000-0001-7221-9042" }, - { - "affiliation": "Molecular Imaging Research Center, CEA, France", - "name": "Bougacha, Salma" - }, { "name": "Blair, Ross" }, @@ -266,9 +296,6 @@ "affiliation": "Child Mind Institute", "name": "Giavasis, Steven" }, - { - "name": "Erickson, Drew" - }, { "name": "Correa, Carlos" }, @@ -276,18 +303,21 @@ "name": "Ghayoor, Ali" }, { - "name": "K\u00fcttner, Ren\u00e9" + "affiliation": "University of California, San Francisco", + "name": "Jordan, Kesshi", + "orcid": "0000-0001-6313-0580" }, { - "name": "Haselgrove, Christian" + "name": "Erickson, Drew" + }, + { + "name": "K\u00fcttner, 
Ren\u00e9" }, { "name": "Zhou, Dale" }, { - "affiliation": "Child Mind Institute", - "name": "Craddock, R. Cameron", - "orcid": "0000-0002-4950-1303" + "name": "Haselgrove, Christian" }, { "name": "Haehn, Daniel" @@ -300,10 +330,12 @@ "name": "Millman, Jarrod" }, { - "name": "Lai, Jeff" + "affiliation": "University of California, San Francisco", + "name": "Jordan, Kesshi", + "orcid": "0000-0001-6313-0580" }, { - "name": "Renfro, Mandy" + "name": "Lai, Jeff" }, { "affiliation": "The University of Sydney", @@ -319,6 +351,9 @@ "name": "Glatard, Tristan", "orcid": "0000-0003-2620-5883" }, + { + "name": "Renfro, Mandy" + }, { "affiliation": "University of Pennsylvania", "name": "Kahn, Ari E.", @@ -338,10 +373,10 @@ "name": "Park, Anne" }, { - "name": "McDermottroe, Conor" + "name": "Hallquist, Michael" }, { - "name": "Hallquist, Michael" + "name": "McDermottroe, Conor" }, { "name": "Poldrack, Russell" @@ -351,40 +386,44 @@ "name": "Perkins, L. Nathan" }, { - "name": "Noel, Maxime" + "affiliation": "University of California, San Francisco", + "name": "Jordan, Kesshi", + "orcid": "0000-0001-6313-0580" }, { - "affiliation": "Institute of Neuroinformatics, ETH/University of Zurich", - "name": "Gerhard, Stephan", - "orcid": "0000-0003-4454-6171" + "affiliation": "University of Newcastle, Australia", + "name": "Cooper, Gavin", + "orcid": "0000-0002-7186-5293" }, { - "name": "Salvatore, John" + "name": "Noel, Maxime" }, { - "name": "Mertz, Fred" + "name": "Salvatore, John" }, { - "affiliation": "Duke University", - "name": "Broderick, William", - "orcid": "0000-0002-8999-9003" + "name": "Mertz, Fred" }, { "name": "Inati, Souheil" }, { - "name": "Hinds, Oliver" + "affiliation": "University of Amsterdam", + "name": "Lukas Snoek", + "orcid": "0000-0001-8972-204X" }, { - "name": "Brett, Matthew" + "affiliation": "Child Mind Institute", + "name": "Craddock, R. 
Cameron", + "orcid": "0000-0002-4950-1303" }, { - "affiliation": "Department of Psychology, Stanford University; Parietal, INRIA", - "name": "Durnez, Joke", - "orcid": "0000-0001-9030-2202" + "name": "Hinds, Oliver" }, { - "name": "Tambini, Arielle" + "affiliation": "Institute of Neuroinformatics, ETH/University of Zurich", + "name": "Gerhard, Stephan", + "orcid": "0000-0003-4454-6171" }, { "name": "Rothmei, Simon" @@ -395,9 +434,7 @@ "orcid": "0000-0002-5650-3964" }, { - "affiliation": "University of Newcastle, Australia", - "name": "Cooper, Gavin", - "orcid": "0000-0002-7186-5293" + "name": "Tambini, Arielle" }, { "name": "Marina, Ana" @@ -412,6 +449,9 @@ "affiliation": "University of illinois urbana champaign", "name": "Sharp, Paul" }, + { + "name": "Brett, Matthew" + }, { "name": "Matsubara, K" }, @@ -423,11 +463,6 @@ { "name": "Cheung, Brian" }, - { - "affiliation": "The University of Texas at Austin", - "name": "Floren, Andrew", - "orcid": "0000-0003-3618-2056" - }, { "name": "Nickson, Thomas" }, @@ -440,24 +475,28 @@ "name": "Weinstein, Alejandro" }, { - "name": "Dubois, Mathieu" + "affiliation": "The University of Texas at Austin", + "name": "Floren, Andrew", + "orcid": "0000-0003-3618-2056" }, { - "name": "Arias, Jaime" + "affiliation": "Duke University", + "name": "Broderick, William", + "orcid": "0000-0002-8999-9003" }, { - "name": "Tarbert, Claire" + "name": "Dubois, Mathieu" }, { - "name": "Schlamp, Kai" + "affiliation": "Department of Psychology, Stanford University; Parietal, INRIA", + "name": "Durnez, Joke", + "orcid": "0000-0001-9030-2202" }, { - "affiliation": "University of California, San Francisco", - "name": "Jordan, Kesshi", - "orcid": "0000-0001-6313-0580" + "name": "Arias, Jaime" }, { - "name": "Liem, Franz" + "name": "Tarbert, Claire" }, { "name": "Saase, Victor" @@ -471,6 +510,9 @@ { "name": "Podranski, Kornelius" }, + { + "name": "Schlamp, Kai" + }, { "name": "Flandin, Guillaume" }, @@ -479,14 +521,6 @@ "name": "Papadopoulos Orfanos, 
Dimitri", "orcid": "0000-0002-1242-8990" }, - { - "name": "Schwabacher, Isaac" - }, - { - "affiliation": "University of Cambridge", - "name": "McNamee, Daniel", - "orcid": "0000-0001-9928-4960" - }, { "name": "Falkiewicz, Marcel" }, @@ -503,21 +537,37 @@ { "name": "Varada, Jan" }, + { + "name": "Schwabacher, Isaac" + }, + { + "name": "Liem, Franz" + }, { "affiliation": "Stereotaxy Core, Brain & Spine Institute", "name": "P\u00e9rez-Garc\u00eda, Fernando", "orcid": "0000-0001-9090-3024" }, { - "name": "Davison, Andrew" + "name": "Shachnev, Dmitry" }, { - "name": "Shachnev, Dmitry" + "affiliation": "University of Cambridge", + "name": "McNamee, Daniel", + "orcid": "0000-0001-9928-4960" + }, + { + "name": "Davison, Andrew" }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "University College London", + "name": "Mancini, Matteo", + "orcid": "0000-0001-7194-4568" } ], "keywords": [ diff --git a/CHANGES b/CHANGES index 2f2ad920af..01a09b735a 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,66 @@ -Upcoming release +Upcoming release (0.14.1) ================ +* MAINT: Cleaning / simplify ``Node`` (https://github.com/nipy/nipype/pull/#2325) + +0.14.0 (November 29, 2017) +========================== + +###### [Full changelog](https://github.com/nipy/nipype/milestone/13) + +* FIX+MAINT: Revision of the resource monitor (https://github.com/nipy/nipype/pull/2285) +* FIX: MultiProc mishandling crashes (https://github.com/nipy/nipype/pull/2301) +* MAINT: Revise use of `subprocess.Popen` (https://github.com/nipy/nipype/pull/2289) +* ENH: Memorize version checks (https://github.com/nipy/nipype/pull/2274, https://github.com/nipy/nipype/pull/2295) + + +0.14.0rc1 (November 21, 2017) +----------------------------- + +* ENH: Generate Dockerfiles with neurodocker (https://github.com/nipy/nipype/pull/2202) +* ENH: FLAIR options for recon-all (https://github.com/nipy/nipype/pull/2279) +* ENH: Config option for setting 
maxtasksperchild when multiprocessing (https://github.com/nipy/nipype/pull/2284) +* FIX: Testing maintainance and improvements (https://github.com/nipy/nipype/pull/2252) +* ENH: Add elapsed_time and final metric_value to ants.Registration (https://github.com/nipy/nipype/pull/1985) +* ENH: Improve terminal_output feature (https://github.com/nipy/nipype/pull/2209) +* ENH: Simple interface to FSL std2imgcoords (https://github.com/nipy/nipype/pull/2209, prev #1398) +* ENH: Centralize virtual/physical $DISPLAYs (https://github.com/nipy/nipype/pull/2203, https://github.com/nipy/nipype/pull/2211) +* ENH: New ResourceMonitor - replaces resource profiler (https://github.com/nipy/nipype/pull/2200) +* ENH: Quickshear interface (https://github.com/nipy/nipype/pull/2047) +* MAINT: updated deprecated HasTraits method (https://github.com/nipy/nipype/pull/2048) +* ENH: CLI versioning (https://github.com/nipy/nipype/pull/2054) +* ENH: Dual Regression interface (https://github.com/nipy/nipype/pull/2057) +* ENH: Additional args to ANTs registration (https://github.com/nipy/nipype/pull/2062, https://github.com/nipy/nipype/pull/2078) +* FIX: Mp2rage interfaces updated for new parameter names in cbstools 3 (https://github.com/nipy/nipype/pull/2065) +* MAINT: Removed automatic nipype folder creation in HOME (https://github.com/nipy/nipype/pull/2076) +* MAINT: Additional Windows support (https://github.com/nipy/nipype/pull/2085) +* ENH: Output realignment matrices from TOPUP (https://github.com/nipy/nipype/pull/2084) +* ENH: Additional AFNI interfaces: 3dZcat, 3dZeropad, 3dedge3, 3dDeconvolve, 3dQwarp, 1dCat, 3dNwarpApply, 3daxialize, + 3dREMLfit, 3dUndump, 3dCM, 3dSynthesize + more (https://github.com/nipy/nipype/pull/2087, https://github.com/nipy/nipype/pull/2090, + https://github.com/nipy/nipype/pull/2095, https://github.com/nipy/nipype/pull/2099, https://github.com/nipy/nipype/pull/2103, + https://github.com/nipy/nipype/pull/2114, https://github.com/nipy/nipype/pull/2135, 
https://github.com/nipy/nipype/pull/2186, + https://github.com/nipy/nipype/pull/2201, https://github.com/nipy/nipype/pull/2210) +* MAINT: cleanup and update AFNI's Allineate (https://github.com/nipy/nipype/pull/2098) +* ENH: Add cosine-basis high-pass-filter to CompCor, allow skip of initial volumes (https://github.com/nipy/nipype/pull/2107, https://github.com/nipy/nipype/pull/#2122) +* FIX: Catch more dcm2niix DTI conversions (https://github.com/nipy/nipype/pull/2110) +* FIX: Retrieve aseg + wmparc stats properly (https://github.com/nipy/nipype/pull/2117) +* ENH: ANTs MeasureImageSimilarity Inteface (https://github.com/nipy/nipype/pull/2128) +* FIX: CompCor filter_basis of correct size, pre-filter column headers (https://github.com/nipy/nipype/pull/2136, https://github.com/nipy/nipype/pull/2138) +* ENH: FreeSurfer lta_convert and mri_coreg interfaces (https://github.com/nipy/nipype/pull/2140, https://github.com/nipy/nipype/pull/2172) +* ENH: Speed up S3DataGrabber (https://github.com/nipy/nipype/pull/2143) +* FIX: Allow S3DataGrabber to grab single file (https://github.com/nipy/nipype/pull/2147) +* FIX: Allow 4D images as inputs to buildtemplateparallel.sh and N4BiasFieldCorrection (https://github.com/nipy/nipype/pull/2151) +* MAINT: Detect and warn unconnected duplicate nodes (https://github.com/nipy/nipype/pull/2163) +* ENH: Calcmedian Interface (https://github.com/nipy/nipype/pull/2167) +* FIX: probtrackx2 outputs (https://github.com/nipy/nipype/pull/2169) +* ENH: Improve FreeSurfer registration (https://github.com/nipy/nipype/pull/2172) +* ENH: BIDSDataGrabber interface (https://github.com/nipy/nipype/pull/2174) +* MAINT: Set minimum numpy version to 1.9.0 (https://github.com/nipy/nipype/pull/2182) +* ENH: Support for multiple intial-moving-transforms (https://github.com/nipy/nipype/pull/2187) +* MAINT: Fixes for networkx and afni (https://github.com/nipy/nipype/pull/2196, https://github.com/nipy/nipype/pull/2171) +* TST: Update C3D version in Docker build 
(https://github.com/nipy/nipype/pull/2199) +* ENH: SimpleInterface interface (https://github.com/nipy/nipype/pull/2220) +* ENH: Add LTA to Tkregister2 (https://github.com/nipy/nipype/pull/2217) 0.13.1 (May 20, 2017) ===================== diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..867bb1b38e --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting project leaders Satrajit Ghosh <> or Chris Gorgolewski <>. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project leaders is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f985bd1101..06ff582266 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,5 @@ +**Are you new to open source and GitHub?** If so reading the "[How to submit a contribution](https://opensource.guide/how-to-contribute/#how-to-submit-a-contribution)" guide will provide a great introduction to contributing to Nipype and other Open Source projects. All the Nipype specific contributing instructions listed below will make much more sense after reading this guide. + ## Contributing pull-requests (PRs) * All work is submitted via Pull Requests. @@ -14,13 +16,13 @@ * The person who accepts/merges your PR will include an update to the CHANGES file: prefix: description (URL of pull request) * Run `make check-before-commit` before submitting the PR. This will require you to either install or be in developer mode with: `python setup.py install/develop`. -* In general, do not catch exceptions without good reason. - * catching non-fatal exceptions. +* In general, do not catch exceptions without good reason. + * catching non-fatal exceptions. Log the exception as a warning. * adding more information about what may have caused the error. Raise a new exception using ``raise_from(NewException("message"), oldException)`` from ``future``. Do not log this, as it creates redundant/confusing logs. -* If you are new to the project don't forget to add your name and affiliation to the `.zenodo.json` file. 
+* **If you are new to the project don't forget to add your name and affiliation to the `.zenodo.json` file.** ## Contributing issues diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 502216cf8d..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) 2016, The developers of the Stanford CRN -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of crn_base nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -# -# Based on https://github.com/poldracklab/fmriprep/blob/9c92a3de9112f8ef1655b876de060a2ad336ffb0/Dockerfile -# -FROM nipype/base:latest -MAINTAINER The nipype developers https://github.com/nipy/nipype - -ARG PYTHON_VERSION_MAJOR=3 - -# Installing and setting up miniconda -RUN curl -sSLO https://repo.continuum.io/miniconda/Miniconda${PYTHON_VERSION_MAJOR}-4.2.12-Linux-x86_64.sh && \ - bash Miniconda${PYTHON_VERSION_MAJOR}-4.2.12-Linux-x86_64.sh -b -p /usr/local/miniconda && \ - rm Miniconda${PYTHON_VERSION_MAJOR}-4.2.12-Linux-x86_64.sh - -ENV PATH=/usr/local/miniconda/bin:$PATH \ - LANG=C.UTF-8 \ - LC_ALL=C.UTF-8 \ - ACCEPT_INTEL_PYTHON_EULA=yes \ - MKL_NUM_THREADS=1 \ - OMP_NUM_THREADS=1 -# MKL/OMP_NUM_THREADS: unless otherwise specified, each process should -# only use one thread - nipype will handle parallelization - -# Installing precomputed python packages -ARG PYTHON_VERSION_MINOR=6 -RUN conda config --add channels conda-forge; sync && \ - conda config --set always_yes yes --set changeps1 no; sync && \ - conda install -y python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} \ - mkl \ - numpy \ - scipy \ - scikit-learn \ - matplotlib \ - pandas \ - libxml2 \ - libxslt \ - traits=4.6.0 \ - psutil \ - icu=58.1 && \ - sync; - -# matplotlib cleanups: set default backend, precaching fonts -RUN sed -i 's/\(backend *: \).*$/\1Agg/g' /usr/local/miniconda/lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/site-packages/matplotlib/mpl-data/matplotlibrc && \ - python -c "from matplotlib import font_manager" - -# Install CI scripts -COPY docker/files/run_* /usr/bin/ -RUN chmod +x /usr/bin/run_* - -# Replace imglob with a Python3 compatible version -COPY nipype/external/fsl_imglob.py /usr/bin/fsl_imglob.py -RUN rm -rf ${FSLDIR}/bin/imglob && \ - chmod +x /usr/bin/fsl_imglob.py && \ - ln -s /usr/bin/fsl_imglob.py ${FSLDIR}/bin/imglob - -# Installing dev requirements (packages that are not in pypi) -WORKDIR /src/ -COPY requirements.txt requirements.txt -RUN 
pip install -r requirements.txt && \ - rm -rf ~/.cache/pip - -# Installing nipype -COPY . /src/nipype -RUN cd /src/nipype && \ - pip install -e .[all] && \ - rm -rf ~/.cache/pip - -WORKDIR /work/ - -ARG BUILD_DATE -ARG VCS_REF -ARG VERSION -LABEL org.label-schema.build-date=$BUILD_DATE \ - org.label-schema.name="NIPYPE" \ - org.label-schema.description="NIPYPE - Neuroimaging in Python: Pipelines and Interfaces" \ - org.label-schema.url="http://nipype.readthedocs.io" \ - org.label-schema.vcs-ref=$VCS_REF \ - org.label-schema.vcs-url="https://github.com/nipy/nipype" \ - org.label-schema.version=$VERSION \ - org.label-schema.schema-version="1.0" diff --git a/Makefile b/Makefile index 31f67bf500..0e1e927232 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ PYTHON ?= python NOSETESTS=`which nosetests` -.PHONY: zipdoc sdist egg upload_to_pypi trailing-spaces clean-pyc clean-so clean-build clean-ctags clean in inplace test-code test-coverage test html specs check-before-commit check +.PHONY: zipdoc sdist egg upload_to_pypi trailing-spaces clean-pyc clean-so clean-build clean-ctags clean in inplace test-code test-coverage test html specs check-before-commit check gen-base-dockerfile gen-main-dockerfile gen-dockerfiles zipdoc: html zip documentation.zip doc/_build/html @@ -61,7 +61,7 @@ test-code: in test-coverage: clean-tests in py.test --doctest-modules --cov-config .coveragerc --cov=nipype nipype - + test: tests # just another name tests: clean test-code @@ -79,3 +79,13 @@ check-before-commit: specs trailing-spaces html test @echo "built docs" @echo "ran test" @echo "generated spec tests" + +gen-base-dockerfile: + @echo "Generating base Dockerfile" + bash docker/generate_dockerfiles.sh -b + +gen-main-dockerfile: + @echo "Generating main Dockerfile" + bash docker/generate_dockerfiles.sh -m + +gen-dockerfiles: gen-base-dockerfile gen-main-dockerfile diff --git a/README.rst b/README.rst index aa41f34d66..8831d11b2e 100644 --- a/README.rst +++ b/README.rst @@ -2,7 +2,7 @@ 
NIPYPE: Neuroimaging in Python: Pipelines and Interfaces ======================================================== -.. image:: https://travis-ci.org/nipy/nipype.png?branch=master +.. image:: https://travis-ci.org/nipy/nipype.svg?branch=master :target: https://travis-ci.org/nipy/nipype .. image:: https://circleci.com/gh/nipy/nipype/tree/master.svg?style=svg @@ -33,7 +33,7 @@ NIPYPE: Neuroimaging in Python: Pipelines and Interfaces .. image:: https://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg?style=flat :target: http://gitter.im/nipy/nipype :alt: Chat - + .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.581704.svg :target: https://doi.org/10.5281/zenodo.581704 @@ -90,33 +90,7 @@ To participate in the Nipype development related discussions please use the foll Please add *[nipype]* to the subject line when posting on the mailing list. +Contributing to the project +--------------------------- -Nipype structure ----------------- - -Currently Nipype consists of the following files and directories: - - INSTALL - NIPYPE prerequisites, installation, development, testing, and - troubleshooting. - - README - This document. - - THANKS - NIPYPE developers and contributors. Please keep it up to date!! - - LICENSE - NIPYPE license terms. - - doc/ - Sphinx/reST documentation - - examples/ - - nipype/ - Contains the source code. - - setup.py - Script for building and installing NIPYPE. - +If you'd like to contribute to the project please read our `guidelines `_. Please also read through our `code of conduct `_. 
diff --git a/circle.yml b/circle.yml deleted file mode 100644 index b122653b79..0000000000 --- a/circle.yml +++ /dev/null @@ -1,84 +0,0 @@ -machine: - pre: - - curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | bash -s -- 1.10.0 - environment: - OSF_NIPYPE_URL: "https://files.osf.io/v1/resources/nefdp/providers/osfstorage" - DATA_NIPYPE_TUTORIAL_URL: "${OSF_NIPYPE_URL}/57f4739cb83f6901ed94bf21" - DATA_NIPYPE_FSL_COURSE: "${OSF_NIPYPE_URL}/57f472cf9ad5a101f977ecfe" - DATA_NIPYPE_FSL_FEEDS: "${OSF_NIPYPE_URL}/57f473066c613b01f113e7af" - WORKDIR: "$HOME/work" - CODECOV_TOKEN: "ac172a50-8e66-42e5-8822-5373fcf54686" - services: - - docker - -dependencies: - cache_directories: - - "~/docker" - - "~/examples" - - "~/.apt-cache" - - pre: - # Let CircleCI cache the apt archive - - mkdir -p ~/.apt-cache/partial && sudo rm -rf /var/cache/apt/archives && sudo ln -s ~/.apt-cache /var/cache/apt/archives - - sudo apt-get -y update && sudo apt-get install -y wget bzip2 - # Create work folder and force group permissions - - mkdir -p $WORKDIR && sudo setfacl -d -m group:ubuntu:rwx $WORKDIR && sudo setfacl -m group:ubuntu:rwx $WORKDIR - - mkdir -p $HOME/docker $HOME/examples $WORKDIR/tests $WORKDIR/logs $WORKDIR/crashfiles ${CIRCLE_TEST_REPORTS}/tests/ - - if [[ ! -e "$HOME/bin/codecov" ]]; then mkdir -p $HOME/bin; curl -so $HOME/bin/codecov https://codecov.io/bash && chmod 755 $HOME/bin/codecov; fi - - (cd $HOME/docker && gzip -d cache.tar.gz && docker load --input $HOME/docker/cache.tar) || true : - timeout: 6000 - override: - # Get data - - if [[ ! -d ~/examples/nipype-tutorial ]]; then wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 0 -q -O nipype-tutorial.tar.bz2 "${DATA_NIPYPE_TUTORIAL_URL}" && tar xjf nipype-tutorial.tar.bz2 -C ~/examples/; fi - - if [[ ! 
-d ~/examples/nipype-fsl_course_data ]]; then wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 0 -q -O nipype-fsl_course_data.tar.gz "${DATA_NIPYPE_FSL_COURSE}" && tar xzf nipype-fsl_course_data.tar.gz -C ~/examples/; fi - - if [[ ! -d ~/examples/feeds ]]; then wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 0 -q -O fsl-5.0.9-feeds.tar.gz "${DATA_NIPYPE_FSL_FEEDS}" && tar xzf fsl-5.0.9-feeds.tar.gz -C ~/examples/; fi - - if [ "$CIRCLE_TAG" != "" ]; then sed -i -E "s/(__version__ = )'[A-Za-z0-9.-]+'/\1'$CIRCLE_TAG'/" nipype/info.py; fi - # Docker - - docker images - - ? | - e=1 && for i in {1..5}; do - docker build --rm=false -f docker/base.Dockerfile -t nipype/base:latest . && e=0 && break || sleep 15; - done && [ "$e" -eq "0" ] - : - timeout: 21600 - - ? | - e=1 && for i in {1..5}; do - docker build --rm=false -t nipype/nipype:latest -t nipype/nipype:py36 --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` --build-arg VCS_REF=`git rev-parse --short HEAD` --build-arg VERSION=$CIRCLE_TAG . && e=0 && break || sleep 15; - done && [ "$e" -eq "0" ] - : - timeout: 6000 - - ? | - e=1 && for i in {1..5}; do - docker build --rm=false -t nipype/nipype:py27 --build-arg PYTHON_VERSION_MAJOR=2 --build-arg PYTHON_VERSION_MINOR=7 --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` --build-arg VCS_REF=`git rev-parse --short HEAD` --build-arg VERSION=$CIRCLE_TAG-py27 . 
&& e=0 && break || sleep 15; - done && [ "$e" -eq "0" ] - : - timeout: 6000 - - docker save -o $HOME/docker/cache.tar ubuntu:xenial-20161213 nipype/base:latest nipype/nipype:py36 && (cd $HOME/docker && gzip cache.tar) : - timeout: 6000 - -test: - override: - - bash .circle/tests.sh : - timeout: 7200 - parallel: true - -general: - artifacts: - - "~/work/docs" - - "~/work/logs" - - "~/work/tests" - - "~/work/crashfiles" - -deployment: - production: - tag: /.*/ - commands: - # Deploy to docker hub - - if [[ -n "$DOCKER_PASS" ]]; then docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS && docker push nipype/nipype:latest; fi : - timeout: 21600 - - if [[ -n "$DOCKER_PASS" ]]; then docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS && docker tag nipype/nipype nipype/nipype:$CIRCLE_TAG && docker push nipype/nipype:$CIRCLE_TAG; fi : - timeout: 21600 - -# Automatic deployment to Pypi: -# - printf "[distutils]\nindex-servers =\n pypi\n\n[pypi]\nusername:$PYPI_USER\npassword:$PYPI_PASS\n" > ~/.pypirc -# - python setup.py sdist upload -r pypi diff --git a/codecov.yml b/codecov.yml index 2724855129..8a1ce2c18a 100644 --- a/codecov.yml +++ b/codecov.yml @@ -25,4 +25,4 @@ coverage: ignore: # files and folders that will be removed during processing - "nipype/external/*" - "tools/*" - - "doc/*" \ No newline at end of file + - "doc/*" diff --git a/doc/_static/nipype.css b/doc/_static/nipype.css index 7d0de1db74..cec080b3d6 100644 --- a/doc/_static/nipype.css +++ b/doc/_static/nipype.css @@ -57,4 +57,4 @@ div.doc2.container ul{ div.doc2 .reference.internal{ font-size: 14px; -} \ No newline at end of file +} diff --git a/doc/conf.py b/doc/conf.py index 65ee4c1e6a..094a8250aa 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # The short X.Y version. version = nipype.__version__ # The full version, including alpha/beta/rc tags. -release = "0.13.1" +release = "0.14.0" # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. diff --git a/doc/devel/gitwash/git_links.inc b/doc/devel/gitwash/git_links.inc index 14a76f5056..a679f2d78a 100644 --- a/doc/devel/gitwash/git_links.inc +++ b/doc/devel/gitwash/git_links.inc @@ -2,7 +2,7 @@ and name substitutions. It may be included in many files, therefore it should only contain link targets and name substitutions. Try grepping for "^\.\. _" to find plausible - candidates for this list. + candidates for this list. .. NOTE: reST targets are __not_case_sensitive__, so only one target definition is needed for @@ -42,7 +42,7 @@ .. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html .. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html .. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html -.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git +.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git .. _git management: http://kerneltrap.org/Linux/Git_Management .. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html .. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html diff --git a/doc/devel/gitwash/known_projects.inc b/doc/devel/gitwash/known_projects.inc index 2972352877..ce939b110e 100644 --- a/doc/devel/gitwash/known_projects.inc +++ b/doc/devel/gitwash/known_projects.inc @@ -6,7 +6,7 @@ .. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel .. numpy -.. _numpy: hhttp://numpy.scipy.org +.. _numpy: http://numpy.scipy.org .. _`numpy github`: http://github.com/numpy/numpy .. 
_`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion diff --git a/doc/devel/interface_specs.rst b/doc/devel/interface_specs.rst index 2f7d63496e..37f3533384 100644 --- a/doc/devel/interface_specs.rst +++ b/doc/devel/interface_specs.rst @@ -159,6 +159,70 @@ generated depending on inputs, by the tool. OutputSpecs inherit from ``interfaces.base.TraitedSpec`` directly. +Controlling outputs to terminal +------------------------------- + +It is very likely that the software wrapped within the interface writes +to the standard output or the standard error of the terminal. +Interfaces provide a means to access and retrieve these outputs, by +using the ``terminal_output`` attribute: :: + + import nipype.interfaces.fsl as fsl + mybet = fsl.BET(from_file='bet-settings.json') + mybet.terminal_output = 'file_split' + +In the example, the ``terminal_output = 'file_split'`` will redirect the +standard output and the standard error to split files (called +``stdout.nipype`` and ``stderr.nipype`` respectively). +The possible values for ``terminal_output`` are: + +*file* + Redirects both standard output and standard error to the same file + called ``output.nipype``. + Messages from both streams will be overlapped as they arrive to + the file. + +*file_split* + Redirects the output streams separately, to ``stdout.nipype`` + and ``stderr.nipype`` respectively, as described in the example. + +*file_stdout* + Only the standard output will be redirected to ``stdout.nipype`` + and the standard error will be discarded. + +*file_stderr* + Only the standard error will be redirected to ``stderr.nipype`` + and the standard output will be discarded. + +*stream* + Both output streams are redirected to the current logger printing + their messages interleaved and immediately to the terminal. + +*allatonce* + Both output streams will be forwarded to a buffer and stored + separately in the `runtime` object that the `run()` method returns. 
+ No files are written nor streams printed out to terminal. + +*none* + Both outputs are discarded + +In all cases, except for the ``'none'`` setting of ``terminal_output``, +the ``run()`` method will return a "runtime" object that will contain +the streams in the corresponding properties (``runtime.stdout`` +for the standard output, ``runtime.stderr`` for the standard error, and +``runtime.merged`` for both when streams are mixed, eg. when using the +*file* option). :: + + import nipype.interfaces.fsl as fsl + mybet = fsl.BET(from_file='bet-settings.json') + mybet.terminal_output = 'file_split' + ... + result = mybet.run() + result.runtime.stdout + ' ... captured standard output ...' + + + Traited Attributes ------------------ diff --git a/doc/images/nipype_architecture_overview2.png b/doc/images/nipype_architecture_overview2.png index b89ace5c57..51bad491bd 100644 Binary files a/doc/images/nipype_architecture_overview2.png and b/doc/images/nipype_architecture_overview2.png differ diff --git a/doc/images/nipype_architecture_overview2.svg b/doc/images/nipype_architecture_overview2.svg index 4f0833371f..c9265bb293 100644 --- a/doc/images/nipype_architecture_overview2.svg +++ b/doc/images/nipype_architecture_overview2.svg @@ -1362,7 +1362,7 @@ id="tspan4980" x="152.89586" y="-28.912685" - style="font-size:20.79999924px">Idiosynchratic, Heterogeneous APIs + style="font-size:20.79999924px">Idiosyncratic, Heterogeneous APIs \ No newline at end of file + diff --git a/doc/users/aws.rst b/doc/users/aws.rst index 832072ba62..7ca7f1f3db 100644 --- a/doc/users/aws.rst +++ b/doc/users/aws.rst @@ -99,4 +99,4 @@ s3://mybucket/path/to/output/dir/sub001/motion/realigned_file1.nii.gz Using S3DataGrabber ====================== -Coming soon... \ No newline at end of file +Coming soon... 
diff --git a/doc/users/caching_tutorial.rst b/doc/users/caching_tutorial.rst index 8cd51917a2..4d648277bd 100644 --- a/doc/users/caching_tutorial.rst +++ b/doc/users/caching_tutorial.rst @@ -1,4 +1,3 @@ - .. _caching: =========================== diff --git a/doc/users/config_file.rst b/doc/users/config_file.rst index 7d55cc522d..279dc1aadd 100644 --- a/doc/users/config_file.rst +++ b/doc/users/config_file.rst @@ -14,48 +14,52 @@ Logging ~~~~~~~ *workflow_level* - How detailed the logs regarding workflow should be (possible values: - ``INFO`` and ``DEBUG``; default value: ``INFO``) -*filemanip_level* - How detailed the logs regarding file operations (for example overwriting - warning) should be (possible values: ``INFO`` and ``DEBUG``; default value: - ``INFO``) + How detailed the logs regarding workflow should be (possible values: + ``INFO`` and ``DEBUG``; default value: ``INFO``) +*utils_level* + How detailed the logs regarding nipype utils, like file operations + (for example overwriting warning) or the resource profiler, should be + (possible values: ``INFO`` and ``DEBUG``; default value: + ``INFO``) *interface_level* - How detailed the logs regarding interface execution should be (possible - values: ``INFO`` and ``DEBUG``; default value: ``INFO``) + How detailed the logs regarding interface execution should be (possible + values: ``INFO`` and ``DEBUG``; default value: ``INFO``) +*filemanip_level* (deprecated as of 1.0) + How detailed the logs regarding file operations (for example overwriting + warning) should be (possible values: ``INFO`` and ``DEBUG``) *log_to_file* Indicates whether logging should also send the output to a file (possible values: ``true`` and ``false``; default value: ``false``) *log_directory* - Where to store logs. (string, default value: home directory) + Where to store logs. (string, default value: home directory) *log_size* - Size of a single log file. (integer, default value: 254000) + Size of a single log file. 
(integer, default value: 254000) *log_rotate* - How many rotation should the log file make. (integer, default value: 4) + How many rotation should the log file make. (integer, default value: 4) Execution ~~~~~~~~~ *plugin* - This defines which execution plugin to use. (possible values: ``Linear``, - ``MultiProc``, ``SGE``, ``IPython``; default value: ``Linear``) + This defines which execution plugin to use. (possible values: ``Linear``, + ``MultiProc``, ``SGE``, ``IPython``; default value: ``Linear``) *stop_on_first_crash* - Should the workflow stop upon first node crashing or try to execute as many - nodes as possible? (possible values: ``true`` and ``false``; default value: - ``false``) + Should the workflow stop upon first node crashing or try to execute as many + nodes as possible? (possible values: ``true`` and ``false``; default value: + ``false``) *stop_on_first_rerun* - Should the workflow stop upon first node trying to recompute (by that we - mean rerunning a node that has been run before - this can happen due changed - inputs and/or hash_method since the last run). (possible values: ``true`` - and ``false``; default value: ``false``) + Should the workflow stop upon first node trying to recompute (by that we + mean rerunning a node that has been run before - this can happen due changed + inputs and/or hash_method since the last run). (possible values: ``true`` + and ``false``; default value: ``false``) *hash_method* - Should the input files be checked for changes using their content (slow, but - 100% accurate) or just their size and modification date (fast, but - potentially prone to errors)? (possible values: ``content`` and - ``timestamp``; default value: ``timestamp``) + Should the input files be checked for changes using their content (slow, but + 100% accurate) or just their size and modification date (fast, but + potentially prone to errors)? 
(possible values: ``content`` and + ``timestamp``; default value: ``timestamp``) *keep_inputs* Ensures that all inputs that are created in the nodes working directory are @@ -63,44 +67,47 @@ Execution value: ``false``) *single_thread_matlab* - Should all of the Matlab interfaces (including SPM) use only one thread? - This is useful if you are parallelizing your workflow using MultiProc or - IPython on a single multicore machine. (possible values: ``true`` and - ``false``; default value: ``true``) + Should all of the Matlab interfaces (including SPM) use only one thread? + This is useful if you are parallelizing your workflow using MultiProc or + IPython on a single multicore machine. (possible values: ``true`` and + ``false``; default value: ``true``) *display_variable* - What ``DISPLAY`` variable should all command line interfaces be - run with. This is useful if you are using `xnest - `_ - or `Xvfb `_ - and you would like to redirect all spawned windows to - it. (possible values: any X server address; default value: not - set) + Override the ``$DISPLAY`` environment variable for interfaces that require + an X server. This option is useful if there is a running X server, but + ``$DISPLAY`` was not defined in nipype's environment. For example, if an X + server is listening on the default port of 6000, set ``display_variable = :0`` + to enable nipype interfaces to use it. It may also point to displays provided + by VNC, `xnest `_ + or `Xvfb `_. + If neither ``display_variable`` nor the ``$DISPLAY`` environment variable are + set, nipype will try to configure a new virtual server using Xvfb. + (possible values: any X server address; default value: not set) *remove_unnecessary_outputs* - This will remove any interface outputs not needed by the workflow. If the - required outputs from a node changes, rerunning the workflow will rerun the - node. 
Outputs of leaf nodes (nodes whose outputs are not connected to any - other nodes) will never be deleted independent of this parameter. (possible - values: ``true`` and ``false``; default value: ``true``) + This will remove any interface outputs not needed by the workflow. If the + required outputs from a node changes, rerunning the workflow will rerun the + node. Outputs of leaf nodes (nodes whose outputs are not connected to any + other nodes) will never be deleted independent of this parameter. (possible + values: ``true`` and ``false``; default value: ``true``) *try_hard_link_datasink* - When the DataSink is used to produce an orginized output file outside - of nipypes internal cache structure, a file system hard link will be - attempted first. A hard link allow multiple file paths to point to the - same physical storage location on disk if the conditions allow. By - refering to the same physical file on disk (instead of copying files - byte-by-byte) we can avoid unnecessary data duplication. If hard links - are not supported for the source or destination paths specified, then - a standard byte-by-byte copy is used. (possible values: ``true`` and - ``false``; default value: ``true``) + When the DataSink is used to produce an orginized output file outside + of nipypes internal cache structure, a file system hard link will be + attempted first. A hard link allow multiple file paths to point to the + same physical storage location on disk if the conditions allow. By + refering to the same physical file on disk (instead of copying files + byte-by-byte) we can avoid unnecessary data duplication. If hard links + are not supported for the source or destination paths specified, then + a standard byte-by-byte copy is used. (possible values: ``true`` and + ``false``; default value: ``true``) *use_relative_paths* - Should the paths stored in results (and used to look for inputs) - be relative or absolute. 
Relative paths allow moving the whole - working directory around but may cause problems with - symlinks. (possible values: ``true`` and ``false``; default - value: ``false``) + Should the paths stored in results (and used to look for inputs) + be relative or absolute. Relative paths allow moving the whole + working directory around but may cause problems with + symlinks. (possible values: ``true`` and ``false``; default + value: ``false``) *local_hash_check* Perform the hash check on the job submission machine. This option minimizes @@ -115,10 +122,10 @@ Execution done after a job finish is detected. (float in seconds; default value: 5) *remove_node_directories (EXPERIMENTAL)* - Removes directories whose outputs have already been used - up. Doesn't work with IdentiInterface or any node that patches - data through (without copying) (possible values: ``true`` and - ``false``; default value: ``false``) + Removes directories whose outputs have already been used + up. Doesn't work with IdentiInterface or any node that patches + data through (without copying) (possible values: ``true`` and + ``false``; default value: ``false``) *stop_on_unknown_version* If this is set to True, an underlying interface will raise an error, when no @@ -146,18 +153,46 @@ Execution crashfiles allow portability across machines and shorter load time. (possible values: ``pklz`` and ``txt``; default value: ``pklz``) + +Resource Monitor +~~~~~~~~~~~~~~~~ + +*enabled* + Enables monitoring the resources occupation (possible values: ``true`` and + ``false``; default value: ``false``). All the following options will be + dismissed if the resource monitor is not enabled. + +*sample_frequency* + Sampling period (in seconds) between measurements of resources (memory, cpus) + being used by an interface (default value: ``1``) + +*summary_file* + Indicates where the summary file collecting all profiling information from the + resource monitor should be stored after execution of a workflow. 
+ The ``summary_file`` does not apply to interfaces run independently. + (unset by default, in which case the summary file will be written out to + ``/resource_monitor.json`` of the top-level workflow). + +*summary_append* + Append to an existing summary file (only applies to workflows). + (default value: ``true``, possible values: ``true`` or ``false``). + Example ~~~~~~~ :: - [logging] - workflow_level = DEBUG + [logging] + workflow_level = DEBUG + + [execution] + stop_on_first_crash = true + hash_method = timestamp + display_variable = :1 + + [monitoring] + enabled = false - [execution] - stop_on_first_crash = true - hash_method = timestamp - display_variable = :1 Workflow.config property has a form of a nested dictionary reflecting the structure of the .cfg file. diff --git a/doc/users/install.rst b/doc/users/install.rst index 2e38122c68..e5ca16f3be 100644 --- a/doc/users/install.rst +++ b/doc/users/install.rst @@ -47,7 +47,7 @@ use the following command:: While `all` installs everything, one can also install select components as listed below:: - 'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus'], + 'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'], 'tests': ['pytest-cov', 'codecov'], 'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'], 'profiler': ['psutil'], @@ -117,4 +117,4 @@ Developers should start `here <../devel/testing_nipype.html>`_. Developers can also use this docker container: `docker pull nipype/nipype:master` -.. include:: ../links_names.txt \ No newline at end of file +.. include:: ../links_names.txt diff --git a/doc/users/interface_tutorial.rst b/doc/users/interface_tutorial.rst index 25e6d54120..ced4be7f60 100644 --- a/doc/users/interface_tutorial.rst +++ b/doc/users/interface_tutorial.rst @@ -10,7 +10,7 @@ Specifying input settings The nipype interface modules provide a Python interface to external packages like FSL_ and SPM_. Within the module are a series of Python classes which wrap specific package functionality. 
For example, in -the fsl module, the class :class:`nipype.interfaces.fsl.Bet` wraps the +the fsl module, the class :class:`nipype.interfaces.fsl.BET` wraps the ``bet`` command-line tool. Using the command-line tool, one would specify input settings using flags like ``-o``, ``-m``, ``-f ``, etc... However, in nipype, options are assigned to Python attributes and can diff --git a/doc/users/plugins.rst b/doc/users/plugins.rst index 6c825aa8f8..e655e5f6db 100644 --- a/doc/users/plugins.rst +++ b/doc/users/plugins.rst @@ -74,6 +74,18 @@ Optional arguments:: n_procs : Number of processes to launch in parallel, if not set number of processors/threads will be automatically detected + memory_gb : Total memory available to be shared by all simultaneous tasks + currently running, if not set it will be automatically set to 90\% of + system RAM. + + raise_insufficient : Raise exception when the estimated resources of a node + exceed the total amount of resources available (memory and threads), when + ``False`` (default), only a warning will be issued. + + maxtasksperchild : number of nodes to run on each process before refreshing + the worker (default: 10). + + To distribute processing on a multicore machine, simply call:: workflow.run(plugin='MultiProc') diff --git a/doc/users/resource_sched_profiler.rst b/doc/users/resource_sched_profiler.rst index 37404b27da..7fa0819c19 100644 --- a/doc/users/resource_sched_profiler.rst +++ b/doc/users/resource_sched_profiler.rst @@ -82,7 +82,7 @@ by setting the ``status_callback`` parameter to point to this function in the :: - from nipype.pipeline.plugins.callback_log import log_nodes_cb + from nipype.utils.profiler import log_nodes_cb args_dict = {'n_procs' : 8, 'memory_gb' : 10, 'status_callback' : log_nodes_cb} To set the filepath for the callback log the ``'callback'`` logger must be @@ -141,7 +141,7 @@ The pandas_ Python package is required to use this feature. 
:: - from nipype.pipeline.plugins.callback_log import log_nodes_cb + from nipype.utils.profiler import log_nodes_cb args_dict = {'n_procs' : 8, 'memory_gb' : 10, 'status_callback' : log_nodes_cb} workflow.run(plugin='MultiProc', plugin_args=args_dict) diff --git a/doc/users/saving_workflows.rst b/doc/users/saving_workflows.rst index c97751eead..d6631a8bb8 100644 --- a/doc/users/saving_workflows.rst +++ b/doc/users/saving_workflows.rst @@ -55,7 +55,7 @@ This will create a file "outputtestsave.py" with the following content: from nipype.pipeline.engine import Workflow, Node, MapNode from nipype.interfaces.utility import IdentityInterface from nipype.interfaces.utility import Function - from nipype.utils.misc import getsource + from nipype.utils.functions import getsource from nipype.interfaces.fsl.preprocess import BET from nipype.interfaces.fsl.utils import ImageMaths # Functions @@ -82,20 +82,20 @@ This will create a file "outputtestsave.py" with the following content: bet2.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} bet2.inputs.ignore_exception = False bet2.inputs.output_type = 'NIFTI_GZ' - bet2.inputs.terminal_output = 'stream' + bet2.terminal_output = 'stream' # Node: testsave.bet bet = Node(BET(), name="bet") bet.iterables = ('frac', [0.3, 0.4]) bet.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} bet.inputs.ignore_exception = False bet.inputs.output_type = 'NIFTI_GZ' - bet.inputs.terminal_output = 'stream' + bet.terminal_output = 'stream' # Node: testsave.maths maths = Node(ImageMaths(), name="maths") maths.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'} maths.inputs.ignore_exception = False maths.inputs.output_type = 'NIFTI_GZ' - maths.inputs.terminal_output = 'stream' + maths.terminal_output = 'stream' testsave.connect(bet2, ('mask_file', func), maths, "in_file2") testsave.connect(bet, "mask_file", maths, "in_file") testsave.connect(testfunc, "output", maths, "op_string") diff --git a/doc/users/sphinx_ext.rst b/doc/users/sphinx_ext.rst index 
6326a6041a..9e6732a2ef 100644 --- a/doc/users/sphinx_ext.rst +++ b/doc/users/sphinx_ext.rst @@ -1,4 +1,3 @@ - .. _sphinx_ext: Sphinx extensions @@ -11,4 +10,4 @@ and simplify the generation process. .. automodule:: nipype.sphinxext.plot_workflow :undoc-members: - :noindex: \ No newline at end of file + :noindex: diff --git a/docker/base.Dockerfile b/docker/base.Dockerfile deleted file mode 100644 index 1c4b1c490f..0000000000 --- a/docker/base.Dockerfile +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2016, The developers of the Stanford CRN -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of crn_base nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -# -# Based on https://github.com/poldracklab/fmriprep/blob/9c92a3de9112f8ef1655b876de060a2ad336ffb0/Dockerfile -# -FROM ubuntu:xenial-20161213 -MAINTAINER The nipype developers https://github.com/nipy/nipype - -# Set noninteractive -ENV DEBIAN_FRONTEND=noninteractive - -# Installing requirements for freesurfer installation -RUN apt-get update && \ - apt-get install -y --no-install-recommends curl ca-certificates && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -WORKDIR /opt -# Installing freesurfer -- do it first so that it is cached early -#----------------------------------------------------------------------------- -# 3. 
Install FreeSurfer v6.0 (minimized with reprozip): -# https://github.com/freesurfer/freesurfer/issues/70 -#----------------------------------------------------------------------------- -RUN curl -sSL https://dl.dropbox.com/s/pbaisn6m5qpi9uu/recon-all-freesurfer6-2.min.tgz?dl=0 | tar zx -C /opt -ENV FS_OVERRIDE=0 \ - OS=Linux \ - FSF_OUTPUT_FORMAT=nii.gz \ - FIX_VERTEX_AREA=\ - FREESURFER_HOME=/opt/freesurfer -ENV MNI_DIR=$FREESURFER_HOME/mni \ - SUBJECTS_DIR=$FREESURFER_HOME/subjects -ENV PERL5LIB=$MNI_DIR/share/perl5 \ - MNI_PERL5LIB=$MNI_DIR/share/perl5 \ - MINC_BIN_DIR=$MNI_DIR/bin \ - MINC_LIB_DIR=$MNI_DIR/lib \ - MNI_DATAPATH=$MNI_DIR/data -ENV PATH=$FREESURFER_HOME/bin:$FREESURFER_HOME/tktools:$MINC_BIN_DIR:$PATH -ENV FSL_DIR=/usr/share/fsl/5.0 -RUN echo "cHJpbnRmICJrcnp5c3p0b2YuZ29yZ29sZXdza2lAZ21haWwuY29tXG41MTcyXG4gKkN2dW12RVYzelRmZ1xuRlM1Si8yYzFhZ2c0RVxuIiA+IC9vcHQvZnJlZXN1cmZlci9saWNlbnNlLnR4dAo=" | base64 -d | sh - -# Enable neurodebian -COPY docker/files/neurodebian.gpg /etc/apt/neurodebian.gpg -RUN curl -sSL http://neuro.debian.net/lists/xenial.us-ca.full >> /etc/apt/sources.list.d/neurodebian.sources.list && \ - apt-key add /etc/apt/neurodebian.gpg && \ - apt-key adv --refresh-keys --keyserver hkp://ha.pool.sks-keyservers.net 0xA5D32F012649A5A9 || true - -# Installing general Debian utilities and Neurodebian packages (FSL, AFNI, git) -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - fsl-core=5.0.9-1~nd+1+nd16.04+1 \ - fsl-mni152-templates=5.0.7-2 \ - afni=16.2.07~dfsg.1-2~nd16.04+1 \ - bzip2 \ - ca-certificates \ - xvfb \ - git=1:2.7.4-0ubuntu1 \ - graphviz=2.38.0-12ubuntu2 \ - unzip \ - apt-utils \ - fusefat \ - make \ - file \ - # Added g++ to compile dipy in py3.6 - g++=4:5.3.1-1ubuntu1 \ - ruby=1:2.3.0+1 && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -ENV FSLDIR=/usr/share/fsl/5.0 \ - FSLOUTPUTTYPE=NIFTI_GZ \ - FSLMULTIFILEQUIT=TRUE \ - POSSUMDIR=/usr/share/fsl/5.0 \ - 
LD_LIBRARY_PATH=/usr/lib/fsl/5.0:$LD_LIBRARY_PATH \ - FSLTCLSH=/usr/bin/tclsh \ - FSLWISH=/usr/bin/wish \ - AFNI_MODELPATH=/usr/lib/afni/models \ - AFNI_IMSAVE_WARNINGS=NO \ - AFNI_TTATLAS_DATASET=/usr/share/afni/atlases \ - AFNI_PLUGINPATH=/usr/lib/afni/plugins \ - PATH=/usr/lib/fsl/5.0:/usr/lib/afni/bin:$PATH - -# Installing and setting up ANTs -RUN mkdir -p /opt/ants && \ - curl -sSL "https://dl.dropbox.com/s/2f4sui1z6lcgyek/ANTs-Linux-centos5_x86_64-v2.2.0-0740f91.tar.gz?dl=0" \ - | tar -zx -C /opt - -ENV ANTSPATH=/opt/ants \ - PATH=$ANTSPATH:$PATH - -# Installing and setting up c3d -RUN mkdir -p /opt/c3d && \ - curl -sSL "http://downloads.sourceforge.net/project/c3d/c3d/1.0.0/c3d-1.0.0-Linux-x86_64.tar.gz" \ - | tar -xzC /opt/c3d --strip-components 1 - -ENV C3DPATH=/opt/c3d/ \ - PATH=$C3DPATH/bin:$PATH - -# Install fake-S3 -ENV GEM_HOME /usr/lib/ruby/gems/2.3 -ENV BUNDLE_PATH="$GEM_HOME" \ - BUNDLE_BIN="$GEM_HOME/bin" \ - BUNDLE_SILENCE_ROOT_WARNING=1 \ - BUNDLE_APP_CONFIG="$GEM_HOME" -ENV PATH $BUNDLE_BIN:$PATH -RUN mkdir -p "$GEM_HOME" "$BUNDLE_BIN" && \ - chmod 777 "$GEM_HOME" "$BUNDLE_BIN" - -RUN gem install fakes3 - -# Install Matlab MCR: from the good old install_spm_mcr.sh of @chrisfilo -RUN echo "destinationFolder=/opt/mcr" > mcr_options.txt && \ - echo "agreeToLicense=yes" >> mcr_options.txt && \ - echo "outputFile=/tmp/matlabinstall_log" >> mcr_options.txt && \ - echo "mode=silent" >> mcr_options.txt && \ - mkdir -p matlab_installer && \ - curl -sSL http://www.mathworks.com/supportfiles/downloads/R2015a/deployment_files/R2015a/installers/glnxa64/MCR_R2015a_glnxa64_installer.zip \ - -o matlab_installer/installer.zip && \ - unzip matlab_installer/installer.zip -d matlab_installer/ && \ - matlab_installer/install -inputFile mcr_options.txt && \ - rm -rf matlab_installer mcr_options.txt - -# Install SPM -RUN curl -sSL http://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/dev/spm12_r6472_Linux_R2015a.zip -o spm12.zip && \ - unzip spm12.zip && \ - rm 
-rf spm12.zip - -ENV MATLABCMD="/opt/mcr/v85/toolbox/matlab" \ - SPMMCRCMD="/opt/spm12/run_spm12.sh /opt/mcr/v85/ script" \ - FORCE_SPMMCR=1 - -WORKDIR /work - diff --git a/docker/files/run_examples.sh b/docker/files/run_examples.sh index a23c27e76b..7959bdb597 100644 --- a/docker/files/run_examples.sh +++ b/docker/files/run_examples.sh @@ -12,10 +12,19 @@ mkdir -p ${HOME}/.nipype ${WORKDIR}/logs/example_${example_id} ${WORKDIR}/tests echo "[logging]" > ${HOME}/.nipype/nipype.cfg echo "workflow_level = DEBUG" >> ${HOME}/.nipype/nipype.cfg echo "interface_level = DEBUG" >> ${HOME}/.nipype/nipype.cfg -echo "filemanip_level = DEBUG" >> ${HOME}/.nipype/nipype.cfg +echo "utils_level = DEBUG" >> ${HOME}/.nipype/nipype.cfg echo "log_to_file = true" >> ${HOME}/.nipype/nipype.cfg echo "log_directory = ${WORKDIR}/logs/example_${example_id}" >> ${HOME}/.nipype/nipype.cfg +echo '[execution]' >> ${HOME}/.nipype/nipype.cfg +echo 'crashfile_format = txt' >> ${HOME}/.nipype/nipype.cfg + +if [[ "${NIPYPE_RESOURCE_MONITOR:-0}" == "1" ]]; then + echo '[monitoring]' >> ${HOME}/.nipype/nipype.cfg + echo 'enabled = true' >> ${HOME}/.nipype/nipype.cfg + echo 'sample_frequency = 3' >> ${HOME}/.nipype/nipype.cfg +fi + # Set up coverage export COVERAGE_FILE=${WORKDIR}/tests/.coverage.${example_id} if [ "$2" == "MultiProc" ]; then @@ -25,8 +34,10 @@ fi coverage run /src/nipype/tools/run_examples.py $@ exit_code=$? 
+if [[ "${NIPYPE_RESOURCE_MONITOR:-0}" == "1" ]]; then + cp resource_monitor.json 2>/dev/null ${WORKDIR}/logs/example_${example_id}/ || : +fi # Collect crashfiles and generate xml report coverage xml -o ${WORKDIR}/tests/smoketest_${example_id}.xml -find /work -name "crash-*" -maxdepth 1 -exec mv {} ${WORKDIR}/crashfiles/ \; +find /work -maxdepth 1 -name "crash-*" -exec mv {} ${WORKDIR}/crashfiles/ \; exit $exit_code - diff --git a/docker/files/run_pytests.sh b/docker/files/run_pytests.sh index f76734ad45..76935b42f8 100644 --- a/docker/files/run_pytests.sh +++ b/docker/files/run_pytests.sh @@ -4,7 +4,7 @@ set -x set -u -TESTPATH=${1:-/src/nipype/} +TESTPATH=${1:-/src/nipype/nipype} WORKDIR=${WORK:-/work} PYTHON_VERSION=$( python -c "import sys; print('{}{}'.format(sys.version_info[0], sys.version_info[1]))" ) @@ -17,10 +17,11 @@ echo '[logging]' > ${HOME}/.nipype/nipype.cfg echo 'log_to_file = true' >> ${HOME}/.nipype/nipype.cfg echo "log_directory = ${WORKDIR}/logs/py${PYTHON_VERSION}" >> ${HOME}/.nipype/nipype.cfg -# Enable profile_runtime tests only for python 2.7 -if [[ "${PYTHON_VERSION}" -lt "30" ]]; then - echo '[execution]' >> ${HOME}/.nipype/nipype.cfg - echo 'profile_runtime = true' >> ${HOME}/.nipype/nipype.cfg +echo '[execution]' >> ${HOME}/.nipype/nipype.cfg +echo 'crashfile_format = txt' >> ${HOME}/.nipype/nipype.cfg + +if [[ "${NIPYPE_RESOURCE_MONITOR:-0}" == "1" ]]; then + echo 'resource_monitor = true' >> ${HOME}/.nipype/nipype.cfg fi # Run tests using pytest @@ -28,18 +29,8 @@ export COVERAGE_FILE=${WORKDIR}/tests/.coverage.py${PYTHON_VERSION} py.test -v --junitxml=${WORKDIR}/tests/pytests_py${PYTHON_VERSION}.xml --cov nipype --cov-config /src/nipype/.coveragerc --cov-report xml:${WORKDIR}/tests/coverage_py${PYTHON_VERSION}.xml ${TESTPATH} exit_code=$? 
-# Workaround: run here the profiler tests in python 3 -if [[ "${PYTHON_VERSION}" -ge "30" ]]; then - echo '[execution]' >> ${HOME}/.nipype/nipype.cfg - echo 'profile_runtime = true' >> ${HOME}/.nipype/nipype.cfg - export COVERAGE_FILE=${WORKDIR}/tests/.coverage.py${PYTHON_VERSION}_extra - py.test -v --junitxml=${WORKDIR}/tests/pytests_py${PYTHON_VERSION}_extra.xml --cov nipype --cov-report xml:${WORKDIR}/tests/coverage_py${PYTHON_VERSION}_extra.xml /src/nipype/nipype/interfaces/tests/test_runtime_profiler.py /src/nipype/nipype/pipeline/plugins/tests/test_multiproc*.py - exit_code=$(( $exit_code + $? )) -fi - # Collect crashfiles -find ${WORKDIR} -name "crash-*" -maxdepth 1 -exec mv {} ${WORKDIR}/crashfiles/ \; +find ${WORKDIR} -maxdepth 1 -name "crash-*" -exec mv {} ${WORKDIR}/crashfiles/ \; echo "Unit tests finished with exit code ${exit_code}" exit ${exit_code} - diff --git a/docker/generate_dockerfiles.sh b/docker/generate_dockerfiles.sh new file mode 100755 index 0000000000..5baa9f52e6 --- /dev/null +++ b/docker/generate_dockerfiles.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash +# +# Generate base and main Dockerfiles for Nipype. 
+ +set -e + +USAGE="usage: $(basename $0) [-h] [-b] [-m]" + +function Help { + cat <&2 + exit 1 + ;; + esac +done + + +# neurodocker version 0.3.1-22-gb0ee069 +NEURODOCKER_IMAGE="kaczmarj/neurodocker@sha256:c670ec2e0666a63d4e017a73780f66554283e294f3b12250928ee74b8a48bc59" + +# neurodebian:stretch-non-free pulled on November 3, 2017 +BASE_IMAGE="neurodebian@sha256:7590552afd0e7a481a33314724ae27f76ccedd05ffd7ac06ec38638872427b9b" + +NIPYPE_BASE_IMAGE="nipype/nipype:base" +PKG_MANAGER="apt" +DIR="$(dirname "$0")" + +function generate_base_dockerfile() { + docker run --rm "$NEURODOCKER_IMAGE" generate \ + --base "$BASE_IMAGE" --pkg-manager "$PKG_MANAGER" \ + --label maintainer="The nipype developers https://github.com/nipy/nipype" \ + --spm version=12 matlab_version=R2017a \ + --afni version=latest install_python2=true \ + --freesurfer version=6.0.0 min=true \ + --run 'echo "cHJpbnRmICJrcnp5c3p0b2YuZ29yZ29sZXdza2lAZ21haWwuY29tXG41MTcyXG4gKkN2dW12RVYzelRmZ1xuRlM1Si8yYzFhZ2c0RVxuIiA+IC9vcHQvZnJlZXN1cmZlci9saWNlbnNlLnR4dAo=" | base64 -d | sh' \ + --install ants apt-utils bzip2 convert3d file fsl-core fsl-mni152-templates \ + fusefat g++ git graphviz make ruby unzip xvfb \ + --add-to-entrypoint "source /etc/fsl/fsl.sh" \ + --env ANTSPATH='/usr/lib/ants' PATH='/usr/lib/ants:$PATH' \ + --run "gem install fakes3" \ + --no-check-urls > "$DIR/Dockerfile.base" +} + + +function generate_main_dockerfile() { + docker run --rm "$NEURODOCKER_IMAGE" generate \ + --base "$NIPYPE_BASE_IMAGE" --pkg-manager "$PKG_MANAGER" \ + --label maintainer="The nipype developers https://github.com/nipy/nipype" \ + --env MKL_NUM_THREADS=1 OMP_NUM_THREADS=1 \ + --user neuro \ + --miniconda env_name=neuro \ + activate=true \ + --copy docker/files/run_builddocs.sh docker/files/run_examples.sh \ + docker/files/run_pytests.sh nipype/external/fsl_imglob.py /usr/bin/ \ + --copy . /src/nipype \ + --user root \ + --run 'chown -R neuro /src +&& chmod +x /usr/bin/fsl_imglob.py /usr/bin/run_*.sh +&& . 
/etc/fsl/fsl.sh +&& ln -sf /usr/bin/fsl_imglob.py ${FSLDIR}/bin/imglob +&& mkdir /work +&& chown neuro /work' \ + --user neuro \ + --arg PYTHON_VERSION_MAJOR=3 PYTHON_VERSION_MINOR=6 BUILD_DATE VCS_REF VERSION \ + --miniconda env_name=neuro \ + conda_install='python=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} + icu=58.1 libxml2 libxslt matplotlib mkl numpy + pandas psutil scikit-learn scipy traits=4.6.0' \ + pip_opts="-e" \ + pip_install="/src/nipype[all]" \ + --run-bash "mkdir -p /src/pybids + && curl -sSL --retry 5 https://github.com/INCF/pybids/tarball/master + | tar -xz -C /src/pybids --strip-components 1 + && source activate neuro + && pip install --no-cache-dir -e /src/pybids" \ + --workdir /work \ + --label org.label-schema.build-date='$BUILD_DATE' \ + org.label-schema.name="NIPYPE" \ + org.label-schema.description="NIPYPE - Neuroimaging in Python: Pipelines and Interfaces" \ + org.label-schema.url="http://nipype.readthedocs.io" \ + org.label-schema.vcs-ref='$VCS_REF' \ + org.label-schema.vcs-url="https://github.com/nipy/nipype" \ + org.label-schema.version='$VERSION' \ + org.label-schema.schema-version="1.0" \ + --no-check-urls +} + + +if [ "$GENERATE_BASE" == 1 ]; then + generate_base_dockerfile > "$DIR/Dockerfile.base" +fi +if [ "$GENERATE_MAIN" == 1 ]; then + generate_main_dockerfile > "$DIR/../Dockerfile" +fi diff --git a/docker/prune_dockerfile.sh b/docker/prune_dockerfile.sh new file mode 100644 index 0000000000..e6b05ebbcf --- /dev/null +++ b/docker/prune_dockerfile.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +if [ -z "$1" ]; then + echo "Usage: $(basename $0) " + exit 1 +fi + +# Remove empty lines, comments, and timestamp. 
+sed -e '/\s*#.*$/d' -e '/^\s*$/d' -e '/generation_timestamp/d' "$1" diff --git a/examples/dmri_dtk_dti.py b/examples/dmri_dtk_dti.py index e71d519912..4a5e2676cf 100755 --- a/examples/dmri_dtk_dti.py +++ b/examples/dmri_dtk_dti.py @@ -37,7 +37,6 @@ package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') -package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') diff --git a/examples/dmri_dtk_odf.py b/examples/dmri_dtk_odf.py index ff295b1d9f..b4fb978dd0 100755 --- a/examples/dmri_dtk_odf.py +++ b/examples/dmri_dtk_odf.py @@ -37,7 +37,6 @@ package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') -package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') diff --git a/examples/dmri_fsl_dti.py b/examples/dmri_fsl_dti.py index 1eb3c99bdd..05891a8727 100755 --- a/examples/dmri_fsl_dti.py +++ b/examples/dmri_fsl_dti.py @@ -37,7 +37,6 @@ package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') -package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') diff --git a/examples/fmri_ants_openfmri.py b/examples/fmri_ants_openfmri.py index ba5ce3ce0c..ee6ddee3f9 100755 --- a/examples/fmri_ants_openfmri.py +++ b/examples/fmri_ants_openfmri.py @@ -26,7 +26,7 @@ import nipype.pipeline.engine as pe import nipype.algorithms.modelgen as model import nipype.algorithms.rapidart as ra -from nipype.algorithms.misc import TSNR +from nipype.algorithms.misc import TSNR, CalculateMedian from nipype.interfaces.c3 import C3dAffineTool from nipype.interfaces import fsl, Function, ants, freesurfer as fs import nipype.interfaces.io as nio @@ -55,33 +55,6 @@ 'from scipy.special import legendre' ] -def median(in_files): - """Computes an average of the median of each realigned timeseries - - Parameters - ---------- - - in_files: one or more realigned Nifti 4D time series - - Returns - ------- - - out_file: a 3D 
Nifti file - """ - average = None - for idx, filename in enumerate(filename_to_list(in_files)): - img = nb.load(filename, mmap=NUMPY_MMAP) - data = np.median(img.get_data(), axis=3) - if average is None: - average = data - else: - average = average + data - median_img = nb.Nifti1Image(average / float(idx + 1), img.affine, - img.header) - filename = os.path.join(os.getcwd(), 'median.nii.gz') - median_img.to_filename(filename) - return filename - def create_reg_workflow(name='registration'): """Create a FEAT preprocessing workflow together with freesurfer @@ -245,7 +218,7 @@ def create_reg_workflow(name='registration'): warpmean.inputs.input_image_type = 0 warpmean.inputs.interpolation = 'Linear' warpmean.inputs.invert_transform_flags = [False, False] - warpmean.inputs.terminal_output = 'file' + warpmean.terminal_output = 'file' register.connect(inputnode, 'target_image_brain', warpmean, 'reference_image') register.connect(inputnode, 'mean_image', warpmean, 'input_image') @@ -261,7 +234,7 @@ def create_reg_workflow(name='registration'): warpall.inputs.input_image_type = 0 warpall.inputs.interpolation = 'Linear' warpall.inputs.invert_transform_flags = [False, False] - warpall.inputs.terminal_output = 'file' + warpall.terminal_output = 'file' register.connect(inputnode, 'target_image_brain', warpall, 'reference_image') register.connect(inputnode, 'source_files', warpall, 'input_image') @@ -455,7 +428,7 @@ def create_fs_reg_workflow(name='registration'): warpmean.inputs.input_image_type = 0 warpmean.inputs.interpolation = 'Linear' warpmean.inputs.invert_transform_flags = [False, False] - warpmean.inputs.terminal_output = 'file' + warpmean.terminal_output = 'file' warpmean.inputs.args = '--float' # warpmean.inputs.num_threads = 4 # warpmean.plugin_args = {'sbatch_args': '--mem=4G -c 4'} @@ -470,7 +443,7 @@ def create_fs_reg_workflow(name='registration'): warpall.inputs.input_image_type = 0 warpall.inputs.interpolation = 'Linear' warpall.inputs.invert_transform_flags = 
[False, False] - warpall.inputs.terminal_output = 'file' + warpall.terminal_output = 'file' warpall.inputs.args = '--float' warpall.inputs.num_threads = 2 warpall.plugin_args = {'sbatch_args': '--mem=6G -c 2'} @@ -818,11 +791,7 @@ def check_behav_list(behav, run_id, conds): wf.connect(preproc, "outputspec.realigned_files", tsnr, "in_file") # Compute the median image across runs - calc_median = Node(Function(input_names=['in_files'], - output_names=['median_file'], - function=median, - imports=imports), - name='median') + calc_median = Node(CalculateMedian(), name='median') wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') """ diff --git a/examples/fmri_slicer_coregistration.py b/examples/fmri_slicer_coregistration.py index daf5bbb9e7..e0129651dd 100755 --- a/examples/fmri_slicer_coregistration.py +++ b/examples/fmri_slicer_coregistration.py @@ -37,7 +37,6 @@ package_check('numpy', '1.3', 'tutorial1') package_check('scipy', '0.7', 'tutorial1') -package_check('networkx', '1.0', 'tutorial1') package_check('IPython', '0.10', 'tutorial1') """The nipype tutorial contains data for two subjects. 
Subject data diff --git a/examples/nipype_tutorial.ipynb b/examples/nipype_tutorial.ipynb index f18cc6b187..4a01645fe5 100644 --- a/examples/nipype_tutorial.ipynb +++ b/examples/nipype_tutorial.ipynb @@ -1747,4 +1747,4 @@ "metadata": {} } ] -} \ No newline at end of file +} diff --git a/examples/rsfmri_vol_surface_preprocessing.py b/examples/rsfmri_vol_surface_preprocessing.py index 77c7598f84..8d86f73fd7 100644 --- a/examples/rsfmri_vol_surface_preprocessing.py +++ b/examples/rsfmri_vol_surface_preprocessing.py @@ -67,7 +67,7 @@ # mlab.MatlabCommand.set_default_paths('/software/matlab/spm12') from nipype.algorithms.rapidart import ArtifactDetect -from nipype.algorithms.misc import TSNR +from nipype.algorithms.misc import TSNR, CalculateMedian from nipype.interfaces.utility import Rename, Merge, IdentityInterface from nipype.utils.filemanip import filename_to_list from nipype.interfaces.io import DataSink, FreeSurferSource @@ -547,7 +547,7 @@ def create_reg_workflow(name='registration'): warpmean.inputs.input_image_type = 3 warpmean.inputs.interpolation = 'Linear' warpmean.inputs.invert_transform_flags = [False, False] - warpmean.inputs.terminal_output = 'file' + warpmean.terminal_output = 'file' warpmean.inputs.args = '--float' warpmean.inputs.num_threads = 4 @@ -623,11 +623,7 @@ def create_workflow(files, wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file') # Compute the median image across runs - calc_median = Node(Function(input_names=['in_files'], - output_names=['median_file'], - function=median, - imports=imports), - name='median') + calc_median = Node(CalculateMedian(), name='median') wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') """Segment and Register @@ -771,7 +767,7 @@ def merge_files(in1, in2): warpall.inputs.input_image_type = 3 warpall.inputs.interpolation = 'Linear' warpall.inputs.invert_transform_flags = [False, False] - warpall.inputs.terminal_output = 'file' + warpall.terminal_output = 'file' 
warpall.inputs.reference_image = target_file warpall.inputs.args = '--float' warpall.inputs.num_threads = 1 diff --git a/examples/rsfmri_vol_surface_preprocessing_nipy.py b/examples/rsfmri_vol_surface_preprocessing_nipy.py index a624326537..51a5742284 100644 --- a/examples/rsfmri_vol_surface_preprocessing_nipy.py +++ b/examples/rsfmri_vol_surface_preprocessing_nipy.py @@ -65,7 +65,7 @@ from nipype import Workflow, Node, MapNode from nipype.algorithms.rapidart import ArtifactDetect -from nipype.algorithms.misc import TSNR +from nipype.algorithms.misc import TSNR, CalculateMedian from nipype.algorithms.confounds import ACompCor from nipype.interfaces.utility import Rename, Merge, IdentityInterface from nipype.utils.filemanip import filename_to_list @@ -482,7 +482,7 @@ def create_reg_workflow(name='registration'): warpmean.inputs.input_image_type = 3 warpmean.inputs.interpolation = 'Linear' warpmean.inputs.invert_transform_flags = [False, False] - warpmean.inputs.terminal_output = 'file' + warpmean.terminal_output = 'file' warpmean.inputs.args = '--float' warpmean.inputs.num_threads = 4 warpmean.plugin_args = {'sbatch_args': '-c%d' % 4} @@ -556,11 +556,7 @@ def create_workflow(files, wf.connect(realign, "out_file", tsnr, "in_file") # Compute the median image across runs - calc_median = Node(Function(input_names=['in_files'], - output_names=['median_file'], - function=median, - imports=imports), - name='median') + calc_median = Node(CalculateMedian(), name='median') wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') """Segment and Register @@ -708,7 +704,7 @@ def merge_files(in1, in2): warpall.inputs.input_image_type = 3 warpall.inputs.interpolation = 'Linear' warpall.inputs.invert_transform_flags = [False, False] - warpall.inputs.terminal_output = 'file' + warpall.terminal_output = 'file' warpall.inputs.reference_image = target_file warpall.inputs.args = '--float' warpall.inputs.num_threads = 2 diff --git a/nipype/algorithms/confounds.py 
b/nipype/algorithms/confounds.py index 64d95767db..4bb0ddeac6 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -22,7 +22,7 @@ from numpy.polynomial import Legendre from scipy import linalg -from .. import logging +from .. import config, logging from ..external.due import BibTeX from ..interfaces.base import (traits, TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, isdefined, @@ -30,7 +30,7 @@ from ..utils import NUMPY_MMAP from ..utils.misc import normalize_mc_params -IFLOG = logging.getLogger('interface') +IFLOGGER = logging.getLogger('interface') class ComputeDVARSInputSpec(BaseInterfaceInputSpec): @@ -286,7 +286,7 @@ def _run_interface(self, runtime): tr = self.inputs.series_tr if self.inputs.normalize and tr is None: - IFLOG.warn('FD plot cannot be normalized if TR is not set') + IFLOGGER.warn('FD plot cannot be normalized if TR is not set') self._results['out_figure'] = op.abspath(self.inputs.out_figure) fig = plot_confound(fd_res, self.inputs.figsize, 'FD', units='mm', @@ -323,9 +323,13 @@ class CompCorInputSpec(BaseInterfaceInputSpec): desc=('Position of mask in `mask_files` to use - ' 'first is the default.')) components_file = traits.Str('components_file.txt', usedefault=True, - desc='Filename to store physiological components') + desc='Filename to store physiological components') num_components = traits.Int(6, usedefault=True) # 6 for BOLD, 4 for ASL - use_regress_poly = traits.Bool(True, usedefault=True, + pre_filter = traits.Enum('polynomial', 'cosine', False, usedefault=True, + desc='Detrend time series prior to component ' + 'extraction') + use_regress_poly = traits.Bool(True, + deprecated='0.15.0', new_name='pre_filter', desc=('use polynomial regression ' 'pre-component extraction')) regress_poly_degree = traits.Range(low=1, default=1, usedefault=True, @@ -333,17 +337,49 @@ class CompCorInputSpec(BaseInterfaceInputSpec): header_prefix = traits.Str(desc=('the desired header for the output tsv ' 'file (one 
column). If undefined, will ' 'default to "CompCor"')) + high_pass_cutoff = traits.Float( + 128, usedefault=True, + desc='Cutoff (in seconds) for "cosine" pre-filter') + repetition_time = traits.Float( + desc='Repetition time (TR) of series - derived from image header if ' + 'unspecified') + save_pre_filter = traits.Either( + traits.Bool, File, desc='Save pre-filter basis as text file') + ignore_initial_volumes = traits.Range( + low=0, usedefault=True, + desc='Number of volumes at start of series to ignore') class CompCorOutputSpec(TraitedSpec): components_file = File(exists=True, desc='text file containing the noise components') + pre_filter_file = File(desc='text file containing high-pass filter basis') class CompCor(BaseInterface): """ Interface with core CompCor computation, used in aCompCor and tCompCor + CompCor provides three pre-filter options, all of which include per-voxel + mean removal: + - polynomial: Legendre polynomial basis + - cosine: Discrete cosine basis + - False: mean-removal only + + In the case of ``polynomial`` and ``cosine`` filters, a pre-filter file may + be saved with a row for each volume/timepoint, and a column for each + non-constant regressor. + If no non-constant (mean-removal) columns are used, this file may be empty. + + If ``ignore_initial_volumes`` is set, then the specified number of initial + volumes are excluded both from pre-filtering and CompCor component + extraction. + Each column in the components and pre-filter files are prefixe with zeros + for each excluded volume so that the number of rows continues to match the + number of volumes in the input file. + In addition, for each excluded volume, a column is added to the pre-filter + file with a 1 in the corresponding row. 
+ Example ------- @@ -351,7 +387,7 @@ class CompCor(BaseInterface): >>> ccinterface.inputs.realigned_file = 'functional.nii' >>> ccinterface.inputs.mask_files = 'mask.nii' >>> ccinterface.inputs.num_components = 1 - >>> ccinterface.inputs.use_regress_poly = True + >>> ccinterface.inputs.pre_filter = 'polynomial' >>> ccinterface.inputs.regress_poly_degree = 2 """ @@ -383,17 +419,20 @@ def _run_interface(self, runtime): self.inputs.merge_method, self.inputs.mask_index) + if self.inputs.use_regress_poly: + self.inputs.pre_filter = 'polynomial' + + # Degree 0 == remove mean; see compute_noise_components degree = (self.inputs.regress_poly_degree if - self.inputs.use_regress_poly else 0) + self.inputs.pre_filter == 'polynomial' else 0) - imgseries = nb.load(self.inputs.realigned_file, - mmap=NUMPY_MMAP) + imgseries = nb.load(self.inputs.realigned_file, mmap=NUMPY_MMAP) if len(imgseries.shape) != 4: - raise ValueError('tCompCor expected a 4-D nifti file. Input {} has ' - '{} dimensions (shape {})'.format( - self.inputs.realigned_file, len(imgseries.shape), - imgseries.shape)) + raise ValueError('{} expected a 4-D nifti file. 
Input {} has ' + '{} dimensions (shape {})'.format( + self._header, self.inputs.realigned_file, + len(imgseries.shape), imgseries.shape)) if len(mask_images) == 0: img = nb.Nifti1Image(np.ones(imgseries.shape[:3], dtype=np.bool), @@ -401,15 +440,66 @@ def _run_interface(self, runtime): header=imgseries.header) mask_images = [img] + skip_vols = self.inputs.ignore_initial_volumes + if skip_vols: + imgseries = imgseries.__class__( + imgseries.get_data()[..., skip_vols:], imgseries.affine, + imgseries.header) + mask_images = self._process_masks(mask_images, imgseries.get_data()) - components = compute_noise_components(imgseries.get_data(), - mask_images, degree, - self.inputs.num_components) + TR = 0 + if self.inputs.pre_filter == 'cosine': + if isdefined(self.inputs.repetition_time): + TR = self.inputs.repetition_time + else: + # Derive TR from NIfTI header, if possible + try: + TR = imgseries.header.get_zooms()[3] + if imgseries.get_xyzt_units()[1] == 'msec': + TR /= 1000 + except (AttributeError, IndexError): + TR = 0 + + if TR == 0: + raise ValueError( + '{} cannot detect repetition time from image - ' + 'Set the repetition_time input'.format(self._header)) + + components, filter_basis = compute_noise_components( + imgseries.get_data(), mask_images, self.inputs.num_components, + self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR) + + if skip_vols: + old_comp = components + nrows = skip_vols + components.shape[0] + components = np.zeros((nrows, components.shape[1]), + dtype=components.dtype) + components[skip_vols:] = old_comp components_file = os.path.join(os.getcwd(), self.inputs.components_file) np.savetxt(components_file, components, fmt=b"%.10f", delimiter='\t', header=self._make_headers(components.shape[1]), comments='') + + if self.inputs.pre_filter and self.inputs.save_pre_filter: + pre_filter_file = self._list_outputs()['pre_filter_file'] + ftype = {'polynomial': 'Legendre', + 'cosine': 'Cosine'}[self.inputs.pre_filter] + ncols = 
filter_basis.shape[1] if filter_basis.size > 0 else 0 + header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] + if skip_vols: + old_basis = filter_basis + # nrows defined above + filter_basis = np.zeros((nrows, ncols + skip_vols), + dtype=filter_basis.dtype) + if old_basis.size > 0: + filter_basis[skip_vols:, :ncols] = old_basis + filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) + header.extend(['NonSteadyStateOutlier{:02d}'.format(i) + for i in range(skip_vols)]) + np.savetxt(pre_filter_file, filter_basis, fmt=b'%.10f', + delimiter='\t', header='\t'.join(header), comments='') + return runtime def _process_masks(self, mask_images, timeseries=None): @@ -418,14 +508,19 @@ def _process_masks(self, mask_images, timeseries=None): def _list_outputs(self): outputs = self._outputs().get() outputs['components_file'] = os.path.abspath(self.inputs.components_file) + + save_pre_filter = self.inputs.save_pre_filter + if save_pre_filter: + if isinstance(save_pre_filter, bool): + save_pre_filter = os.path.abspath('pre_filter.tsv') + outputs['pre_filter_file'] = save_pre_filter + return outputs def _make_headers(self, num_col): - headers = [] header = self.inputs.header_prefix if \ isdefined(self.inputs.header_prefix) else self._header - for i in range(num_col): - headers.append(header + '{:02d}'.format(i)) + headers = ['{}{:02d}'.format(header, i) for i in range(num_col)] return '\t'.join(headers) @@ -473,7 +568,7 @@ class TCompCor(CompCor): >>> ccinterface.inputs.realigned_file = 'functional.nii' >>> ccinterface.inputs.mask_files = 'mask.nii' >>> ccinterface.inputs.num_components = 1 - >>> ccinterface.inputs.use_regress_poly = True + >>> ccinterface.inputs.pre_filter = 'polynomial' >>> ccinterface.inputs.regress_poly_degree = 2 >>> ccinterface.inputs.percentile_threshold = .03 @@ -494,7 +589,7 @@ def _process_masks(self, mask_images, timeseries=None): for i, img in enumerate(mask_images): mask = img.get_data().astype(np.bool) imgseries = timeseries[mask, :] - 
imgseries = regress_poly(2, imgseries) + imgseries = regress_poly(2, imgseries)[0] tSTD = _compute_tSTD(imgseries, 0, axis=-1) threshold_std = np.percentile(tSTD, np.round(100. * (1. - self.inputs.percentile_threshold)).astype(int)) @@ -506,8 +601,8 @@ def _process_masks(self, mask_images, timeseries=None): # save mask mask_file = os.path.abspath('mask_{:03d}.nii.gz'.format(i)) out_image.to_filename(mask_file) - IFLOG.debug('tCompcor computed and saved mask of shape {} to ' - 'mask_file {}'.format(mask.shape, mask_file)) + IFLOGGER.debug('tCompcor computed and saved mask of shape %s to ' + 'mask_file %s', str(mask.shape), mask_file) self._mask_files.append(mask_file) out_images.append(out_image) return out_images @@ -569,7 +664,7 @@ def _run_interface(self, runtime): data = data.astype(np.float32) if isdefined(self.inputs.regress_poly): - data = regress_poly(self.inputs.regress_poly, data, remove_mean=False) + data = regress_poly(self.inputs.regress_poly, data, remove_mean=False)[0] img = nb.Nifti1Image(data, img.affine, header) nb.save(img, op.abspath(self.inputs.detrended_file)) @@ -618,7 +713,7 @@ def _run_interface(self, runtime): global_signal = in_nii.get_data()[:,:,:,:50].mean(axis=0).mean(axis=0).mean(axis=0) self._results = { - 'n_volumes_to_discard': _is_outlier(global_signal) + 'n_volumes_to_discard': is_outlier(global_signal) } return runtime @@ -685,9 +780,10 @@ def compute_dvars(in_file, in_mask, remove_zerovariance=False, func_sd = func_sd[func_sd != 0] # Compute (non-robust) estimate of lag-1 autocorrelation - ar1 = np.apply_along_axis(AR_est_YW, 1, - regress_poly(0, mfunc, remove_mean=True).astype( - np.float32), 1)[:, 0] + ar1 = np.apply_along_axis( + AR_est_YW, 1, + regress_poly(0, mfunc, remove_mean=True)[0].astype(np.float32), + 1)[:, 0] # Compute (predicted) standard deviation of temporal difference time series diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd @@ -720,7 +816,7 @@ def plot_confound(tseries, figsize, name, 
units=None, """ import matplotlib - matplotlib.use('Agg') + matplotlib.use(config.get('execution', 'matplotlib_backend')) import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas @@ -794,6 +890,27 @@ def is_outlier(points, thresh=3.5): return timepoints_to_discard +def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1): + datashape = data.shape + timepoints = datashape[axis] + + data = data.reshape((-1, timepoints)) + + frametimes = timestep * np.arange(timepoints) + X = _full_rank(_cosine_drift(period_cut, frametimes))[0] + non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([]) + + betas = np.linalg.lstsq(X, data.T)[0] + + if not remove_mean: + X = X[:, :-1] + betas = betas[:-1] + + residuals = data - X.dot(betas).T + + return residuals.reshape(datashape), non_constant_regressors + + def regress_poly(degree, data, remove_mean=True, axis=-1): """ Returns data with degree polynomial regressed out. 
@@ -802,7 +919,8 @@ def regress_poly(degree, data, remove_mean=True, axis=-1): :param int axis: numpy array axes along which regression is performed """ - IFLOG.debug('Performing polynomial regression on data of shape ' + str(data.shape)) + IFLOGGER.debug('Performing polynomial regression on data of shape %s', + str(data.shape)) datashape = data.shape timepoints = datashape[axis] @@ -817,6 +935,8 @@ def regress_poly(degree, data, remove_mean=True, axis=-1): value_array = np.linspace(-1, 1, timepoints) X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis])) + non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([]) + # Calculate coefficients betas = np.linalg.pinv(X).dot(data.T) @@ -828,7 +948,7 @@ def regress_poly(degree, data, remove_mean=True, axis=-1): regressed_data = data - datahat # Back to original shape - return regressed_data.reshape(datashape) + return regressed_data.reshape(datashape), non_constant_regressors def combine_mask_files(mask_files, mask_method=None, mask_index=None): @@ -886,37 +1006,57 @@ def combine_mask_files(mask_files, mask_method=None, mask_index=None): return [img] -def compute_noise_components(imgseries, mask_images, degree, num_components): +def compute_noise_components(imgseries, mask_images, num_components, + filter_type, degree, period_cut, + repetition_time): """Compute the noise components from the imgseries for each mask imgseries: a nibabel img mask_images: a list of nibabel images - degree: order of polynomial used to remove trends from the timeseries num_components: number of noise components to return + filter_type: type off filter to apply to time series before computing + noise components. 
+ 'polynomial' - Legendre polynomial basis + 'cosine' - Discrete cosine (DCT) basis + False - None (mean-removal only) + + Filter options: + + degree: order of polynomial used to remove trends from the timeseries + period_cut: minimum period (in sec) for DCT high-pass filter + repetition_time: time (in sec) between volume acquisitions returns: components: a numpy array + basis: a numpy array containing the (non-constant) filter regressors """ components = None + basis = np.array([]) for img in mask_images: mask = img.get_data().astype(np.bool) if imgseries.shape[:3] != mask.shape: - raise ValueError('Inputs for CompCor, timeseries and mask, ' - 'do not have matching spatial dimensions ' - '({} and {}, respectively)'.format( - imgseries.shape[:3], mask.shape)) + raise ValueError( + 'Inputs for CompCor, timeseries and mask, do not have ' + 'matching spatial dimensions ({} and {}, respectively)'.format( + imgseries.shape[:3], mask.shape)) voxel_timecourses = imgseries[mask, :] # Zero-out any bad values voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 - # from paper: - # "The constant and linear trends of the columns in the matrix M were - # removed [prior to ...]" - voxel_timecourses = regress_poly(degree, voxel_timecourses) + # Currently support Legendre-polynomial or cosine or detrending + # With no filter, the mean is nonetheless removed (poly w/ degree 0) + if filter_type == 'cosine': + voxel_timecourses, basis = cosine_filter( + voxel_timecourses, repetition_time, period_cut) + elif filter_type in ('polynomial', False): + # from paper: + # "The constant and linear trends of the columns in the matrix M were + # removed [prior to ...]" + voxel_timecourses, basis = regress_poly(degree, voxel_timecourses) # "Voxel time series from the noise ROI (either anatomical or tSTD) were # placed in a matrix M of size Nxm, with time along the row dimension @@ -936,7 +1076,7 @@ def compute_noise_components(imgseries, mask_images, degree, num_components): 
u[:, :num_components])) if components is None and num_components > 0: raise ValueError('No components found') - return components + return components, basis def _compute_tSTD(M, x, axis=0): @@ -945,3 +1085,71 @@ def _compute_tSTD(M, x, axis=0): stdM[stdM == 0] = x stdM[np.isnan(stdM)] = x return stdM + + +# _cosine_drift and _full_rank copied from nipy/modalities/fmri/design_matrix +# +# Nipy release: 0.4.1 +# Modified for smooth integration in CompCor classes + +def _cosine_drift(period_cut, frametimes): + """Create a cosine drift matrix with periods greater or equals to period_cut + + Parameters + ---------- + period_cut: float + Cut period of the low-pass filter (in sec) + frametimes: array of shape(nscans) + The sampling times (in sec) + + Returns + ------- + cdrift: array of shape(n_scans, n_drifts) + cosin drifts plus a constant regressor at cdrift[:,0] + + Ref: http://en.wikipedia.org/wiki/Discrete_cosine_transform DCT-II + """ + len_tim = len(frametimes) + n_times = np.arange(len_tim) + hfcut = 1. / period_cut # input parameter is the period + + # frametimes.max() should be (len_tim-1)*dt + dt = frametimes[1] - frametimes[0] + # hfcut = 1/(2*dt) yields len_time + # If series is too short, return constant regressor + order = max(int(np.floor(2*len_tim*hfcut*dt)), 1) + cdrift = np.zeros((len_tim, order)) + nfct = np.sqrt(2.0/len_tim) + + for k in range(1, order): + cdrift[:, k-1] = nfct * np.cos((np.pi / len_tim) * (n_times + .5) * k) + + cdrift[:, order-1] = 1. # or 1./sqrt(len_tim) to normalize + return cdrift + + +def _full_rank(X, cmax=1e15): + """ + This function possibly adds a scalar matrix to X + to guarantee that the condition number is smaller than a given threshold. 
+ + Parameters + ---------- + X: array of shape(nrows, ncols) + cmax=1.e-15, float tolerance for condition number + + Returns + ------- + X: array of shape(nrows, ncols) after regularization + cmax=1.e-15, float tolerance for condition number + """ + U, s, V = np.linalg.svd(X, 0) + smax, smin = s.max(), s.min() + c = smax / smin + if c < cmax: + return X, c + IFLOGGER.warn('Matrix is singular at working precision, regularizing...') + lda = (smax - cmax * smin) / (cmax - 1) + s = s + lda + X = np.dot(U, np.dot(np.diag(s), V)) + return X, cmax diff --git a/nipype/algorithms/metrics.py b/nipype/algorithms/metrics.py index 2436d2542b..23963de679 100644 --- a/nipype/algorithms/metrics.py +++ b/nipype/algorithms/metrics.py @@ -24,7 +24,7 @@ from scipy.spatial.distance import cdist, euclidean, dice, jaccard from scipy.ndimage.measurements import center_of_mass, label -from .. import logging +from .. import config, logging from ..utils.misc import package_check from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File, @@ -138,6 +138,8 @@ def _eucl_mean(self, nii1, nii2, weighted=False): dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) min_dist_matrix = np.amin(dist_matrix, axis=0) + import matplotlib + matplotlib.use(config.get('execution', 'matplotlib_backend')) import matplotlib.pyplot as plt plt.figure() plt.hist(min_dist_matrix, 50, normed=1, facecolor='green') diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py index 8b0bce02a9..a16507bf36 100644 --- a/nipype/algorithms/misc.py +++ b/nipype/algorithms/misc.py @@ -33,7 +33,7 @@ InputMultiPath, OutputMultiPath, BaseInterfaceInputSpec, isdefined, DynamicTraitedSpec, Undefined) -from ..utils.filemanip import fname_presuffix, split_filename +from ..utils.filemanip import fname_presuffix, split_filename, filename_to_list from ..utils import NUMPY_MMAP from . 
import confounds @@ -362,26 +362,23 @@ def _run_interface(self, runtime): if isinstance(in_dict[key][0], np.ndarray): saved_variables.append(key) else: - iflogger.info('One of the keys in the input file, {k}, is not a Numpy array'.format(k=key)) + iflogger.info('One of the keys in the input file, %s, is ' + 'not a Numpy array', key) if len(saved_variables) > 1: - iflogger.info( - '{N} variables found:'.format(N=len(saved_variables))) + iflogger.info('%i variables found:', len(saved_variables)) iflogger.info(saved_variables) for variable in saved_variables: - iflogger.info( - '...Converting {var} - type {ty} - to\ - CSV'.format(var=variable, ty=type(in_dict[variable])) - ) - matlab2csv( - in_dict[variable], variable, self.inputs.reshape_matrix) + iflogger.info('...Converting %s - type %s - to CSV', + variable, type(in_dict[variable])) + matlab2csv(in_dict[variable], variable, self.inputs.reshape_matrix) elif len(saved_variables) == 1: _, name, _ = split_filename(self.inputs.in_file) variable = saved_variables[0] - iflogger.info('Single variable found {var}, type {ty}:'.format( - var=variable, ty=type(in_dict[variable]))) - iflogger.info('...Converting {var} to CSV from {f}'.format( - var=variable, f=self.inputs.in_file)) + iflogger.info('Single variable found %s, type %s:', variable, + type(in_dict[variable])) + iflogger.info('...Converting %s to CSV from %s', variable, + self.inputs.in_file) matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix) else: iflogger.error('No values in the MATLAB file?!') @@ -396,8 +393,8 @@ def _list_outputs(self): if isinstance(in_dict[key][0], np.ndarray): saved_variables.append(key) else: - iflogger.error('One of the keys in the input file, {k}, is\ - not a Numpy array'.format(k=key)) + iflogger.error('One of the keys in the input file, %s, is ' + 'not a Numpy array', key) if len(saved_variables) > 1: outputs['csv_files'] = replaceext(saved_variables, '.csv') @@ -555,19 +552,16 @@ def _run_interface(self, runtime): 
iflogger.info('Column headings have been provided:') headings = self.inputs.column_headings else: - iflogger.info( - 'Column headings not provided! Pulled from input filenames:') + iflogger.info('Column headings not provided! Pulled from input filenames:') headings = remove_identical_paths(self.inputs.in_files) if isdefined(self.inputs.extra_field): if isdefined(self.inputs.extra_column_heading): extraheading = self.inputs.extra_column_heading - iflogger.info('Extra column heading provided: {col}'.format( - col=extraheading)) + iflogger.info('Extra column heading provided: %s', extraheading) else: extraheading = 'type' - iflogger.info( - 'Extra column heading was not defined. Using "type"') + iflogger.info('Extra column heading was not defined. Using "type"') headings.append(extraheading) extraheadingBool = True @@ -575,8 +569,8 @@ def _run_interface(self, runtime): iflogger.warn('Only one file input!') if isdefined(self.inputs.row_headings): - iflogger.info('Row headings have been provided. Adding "labels"\ - column header.') + iflogger.info('Row headings have been provided. 
Adding "labels"' + 'column header.') prefix = '"{p}","'.format(p=self.inputs.row_heading_title) csv_headings = prefix + '","'.join(itertools.chain( headings)) + '"\n' @@ -1310,7 +1304,7 @@ def merge_rois(in_files, in_idxs, in_ref, # to avoid memory errors if op.splitext(in_ref)[1] == '.gz': try: - iflogger.info('uncompress %i' % in_ref) + iflogger.info('uncompress %i', in_ref) sp.check_call(['gunzip', in_ref], stdout=sp.PIPE, shell=True) in_ref = op.splitext(in_ref)[0] except: @@ -1380,6 +1374,94 @@ def merge_rois(in_files, in_idxs, in_ref, return out_file +class CalculateMedianInputSpec(BaseInterfaceInputSpec): + in_files = InputMultiPath(File(exists=True, mandatory=True, + desc="One or more realigned Nifti 4D timeseries")) + median_file = traits.Str(desc="Filename prefix to store median images") + median_per_file = traits.Bool(False, usedefault=True, + desc="Calculate a median file for each Nifti") + +class CalculateMedianOutputSpec(TraitedSpec): + median_files = OutputMultiPath(File(exists=True), + desc="One or more median images") + +class CalculateMedian(BaseInterface): + """ + Computes an average of the median across one or more 4D Nifti timeseries + + Example + ------- + + >>> from nipype.algorithms.misc import CalculateMedian + >>> mean = CalculateMedian() + >>> mean.inputs.in_files = 'functional.nii' + >>> mean.run() # doctest: +SKIP + + """ + input_spec = CalculateMedianInputSpec + output_spec = CalculateMedianOutputSpec + + def __init__(self, *args, **kwargs): + super(CalculateMedian, self).__init__(*args, **kwargs) + self._median_files = [] + + def _gen_fname(self, suffix, idx=None, ext=None): + if idx: + in_file = self.inputs.in_files[idx] + else: + if isinstance(self.inputs.in_files, list): + in_file = self.inputs.in_files[0] + else: + in_file = self.inputs.in_files + fname, in_ext = op.splitext(op.basename(in_file)) + if in_ext == '.gz': + fname, in_ext2 = op.splitext(fname) + in_ext = in_ext2 + in_ext + if ext is None: + ext = in_ext + if 
ext.startswith('.'): + ext = ext[1:] + if self.inputs.median_file: + outname = self.inputs.median_file + else: + outname = '{}_{}'.format(fname, suffix) + if idx: + outname += str(idx) + return op.abspath('{}.{}'.format(outname, ext)) + + def _run_interface(self, runtime): + total = None + self._median_files = [] + for idx, fname in enumerate(filename_to_list(self.inputs.in_files)): + img = nb.load(fname, mmap=NUMPY_MMAP) + data = np.median(img.get_data(), axis=3) + if self.inputs.median_per_file: + self._median_files.append(self._write_nifti(img, data, idx)) + else: + if total is None: + total = data + else: + total += data + if not self.inputs.median_per_file: + self._median_files.append(self._write_nifti(img, total, idx)) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['median_files'] = self._median_files + return outputs + + def _write_nifti(self, img, data, idx, suffix='median'): + if self.inputs.median_per_file: + median_img = nb.Nifti1Image(data, img.affine, img.header) + filename = self._gen_fname(suffix, idx=idx) + else: + median_img = nb.Nifti1Image(data/(idx+1), img.affine, img.header) + filename = self._gen_fname(suffix) + median_img.to_filename(filename) + return filename + + # Deprecated interfaces ------------------------------------------------------ class Distance(nam.Distance): diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py index 87367f7955..2c994bf20d 100644 --- a/nipype/algorithms/modelgen.py +++ b/nipype/algorithms/modelgen.py @@ -374,9 +374,9 @@ def _generate_standard_design(self, infolist, functional_runs=None, for f in filename_to_list(sessinfo[i]['scans']): shape = load(f, mmap=NUMPY_MMAP).shape if len(shape) == 3 or shape[3] == 1: - iflogger.warning(('You are using 3D instead of 4D ' - 'files. Are you sure this was ' - 'intended?')) + iflogger.warning('You are using 3D instead of 4D ' + 'files. 
Are you sure this was ' + 'intended?') numscans += 1 else: numscans += shape[3] @@ -686,7 +686,7 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): if dt < 1: raise Exception('Time multiple less than 1 ms') - iflogger.info('Setting dt = %d ms\n' % dt) + iflogger.info('Setting dt = %d ms\n', dt) npts = int(np.ceil(total_time / dt)) times = np.arange(0, total_time, dt) * 1e-3 timeline = np.zeros((npts)) @@ -705,9 +705,9 @@ def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: response = np.convolve(boxcar, hrf) reg_scale = 1.0 / response.max() - iflogger.info('response sum: %.4f max: %.4f' % (response.sum(), - response.max())) - iflogger.info('reg_scale: %.4f' % reg_scale) + iflogger.info('response sum: %.4f max: %.4f', response.sum(), + response.max()) + iflogger.info('reg_scale: %.4f', reg_scale) for i, t in enumerate(onsets): idx = int(np.round(t / dt)) diff --git a/nipype/algorithms/rapidart.py b/nipype/algorithms/rapidart.py index b0511c0fc6..0ab74b7404 100644 --- a/nipype/algorithms/rapidart.py +++ b/nipype/algorithms/rapidart.py @@ -18,7 +18,8 @@ >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ -from __future__ import print_function, division, unicode_literals, absolute_import +from __future__ import (print_function, division, + unicode_literals, absolute_import) from builtins import open, range, str, bytes import os @@ -99,6 +100,29 @@ def _calc_norm(mc, use_differences, source, brain_pts=None): """ + affines = [_get_affine_matrix(mc[i, :], source) + for i in range(mc.shape[0])] + return _calc_norm_affine(affines, use_differences, brain_pts) + + +def _calc_norm_affine(affines, use_differences, brain_pts=None): + """Calculates the maximum overall displacement of the midpoints + of the faces of a cube due to translation and rotation. 
+ + Parameters + ---------- + affines : list of [4 x 4] affine matrices + use_differences : boolean + brain_pts : [4 x n_points] of coordinates + + Returns + ------- + + norm : at each time point + displacement : euclidean distance (mm) of displacement at each coordinate + + """ + if brain_pts is None: respos = np.diag([70, 70, 75]) resneg = np.diag([-70, -110, -45]) @@ -107,90 +131,91 @@ def _calc_norm(mc, use_differences, source, brain_pts=None): else: all_pts = brain_pts n_pts = all_pts.size - all_pts.shape[1] - newpos = np.zeros((mc.shape[0], n_pts)) + newpos = np.zeros((len(affines), n_pts)) if brain_pts is not None: - displacement = np.zeros((mc.shape[0], int(n_pts / 3))) - for i in range(mc.shape[0]): - affine = _get_affine_matrix(mc[i, :], source) - newpos[i, :] = np.dot(affine, - all_pts)[0:3, :].ravel() + displacement = np.zeros((len(affines), int(n_pts / 3))) + for i, affine in enumerate(affines): + newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel() if brain_pts is not None: - displacement[i, :] = \ - np.sqrt(np.sum(np.power(np.reshape(newpos[i, :], - (3, all_pts.shape[1])) - - all_pts[0:3, :], - 2), - axis=0)) + displacement[i, :] = np.sqrt(np.sum( + np.power(np.reshape(newpos[i, :], + (3, all_pts.shape[1])) - all_pts[0:3, :], + 2), + axis=0)) # np.savez('displacement.npz', newpos=newpos, pts=all_pts) - normdata = np.zeros(mc.shape[0]) + normdata = np.zeros(len(affines)) if use_differences: newpos = np.concatenate((np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0) for i in range(newpos.shape[0]): normdata[i] = \ - np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2), - (3, all_pts.shape[1])), axis=0))) + np.max(np.sqrt(np.sum( + np.reshape(np.power(np.abs(newpos[i, :]), 2), + (3, all_pts.shape[1])), + axis=0))) else: newpos = np.abs(signal.detrend(newpos, axis=0, type='constant')) normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1)) return normdata, displacement -def _nanmean(a, axis=None): - """Return the mean excluding 
items that are nan - - >>> a = [1, 2, np.nan] - >>> _nanmean(a) - 1.5 - - """ - if axis: - return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis) - else: - return np.nansum(a) / np.sum(1 - np.isnan(a)) - - class ArtifactDetectInputSpec(BaseInterfaceInputSpec): realigned_files = InputMultiPath(File(exists=True), - desc="Names of realigned functional data files", + desc=("Names of realigned functional data " + "files"), mandatory=True) - realignment_parameters = InputMultiPath(File(exists=True), mandatory=True, - desc=("Names of realignment parameters" - "corresponding to the functional data files")) + realignment_parameters = InputMultiPath(File(exists=True), + mandatory=True, + desc=("Names of realignment " + "parameters corresponding to " + "the functional data files")) parameter_source = traits.Enum("SPM", "FSL", "AFNI", "NiPy", "FSFAST", desc="Source of movement parameters", mandatory=True) - use_differences = traits.ListBool([True, False], minlen=2, maxlen=2, + use_differences = traits.ListBool([True, False], + minlen=2, + maxlen=2, usedefault=True, - desc=("Use differences between successive motion (first element)" - "and intensity paramter (second element) estimates in order" - "to determine outliers. (default is [True, False])")) - use_norm = traits.Bool(True, requires=['norm_threshold'], + desc=("Use differences between successive" + " motion (first element) and " + "intensity parameter (second " + "element) estimates in order to " + "determine outliers. 
" + "(default is [True, False])")) + use_norm = traits.Bool(True, + usedefault=True, + requires=['norm_threshold'], desc=("Uses a composite of the motion parameters in " - "order to determine outliers."), - usedefault=True) - norm_threshold = traits.Float(desc=("Threshold to use to detect motion-rela" + "order to determine outliers.")) + norm_threshold = traits.Float(xor=['rotation_threshold', + 'translation_threshold'], + mandatory=True, + desc=("Threshold to use to detect motion-rela" "ted outliers when composite motion is " - "being used"), mandatory=True, - xor=['rotation_threshold', - 'translation_threshold']) - rotation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'], - desc=("Threshold (in radians) to use to detect rotation-related " - "outliers")) - translation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'], - desc=("Threshold (in mm) to use to detect translation-related " + "being used")) + rotation_threshold = traits.Float(mandatory=True, + xor=['norm_threshold'], + desc=("Threshold (in radians) to use to " + "detect rotation-related outliers")) + translation_threshold = traits.Float(mandatory=True, + xor=['norm_threshold'], + desc=("Threshold (in mm) to use to " + "detect translation-related " "outliers")) zintensity_threshold = traits.Float(mandatory=True, - desc=("Intensity Z-threshold use to detection images that deviate " + desc=("Intensity Z-threshold use to " + "detection images that deviate " "from the mean")) mask_type = traits.Enum('spm_global', 'file', 'thresh', - desc=("Type of mask that should be used to mask the functional " - "data. *spm_global* uses an spm_global like calculation to " - "determine the brain mask. *file* specifies a brain mask " - "file (should be an image file consisting of 0s and 1s). " - "*thresh* specifies a threshold to use. 
By default all voxels" - "are used, unless one of these mask types are defined."), - mandatory=True) + mandatory=True, + desc=("Type of mask that should be used to mask the" + " functional data. *spm_global* uses an " + "spm_global like calculation to determine the" + " brain mask. *file* specifies a brain mask " + "file (should be an image file consisting of " + "0s and 1s). *thresh* specifies a threshold " + "to use. By default all voxels are used," + "unless one of these mask types are defined")) mask_file = File(exists=True, desc="Mask file to be used if mask_type is 'file'.") mask_threshold = traits.Float(desc=("Mask threshold to be used if mask_type" @@ -216,28 +241,36 @@ class ArtifactDetectInputSpec(BaseInterfaceInputSpec): class ArtifactDetectOutputSpec(TraitedSpec): outlier_files = OutputMultiPath(File(exists=True), - desc=("One file for each functional run containing a list of " - "0-based indices corresponding to outlier volumes")) + desc=("One file for each functional run " + "containing a list of 0-based indices" + " corresponding to outlier volumes")) intensity_files = OutputMultiPath(File(exists=True), - desc=("One file for each functional run containing the global " - "intensity values determined from the brainmask")) + desc=("One file for each functional run " + "containing the global intensity " + "values determined from the " + "brainmask")) norm_files = OutputMultiPath(File, - desc=("One file for each functional run containing the composite " - "norm")) + desc=("One file for each functional run " + "containing the composite norm")) statistic_files = OutputMultiPath(File(exists=True), - desc=("One file for each functional run containing information " - "about the different types of artifacts and if design info is" - " provided then details of stimulus correlated motion and a " - "listing or artifacts by event type.")) + desc=("One file for each functional run " + "containing information about the " + "different types of artifacts and " + "if 
design info is provided then " + "details of stimulus correlated " + "motion and a listing or artifacts " + "by event type.")) plot_files = OutputMultiPath(File, - desc=("One image file for each functional run containing the " - "detected outliers")) + desc=("One image file for each functional run " + "containing the detected outliers")) mask_files = OutputMultiPath(File, - desc=("One image file for each functional run containing the mask" - "used for global signal calculation")) + desc=("One image file for each functional run " + "containing the mask used for global " + "signal calculation")) displacement_files = OutputMultiPath(File, - desc=("One image file for each functional run containing the voxel" - "displacement timeseries")) + desc=("One image file for each " + "functional run containing the " + "voxel displacement timeseries")) class ArtifactDetect(BaseInterface): @@ -245,7 +278,9 @@ class ArtifactDetect(BaseInterface): Uses intensity and motion parameters to infer outliers. If `use_norm` is True, it computes the movement of the center of each face a cuboid centered - around the head and returns the maximal movement across the centers. + around the head and returns the maximal movement across the centers. 
If you + wish to use individual thresholds instead, import `Undefined` from + `nipype.interfaces.base` and set `....inputs.use_norm = Undefined` Examples @@ -376,11 +411,11 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): vol = data[:, :, :, t0] # Use an SPM like approach mask_tmp = vol > \ - (_nanmean(vol) / self.inputs.global_threshold) + (np.nanmean(vol) / self.inputs.global_threshold) mask = mask * mask_tmp for t0 in range(timepoints): vol = data[:, :, :, t0] - g[t0] = _nanmean(vol[mask]) + g[t0] = np.nanmean(vol[mask]) if len(find_indices(mask)) < (np.prod((x, y, z)) / 10): intersect_mask = False g = np.zeros((timepoints, 1)) @@ -390,7 +425,7 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): for t0 in range(timepoints): vol = data[:, :, :, t0] mask_tmp = vol > \ - (_nanmean(vol) / self.inputs.global_threshold) + (np.nanmean(vol) / self.inputs.global_threshold) mask[:, :, :, t0] = mask_tmp g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp) elif masktype == 'file': # uses a mask image to determine intensity @@ -400,15 +435,15 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): mask = mask > 0.5 for t0 in range(timepoints): vol = data[:, :, :, t0] - g[t0] = _nanmean(vol[mask]) + g[t0] = np.nanmean(vol[mask]) elif masktype == 'thresh': # uses a fixed signal threshold for t0 in range(timepoints): vol = data[:, :, :, t0] mask = vol > self.inputs.mask_threshold - g[t0] = _nanmean(vol[mask]) + g[t0] = np.nanmean(vol[mask]) else: mask = np.ones((x, y, z)) - g = _nanmean(data[mask > 0, :], 1) + g = np.nanmean(data[mask > 0, :], 1) # compute normalized intensity values gz = signal.detrend(g, axis=0) # detrend the signal @@ -508,17 +543,19 @@ def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None): motion_outliers)), 'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)), }, - {'motion': [{'using differences': self.inputs.use_differences[0]}, - {'mean': np.mean(mc_in, 
axis=0).tolist(), - 'min': np.min(mc_in, axis=0).tolist(), - 'max': np.max(mc_in, axis=0).tolist(), - 'std': np.std(mc_in, axis=0).tolist()}, + {'motion': [ + {'using differences': self.inputs.use_differences[0]}, + {'mean': np.mean(mc_in, axis=0).tolist(), + 'min': np.min(mc_in, axis=0).tolist(), + 'max': np.max(mc_in, axis=0).tolist(), + 'std': np.std(mc_in, axis=0).tolist()}, ]}, - {'intensity': [{'using differences': self.inputs.use_differences[1]}, - {'mean': np.mean(gz, axis=0).tolist(), - 'min': np.min(gz, axis=0).tolist(), - 'max': np.max(gz, axis=0).tolist(), - 'std': np.std(gz, axis=0).tolist()}, + {'intensity': [ + {'using differences': self.inputs.use_differences[1]}, + {'mean': np.mean(gz, axis=0).tolist(), + 'min': np.min(gz, axis=0).tolist(), + 'max': np.max(gz, axis=0).tolist(), + 'std': np.std(gz, axis=0).tolist()}, ]}, ] if self.inputs.use_norm: @@ -542,20 +579,27 @@ def _run_interface(self, runtime): class StimCorrInputSpec(BaseInterfaceInputSpec): - realignment_parameters = InputMultiPath(File(exists=True), mandatory=True, - desc=('Names of realignment parameters corresponding to the functional ' - 'data files')) - intensity_values = InputMultiPath(File(exists=True), mandatory=True, - desc='Name of file containing intensity values') - spm_mat_file = File(exists=True, mandatory=True, - desc='SPM mat file (use pre-estimate SPM.mat file)') + realignment_parameters = InputMultiPath(File(exists=True), + mandatory=True, + desc=("Names of realignment " + "parameters corresponding to " + "the functional data files")) + intensity_values = InputMultiPath(File(exists=True), + mandatory=True, + desc=("Name of file containing intensity " + "values")) + spm_mat_file = File(exists=True, + mandatory=True, + desc="SPM mat file (use pre-estimate SPM.mat file)") concatenated_design = traits.Bool(mandatory=True, - desc='state if the design matrix contains concatenated sessions') + desc=("state if the design matrix " + "contains concatenated sessions")) class 
StimCorrOutputSpec(TraitedSpec): stimcorr_files = OutputMultiPath(File(exists=True), - desc='List of files containing correlation values') + desc=("List of files containing " + "correlation values")) class StimulusCorrelation(BaseInterface): @@ -565,8 +609,9 @@ class StimulusCorrelation(BaseInterface): Currently this class supports an SPM generated design matrix and requires intensity parameters. This implies that one must run :ref:`ArtifactDetect ` - and :ref:`Level1Design ` prior to running this or - provide an SPM.mat file and intensity parameters through some other means. + and :ref:`Level1Design ` prior to + running this or provide an SPM.mat file and intensity parameters through + some other means. Examples -------- @@ -642,7 +687,8 @@ def _get_spm_submatrix(self, spmmat, sessidx, rows=None): U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0] if rows is None: rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1 - cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][list(range(len(U)))] - 1 + cols = ( + spmmat['SPM'][0][0].Sess[0][sessidx].col[0][list(range(len(U)))]-1) outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(), axis=1) return outmatrix diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py index 7867b259ed..a266d57ddb 100644 --- a/nipype/algorithms/tests/test_auto_ACompCor.py +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -7,9 +7,14 @@ def test_ACompCor_inputs(): input_map = dict(components_file=dict(usedefault=True, ), header_prefix=dict(), - ignore_exception=dict(nohash=True, + high_pass_cutoff=dict(usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), + ignore_initial_volumes=dict(usedefault=True, + ), mask_files=dict(), mask_index=dict(requires=['mask_files'], xor=['merge_method'], @@ -19,11 +24,16 @@ def test_ACompCor_inputs(): ), num_components=dict(usedefault=True, ), + pre_filter=dict(usedefault=True, + ), 
realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), - use_regress_poly=dict(usedefault=True, + repetition_time=dict(), + save_pre_filter=dict(), + use_regress_poly=dict(deprecated='0.15.0', + new_name='pre_filter', ), ) inputs = ACompCor.input_spec() @@ -35,6 +45,7 @@ def test_ACompCor_inputs(): def test_ACompCor_outputs(): output_map = dict(components_file=dict(), + pre_filter_file=dict(), ) outputs = ACompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py index f38319040b..31cc6f7a09 100644 --- a/nipype/algorithms/tests/test_auto_AddCSVRow.py +++ b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -6,7 +6,8 @@ def test_AddCSVRow_inputs(): input_map = dict(_outputs=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py index 054bc1da99..196d297847 100644 --- a/nipype/algorithms/tests/test_auto_ArtifactDetect.py +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -8,7 +8,8 @@ def test_ArtifactDetect_inputs(): ), global_threshold=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), intersect_mask=dict(), diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py new file mode 100644 index 0000000000..77784f1bc9 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import CalculateMedian + + +def test_CalculateMedian_inputs(): + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + 
in_files=dict(), + median_file=dict(), + median_per_file=dict(usedefault=True, + ), + ) + inputs = CalculateMedian.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_CalculateMedian_outputs(): + output_map = dict(median_files=dict(), + ) + outputs = CalculateMedian.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index 7c59f851d1..81aa16dde4 100644 --- a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -10,7 +10,8 @@ def test_ComputeDVARS_inputs(): ), figsize=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py index 61f64de033..e6cda0e7d7 100644 --- a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -4,7 +4,8 @@ def test_ComputeMeshWarp_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), metric=dict(usedefault=True, diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py index 3e365b8894..0d74e283fe 100644 --- a/nipype/algorithms/tests/test_auto_CreateNifti.py +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -9,7 +9,8 @@ def test_CreateNifti_inputs(): ), header_file=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + 
nohash=True, usedefault=True, ), ) diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py index 5cf8c425c8..713221c14a 100644 --- a/nipype/algorithms/tests/test_auto_Distance.py +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -4,7 +4,8 @@ def test_Distance_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask_volume=dict(), diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index e230992eec..cb56b470a4 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -8,7 +8,8 @@ def test_FramewiseDisplacement_inputs(): ), figsize=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py index 764d821bc6..726b3bec5b 100644 --- a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -4,7 +4,8 @@ def test_FuzzyOverlap_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_ref=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py index 6b06654f1d..43f74364c5 100644 --- a/nipype/algorithms/tests/test_auto_Gunzip.py +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -4,7 +4,8 @@ def test_Gunzip_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git 
a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py index da3110fd76..ec2a37baa6 100644 --- a/nipype/algorithms/tests/test_auto_ICC.py +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -4,7 +4,8 @@ def test_ICC_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py index 3c6077922b..c19a4a7506 100644 --- a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -5,7 +5,8 @@ def test_MeshWarpMaths_inputs(): input_map = dict(float_trait=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_surf=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py index c7b4b25d0c..a9c7fe1b49 100644 --- a/nipype/algorithms/tests/test_auto_ModifyAffine.py +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -4,7 +4,8 @@ def test_ModifyAffine_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), transformation_matrix=dict(usedefault=True, diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index 7b12363ee8..6d3fe0c879 100644 --- a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -4,7 +4,8 @@ def test_NonSteadyStateDetector_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git 
a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py index 59c749da30..a1ddcd56c0 100644 --- a/nipype/algorithms/tests/test_auto_P2PDistance.py +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -4,7 +4,8 @@ def test_P2PDistance_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), metric=dict(usedefault=True, diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py index 990b71e289..11b84f8e8a 100644 --- a/nipype/algorithms/tests/test_auto_PickAtlas.py +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -10,7 +10,8 @@ def test_PickAtlas_inputs(): ), hemi=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), labels=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py index 4dce363864..6b90321975 100644 --- a/nipype/algorithms/tests/test_auto_Similarity.py +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -4,7 +4,8 @@ def test_Similarity_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask1=dict(), diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py index 0031f4bb7f..8d4993425e 100644 --- a/nipype/algorithms/tests/test_auto_SimpleThreshold.py +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ -4,7 +4,8 @@ def test_SimpleThreshold_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), threshold=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py 
b/nipype/algorithms/tests/test_auto_SpecifyModel.py index 33d5435b5f..11af243c11 100644 --- a/nipype/algorithms/tests/test_auto_SpecifyModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -12,7 +12,8 @@ def test_SpecifyModel_inputs(): ), high_pass_filter_cutoff=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_units=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py index 7a33ac63c4..bea8ee473e 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -14,7 +14,8 @@ def test_SpecifySPMModel_inputs(): ), high_pass_filter_cutoff=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_units=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py index 4caf1c1033..c8e07a292b 100644 --- a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -12,7 +12,8 @@ def test_SpecifySparseModel_inputs(): ), high_pass_filter_cutoff=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_units=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py index 95581bb111..169dcf6d80 100644 --- a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -6,7 +6,8 @@ def test_StimulusCorrelation_inputs(): input_map = dict(concatenated_design=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), intensity_values=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py index 47bb550da3..8f9585f26b 100644 --- a/nipype/algorithms/tests/test_auto_TCompCor.py +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -7,9 +7,14 @@ def test_TCompCor_inputs(): input_map = dict(components_file=dict(usedefault=True, ), header_prefix=dict(), - ignore_exception=dict(nohash=True, + high_pass_cutoff=dict(usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), + ignore_initial_volumes=dict(usedefault=True, + ), mask_files=dict(), mask_index=dict(requires=['mask_files'], xor=['merge_method'], @@ -21,11 +26,16 @@ def test_TCompCor_inputs(): ), percentile_threshold=dict(usedefault=True, ), + pre_filter=dict(usedefault=True, + ), realigned_file=dict(mandatory=True, ), regress_poly_degree=dict(usedefault=True, ), - use_regress_poly=dict(usedefault=True, + repetition_time=dict(), + save_pre_filter=dict(), + use_regress_poly=dict(deprecated='0.15.0', + new_name='pre_filter', ), ) inputs = TCompCor.input_spec() @@ -38,6 +48,7 @@ def test_TCompCor_inputs(): def test_TCompCor_outputs(): output_map = dict(components_file=dict(), high_variance_masks=dict(), + pre_filter_file=dict(), ) outputs = TCompCor.output_spec() diff --git a/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py b/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py index d6e38722fe..14e20fd36a 100644 --- a/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py +++ b/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py @@ -4,7 +4,8 @@ def test_TVTKBaseInterface_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ) diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py 
b/nipype/algorithms/tests/test_auto_WarpPoints.py index ab59d22cff..d866214fec 100644 --- a/nipype/algorithms/tests/test_auto_WarpPoints.py +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -4,7 +4,8 @@ def test_WarpPoints_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interp=dict(mandatory=True, diff --git a/nipype/algorithms/tests/test_compcor.py b/nipype/algorithms/tests/test_compcor.py index adb495f90f..9407e6ef0d 100644 --- a/nipype/algorithms/tests/test_compcor.py +++ b/nipype/algorithms/tests/test_compcor.py @@ -21,8 +21,7 @@ class TestCompCor(): @pytest.fixture(autouse=True) def setup_class(self, tmpdir): # setup - self.temp_dir = str(tmpdir) - os.chdir(self.temp_dir) + tmpdir.chdir() noise = np.fromfunction(self.fake_noise_fun, self.fake_data.shape) self.realigned_file = utils.save_toy_nii(self.fake_data + noise, self.filenames['functionalnii']) @@ -80,7 +79,7 @@ def test_compcor_no_regress_poly(self): self.run_cc(CompCor(realigned_file=self.realigned_file, mask_files=self.mask_files, mask_index=0, - use_regress_poly=False), + pre_filter=False), [['0.4451946442', '-0.7683311482'], ['-0.4285129505', '-0.0926034137'], ['0.5721540256', '0.5608764842'], diff --git a/nipype/algorithms/tests/test_confounds.py b/nipype/algorithms/tests/test_confounds.py index 7d6eff1283..8c2626457e 100644 --- a/nipype/algorithms/tests/test_confounds.py +++ b/nipype/algorithms/tests/test_confounds.py @@ -20,7 +20,7 @@ def test_fd(tmpdir): - tempdir = str(tmpdir) + tempdir = tmpdir.strpath ground_truth = np.loadtxt(example_data('fsl_motion_outliers_fd.txt')) fdisplacement = FramewiseDisplacement(in_file=example_data('fsl_mcflirt_movpar.txt'), out_file=tempdir + '/fd.txt', @@ -43,7 +43,7 @@ def test_dvars(tmpdir): in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'), save_all=True, intensity_normalization=0) - os.chdir(str(tmpdir)) + tmpdir.chdir() res = 
dvars.run() dv1 = np.loadtxt(res.outputs.out_all, skiprows=1) @@ -66,7 +66,8 @@ def test_dvars(tmpdir): assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05 -def test_outliers(tmpdir): + +def test_outliers(): np.random.seed(0) in_data = np.random.randn(100) in_data[0] += 10 diff --git a/nipype/algorithms/tests/test_errormap.py b/nipype/algorithms/tests/test_errormap.py index a700725e41..4b40d14907 100644 --- a/nipype/algorithms/tests/test_errormap.py +++ b/nipype/algorithms/tests/test_errormap.py @@ -11,7 +11,6 @@ def test_errormap(tmpdir): - tempdir = str(tmpdir) # Single-Spectual # Make two fake 2*2*2 voxel volumes volume1 = np.array([[[2.0, 8.0], [1.0, 2.0]], [[1.0, 9.0], [0.0, 3.0]]]) # John von Neumann's birthday @@ -22,15 +21,15 @@ def test_errormap(tmpdir): img2 = nb.Nifti1Image(volume2, np.eye(4)) maskimg = nb.Nifti1Image(mask, np.eye(4)) - nb.save(img1, os.path.join(tempdir, 'von.nii.gz')) - nb.save(img2, os.path.join(tempdir, 'alan.nii.gz')) - nb.save(maskimg, os.path.join(tempdir, 'mask.nii.gz')) + nb.save(img1, tmpdir.join('von.nii.gz').strpath) + nb.save(img2, tmpdir.join('alan.nii.gz').strpath) + nb.save(maskimg, tmpdir.join('mask.nii.gz').strpath) # Default metric errmap = ErrorMap() - errmap.inputs.in_tst = os.path.join(tempdir, 'von.nii.gz') - errmap.inputs.in_ref = os.path.join(tempdir, 'alan.nii.gz') - errmap.out_map = os.path.join(tempdir, 'out_map.nii.gz') + errmap.inputs.in_tst = tmpdir.join('von.nii.gz').strpath + errmap.inputs.in_ref = tmpdir.join('alan.nii.gz').strpath + errmap.out_map = tmpdir.join('out_map.nii.gz').strpath result = errmap.run() assert result.outputs.distance == 1.125 @@ -45,7 +44,7 @@ def test_errormap(tmpdir): assert result.outputs.distance == 0.875 # Masked - errmap.inputs.mask = os.path.join(tempdir, 'mask.nii.gz') + errmap.inputs.mask = tmpdir.join('mask.nii.gz').strpath result = errmap.run() assert result.outputs.distance == 1.0 @@ -62,11 +61,11 @@ def test_errormap(tmpdir): msvolume2[:, :, :, 1] = 
volume1 msimg2 = nb.Nifti1Image(msvolume2, np.eye(4)) - nb.save(msimg1, os.path.join(tempdir, 'von-ray.nii.gz')) - nb.save(msimg2, os.path.join(tempdir, 'alan-ray.nii.gz')) + nb.save(msimg1, tmpdir.join('von-ray.nii.gz').strpath) + nb.save(msimg2, tmpdir.join('alan-ray.nii.gz').strpath) - errmap.inputs.in_tst = os.path.join(tempdir, 'von-ray.nii.gz') - errmap.inputs.in_ref = os.path.join(tempdir, 'alan-ray.nii.gz') + errmap.inputs.in_tst = tmpdir.join('von-ray.nii.gz').strpath + errmap.inputs.in_ref = tmpdir.join('alan-ray.nii.gz').strpath errmap.inputs.metric = 'sqeuclidean' result = errmap.run() assert result.outputs.distance == 5.5 diff --git a/nipype/algorithms/tests/test_mesh_ops.py b/nipype/algorithms/tests/test_mesh_ops.py index fa7ebebe54..d5fbc56825 100644 --- a/nipype/algorithms/tests/test_mesh_ops.py +++ b/nipype/algorithms/tests/test_mesh_ops.py @@ -15,14 +15,13 @@ @pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") def test_ident_distances(tmpdir): - tempdir = str(tmpdir) - os.chdir(tempdir) + tmpdir.chdir() in_surf = example_data('surf01.vtk') dist_ident = m.ComputeMeshWarp() dist_ident.inputs.surface1 = in_surf dist_ident.inputs.surface2 = in_surf - dist_ident.inputs.out_file = os.path.join(tempdir, 'distance.npy') + dist_ident.inputs.out_file = tmpdir.join('distance.npy') res = dist_ident.run() assert res.outputs.distance == 0.0 @@ -33,11 +32,11 @@ def test_ident_distances(tmpdir): @pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") def test_trans_distances(tmpdir): - tempdir = str(tmpdir) + tempdir = tmpdir.strpath from ...interfaces.vtkbase import tvtk in_surf = example_data('surf01.vtk') - warped_surf = os.path.join(tempdir, 'warped.vtk') + warped_surf = tmpdir.join('warped.vtk') inc = np.array([0.7, 0.3, -0.2]) @@ -53,7 +52,7 @@ def test_trans_distances(tmpdir): dist = m.ComputeMeshWarp() dist.inputs.surface1 = in_surf dist.inputs.surface2 = warped_surf - dist.inputs.out_file = os.path.join(tempdir, 
'distance.npy') + dist.inputs.out_file = tmpdir.join('distance.npy') res = dist.run() assert np.allclose(res.outputs.distance, np.linalg.norm(inc), 4) dist.inputs.weighting = 'area' @@ -63,14 +62,14 @@ def test_trans_distances(tmpdir): @pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") def test_warppoints(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() # TODO: include regression tests for when tvtk is installed @pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") def test_meshwarpmaths(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() # TODO: include regression tests for when tvtk is installed diff --git a/nipype/algorithms/tests/test_misc.py b/nipype/algorithms/tests/test_misc.py index eda249c88b..d148ee8ca1 100644 --- a/nipype/algorithms/tests/test_misc.py +++ b/nipype/algorithms/tests/test_misc.py @@ -9,6 +9,8 @@ from nipype.algorithms import misc from nipype.utils.filemanip import fname_presuffix from nipype.testing.fixtures import create_analyze_pair_file_in_directory +from nipype.utils import NUMPY_MMAP +from nipype.testing import example_data def test_CreateNifti(create_analyze_pair_file_in_directory): @@ -31,4 +33,16 @@ def test_CreateNifti(create_analyze_pair_file_in_directory): result = create_nifti.run() assert os.path.exists(result.outputs.nifti_file) - assert nb.load(result.outputs.nifti_file) + assert nb.load(result.outputs.nifti_file, mmap=NUMPY_MMAP) + +def test_CalculateMedian(create_analyze_pair_file_in_directory): + + mean = misc.CalculateMedian() + + with pytest.raises(TypeError): mean.run() + + mean.inputs.in_files = example_data('ds003_sub-01_mc.nii.gz') + eg = mean.run() + + assert os.path.exists(eg.outputs.median_files) + assert nb.load(eg.outputs.median_files, mmap=NUMPY_MMAP) diff --git a/nipype/algorithms/tests/test_modelgen.py b/nipype/algorithms/tests/test_modelgen.py index cb10304fea..3c9ec4096b 100644 --- a/nipype/algorithms/tests/test_modelgen.py +++ b/nipype/algorithms/tests/test_modelgen.py 
@@ -17,9 +17,8 @@ def test_modelgen1(tmpdir): - tempdir = str(tmpdir) - filename1 = os.path.join(tempdir, 'test1.nii') - filename2 = os.path.join(tempdir, 'test2.nii') + filename1 = tmpdir.join('test1.nii').strpath + filename2 = tmpdir.join('test2.nii').strpath Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename1) Nifti1Image(np.random.rand(10, 10, 10, 200), np.eye(4)).to_filename(filename2) s = SpecifyModel() @@ -56,9 +55,8 @@ def test_modelgen1(tmpdir): def test_modelgen_spm_concat(tmpdir): - tempdir = str(tmpdir) - filename1 = os.path.join(tempdir, 'test1.nii') - filename2 = os.path.join(tempdir, 'test2.nii') + filename1 = tmpdir.join('test1.nii').strpath + filename2 = tmpdir.join('test2.nii').strpath Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1) Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2) @@ -97,7 +95,7 @@ def test_modelgen_spm_concat(tmpdir): npt.assert_almost_equal(np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0])) # Test case for variable number of events in separate runs, sometimes unique. 
- filename3 = os.path.join(tempdir, 'test3.nii') + filename3 = tmpdir.join('test3.nii').strpath Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename3) s.inputs.functional_runs = [filename1, filename2, filename3] info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]]), @@ -122,9 +120,8 @@ def test_modelgen_spm_concat(tmpdir): def test_modelgen_sparse(tmpdir): - tempdir = str(tmpdir) - filename1 = os.path.join(tempdir, 'test1.nii') - filename2 = os.path.join(tempdir, 'test2.nii') + filename1 = tmpdir.join('test1.nii').strpath + filename2 = tmpdir.join('test2.nii').strpath Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1) Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2) s = SpecifySparseModel() diff --git a/nipype/algorithms/tests/test_moments.py b/nipype/algorithms/tests/test_moments.py index 12de44750a..17c8e922b2 100644 --- a/nipype/algorithms/tests/test_moments.py +++ b/nipype/algorithms/tests/test_moments.py @@ -1,10 +1,9 @@ # -*- coding: utf-8 -*- import numpy as np -import tempfile from nipype.algorithms.misc import calc_moments -def test_skew(): +def test_skew(tmpdir): data = """14.62418305 5.916396751 -1.658088086 4.71113546 1.598428608 5.612553811 -5.004056368 -4.057513911 11.16365251 17.32688599 -3.099920667 2.630189741 2.389709914 0.379332731 -0.2899694205 -4.363591482 2.059205599 23.90705054 0.7180462297 -1.976963652 7.487682025 -5.583986129 1.094800525 -2.319858134 @@ -126,13 +125,12 @@ def test_skew(): -0.5057854071 -2.415896554 -9.663571931 -5.714041661 -6.037933426 8.673756933 10.03557773 8.629816199 3.622185659 0.4716627142 -10.92515308 -3.705286841 -2.776089545 2.271920902 9.251504922 5.744980887 """ - with tempfile.NamedTemporaryFile(mode='w', delete=True) as f: - f.write(data) - f.flush() - skewness = calc_moments(f.name, 3) - assert np.allclose(skewness, np.array( - [-0.23418937314622, 0.2946365564954823, -0.05781002053540932, - 
-0.3512508282578762, - - 0.07035664150233077, - - 0.01935867699166935, - 0.00483863369427428, 0.21879460029850167])) + f = tmpdir.join("filetest") + f.write(data) + skewness = calc_moments(f.strpath, 3) + assert np.allclose(skewness, np.array( + [-0.23418937314622, 0.2946365564954823, -0.05781002053540932, + -0.3512508282578762, - + 0.07035664150233077, - + 0.01935867699166935, + 0.00483863369427428, 0.21879460029850167])) diff --git a/nipype/algorithms/tests/test_normalize_tpms.py b/nipype/algorithms/tests/test_normalize_tpms.py index 19a183bee0..5d0fc5c47b 100644 --- a/nipype/algorithms/tests/test_normalize_tpms.py +++ b/nipype/algorithms/tests/test_normalize_tpms.py @@ -18,7 +18,6 @@ def test_normalize_tpms(tmpdir): - tempdir = str(tmpdir) in_mask = example_data('tpms_msk.nii.gz') mskdata = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() @@ -30,8 +29,8 @@ def test_normalize_tpms(tmpdir): for i in range(3): mapname = example_data('tpm_%02d.nii.gz' % i) - filename = os.path.join(tempdir, 'modtpm_%02d.nii.gz' % i) - out_files.append(os.path.join(tempdir, 'normtpm_%02d.nii.gz' % i)) + filename = tmpdir.join('modtpm_%02d.nii.gz' % i).strpath + out_files.append(tmpdir.join('normtpm_%02d.nii.gz' % i).strpath) im = nb.load(mapname, mmap=NUMPY_MMAP) data = im.get_data() diff --git a/nipype/algorithms/tests/test_overlap.py b/nipype/algorithms/tests/test_overlap.py index ab0f564b1a..e0ec5bcfcb 100644 --- a/nipype/algorithms/tests/test_overlap.py +++ b/nipype/algorithms/tests/test_overlap.py @@ -20,7 +20,7 @@ def check_close(val1, val2): in1 = example_data('segmentation0.nii.gz') in2 = example_data('segmentation1.nii.gz') - os.chdir(str(tmpdir)) + tmpdir.chdir() overlap = Overlap() overlap.inputs.volume1 = in1 overlap.inputs.volume2 = in1 diff --git a/nipype/algorithms/tests/test_splitmerge.py b/nipype/algorithms/tests/test_splitmerge.py index e122fef077..d7e98a47ba 100644 --- a/nipype/algorithms/tests/test_splitmerge.py +++ b/nipype/algorithms/tests/test_splitmerge.py @@ 
-14,13 +14,13 @@ def test_split_and_merge(tmpdir): from nipype.algorithms.misc import split_rois, merge_rois in_mask = example_data('tpms_msk.nii.gz') - dwfile = op.join(str(tmpdir), 'dwi.nii.gz') + dwfile = tmpdir.join('dwi.nii.gz').strpath mskdata = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() aff = nb.load(in_mask, mmap=NUMPY_MMAP).affine dwshape = (mskdata.shape[0], mskdata.shape[1], mskdata.shape[2], 6) dwdata = np.random.normal(size=dwshape) - os.chdir(str(tmpdir)) + tmpdir.chdir() nb.Nifti1Image(dwdata.astype(np.float32), aff, None).to_filename(dwfile) diff --git a/nipype/algorithms/tests/test_tsnr.py b/nipype/algorithms/tests/test_tsnr.py index e53ffd2f34..f4bac9a17d 100644 --- a/nipype/algorithms/tests/test_tsnr.py +++ b/nipype/algorithms/tests/test_tsnr.py @@ -30,8 +30,7 @@ class TestTSNR(): @pytest.fixture(autouse=True) def setup_class(self, tmpdir): # setup temp folder - self.temp_dir = str(tmpdir) - os.chdir(self.temp_dir) + tmpdir.chdir() utils.save_toy_nii(self.fake_data, self.in_filenames['in_file']) diff --git a/nipype/caching/tests/test_memory.py b/nipype/caching/tests/test_memory.py index d2968ae3f2..50f56d4700 100644 --- a/nipype/caching/tests/test_memory.py +++ b/nipype/caching/tests/test_memory.py @@ -25,7 +25,7 @@ def test_caching(tmpdir): try: # Prevent rerun to check that evaluation is computed only once config.set('execution', 'stop_on_first_rerun', 'true') - mem = Memory(str(tmpdir)) + mem = Memory(tmpdir.strpath) first_nb_run = nb_runs results = mem.cache(SideEffectInterface)(input1=2, input2=1) assert nb_runs == first_nb_run + 1 diff --git a/nipype/conftest.py b/nipype/conftest.py new file mode 100644 index 0000000000..27a3789ea4 --- /dev/null +++ b/nipype/conftest.py @@ -0,0 +1,12 @@ +import pytest +import numpy, os + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + doctest_namespace['os'] = os + + + filepath = os.path.dirname(os.path.realpath(__file__)) + datadir = 
os.path.realpath(os.path.join(filepath, 'testing/data')) + doctest_namespace["datadir"] = datadir diff --git a/nipype/external/d3.js b/nipype/external/d3.js index 4577992aee..e1ddb0379e 100644 --- a/nipype/external/d3.js +++ b/nipype/external/d3.js @@ -9252,4 +9252,4 @@ } else { this.d3 = d3; } -}(); \ No newline at end of file +}(); diff --git a/nipype/info.py b/nipype/info.py index bbf2e8b157..8714f99707 100644 --- a/nipype/info.py +++ b/nipype/info.py @@ -10,7 +10,7 @@ # full release. '.dev' as a version_extra string means this is a development # version # Remove -dev for release -__version__ = '1.0.0-dev' +__version__ = '0.14.1-dev' def get_nipype_gitversion(): @@ -98,15 +98,16 @@ def get_nipype_gitversion(): # versions NIBABEL_MIN_VERSION = '2.1.0' NETWORKX_MIN_VERSION = '1.9' -NUMPY_MIN_VERSION = '1.8.2' +NUMPY_MIN_VERSION = '1.9.0' SCIPY_MIN_VERSION = '0.14' TRAITS_MIN_VERSION = '4.6' DATEUTIL_MIN_VERSION = '2.2' PYTEST_MIN_VERSION = '3.0' FUTURE_MIN_VERSION = '0.16.0' SIMPLEJSON_MIN_VERSION = '3.8.0' -PROV_MIN_VERSION = '1.5.0' +PROV_VERSION = '1.5.0' CLICK_MIN_VERSION = '6.6.0' +PYDOT_MIN_VERSION = '1.2.3' NAME = 'nipype' MAINTAINER = 'nipype developers' @@ -136,12 +137,13 @@ def get_nipype_gitversion(): 'traits>=%s' % TRAITS_MIN_VERSION, 'future>=%s' % FUTURE_MIN_VERSION, 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, - 'prov>=%s' % PROV_MIN_VERSION, + 'prov==%s' % PROV_VERSION, 'click>=%s' % CLICK_MIN_VERSION, 'funcsigs', 'pytest>=%s' % PYTEST_MIN_VERSION, 'mock', 'pydotplus', + 'pydot>=%s' % PYDOT_MIN_VERSION, 'packaging', ] @@ -154,12 +156,13 @@ def get_nipype_gitversion(): ] EXTRA_REQUIRES = { - 'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus'], + 'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'], 'tests': TESTS_REQUIRES, 'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'], - 'profiler': ['psutil'], + 'profiler': ['psutil>=5.0'], 'duecredit': ['duecredit'], 'xvfbwrapper': ['xvfbwrapper'], + 'pybids' : ['pybids'] # 'mesh': 
['mayavi'] # Enable when it works } diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py index 60076eefc8..6a1e7df767 100644 --- a/nipype/interfaces/afni/__init__.py +++ b/nipype/interfaces/afni/__init__.py @@ -8,16 +8,22 @@ """ from .base import Info -from .preprocess import (Allineate, Automask, AutoTcorrelate, +from .preprocess import (AlignEpiAnatPy, Allineate, Automask, + AutoTcorrelate, AutoTLRC, Bandpass, BlurInMask, BlurToFWHM, ClipLevel, DegreeCentrality, Despike, Detrend, ECM, Fim, Fourier, Hist, LFCD, Maskave, Means, OutlierCount, QualityIndex, ROIStats, Retroicor, Seg, SkullStrip, TCorr1D, TCorrMap, TCorrelate, - TShift, Volreg, Warp, QwarpPlusMinus) + TNorm, + TShift, Volreg, Warp, QwarpPlusMinus, Qwarp) from .svm import (SVMTest, SVMTrain) -from .utils import (AFNItoNIFTI, Autobox, BrickStat, Calc, Copy, - Eval, FWHMx, - MaskTool, Merge, Notes, Refit, Resample, TCat, TStat, To3D, - Unifize, ZCutUp, GCOR,) +from .utils import (ABoverlap, AFNItoNIFTI, Autobox, Axialize, BrickStat, + Bucket, Calc, Cat, CatMatvec, CenterMass, Copy, Dot, + Edge3, Eval, FWHMx, MaskTool, Merge, Notes, NwarpApply, + NwarpCat, OneDToolPy, + Refit, Resample, TCat, TCatSubBrick, TStat, To3D, Unifize, + Undump, ZCutUp, GCOR, + Zcat, Zeropad) +from .model import (Deconvolve, Remlfit, Synthesize) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index 9fc3696f9a..1097a28d46 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -8,69 +8,46 @@ import os from sys import platform +from distutils import spawn + +from ... import logging, LooseVersion +from ...utils.filemanip import split_filename, fname_presuffix -from ... 
import logging -from ...utils.filemanip import split_filename from ..base import ( - CommandLine, traits, CommandLineInputSpec, isdefined, File, TraitedSpec) + CommandLine, traits, CommandLineInputSpec, isdefined, File, TraitedSpec, + PackageInfo) from ...external.due import BibTeX # Use nipype's logging system IFLOGGER = logging.getLogger('interface') -class Info(object): +class Info(PackageInfo): """Handle afni output type and version information. """ __outputtype = 'AFNI' ftypes = {'NIFTI': '.nii', 'AFNI': '', 'NIFTI_GZ': '.nii.gz'} + version_cmd = 'afni --version' @staticmethod - def version(): - """Check for afni version on system - - Parameters - ---------- - None - - Returns - ------- - version : str - Version number as string or None if AFNI not found - - """ - try: - clout = CommandLine(command='afni_vcheck', - terminal_output='allatonce').run() - - # Try to parse the version number - currv = clout.runtime.stdout.split('\n')[1].split('=', 1)[1].strip() - except IOError: - # If afni_vcheck is not present, return None - IFLOGGER.warn('afni_vcheck executable not found.') + def parse_version(raw_info): + version_stamp = raw_info.split('\n')[0].split('Version ')[1] + if version_stamp.startswith('AFNI'): + version_stamp = version_stamp.split('AFNI_')[1] + elif version_stamp.startswith('Debian'): + version_stamp = version_stamp.split('Debian-')[1].split('~')[0] + else: return None - except RuntimeError as e: - # If AFNI is outdated, afni_vcheck throws error. - # Show new version, but parse current anyways. - currv = str(e).split('\n')[4].split('=', 1)[1].strip() - nextv = str(e).split('\n')[6].split('=', 1)[1].strip() - IFLOGGER.warn( - 'AFNI is outdated, detected version %s and %s is available.' 
% (currv, nextv)) - - if currv.startswith('AFNI_'): - currv = currv[5:] - - v = currv.split('.') - try: - v = [int(n) for n in v] - except ValueError: - return currv - return tuple(v) + + version = LooseVersion(version_stamp.replace('_', '.')).version[:3] + if version[0] < 1000: + version[0] = version[0] + 2000 + return tuple(version) @classmethod - def outputtype_to_ext(cls, outputtype): + def output_type_to_ext(cls, outputtype): """Get the file extension for the given output type. Parameters @@ -111,6 +88,8 @@ def standard_image(img_name): Could be made more fancy to allow for more relocatability''' clout = CommandLine('which afni', + ignore_exception=True, + resource_monitor=False, terminal_output='allatonce').run() if clout.runtime.returncode is not 0: return None @@ -132,6 +111,8 @@ def _run_interface(self, runtime): class AFNICommandInputSpec(CommandLineInputSpec): + num_threads = traits.Int(1, usedefault=True, nohash=True, + desc='set number of threads') outputtype = traits.Enum('AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') out_file = File(name_template="%s_afni", desc='output image file name', @@ -175,10 +156,36 @@ class AFNICommand(AFNICommandBase): 'tags': ['implementation'], }] + @property + def num_threads(self): + return self.inputs.num_threads + + @num_threads.setter + def num_threads(self, value): + self.inputs.num_threads = value + + @classmethod + def set_default_output_type(cls, outputtype): + """Set the default output type for AFNI classes. + + This method is used to set the default output type for all afni + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.outputtype. 
+ """ + + if outputtype in Info.ftypes: + cls._outputtype = outputtype + else: + raise AttributeError('Invalid AFNI outputtype: %s' % outputtype) + def __init__(self, **inputs): super(AFNICommand, self).__init__(**inputs) self.inputs.on_trait_change(self._output_update, 'outputtype') + if hasattr(self.inputs, 'num_threads'): + self.inputs.on_trait_change(self._nthreads_update, 'num_threads') + if self._outputtype is None: self._outputtype = Info.outputtype() @@ -187,11 +194,9 @@ def __init__(self, **inputs): else: self._output_update() - def _run_interface(self, runtime): - # Update num threads estimate from OMP_NUM_THREADS env var - # Default to 1 if not set - self.inputs.environ['OMP_NUM_THREADS'] = str(self.num_threads) - return super(AFNICommand, self)._run_interface(runtime) + def _nthreads_update(self): + """Update environment with new number of threads""" + self.inputs.environ['OMP_NUM_THREADS'] = '%d' % self.inputs.num_threads def _output_update(self): """ i think? updates class private attribute based on instance input @@ -200,24 +205,9 @@ def _output_update(self): """ self._outputtype = self.inputs.outputtype - @classmethod - def set_default_output_type(cls, outputtype): - """Set the default output type for AFNI classes. - - This method is used to set the default output type for all afni - subclasses. However, setting this will not update the output - type for any existing instances. For these, assign the - .inputs.outputtype. 
- """ - - if outputtype in Info.ftypes: - cls._outputtype = outputtype - else: - raise AttributeError('Invalid AFNI outputtype: %s' % outputtype) - def _overload_extension(self, value, name=None): path, base, _ = split_filename(value) - return os.path.join(path, base + Info.outputtype_to_ext(self.inputs.outputtype)) + return os.path.join(path, base + Info.output_type_to_ext(self.inputs.outputtype)) def _list_outputs(self): outputs = super(AFNICommand, self)._list_outputs() @@ -231,9 +221,76 @@ def _list_outputs(self): outputs[name] = outputs[name] + "+orig.BRIK" return outputs + def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, + ext=None): + """Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename. + If change_ext is True, it will use the extentions specified in + intputs.output_type. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (defaults is '' ) + change_ext : bool + Flag to change the filename extension to the FSL output type. + (default True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' 
+ raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + if ext is None: + ext = Info.output_type_to_ext(self.inputs.outputtype) + if change_ext: + if suffix: + suffix = ''.join((suffix, ext)) + else: + suffix = ext + if suffix is None: + suffix = '' + fname = fname_presuffix(basename, suffix=suffix, + use_ext=False, newpath=cwd) + return fname + def no_afni(): """ Checks if AFNI is available """ if Info.version() is None: return True return False + + +class AFNIPythonCommandInputSpec(CommandLineInputSpec): + outputtype = traits.Enum('AFNI', list(Info.ftypes.keys()), + desc='AFNI output filetype') + py27_path = traits.Either('python2', File(exists=True), + usedefault=True, + default='python2') + + +class AFNIPythonCommand(AFNICommand): + @property + def cmd(self): + if spawn.find_executable(super(AFNIPythonCommand, self).cmd) is not None: + return spawn.find_executable(super(AFNIPythonCommand, self).cmd) + else: + return super(AFNIPythonCommand, self).cmd + + @property + def cmdline(self): + return "{} {}".format(self.inputs.py27_path, super(AFNIPythonCommand, self).cmdline) diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py new file mode 100644 index 0000000000..d5730d15f9 --- /dev/null +++ b/nipype/interfaces/afni/model.py @@ -0,0 +1,670 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft = python sts = 4 ts = 4 sw = 4 et: +"""AFNI modeling interfaces + +Examples +-------- +See the docstrings of the individual classes for examples. + .. 
testsetup:: + # Change directory to provide relative paths for doctests + >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) + >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) + >>> os.chdir(datadir) +""" +from __future__ import print_function, division, unicode_literals, absolute_import + +import os + +from ..base import ( + CommandLineInputSpec, CommandLine, Directory, TraitedSpec, + traits, isdefined, File, InputMultiPath, Undefined, Str) +from ...external.due import BibTeX + +from .base import ( + AFNICommandBase, AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec) + +class DeconvolveInputSpec(AFNICommandInputSpec): + in_files = InputMultiPath( + File( + exists=True), + desc='filenames of 3D+time input datasets. More than one filename can ' + 'be given and the datasets will be auto-catenated in time. ' + 'You can input a 1D time series file here, but the time axis ' + 'should run along the ROW direction, not the COLUMN direction as ' + 'in the \'input1D\' option.', + argstr='-input %s', + copyfile=False, + sep=" ", + position=0) + sat = traits.Bool( + desc='check the dataset time series for initial saturation transients,' + ' which should normally have been excised before data analysis.', + argstr='-sat', + xor=['trans']) + trans = traits.Bool( + desc='check the dataset time series for initial saturation transients,' + ' which should normally have been excised before data analysis.', + argstr='-trans', + xor=['sat']) + noblock = traits.Bool( + desc='normally, if you input multiple datasets with \'input\', then ' + 'the separate datasets are taken to be separate image runs that ' + 'get separate baseline models. Use this options if you want to ' + 'have the program consider these to be all one big run.' + '* If any of the input dataset has only 1 sub-brick, then this ' + 'option is automatically invoked!' 
+ '* If the auto-catenation feature isn\'t used, then this option ' + 'has no effect, no how, no way.', + argstr='-noblock') + force_TR = traits.Int( + desc='use this value instead of the TR in the \'input\' ' + 'dataset. (It\'s better to fix the input using Refit.)', + argstr='-force_TR %d') + input1D = File( + desc='filename of single (fMRI) .1D time series where time runs down ' + 'the column.', + argstr='-input1D %s', + exists=True) + TR_1D = traits.Float( + desc='TR to use with \'input1D\'. This option has no effect if you do ' + 'not also use \'input1D\'.', + argstr='-TR_1D %f') + legendre = traits.Bool( + desc='use Legendre polynomials for null hypothesis (baseline model)', + argstr='-legendre') + nolegendre = traits.Bool( + desc='use power polynomials for null hypotheses. Don\'t do this ' + 'unless you are crazy!', + argstr='-nolegendre') + nodmbase = traits.Bool( + desc='don\'t de-mean baseline time series', + argstr='-nodmbase') + dmbase = traits.Bool( + desc='de-mean baseline time series (default if \'polort\' >= 0)', + argstr='-dmbase') + svd = traits.Bool( + desc='use SVD instead of Gaussian elimination (default)', + argstr='-svd') + nosvd = traits.Bool( + desc='use Gaussian elimination instead of SVD', + argstr='-nosvd') + rmsmin = traits.Float( + desc='minimum rms error to reject reduced model (default = 0; don\'t ' + 'use this option normally!)', + argstr='-rmsmin %f') + nocond = traits.Bool( + desc='DON\'T calculate matrix condition number', + argstr='-nocond') + singvals = traits.Bool( + desc='print out the matrix singular values', + argstr='-singvals') + goforit = traits.Int( + desc='use this to proceed even if the matrix has bad problems (e.g., ' + 'duplicate columns, large condition number, etc.).', + argstr='-GOFORIT %i') + allzero_OK = traits.Bool( + desc='don\'t consider all zero matrix columns to be the type of error ' + 'that \'gotforit\' is needed to ignore.', + argstr='-allzero_OK') + dname = traits.Tuple( + Str, Str, + desc='set 
environmental variable to provided value', + argstr='-D%s=%s') + mask = File( + desc='filename of 3D mask dataset; only data time series from within ' + 'the mask will be analyzed; results for voxels outside the mask ' + 'will be set to zero.', + argstr='-mask %s', + exists=True) + automask = traits.Bool( + desc='build a mask automatically from input data (will be slow for ' + 'long time series datasets)', + argstr='-automask') + STATmask = File( + desc='build a mask from provided file, and use this mask for the ' + 'purpose of reporting truncation-to float issues AND for ' + 'computing the FDR curves. The actual results ARE not masked ' + 'with this option (only with \'mask\' or \'automask\' options).', + argstr='-STATmask %s', + exists=True) + censor = File( + desc='filename of censor .1D time series. This is a file of 1s and ' + '0s, indicating which time points are to be included (1) and ' + 'which are to be excluded (0).', + argstr='-censor %s', + exists=True) + polort = traits.Int( + desc='degree of polynomial corresponding to the null hypothesis ' + '[default: 1]', + argstr='-polort %d') + ortvec = traits.Tuple( + File( + desc='filename', + exists=True), + Str( + desc='label'), + desc='this option lets you input a rectangular array of 1 or more ' + 'baseline vectors from a file. This method is a fast way to ' + 'include a lot of baseline regressors in one step. ', + argstr='ortvec %s') + x1D = File( + desc='specify name for saved X matrix', + argstr='-x1D %s') + x1D_stop = traits.Bool( + desc='stop running after writing .xmat.1D file', + argstr='-x1D_stop') + cbucket = traits.Str( + desc='Name for dataset in which to save the regression ' + 'coefficients (no statistics). 
This dataset ' + 'will be used in a -xrestore run [not yet implemented] ' + 'instead of the bucket dataset, if possible.', + argstr='-cbucket %s') + out_file = File( + desc='output statistics file', + argstr='-bucket %s') + jobs = traits.Int( + desc='run the program with provided number of sub-processes', + argstr='-jobs %d') + fout = traits.Bool( + desc='output F-statistic for each stimulus', + argstr='-fout') + rout = traits.Bool( + desc='output the R^2 statistic for each stimulus', + argstr='-rout') + tout = traits.Bool( + desc='output the T-statistic for each stimulus', + argstr='-tout') + vout = traits.Bool( + desc='output the sample variance (MSE) for each stimulus', + argstr='-vout') + global_times = traits.Bool( + desc='use global timing for stimulus timing files', + argstr='-global_times', + xor=['local_times']) + local_times = traits.Bool( + desc='use local timing for stimulus timing files', + argstr='-local_times', + xor=['global_times']) + num_stimts = traits.Int( + desc='number of stimulus timing files', + argstr='-num_stimts %d', + position=-6) + stim_times = traits.List( + traits.Tuple(traits.Int(desc='k-th response model'), + File(desc='stimulus timing file',exists=True), + Str(desc='model')), + desc='generate a response model from a set of stimulus times' + ' given in file.', + argstr='-stim_times %d %s \'%s\'...', + position=-5) + stim_label = traits.List( + traits.Tuple(traits.Int(desc='k-th input stimulus'), + Str(desc='stimulus label')), + desc='label for kth input stimulus (e.g., Label1)', + argstr='-stim_label %d %s...', + requires=['stim_times'], + position=-4) + stim_times_subtract = traits.Float( + desc='this option means to subtract specified seconds from each time ' + 'encountered in any \'stim_times\' option. 
The purpose of this ' + 'option is to make it simple to adjust timing files for the ' + 'removal of images from the start of each imaging run.', + argstr='-stim_times_subtract %f') + num_glt = traits.Int( + desc='number of general linear tests (i.e., contrasts)', + argstr='-num_glt %d', + position=-3) + gltsym = traits.List( + Str(desc='symbolic general linear test'), + desc='general linear tests (i.e., contrasts) using symbolic ' + 'conventions (e.g., \'+Label1 -Label2\')', + argstr='-gltsym \'SYM: %s\'...', + position=-2) + glt_label = traits.List( + traits.Tuple(traits.Int(desc='k-th general linear test'), + Str(desc='GLT label')), + desc='general linear test (i.e., contrast) labels', + argstr='-glt_label %d %s...', + requires=['gltsym'], + position=-1) + + +class DeconvolveOutputSpec(TraitedSpec): + out_file = File( + desc='output statistics file', exists=True) + reml_script = File( + desc='automatical generated script to run 3dREMLfit', exists=True) + x1D = File( + desc='save out X matrix', exists=True) + cbucket = File( + desc='output regression coefficients file (if generated)') + + +class Deconvolve(AFNICommand): + """Performs OLS regression given a 4D neuroimage file and stimulus timings + + For complete details, see the `3dDeconvolve Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> deconvolve = afni.Deconvolve() + >>> deconvolve.inputs.in_files = ['functional.nii', 'functional2.nii'] + >>> deconvolve.inputs.out_file = 'output.nii' + >>> deconvolve.inputs.x1D = 'output.1D' + >>> stim_times = [(1, 'timeseries.txt', 'SPMG1(4)')] + >>> deconvolve.inputs.stim_times = stim_times + >>> deconvolve.inputs.stim_label = [(1, 'Houses')] + >>> deconvolve.inputs.gltsym = ['SYM: +Houses'] + >>> deconvolve.inputs.glt_label = [(1, 'Houses')] + >>> deconvolve.cmdline + "3dDeconvolve -input functional.nii functional2.nii -bucket output.nii -x1D output.1D -num_stimts 1 -stim_times 1 timeseries.txt 'SPMG1(4)' -stim_label 1 Houses -num_glt 1 -gltsym 'SYM: +Houses' -glt_label 1 Houses" + >>> res = deconvolve.run() # doctest: +SKIP + """ + + _cmd = '3dDeconvolve' + input_spec = DeconvolveInputSpec + output_spec = DeconvolveOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'gltsym': + for n, val in enumerate(value): + if val.startswith('SYM: '): + value[n] = val.lstrip('SYM: ') + + return super(Deconvolve,self)._format_arg(name, trait_spec, value) + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + if len(self.inputs.stim_times) and not isdefined(self.inputs.num_stimts): + self.inputs.num_stimts = len(self.inputs.stim_times) + if len(self.inputs.gltsym) and not isdefined(self.inputs.num_glt): + self.inputs.num_glt = len(self.inputs.gltsym) + if not isdefined(self.inputs.out_file): + self.inputs.out_file = 'Decon.nii' + + return super(Deconvolve, self)._parse_inputs(skip) + + def _list_outputs(self): + outputs = self.output_spec().get() + + _gen_fname_opts = {} + _gen_fname_opts['basename'] = self.inputs.out_file + _gen_fname_opts['cwd'] = os.getcwd() + + if isdefined(self.inputs.x1D): + if not self.inputs.x1D.endswith('.xmat.1D'): + outputs['x1D'] = os.path.abspath(self.inputs.x1D + '.xmat.1D') + else: + outputs['x1D'] = 
os.path.abspath(self.inputs.x1D) + else: + outputs['x1D'] = self._gen_fname(suffix='.xmat.1D', **_gen_fname_opts) + + outputs['reml_script'] = self._gen_fname(suffix='.REML_cmd', **_gen_fname_opts) + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + + return outputs + + +class RemlfitInputSpec(AFNICommandInputSpec): + # mandatory files + in_files = InputMultiPath( + File( + exists=True), + desc='Read time series dataset', + argstr='-input "%s"', + mandatory=True, + copyfile=False, + sep=" ") + matrix = File( + desc='the design matrix file, which should have been output from ' + 'Deconvolve via the \'x1D\' option', + argstr='-matrix %s', + mandatory=True) + # "Semi-Hidden Alternative Ways to Define the Matrix" + polort = traits.Int( + desc='if no \'matrix\' option is given, AND no \'matim\' option, ' + 'create a matrix with Legendre polynomial regressors' + 'up to the specified order. The default value is 0, which' + 'produces a matrix with a single column of all ones', + argstr='-polort %d', + xor=['matrix']) + matim = traits.File( + desc='read a standard file as the matrix. You can use only Col as ' + 'a name in GLTs with these nonstandard matrix input methods, ' + 'since the other names come from the \'matrix\' file. ' + 'These mutually exclusive options are ignored if \'matrix\' ' + 'is used.', + argstr='-matim %s', + xor=['matrix']) + # Other arguments + mask = File( + desc='filename of 3D mask dataset; only data time series from within ' + 'the mask will be analyzed; results for voxels outside the mask ' + 'will be set to zero.', + argstr='-mask %s', + exists=True) + automask = traits.Bool( + usedefault=True, + argstr='-automask', + desc='build a mask automatically from input data (will be slow for ' + 'long time series datasets)') + STATmask = File( + desc='filename of 3D mask dataset to be used for the purpose ' + 'of reporting truncation-to float issues AND for computing the ' + 'FDR curves. 
The actual results ARE not masked with this option ' + '(only with \'mask\' or \'automask\' options).', + argstr='-STATmask %s', + exists=True) + addbase = InputMultiPath( + File( + exists=True, + desc='file containing columns to add to regression matrix'), + desc='file(s) to add baseline model columns to the matrix with this ' + 'option. Each column in the specified file(s) will be appended ' + 'to the matrix. File(s) must have at least as many rows as the ' + 'matrix does.', + copyfile=False, + sep=" ", + argstr='-addbase %s') + slibase = InputMultiPath( + File( + exists=True, + desc='file containing columns to add to regression matrix'), + desc='similar to \'addbase\' in concept, BUT each specified file ' + 'must have an integer multiple of the number of slices ' + 'in the input dataset(s); then, separate regression ' + 'matrices are generated for each slice, with the ' + 'first column of the file appended to the matrix for ' + 'the first slice of the dataset, the second column of the file ' + 'appended to the matrix for the first slice of the dataset, ' + 'and so on. Intended to help model physiological noise in FMRI, ' + 'or other effects you want to regress out that might ' + 'change significantly in the inter-slice time intervals. This ' + 'will slow the program down, and make it use a lot more memory ' + '(to hold all the matrix stuff).', + argstr='-slibase %s') + slibase_sm = InputMultiPath( + File( + exists=True, + desc='file containing columns to add to regression matrix'), + desc='similar to \'slibase\', BUT each file much be in slice major ' + 'order (i.e. all slice0 columns come first, then all slice1 ' + 'columns, etc).', + argstr='-slibase_sm %s') + usetemp = traits.Bool( + desc='write intermediate stuff to disk, to economize on RAM. 
' + 'Using this option might be necessary to run with ' + '\'slibase\' and with \'Grid\' values above the default, ' + 'since the program has to store a large number of ' + 'matrices for such a problem: two for every slice and ' + 'for every (a,b) pair in the ARMA parameter grid. Temporary ' + 'files are written to the directory given in environment ' + 'variable TMPDIR, or in /tmp, or in ./ (preference is in that ' + 'order)', + argstr='-usetemp') + nodmbase = traits.Bool( + desc='by default, baseline columns added to the matrix via ' + '\'addbase\' or \'slibase\' or \'dsort\' will each have their ' + 'mean removed (as is done in Deconvolve); this option turns this ' + 'centering off', + argstr='-nodmbase', + requires=['addbase','dsort']) + dsort = File( + desc='4D dataset to be used as voxelwise baseline regressor', + exists=True, + copyfile=False, + argstr='-dsort %s') + dsort_nods = traits.Bool( + desc='if \'dsort\' option is used, this command will output ' + 'additional results files excluding the \'dsort\' file', + argstr='-dsort_nods', + requires=['dsort']) + fout = traits.Bool( + desc='output F-statistic for each stimulus', + argstr='-fout') + rout = traits.Bool( + desc='output the R^2 statistic for each stimulus', + argstr='-rout') + tout = traits.Bool( + desc='output the T-statistic for each stimulus; if you use ' + '\'out_file\' and do not give any of \'fout\', \'tout\',' + 'or \'rout\', then the program assumes \'fout\' is activated.', + argstr='-tout') + nofdr = traits.Bool( + desc='do NOT add FDR curve data to bucket datasets; FDR curves can ' + 'take a long time if \'tout\' is used', + argstr='-noFDR') + nobout = traits.Bool( + desc='do NOT add baseline (null hypothesis) regressor betas ' + 'to the \'rbeta_file\' and/or \'obeta_file\' output datasets.', + argstr='-nobout') + gltsym = traits.List( + traits.Either(traits.Tuple(File(exists=True), Str()), + traits.Tuple(Str(), Str())), + desc='read a symbolic GLT from input file and associate it with a 
' + 'label. As in Deconvolve, you can also use the \'SYM:\' method ' + 'to provide the definition of the GLT directly as a string ' + '(e.g., with \'SYM: +Label1 -Label2\'). Unlike Deconvolve, you ' + 'MUST specify \'SYM: \' if providing the GLT directly as a ' + 'string instead of from a file', + argstr='-gltsym "%s" %s...') + out_file = File( + desc='output dataset for beta + statistics from the REML estimation; ' + 'also contains the results of any GLT analysis requested ' + 'in the Deconvolve setup, similar to the \'bucket\' output ' + 'from Deconvolve. This dataset does NOT get the betas ' + '(or statistics) of those regressors marked as \'baseline\' ' + 'in the matrix file.', + argstr='-Rbuck %s') + var_file = File( + desc='output dataset for REML variance parameters', + argstr='-Rvar %s') + rbeta_file = File( + desc='output dataset for beta weights from the REML estimation, ' + 'similar to the \'cbucket\' output from Deconvolve. This dataset ' + 'will contain all the beta weights, for baseline and stimulus ' + 'regressors alike, unless the \'-nobout\' option is given -- ' + 'in that case, this dataset will only get the betas for the ' + 'stimulus regressors.', + argstr='-Rbeta %s') + glt_file = File( + desc='output dataset for beta + statistics from the REML estimation, ' + 'but ONLY for the GLTs added on the REMLfit command line itself ' + 'via \'gltsym\'; GLTs from Deconvolve\'s command line will NOT ' + 'be included.', + argstr='-Rglt %s') + fitts_file = File( + desc='ouput dataset for REML fitted model', + argstr='-Rfitts %s') + errts_file = File( + desc='output dataset for REML residuals = data - fitted model', + argstr='-Rerrts %s') + wherr_file = File( + desc='dataset for REML residual, whitened using the estimated ' + 'ARMA(1,1) correlation matrix of the noise', + argstr='-Rwherr %s') + quiet = traits.Bool( + desc='turn off most progress messages', + argstr='-quiet') + verb = traits.Bool( + desc='turns on more progress messages, including memory 
usage ' + 'progress reports at various stages', + argstr='-verb') + ovar = File( + desc='dataset for OLSQ st.dev. parameter (kind of boring)', + argstr='-Ovar %s') + obeta = File( + desc='dataset for beta weights from the OLSQ estimation', + argstr='-Obeta %s') + obuck = File( + desc='dataset for beta + statistics from the OLSQ estimation', + argstr='-Obuck %s') + oglt = File( + desc='dataset for beta + statistics from \'gltsym\' options', + argstr='-Oglt %s') + ofitts = File( + desc='dataset for OLSQ fitted model', + argstr='-Ofitts %s') + oerrts = File( + desc='dataset for OLSQ residuals (data - fitted model)', + argstr='-Oerrts %s') + + +class RemlfitOutputSpec(AFNICommandOutputSpec): + out_file = File( + desc='dataset for beta + statistics from the REML estimation (if ' + 'generated') + var_file = File( + desc='dataset for REML variance parameters (if generated)') + rbeta_file = File( + desc='dataset for beta weights from the REML estimation (if ' + 'generated)') + rbeta_file = File( + desc='output dataset for beta weights from the REML estimation (if ' + 'generated') + glt_file = File( + desc='output dataset for beta + statistics from the REML estimation, ' + 'but ONLY for the GLTs added on the REMLfit command ' + 'line itself via \'gltsym\' (if generated)') + fitts_file = File( + desc='ouput dataset for REML fitted model (if generated)') + errts_file = File( + desc='output dataset for REML residuals = data - fitted model (if ' + 'generated') + wherr_file = File( + desc='dataset for REML residual, whitened using the estimated ' + 'ARMA(1,1) correlation matrix of the noise (if generated)') + ovar = File( + desc='dataset for OLSQ st.dev. 
parameter (if generated)') + obeta = File( + desc='dataset for beta weights from the OLSQ estimation (if ' + 'generated)') + obuck = File( + desc='dataset for beta + statistics from the OLSQ estimation (if ' + 'generated)') + oglt = File( + desc='dataset for beta + statistics from \'gltsym\' options (if ' + 'generated') + ofitts = File( + desc='dataset for OLSQ fitted model (if generated)') + oerrts = File( + desc='dataset for OLSQ residuals = data - fitted model (if ' + 'generated') + + +class Remlfit(AFNICommand): + """Performs Generalized least squares time series fit with Restricted + Maximum Likelihood (REML) estimation of the temporal auto-correlation + structure. + + For complete details, see the `3dREMLfit Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> remlfit = afni.Remlfit() + >>> remlfit.inputs.in_files = ['functional.nii', 'functional2.nii'] + >>> remlfit.inputs.out_file = 'output.nii' + >>> remlfit.inputs.matrix = 'output.1D' + >>> remlfit.inputs.gltsym = [('SYM: +Lab1 -Lab2', 'TestSYM'), ('timeseries.txt', 'TestFile')] + >>> remlfit.cmdline + '3dREMLfit -gltsym "SYM: +Lab1 -Lab2" TestSYM -gltsym "timeseries.txt" TestFile -input "functional.nii functional2.nii" -matrix output.1D -Rbuck output.nii' + >>> res = remlfit.run() # doctest: +SKIP + """ + + _cmd = '3dREMLfit' + input_spec = RemlfitInputSpec + output_spec = RemlfitOutputSpec + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + return super(Remlfit, self)._parse_inputs(skip) + + def _list_outputs(self): + outputs = self.output_spec().get() + + for key in outputs.keys(): + if isdefined(self.inputs.get()[key]): + outputs[key] = os.path.abspath(self.inputs.get()[key]) + + return outputs + + +class SynthesizeInputSpec(AFNICommandInputSpec): + cbucket = File( + desc='Read the dataset output from ' + '3dDeconvolve via the \'-cbucket\' option.', + argstr='-cbucket %s', + copyfile=False, + mandatory=True) + matrix = File( + desc='Read the 
matrix output from ' + '3dDeconvolve via the \'-x1D\' option.', + argstr='-matrix %s', + copyfile=False, + mandatory=True) + select = traits.List( + Str(desc='selected columns to synthesize'), + argstr='-select %s', + desc='A list of selected columns from the matrix (and the ' + 'corresponding coefficient sub-bricks from the ' + 'cbucket). Valid types include \'baseline\', ' + ' \'polort\', \'allfunc\', \'allstim\', \'all\', ' + 'Can also provide \'something\' where something matches ' + 'a stim_label from 3dDeconvolve, and \'digits\' where digits ' + 'are the numbers of the select matrix columns by ' + 'numbers (starting at 0), or number ranges of the form ' + '\'3..7\' and \'3-7\'.', + mandatory=True) + out_file = File( + name_template='syn', + desc='output dataset prefix name (default \'syn\')', + argstr='-prefix %s') + dry_run = traits.Bool( + desc='Don\'t compute the output, just ' + 'check the inputs.', + argstr='-dry') + TR = traits.Float( + desc='TR to set in the output. The default value of ' + 'TR is read from the header of the matrix file.', + argstr='-TR %f') + cenfill = traits.Enum( + 'zero','nbhr','none', + argstr='-cenfill %s', + desc='Determines how censored time points from the ' + '3dDeconvolve run will be filled. Valid types ' + 'are \'zero\', \'nbhr\' and \'none\'.') + + +class Synthesize(AFNICommand): + """Reads a '-cbucket' dataset and a '.xmat.1D' matrix from 3dDeconvolve, + and synthesizes a fit dataset using user-selected sub-bricks and + matrix columns. + + For complete details, see the `3dSynthesize Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> synthesize = afni.Synthesize() + >>> synthesize.inputs.cbucket = 'functional.nii' + >>> synthesize.inputs.matrix = 'output.1D' + >>> synthesize.inputs.select = ['baseline'] + >>> synthesize.cmdline + '3dSynthesize -cbucket functional.nii -matrix output.1D -select baseline' + >>> syn = synthesize.run() # doctest: +SKIP + """ + + _cmd = '3dSynthesize' + input_spec = SynthesizeInputSpec + output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index 5fd1ab0f21..5d90591953 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -15,14 +15,15 @@ import os import os.path as op -from ...utils.filemanip import (load_json, save_json, split_filename) +from ...utils.filemanip import (load_json, save_json, split_filename, + fname_presuffix) from ..base import ( CommandLineInputSpec, CommandLine, TraitedSpec, traits, isdefined, File, InputMultiPath, Undefined, Str) from .base import ( AFNICommandBase, AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec, - Info, no_afni) + AFNIPythonCommandInputSpec, AFNIPythonCommand, Info, no_afni) class CentralityInputSpec(AFNICommandInputSpec): @@ -46,12 +47,169 @@ class CentralityInputSpec(AFNICommandInputSpec): desc='Mask the dataset to target brain-only voxels', argstr='-automask') +class AlignEpiAnatPyInputSpec(AFNIPythonCommandInputSpec): + in_file = File( + desc='EPI dataset to align', + argstr='-epi %s', + mandatory=True, + exists=True, + copyfile=False) + anat = File( + desc='name of structural dataset', + argstr='-anat %s', + mandatory=True, + exists=True, + copyfile=False) + epi_base = traits.Either( + traits.Range(low=0), + traits.Enum('mean', 'median', 'max'), + desc='the epi base used in alignment' + 'should be one of (0/mean/median/max/subbrick#)', + mandatory=True, + argstr='-epi_base %s') + anat2epi = traits.Bool( + desc='align 
anatomical to EPI dataset (default)', + argstr='-anat2epi') + epi2anat = traits.Bool( + desc='align EPI to anatomical dataset', + argstr='-epi2anat') + save_skullstrip = traits.Bool( + desc='save skull-stripped (not aligned)', + argstr='-save_skullstrip') + suffix = traits.Str( + '_al', + desc='append suffix to the original anat/epi dataset to use' + 'in the resulting dataset names (default is "_al")', + usedefault=True, + argstr='-suffix %s') + epi_strip = traits.Enum( + ('3dSkullStrip', '3dAutomask', 'None'), + desc='method to mask brain in EPI data' + 'should be one of[3dSkullStrip]/3dAutomask/None)', + argstr='-epi_strip %s') + volreg = traits.Enum( + 'on', 'off', + usedefault=True, + desc='do volume registration on EPI dataset before alignment' + 'should be \'on\' or \'off\', defaults to \'on\'', + argstr='-volreg %s') + tshift = traits.Enum( + 'on', 'off', + usedefault=True, + desc='do time shifting of EPI dataset before alignment' + 'should be \'on\' or \'off\', defaults to \'on\'', + argstr='-tshift %s') + + +class AlignEpiAnatPyOutputSpec(TraitedSpec): + anat_al_orig = File( + desc="A version of the anatomy that is aligned to the EPI") + epi_al_orig = File( + desc="A version of the EPI dataset aligned to the anatomy") + epi_tlrc_al = File( + desc="A version of the EPI dataset aligned to a standard template") + anat_al_mat = File( + desc="matrix to align anatomy to the EPI") + epi_al_mat = File( + desc="matrix to align EPI to anatomy") + epi_vr_al_mat = File( + desc="matrix to volume register EPI") + epi_reg_al_mat = File( + desc="matrix to volume register and align epi to anatomy") + epi_al_tlrc_mat = File( + desc="matrix to volume register and align epi" + "to anatomy and put into standard space") + epi_vr_motion = File( + desc="motion parameters from EPI time-series" + "registration (tsh included in name if slice" + "timing correction is also included).") + skullstrip = File( + desc="skull-stripped (not aligned) volume") + +class 
AlignEpiAnatPy(AFNIPythonCommand): + """Align EPI to anatomical datasets or vice versa + This Python script computes the alignment between two datasets, typically + an EPI and an anatomical structural dataset, and applies the resulting + transformation to one or the other to bring them into alignment. + + This script computes the transforms needed to align EPI and + anatomical datasets using a cost function designed for this purpose. The + script combines multiple transformations, thereby minimizing the amount of + interpolation applied to the data. + + Basic Usage: + align_epi_anat.py -anat anat+orig -epi epi+orig -epi_base 5 + + The user must provide EPI and anatomical datasets and specify the EPI + sub-brick to use as a base in the alignment. + + Internally, the script always aligns the anatomical to the EPI dataset, + and the resulting transformation is saved to a 1D file. + As a user option, the inverse of this transformation may be applied to the + EPI dataset in order to align it to the anatomical data instead. + + This program generates several kinds of output in the form of datasets + and transformation matrices which can be applied to other datasets if + needed. Time-series volume registration, oblique data transformations and + Talairach (standard template) transformations will be combined as needed + and requested (with options to turn on and off each of the steps) in + order to create the aligned datasets. + + For complete details, see the `align_epi_anat.py' Documentation. 
+ `_ + + Examples + ======== + >>> from nipype.interfaces import afni + >>> al_ea = afni.AlignEpiAnatPy() + >>> al_ea.inputs.anat = "structural.nii" + >>> al_ea.inputs.in_file = "functional.nii" + >>> al_ea.inputs.epi_base = 0 + >>> al_ea.inputs.epi_strip = '3dAutomask' + >>> al_ea.inputs.volreg = 'off' + >>> al_ea.inputs.tshift = 'off' + >>> al_ea.inputs.save_skullstrip = True + >>> al_ea.cmdline # doctest: +ELLIPSIS + 'python2 ...align_epi_anat.py -anat structural.nii -epi_base 0 -epi_strip 3dAutomask -epi functional.nii -save_skullstrip -suffix _al -tshift off -volreg off' + >>> res = al_ea.run() # doctest: +SKIP + """ + _cmd = 'align_epi_anat.py' + input_spec = AlignEpiAnatPyInputSpec + output_spec = AlignEpiAnatPyOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + anat_prefix = ''.join(self._gen_fname(self.inputs.anat).split('+')[:-1]) + epi_prefix = ''.join(self._gen_fname(self.inputs.in_file).split('+')[:-1]) + outputtype = self.inputs.outputtype + if outputtype == 'AFNI': + ext = '.HEAD' + else: + ext = Info.output_type_to_ext(outputtype) + matext = '.1D' + suffix = self.inputs.suffix + if self.inputs.anat2epi: + outputs['anat_al_orig'] = self._gen_fname(anat_prefix, suffix=suffix+'+orig', ext=ext) + outputs['anat_al_mat'] = self._gen_fname(anat_prefix, suffix=suffix+'_mat.aff12', ext=matext) + if self.inputs.epi2anat: + outputs['epi_al_orig'] = self._gen_fname(epi_prefix, suffix=suffix+'+orig', ext=ext) + outputs['epi_al_mat'] = self._gen_fname(epi_prefix, suffix=suffix+'_mat.aff12', ext=matext) + if self.inputs.volreg == 'on': + outputs['epi_vr_al_mat'] = self._gen_fname(epi_prefix, suffix='_vr'+suffix+'_mat.aff12', ext=matext) + if self.inputs.tshift == 'on': + outputs['epi_vr_motion'] = self._gen_fname(epi_prefix, suffix='tsh_vr_motion', ext=matext) + elif self.inputs.tshift == 'off': + outputs['epi_vr_motion'] = self._gen_fname(epi_prefix, suffix='vr_motion', ext=matext) + if self.inputs.volreg == 'on' and self.inputs.epi2anat: 
+ outputs['epi_reg_al_mat'] = self._gen_fname(epi_prefix, suffix='_reg'+suffix+'_mat.aff12', ext=matext) + if self.inputs.save_skullstrip: + outputs.skullstrip = self._gen_fname(anat_prefix, suffix='_ns'+'+orig', ext=ext) + return outputs class AllineateInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dAllineate', argstr='-source %s', - position=-1, mandatory=True, exists=True, copyfile=False) @@ -63,25 +221,37 @@ class AllineateInputSpec(AFNICommandInputSpec): out_file = File( desc='output file from 3dAllineate', argstr='-prefix %s', - position=-2, - name_source='%s_allineate', - genfile=True) + genfile=True, + xor=['allcostx']) out_param_file = File( argstr='-1Dparam_save %s', - desc='Save the warp parameters in ASCII (.1D) format.') + desc='Save the warp parameters in ASCII (.1D) format.', + xor=['in_param_file','allcostx']) in_param_file = File( exists=True, argstr='-1Dparam_apply %s', desc='Read warp parameters from file and apply them to ' - 'the source dataset, and produce a new dataset') + 'the source dataset, and produce a new dataset', + xor=['out_param_file']) out_matrix = File( argstr='-1Dmatrix_save %s', - desc='Save the transformation matrix for each volume.') + desc='Save the transformation matrix for each volume.', + xor=['in_matrix','allcostx']) in_matrix = File( desc='matrix to align input file', argstr='-1Dmatrix_apply %s', - position=-3) - + position=-3, + xor=['out_matrix']) + overwrite = traits.Bool( + desc='overwrite output file if it already exists', + argstr='-overwrite') + + allcostx= File( + desc='Compute and print ALL available cost functionals for the un-warped inputs' + 'AND THEN QUIT. 
If you use this option none of the other expected outputs will be produced', + argstr='-allcostx |& tee %s', + position=-1, + xor=['out_file', 'out_matrix', 'out_param_file', 'out_weight_file']) _cost_funcs = [ 'leastsq', 'ls', 'mutualinfo', 'mi', @@ -144,7 +314,7 @@ class AllineateInputSpec(AFNICommandInputSpec): desc='Use a two pass alignment strategy for all volumes, searching ' 'for a large rotation+shift and then refining the alignment.') two_blur = traits.Float( - argstr='-twoblur', + argstr='-twoblur %f', desc='Set the blurring radius for the first pass in mm.') two_first = traits.Bool( argstr='-twofirst', @@ -187,12 +357,21 @@ class AllineateInputSpec(AFNICommandInputSpec): weight_file = File( argstr='-weight %s', exists=True, + deprecated='1.0.0', new_name='weight', desc='Set the weighting for each voxel in the base dataset; ' 'larger weights mean that voxel count more in the cost function. ' 'Must be defined on the same grid as the base dataset') + weight = traits.Either( + File(exists=True), traits.Float(), + argstr='-weight %s', + desc='Set the weighting for each voxel in the base dataset; ' + 'larger weights mean that voxel count more in the cost function. ' + 'If an image file is given, the volume must be defined on the ' + 'same grid as the base dataset') out_weight_file = traits.File( argstr='-wtprefix %s', - desc='Write the weight volume to disk as a dataset') + desc='Write the weight volume to disk as a dataset', + xor=['allcostx']) source_mask = File( exists=True, argstr='-source_mask %s', @@ -222,6 +401,18 @@ class AllineateInputSpec(AFNICommandInputSpec): 'EPI slices, and the base as comprising anatomically ' '\'true\' images. 
Only phase-encoding direction image ' 'shearing and scaling will be allowed with this option.') + maxrot = traits.Float( + argstr='-maxrot %f', + desc='Maximum allowed rotation in degrees.') + maxshf = traits.Float( + argstr='-maxshf %f', + desc='Maximum allowed shift in mm.') + maxscl = traits.Float( + argstr='-maxscl %f', + desc='Maximum allowed scaling factor.') + maxshr = traits.Float( + argstr='-maxshr %f', + desc='Maximum allowed shearing factor.') master = File( exists=True, argstr='-master %s', @@ -247,11 +438,20 @@ class AllineateInputSpec(AFNICommandInputSpec): traits.Enum(*_dirs), argstr='-nwarp_fixdep%s', desc='To fix non-linear warp dependency along directions.') + verbose = traits.Bool( + argstr='-verb', + desc='Print out verbose progress reports.') + quiet = traits.Bool( + argstr='-quiet', + desc="Don't print out verbose progress reports.") class AllineateOutputSpec(TraitedSpec): - out_file = File(desc='output image file name') - matrix = File(desc='matrix to align input file') + out_file = File(exists=True, desc='output image file name') + out_matrix = File(exists=True, desc='matrix to align input file') + out_param_file = File(exists=True, desc='warp parameters') + out_weight_file = File(exists=True, desc='weight volume') + allcostx = File(desc='Compute and print ALL available cost functionals for the un-warped inputs') class Allineate(AFNICommand): @@ -268,10 +468,18 @@ class Allineate(AFNICommand): >>> allineate.inputs.in_file = 'functional.nii' >>> allineate.inputs.out_file = 'functional_allineate.nii' >>> allineate.inputs.in_matrix = 'cmatrix.mat' - >>> allineate.cmdline # doctest: +ALLOW_UNICODE - '3dAllineate -1Dmatrix_apply cmatrix.mat -prefix functional_allineate.nii -source functional.nii' + >>> allineate.cmdline + '3dAllineate -source functional.nii -prefix functional_allineate.nii -1Dmatrix_apply cmatrix.mat' >>> res = allineate.run() # doctest: +SKIP + >>> from nipype.interfaces import afni + >>> allineate = afni.Allineate() + >>> 
allineate.inputs.in_file = 'functional.nii' + >>> allineate.inputs.reference = 'structural.nii' + >>> allineate.inputs.allcostx = 'out.allcostX.txt' + >>> allineate.cmdline + '3dAllineate -source functional.nii -base structural.nii -allcostx |& tee out.allcostX.txt' + >>> res = allineate.run() # doctest: +SKIP """ _cmd = '3dAllineate' @@ -286,20 +494,38 @@ def _format_arg(self, name, trait_spec, value): def _list_outputs(self): outputs = self.output_spec().get() - if not isdefined(self.inputs.out_file): - outputs['out_file'] = self._gen_filename(self.inputs.in_file, - suffix=self.inputs.suffix) - else: - outputs['out_file'] = os.path.abspath(self.inputs.out_file) - if isdefined(self.inputs.out_matrix): - outputs['matrix'] = os.path.abspath(os.path.join(os.getcwd(),\ - self.inputs.out_matrix +'.aff12.1D')) + if self.inputs.out_file: + outputs['out_file'] = op.abspath(self.inputs.out_file) + + if self.inputs.out_weight_file: + outputs['out_weight_file'] = op.abspath(self.inputs.out_weight_file) + + if self.inputs.out_matrix: + path, base, ext = split_filename(self.inputs.out_matrix) + if ext.lower() not in ['.1d', '.1D']: + outputs['out_matrix'] = self._gen_fname(self.inputs.out_matrix, + suffix='.aff12.1D') + else: + outputs['out_matrix'] = op.abspath(self.inputs.out_matrix) + + if self.inputs.out_param_file: + path, base, ext = split_filename(self.inputs.out_param_file) + if ext.lower() not in ['.1d', '.1D']: + outputs['out_param_file'] = self._gen_fname(self.inputs.out_param_file, + suffix='.param.1D') + else: + outputs['out_param_file'] = op.abspath(self.inputs.out_param_file) + + if isdefined(self.inputs.allcostx): + outputs['allcostx'] = os.path.abspath(os.path.join(os.getcwd(), + self.inputs.allcostx)) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()[name] + return None class AutoTcorrelateInputSpec(AFNICommandInputSpec): @@ -354,10 +580,11 @@ class AutoTcorrelate(AFNICommand): >>> corr.inputs.eta2 = True >>> 
corr.inputs.mask = 'mask.nii' >>> corr.inputs.mask_only_targets = True - >>> corr.cmdline # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE +ALLOW_UNICODE + >>> corr.cmdline # doctest: +ELLIPSIS '3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii' >>> res = corr.run() # doctest: +SKIP """ + input_spec = AutoTcorrelateInputSpec output_spec = AFNICommandOutputSpec _cmd = '3dAutoTcorrelate' @@ -422,7 +649,7 @@ class Automask(AFNICommand): >>> automask.inputs.in_file = 'functional.nii' >>> automask.inputs.dilate = 1 >>> automask.inputs.outputtype = 'NIFTI' - >>> automask.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> automask.cmdline # doctest: +ELLIPSIS '3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii' >>> res = automask.run() # doctest: +SKIP @@ -432,6 +659,103 @@ class Automask(AFNICommand): input_spec = AutomaskInputSpec output_spec = AutomaskOutputSpec +class AutoTLRCInputSpec(CommandLineInputSpec): + outputtype = traits.Enum('AFNI', list(Info.ftypes.keys()), + desc='AFNI output filetype') + in_file = File( + desc='Original anatomical volume (+orig).' + 'The skull is removed by this script' + 'unless instructed otherwise (-no_ss).', + argstr='-input %s', + mandatory=True, + exists=True, + copyfile=False) + base = traits.Str( + desc = ' Reference anatomical volume' + ' Usually this volume is in some standard space like' + ' TLRC or MNI space and with afni dataset view of' + ' (+tlrc).' + ' Preferably, this reference volume should have had' + ' the skull removed but that is not mandatory.' + ' AFNI\'s distribution contains several templates.' + ' For a longer list, use "whereami -show_templates"' + 'TT_N27+tlrc --> Single subject, skull stripped volume.' + ' This volume is also known as ' + ' N27_SurfVol_NoSkull+tlrc elsewhere in ' + ' AFNI and SUMA land.' 
+ ' (www.loni.ucla.edu, www.bic.mni.mcgill.ca)' + ' This template has a full set of FreeSurfer' + ' (surfer.nmr.mgh.harvard.edu)' + ' surface models that can be used in SUMA. ' + ' For details, see Talairach-related link:' + ' https://afni.nimh.nih.gov/afni/suma' + 'TT_icbm452+tlrc --> Average volume of 452 normal brains.' + ' Skull Stripped. (www.loni.ucla.edu)' + 'TT_avg152T1+tlrc --> Average volume of 152 normal brains.' + ' Skull Stripped.(www.bic.mni.mcgill.ca)' + 'TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1' + ' TT_avg152 and TT_EPI volume sources are from' + ' SPM\'s distribution. (www.fil.ion.ucl.ac.uk/spm/)' + 'If you do not specify a path for the template, the script' + 'will attempt to locate the template AFNI\'s binaries directory.' + 'NOTE: These datasets have been slightly modified from' + ' their original size to match the standard TLRC' + ' dimensions (Jean Talairach and Pierre Tournoux' + ' Co-Planar Stereotaxic Atlas of the Human Brain' + ' Thieme Medical Publishers, New York, 1988). ' + ' That was done for internal consistency in AFNI.' + ' You may use the original form of these' + ' volumes if you choose but your TLRC coordinates' + ' will not be consistent with AFNI\'s TLRC database' + ' (San Antonio Talairach Daemon database), for example.', + mandatory = True, + argstr='-base %s') + no_ss = traits.Bool( + desc='Do not strip skull of input data set' + '(because skull has already been removed' + 'or because template still has the skull)' + 'NOTE: The -no_ss option is not all that optional.' + ' Here is a table of when you should and should not use -no_ss' + ' Template Template' + ' WITH skull WITHOUT skull' + ' Dset.' + ' WITH skull -no_ss xxx ' + ' ' + ' WITHOUT skull No Cigar -no_ss' + ' ' + ' Template means: Your template of choice' + ' Dset. 
means: Your anatomical dataset' + ' -no_ss means: Skull stripping should not be attempted on Dset' + ' xxx means: Don\'t put anything, the script will strip Dset' + ' No Cigar means: Don\'t try that combination, it makes no sense.', + argstr='-no_ss') + +class AutoTLRC(AFNICommand): + """A minmal wrapper for the AutoTLRC script + The only option currently supported is no_ss. + For complete details, see the `3dQwarp Documentation. + `_ + + Examples + ======== + >>> from nipype.interfaces import afni + >>> autoTLRC = afni.AutoTLRC() + >>> autoTLRC.inputs.in_file = 'structural.nii' + >>> autoTLRC.inputs.no_ss = True + >>> autoTLRC.inputs.base = "TT_N27+tlrc" + >>> autoTLRC.cmdline + '@auto_tlrc -base TT_N27+tlrc -input structural.nii -no_ss' + >>> res = autoTLRC.run() # doctest: +SKIP + + """ + _cmd = '@auto_tlrc' + input_spec = AutoTLRCInputSpec + output_spec = AFNICommandOutputSpec + def _list_outputs(self): + outputs = self.output_spec().get() + ext = '.HEAD' + outputs['out_file'] = os.path.abspath(self._gen_fname(self.inputs.in_file, suffix='+tlrc')+ext) + return outputs class BandpassInputSpec(AFNICommandInputSpec): in_file = File( @@ -531,7 +855,7 @@ class Bandpass(AFNICommand): >>> bandpass.inputs.in_file = 'functional.nii' >>> bandpass.inputs.highpass = 0.005 >>> bandpass.inputs.lowpass = 0.1 - >>> bandpass.cmdline # doctest: +ALLOW_UNICODE + >>> bandpass.cmdline '3dBandpass -prefix functional_bp 0.005000 0.100000 functional.nii' >>> res = bandpass.run() # doctest: +SKIP @@ -599,7 +923,7 @@ class BlurInMask(AFNICommand): >>> bim.inputs.in_file = 'functional.nii' >>> bim.inputs.mask = 'mask.nii' >>> bim.inputs.fwhm = 5.0 - >>> bim.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> bim.cmdline # doctest: +ELLIPSIS '3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur' >>> res = bim.run() # doctest: +SKIP @@ -650,7 +974,7 @@ class BlurToFWHM(AFNICommand): >>> blur = afni.preprocess.BlurToFWHM() >>> blur.inputs.in_file = 
'epi.nii' >>> blur.inputs.fwhm = 2.5 - >>> blur.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> blur.cmdline # doctest: +ELLIPSIS '3dBlurToFWHM -FWHM 2.500000 -input epi.nii -prefix epi_afni' >>> res = blur.run() # doctest: +SKIP @@ -701,7 +1025,7 @@ class ClipLevel(AFNICommandBase): >>> from nipype.interfaces.afni import preprocess >>> cliplevel = preprocess.ClipLevel() >>> cliplevel.inputs.in_file = 'anatomical.nii' - >>> cliplevel.cmdline # doctest: +ALLOW_UNICODE + >>> cliplevel.cmdline '3dClipLevel anatomical.nii' >>> res = cliplevel.run() # doctest: +SKIP @@ -784,7 +1108,7 @@ class DegreeCentrality(AFNICommand): >>> degree.inputs.mask = 'mask.nii' >>> degree.inputs.sparsity = 1 # keep the top one percent of connections >>> degree.inputs.out_file = 'out.nii' - >>> degree.cmdline # doctest: +ALLOW_UNICODE + >>> degree.cmdline '3dDegreeCentrality -mask mask.nii -prefix out.nii -sparsity 1.000000 functional.nii' >>> res = degree.run() # doctest: +SKIP @@ -834,7 +1158,7 @@ class Despike(AFNICommand): >>> from nipype.interfaces import afni >>> despike = afni.Despike() >>> despike.inputs.in_file = 'functional.nii' - >>> despike.cmdline # doctest: +ALLOW_UNICODE + >>> despike.cmdline '3dDespike -prefix functional_despike functional.nii' >>> res = despike.run() # doctest: +SKIP @@ -875,7 +1199,7 @@ class Detrend(AFNICommand): >>> detrend.inputs.in_file = 'functional.nii' >>> detrend.inputs.args = '-polort 2' >>> detrend.inputs.outputtype = 'AFNI' - >>> detrend.cmdline # doctest: +ALLOW_UNICODE + >>> detrend.cmdline '3dDetrend -polort 2 -prefix functional_detrend functional.nii' >>> res = detrend.run() # doctest: +SKIP @@ -947,7 +1271,7 @@ class ECM(AFNICommand): >>> ecm.inputs.mask = 'mask.nii' >>> ecm.inputs.sparsity = 0.1 # keep top 0.1% of connections >>> ecm.inputs.out_file = 'out.nii' - >>> ecm.cmdline # doctest: +ALLOW_UNICODE + >>> ecm.cmdline '3dECM -mask mask.nii -prefix out.nii -sparsity 0.100000 functional.nii' >>> res = ecm.run() # doctest: +SKIP @@ 
-1004,7 +1328,7 @@ class Fim(AFNICommand): >>> fim.inputs.out_file = 'functional_corr.nii' >>> fim.inputs.out = 'Correlation' >>> fim.inputs.fim_thr = 0.0009 - >>> fim.cmdline # doctest: +ALLOW_UNICODE + >>> fim.cmdline '3dfim+ -input functional.nii -ideal_file seed.1D -fim_thr 0.000900 -out Correlation -bucket functional_corr.nii' >>> res = fim.run() # doctest: +SKIP @@ -1058,7 +1382,7 @@ class Fourier(AFNICommand): >>> fourier.inputs.retrend = True >>> fourier.inputs.highpass = 0.005 >>> fourier.inputs.lowpass = 0.1 - >>> fourier.cmdline # doctest: +ALLOW_UNICODE + >>> fourier.cmdline '3dFourier -highpass 0.005000 -lowpass 0.100000 -prefix functional_fourier -retrend functional.nii' >>> res = fourier.run() # doctest: +SKIP @@ -1131,7 +1455,7 @@ class Hist(AFNICommandBase): >>> from nipype.interfaces import afni >>> hist = afni.Hist() >>> hist.inputs.in_file = 'functional.nii' - >>> hist.cmdline # doctest: +ALLOW_UNICODE + >>> hist.cmdline '3dHist -input functional.nii -prefix functional_hist' >>> res = hist.run() # doctest: +SKIP @@ -1148,7 +1472,7 @@ def __init__(self, **inputs): version = Info.version() # As of AFNI 16.0.00, redirect_x is not needed - if isinstance(version[0], int) and version[0] > 15: + if version[0] > 2015: self._redirect_x = False def _parse_inputs(self, skip=None): @@ -1195,7 +1519,7 @@ class LFCD(AFNICommand): >>> lfcd.inputs.mask = 'mask.nii' >>> lfcd.inputs.thresh = 0.8 # keep all connections with corr >= 0.8 >>> lfcd.inputs.out_file = 'out.nii' - >>> lfcd.cmdline # doctest: +ALLOW_UNICODE + >>> lfcd.cmdline '3dLFCD -mask mask.nii -prefix out.nii -thresh 0.800000 functional.nii' >>> res = lfcd.run() # doctest: +SKIP """ @@ -1246,7 +1570,7 @@ class Maskave(AFNICommand): >>> maskave.inputs.in_file = 'functional.nii' >>> maskave.inputs.mask= 'seed_mask.nii' >>> maskave.inputs.quiet= True - >>> maskave.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> maskave.cmdline # doctest: +ELLIPSIS '3dmaskave -mask seed_mask.nii -quiet functional.nii > 
functional_maskave.1D' >>> res = maskave.run() # doctest: +SKIP @@ -1261,14 +1585,17 @@ class MeansInputSpec(AFNICommandInputSpec): in_file_a = File( desc='input file to 3dMean', argstr='%s', - position=0, + position=-2, mandatory=True, exists=True) in_file_b = File( desc='another input file to 3dMean', argstr='%s', - position=1, + position=-1, exists=True) + datum = traits.Str( + desc='Sets the data type of the output dataset', + argstr='-datum %s') out_file = File( name_template='%s_mean', desc='output image file name', @@ -1314,8 +1641,17 @@ class Means(AFNICommand): >>> means.inputs.in_file_a = 'im1.nii' >>> means.inputs.in_file_b = 'im2.nii' >>> means.inputs.out_file = 'output.nii' - >>> means.cmdline # doctest: +ALLOW_UNICODE - '3dMean im1.nii im2.nii -prefix output.nii' + >>> means.cmdline + '3dMean -prefix output.nii im1.nii im2.nii' + >>> res = means.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> means = afni.Means() + >>> means.inputs.in_file_a = 'im1.nii' + >>> means.inputs.out_file = 'output.nii' + >>> means.inputs.datum = 'short' + >>> means.cmdline + '3dMean -datum short -prefix output.nii im1.nii' >>> res = means.run() # doctest: +SKIP """ @@ -1347,13 +1683,13 @@ class OutlierCountInputSpec(CommandLineInputSpec): False, usedefault=True, argstr='-autoclip', - xor=['in_file'], + xor=['mask'], desc='clip off small voxels') automask = traits.Bool( False, usedefault=True, argstr='-automask', - xor=['in_file'], + xor=['mask'], desc='clip off small voxels') fraction = traits.Bool( False, @@ -1389,28 +1725,19 @@ class OutlierCountInputSpec(CommandLineInputSpec): out_file = File( name_template='%s_outliers', name_source=['in_file'], - argstr='> %s', keep_extension=False, - position=-1, desc='capture standard output') class OutlierCountOutputSpec(TraitedSpec): - out_outliers = File( - exists=True, - desc='output image file name') - out_file = File( - name_template='%s_tqual', - name_source=['in_file'], - argstr='> %s', - 
keep_extension=False, - position=-1, - desc='capture standard output') + out_outliers = File(exists=True, + desc='output image file name') + out_file = File(desc='capture standard output') class OutlierCount(CommandLine): - """Calculates number of 'outliers' a 3D+time dataset, at each - time point, and writes the results to stdout. + """Calculates number of 'outliers' at each time point of a + a 3D+time dataset. For complete details, see the `3dToutcount Documentation `_ @@ -1421,8 +1748,8 @@ class OutlierCount(CommandLine): >>> from nipype.interfaces import afni >>> toutcount = afni.OutlierCount() >>> toutcount.inputs.in_file = 'functional.nii' - >>> toutcount.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE - '3dToutcount functional.nii > functional_outliers' + >>> toutcount.cmdline # doctest: +ELLIPSIS + '3dToutcount functional.nii' >>> res = toutcount.run() # doctest: +SKIP """ @@ -1430,20 +1757,34 @@ class OutlierCount(CommandLine): _cmd = '3dToutcount' input_spec = OutlierCountInputSpec output_spec = OutlierCountOutputSpec + _terminal_output = 'file_split' def _parse_inputs(self, skip=None): if skip is None: skip = [] + # This is not strictly an input, but needs be + # set before run() is called. 
+ if self.terminal_output == 'none': + self.terminal_output = 'file_split' + if not self.inputs.save_outliers: skip += ['outliers_file'] return super(OutlierCount, self)._parse_inputs(skip) + def _run_interface(self, runtime): + runtime = super(OutlierCount, self)._run_interface(runtime) + + # Read from runtime.stdout or runtime.merged + with open(op.abspath(self.inputs.out_file), 'w') as outfh: + outfh.write(runtime.stdout or runtime.merged) + return runtime + def _list_outputs(self): outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) if self.inputs.save_outliers: outputs['out_outliers'] = op.abspath(self.inputs.outliers_file) - outputs['out_file'] = op.abspath(self.inputs.out_file) return outputs @@ -1520,7 +1861,7 @@ class QualityIndex(CommandLine): >>> from nipype.interfaces import afni >>> tqual = afni.QualityIndex() >>> tqual.inputs.in_file = 'functional.nii' - >>> tqual.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> tqual.cmdline # doctest: +ELLIPSIS '3dTqual functional.nii > functional_tqual' >>> res = tqual.run() # doctest: +SKIP @@ -1551,13 +1892,10 @@ class ROIStatsInputSpec(CommandLineInputSpec): desc='execute quietly', argstr='-quiet', position=1) - terminal_output = traits.Enum( - 'allatonce', + terminal_output = traits.Enum('allatonce', deprecated='1.0.0', desc='Control terminal output:`allatonce` - waits till command is ' 'finished to display output', - nohash=True, - mandatory=True, - usedefault=True) + nohash=True) class ROIStatsOutputSpec(TraitedSpec): @@ -1580,12 +1918,13 @@ class ROIStats(AFNICommandBase): >>> roistats.inputs.in_file = 'functional.nii' >>> roistats.inputs.mask = 'skeleton_mask.nii.gz' >>> roistats.inputs.quiet = True - >>> roistats.cmdline # doctest: +ALLOW_UNICODE + >>> roistats.cmdline '3dROIstats -quiet -mask skeleton_mask.nii.gz functional.nii' >>> res = roistats.run() # doctest: +SKIP """ _cmd = '3dROIstats' + _terminal_output = 'allatonce' input_spec = ROIStatsInputSpec 
output_spec = ROIStatsOutputSpec @@ -1674,7 +2013,7 @@ class Retroicor(AFNICommand): >>> ret.inputs.card = 'mask.1D' >>> ret.inputs.resp = 'resp.1D' >>> ret.inputs.outputtype = 'NIFTI' - >>> ret.cmdline # doctest: +ALLOW_UNICODE + >>> ret.cmdline '3dretroicor -prefix functional_retroicor.nii -resp resp.1D -card mask.1D functional.nii' >>> res = ret.run() # doctest: +SKIP @@ -1757,7 +2096,7 @@ class Seg(AFNICommandBase): >>> seg = preprocess.Seg() >>> seg.inputs.in_file = 'structural.nii' >>> seg.inputs.mask = 'AUTO' - >>> seg.cmdline # doctest: +ALLOW_UNICODE + >>> seg.cmdline '3dSeg -mask AUTO -anat structural.nii' >>> res = seg.run() # doctest: +SKIP @@ -1813,7 +2152,7 @@ class SkullStrip(AFNICommand): >>> skullstrip = afni.SkullStrip() >>> skullstrip.inputs.in_file = 'functional.nii' >>> skullstrip.inputs.args = '-o_ply' - >>> skullstrip.cmdline # doctest: +ALLOW_UNICODE + >>> skullstrip.cmdline '3dSkullStrip -input functional.nii -o_ply -prefix functional_skullstrip' >>> res = skullstrip.run() # doctest: +SKIP @@ -1825,11 +2164,12 @@ class SkullStrip(AFNICommand): def __init__(self, **inputs): super(SkullStrip, self).__init__(**inputs) + if not no_afni(): v = Info.version() - # As of AFNI 16.0.00, redirect_x is not needed - if isinstance(v[0], int) and v[0] > 15: + # Between AFNI 16.0.00 and 16.2.07, redirect_x is not needed + if v >= (2016, 0, 0) and v < (2016, 2, 7): self._redirect_x = False @@ -1891,7 +2231,7 @@ class TCorr1D(AFNICommand): >>> tcorr1D = afni.TCorr1D() >>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii' >>> tcorr1D.inputs.y_1d = 'seed.1D' - >>> tcorr1D.cmdline # doctest: +ALLOW_UNICODE + >>> tcorr1D.cmdline '3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz u_rc1s1_Template.nii seed.1D' >>> res = tcorr1D.run() # doctest: +SKIP @@ -2033,7 +2373,7 @@ class TCorrMap(AFNICommand): >>> tcm.inputs.in_file = 'functional.nii' >>> tcm.inputs.mask = 'mask.nii' >>> tcm.mean_file = 'functional_meancorr.nii' - >>> tcm.cmdline # doctest: +ALLOW_UNICODE 
+SKIP + >>> tcm.cmdline # doctest: +SKIP '3dTcorrMap -input functional.nii -mask mask.nii -Mean functional_meancorr.nii' >>> res = tcm.run() # doctest: +SKIP @@ -2101,7 +2441,7 @@ class TCorrelate(AFNICommand): >>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz' >>> tcorrelate.inputs.polort = -1 >>> tcorrelate.inputs.pearson = True - >>> tcorrelate.cmdline # doctest: +ALLOW_UNICODE + >>> tcorrelate.cmdline '3dTcorrelate -prefix functional_tcorrelate.nii.gz -pearson -polort -1 u_rc1s1_Template.nii u_rc1s2_Template.nii' >>> res = tcarrelate.run() # doctest: +SKIP @@ -2112,6 +2452,67 @@ class TCorrelate(AFNICommand): output_spec = AFNICommandOutputSpec +class TNormInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dTNorm', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_tnorm', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + norm2 = traits.Bool( + desc='L2 normalize (sum of squares = 1) [DEFAULT]', + argstr='-norm2') + normR = traits.Bool( + desc='normalize so sum of squares = number of time points * e.g., so RMS = 1.', + argstr='-normR') + norm1 = traits.Bool( + desc='L1 normalize (sum of absolute values = 1)', + argstr='-norm1') + normx = traits.Bool( + desc='Scale so max absolute value = 1 (L_infinity norm)', + argstr='-normx') + polort = traits.Int( + desc="""Detrend with polynomials of order p before normalizing + [DEFAULT = don't do this] + * Use '-polort 0' to remove the mean, for example""", + argstr='-polort %s') + L1fit = traits.Bool( + desc="""Detrend with L1 regression (L2 is the default) + * This option is here just for the hell of it""", + argstr='-L1fit') + + +class TNorm(AFNICommand): + """Shifts voxel time series from input so that seperate slices are aligned + to the same temporal origin. + + For complete details, see the `3dTnorm Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tnorm = afni.TNorm() + >>> tnorm.inputs.in_file = 'functional.nii' + >>> tnorm.inputs.norm2 = True + >>> tnorm.inputs.out_file = 'rm.errts.unit errts+tlrc' + >>> tnorm.cmdline + '3dTnorm -norm2 -prefix rm.errts.unit errts+tlrc functional.nii' + >>> res = tshift.run() # doctest: +SKIP + + """ + _cmd = '3dTnorm' + input_spec = TNormInputSpec + output_spec = AFNICommandOutputSpec + + class TShiftInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dTShift', @@ -2172,7 +2573,7 @@ class TShift(AFNICommand): >>> tshift.inputs.in_file = 'functional.nii' >>> tshift.inputs.tpattern = 'alt+z' >>> tshift.inputs.tzero = 0.0 - >>> tshift.cmdline # doctest: +ALLOW_UNICODE + >>> tshift.cmdline '3dTshift -prefix functional_tshift -tpattern alt+z -tzero 0.0 functional.nii' >>> res = tshift.run() # doctest: +SKIP @@ -2232,6 +2633,10 @@ class VolregInputSpec(AFNICommandInputSpec): argstr='-1Dmatrix_save %s', keep_extension=True, name_source='in_file') + interp = traits.Enum( + ('Fourier', 'cubic', 'heptic', 'quintic','linear'), + desc='spatial interpolation methods [default = heptic]', + argstr='-%s') class VolregOutputSpec(TraitedSpec): @@ -2264,10 +2669,24 @@ class Volreg(AFNICommand): >>> volreg.inputs.args = '-Fourier -twopass' >>> volreg.inputs.zpad = 4 >>> volreg.inputs.outputtype = 'NIFTI' - >>> volreg.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> volreg.cmdline # doctest: +ELLIPSIS '3dvolreg -Fourier -twopass -1Dfile functional.1D -1Dmatrix_save functional.aff12.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii' >>> res = volreg.run() # doctest: +SKIP + >>> from nipype.interfaces import afni + >>> volreg = afni.Volreg() + >>> volreg.inputs.in_file = 'functional.nii' + >>> volreg.inputs.interp = 'cubic' + >>> volreg.inputs.verbose = True + >>> volreg.inputs.zpad = 1 + >>> volreg.inputs.basefile = 'functional.nii' + >>> volreg.inputs.out_file = 
'rm.epi.volreg.r1' + >>> volreg.inputs.oned_file = 'dfile.r1.1D' + >>> volreg.inputs.oned_matrix_save = 'mat.r1.tshift+orig.1D' + >>> volreg.cmdline + '3dvolreg -cubic -1Dfile dfile.r1.1D -1Dmatrix_save mat.r1.tshift+orig.1D -prefix rm.epi.volreg.r1 -verbose -base functional.nii -zpad 1 -maxdisp1D functional_md.1D functional.nii' + >>> res = volreg.run() # doctest: +SKIP + """ _cmd = '3dvolreg' @@ -2298,6 +2717,11 @@ class WarpInputSpec(AFNICommandInputSpec): desc='apply transformation from 3dWarpDrive', argstr='-matparent %s', exists=True) + oblique_parent = File( + desc='Read in the oblique transformation matrix from an oblique ' + 'dataset and make cardinal dataset oblique to match', + argstr='-oblique_parent %s', + exists=True) deoblique = traits.Bool( desc='transform dataset from oblique to cardinal', argstr='-deoblique') @@ -2315,6 +2739,9 @@ class WarpInputSpec(AFNICommandInputSpec): zpad = traits.Int( desc='pad input dataset with N planes of zero on all sides.', argstr='-zpad %d') + verbose = traits.Bool( + desc='Print out some information along the way.', + argstr='-verb') class Warp(AFNICommand): @@ -2331,7 +2758,7 @@ class Warp(AFNICommand): >>> warp.inputs.in_file = 'structural.nii' >>> warp.inputs.deoblique = True >>> warp.inputs.out_file = 'trans.nii.gz' - >>> warp.cmdline # doctest: +ALLOW_UNICODE + >>> warp.cmdline '3dWarp -deoblique -prefix trans.nii.gz structural.nii' >>> res = warp.run() # doctest: +SKIP @@ -2339,7 +2766,7 @@ class Warp(AFNICommand): >>> warp_2.inputs.in_file = 'structural.nii' >>> warp_2.inputs.newgrid = 1.0 >>> warp_2.inputs.out_file = 'trans.nii.gz' - >>> warp_2.cmdline # doctest: +ALLOW_UNICODE + >>> warp_2.cmdline '3dWarp -newgrid 1.000000 -prefix trans.nii.gz structural.nii' >>> res = warp_2.run() # doctest: +SKIP @@ -2362,30 +2789,32 @@ class QwarpPlusMinusInputSpec(CommandLineInputSpec): mandatory=True, exists=True, copyfile=False) - pblur = traits.List(traits.Float(), - desc='The fraction of the patch size that' - 'is 
used for the progressive blur by providing a ' - 'value between 0 and 0.25. If you provide TWO ' - 'values, the first fraction is used for ' - 'progressively blurring the base image and the ' - 'second for the source image.', - argstr='-pblur %s', - minlen=1, - maxlen=2) - blur = traits.List(traits.Float(), - desc="Gaussian blur the input images by (FWHM) voxels " - "before doing the alignment (the output dataset " - "will not be blurred). The default is 2.345 (for " - "no good reason). Optionally, you can provide 2 " - "values, and then the first one is applied to the " - "base volume, the second to the source volume. A " - "negative blur radius means to use 3D median " - "filtering, rather than Gaussian blurring. This " - "type of filtering will better preserve edges, " - "which can be important in alignment.", - argstr='-blur %s', - minlen=1, - maxlen=2) + pblur = traits.List( + traits.Float(), + desc='The fraction of the patch size that' + 'is used for the progressive blur by providing a ' + 'value between 0 and 0.25. If you provide TWO ' + 'values, the first fraction is used for ' + 'progressively blurring the base image and the ' + 'second for the source image.', + argstr='-pblur %s', + minlen=1, + maxlen=2) + blur = traits.List( + traits.Float(), + desc="Gaussian blur the input images by (FWHM) voxels " + "before doing the alignment (the output dataset " + "will not be blurred). The default is 2.345 (for " + "no good reason). Optionally, you can provide 2 " + "values, and then the first one is applied to the " + "base volume, the second to the source volume. A " + "negative blur radius means to use 3D median " + "filtering, rather than Gaussian blurring. This " + "type of filtering will better preserve edges, " + "which can be important in alignment.", + argstr='-blur %s', + minlen=1, + maxlen=2) noweight = traits.Bool( desc='If you want a binary weight (the old default), use this option.' 
'That is, each voxel in the base volume automask will be' @@ -2431,7 +2860,7 @@ class QwarpPlusMinus(CommandLine): >>> qwarp.inputs.source_file = 'sub-01_dir-LR_epi.nii.gz' >>> qwarp.inputs.nopadWARP = True >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' - >>> qwarp.cmdline # doctest: +ALLOW_UNICODE + >>> qwarp.cmdline '3dQwarp -prefix Qwarp.nii.gz -plusminus -base sub-01_dir-RL_epi.nii.gz -nopadWARP -source sub-01_dir-LR_epi.nii.gz' >>> res = warp.run() # doctest: +SKIP @@ -2448,3 +2877,676 @@ def _list_outputs(self): outputs['base_warp'] = os.path.abspath("Qwarp_MINUS_WARP.nii.gz") return outputs + + +class QwarpInputSpec(AFNICommandInputSpec): + in_file = File( + desc='Source image (opposite phase encoding direction than base image).', + argstr='-source %s', + mandatory=True, + exists=True, + copyfile=False) + base_file = File( + desc='Base image (opposite phase encoding direction than source image).', + argstr='-base %s', + mandatory=True, + exists=True, + copyfile=False) + out_file = File(argstr='-prefix %s', + name_template='%s_QW', + name_source=['in_file'], + genfile=True, + desc='out_file ppp' + 'Sets the prefix for the output datasets.' + '* The source dataset is warped to match the base' + 'and gets prefix \'ppp\'. (Except if \'-plusminus\' is used.)' + '* The final interpolation to this output dataset is' + 'done using the \'wsinc5\' method. See the output of' + ' 3dAllineate -HELP' + '(in the "Modifying \'-final wsinc5\'" section) for' + 'the lengthy technical details.' + '* The 3D warp used is saved in a dataset with' + 'prefix \'ppp_WARP\' -- this dataset can be used' + 'with 3dNwarpApply and 3dNwarpCat, for example.' 
+ '* To be clear, this is the warp from source dataset' + ' coordinates to base dataset coordinates, where the' + ' values at each base grid point are the xyz displacments' + ' needed to move that grid point\'s xyz values to the' + ' corresponding xyz values in the source dataset:' + ' base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z)' + ' Another way to think of this warp is that it \'pulls\'' + ' values back from source space to base space.' + '* 3dNwarpApply would use \'ppp_WARP\' to transform datasets' + 'aligned with the source dataset to be aligned with the' + 'base dataset.' + '** If you do NOT want this warp saved, use the option \'-nowarp\'.' + '-->> (However, this warp is usually the most valuable possible output!)' + '* If you want to calculate and save the inverse 3D warp,' + 'use the option \'-iwarp\'. This inverse warp will then be' + 'saved in a dataset with prefix \'ppp_WARPINV\'.' + '* This inverse warp could be used to transform data from base' + 'space to source space, if you need to do such an operation.' + '* You can easily compute the inverse later, say by a command like' + ' 3dNwarpCat -prefix Z_WARPINV \'INV(Z_WARP+tlrc)\'' + 'or the inverse can be computed as needed in 3dNwarpApply, like' + ' 3dNwarpApply -nwarp \'INV(Z_WARP+tlrc)\' -source Dataset.nii ...') + resample = traits.Bool( + desc='This option simply resamples the source dataset to match the' + 'base dataset grid. You can use this if the two datasets' + 'overlap well (as seen in the AFNI GUI), but are not on the' + 'same 3D grid.' + '* If they don\'t overlap well, allineate them first' + '* The reampling here is done with the' + '\'wsinc5\' method, which has very little blurring artifact.' + '* If the base and source datasets ARE on the same 3D grid,' + 'then the -resample option will be ignored.' 
+ '* You CAN use -resample with these 3dQwarp options:' + '-plusminus -inilev -iniwarp -duplo', + argstr='-resample') + allineate = traits.Bool( + desc='This option will make 3dQwarp run 3dAllineate first, to align ' + 'the source dataset to the base with an affine transformation. ' + 'It will then use that alignment as a starting point for the ' + 'nonlinear warping.', + argstr='-allineate') + allineate_opts = traits.Str( + desc='add extra options to the 3dAllineate command to be run by ' + '3dQwarp.', + argstr='-allineate_opts %s', + xand=['allineate']) + nowarp = traits.Bool( + desc='Do not save the _WARP file.', + argstr='-nowarp') + iwarp = traits.Bool( + desc='Do compute and save the _WARPINV file.', + argstr='-iwarp', + xor=['plusminus']) + pear = traits.Bool( + desc='Use strict Pearson correlation for matching.' + '* Not usually recommended, since the \'clipped Pearson\' method' + 'used by default will reduce the impact of outlier values.', + argstr='-pear') + noneg = traits.Bool( + desc='Replace negative values in either input volume with 0.' + '* If there ARE negative input values, and you do NOT use -noneg,' + 'then strict Pearson correlation will be used, since the \'clipped\'' + 'method only is implemented for non-negative volumes.' + '* \'-noneg\' is not the default, since there might be situations where' + 'you want to align datasets with positive and negative values mixed.' + '* But, in many cases, the negative values in a dataset are just the' + 'result of interpolation artifacts (or other peculiarities), and so' + 'they should be ignored. That is what \'-noneg\' is for.', + argstr='-noneg') + nopenalty = traits.Bool( + desc='Replace negative values in either input volume with 0.' + '* If there ARE negative input values, and you do NOT use -noneg,' + 'then strict Pearson correlation will be used, since the \'clipped\'' + 'method only is implemented for non-negative volumes.' 
+ '* \'-noneg\' is not the default, since there might be situations where' + 'you want to align datasets with positive and negative values mixed.' + '* But, in many cases, the negative values in a dataset are just the' + 'result of interpolation artifacts (or other peculiarities), and so' + 'they should be ignored. That is what \'-noneg\' is for.', + argstr='-nopenalty') + penfac = traits.Float( + desc='Use this value to weight the penalty.' + 'The default value is 1.Larger values mean the' + 'penalty counts more, reducing grid distortions,' + 'insha\'Allah; \'-nopenalty\' is the same as \'-penfac 0\'.' + ' -->>* [23 Sep 2013] -- Zhark increased the default value of' + ' the penalty by a factor of 5, and also made it get' + ' progressively larger with each level of refinement.' + ' Thus, warping results will vary from earlier instances' + ' of 3dQwarp.' + ' * The progressive increase in the penalty at higher levels' + ' means that the \'cost function\' can actually look like the' + ' alignment is getting worse when the levels change.' + ' * IF you wish to turn off this progression, for whatever' + ' reason (e.g., to keep compatibility with older results),' + ' use the option \'-penold\'.To be completely compatible with' + ' the older 3dQwarp, you\'ll also have to use \'-penfac 0.2\'.', + argstr='-penfac %f') + noweight = traits.Bool( + desc='If you want a binary weight (the old default), use this option.' + 'That is, each voxel in the base volume automask will be' + 'weighted the same in the computation of the cost functional.', + argstr='-noweight') + weight = File( + desc='Instead of computing the weight from the base dataset,' + 'directly input the weight volume from dataset \'www\'.' 
+ '* Useful if you know what over parts of the base image you' + 'want to emphasize or de-emphasize the matching functional.', + argstr='-weight %s', + exists=True) + wball = traits.List( + traits.Int(), + desc='-wball x y z r f' + 'Enhance automatic weight from \'-useweight\' by a factor' + 'of 1+f*Gaussian(FWHM=r) centered in the base image at' + 'DICOM coordinates (x,y,z) and with radius \'r\'. The' + 'goal of this option is to try and make the alignment' + 'better in a specific part of the brain.' + '* Example: -wball 0 14 6 30 40' + 'to emphasize the thalamic area (in MNI/Talairach space).' + '* The \'r\' parameter must be positive!' + '* The \'f\' parameter must be between 1 and 100 (inclusive).' + '* \'-wball\' does nothing if you input your own weight' + 'with the \'-weight\' option.' + '* \'-wball\' does change the binary weight created by' + 'the \'-noweight\' option.' + '* You can only use \'-wball\' once in a run of 3dQwarp.' + '*** The effect of \'-wball\' is not dramatic. The example' + 'above makes the average brain image across a collection' + 'of subjects a little sharper in the thalamic area, which' + 'might have some small value. If you care enough about' + 'alignment to use \'-wball\', then you should examine the' + 'results from 3dQwarp for each subject, to see if the' + 'alignments are good enough for your purposes.', + argstr='-wball %s', + minlen=5, + maxlen=5) + bpass = traits.Tuple( + (traits.Float(), traits.Float()), + argstr='-bpass %f %f') + wmask = traits.Tuple( + (File(exists=True), traits.Float()), + desc='-wmask ws f' + 'Similar to \'-wball\', but here, you provide a dataset \'ws\'' + 'that indicates where to increase the weight.' + '* The \'ws\' dataset must be on the same 3D grid as the base dataset.' + '* \'ws\' is treated as a mask -- it only matters where it' + 'is nonzero -- otherwise, the values inside are not used.' + '* After \'ws\' comes the factor \'f\' by which to increase the' + 'automatically computed weight.
Where \'ws\' is nonzero,' + 'the weighting will be multiplied by (1+f).' + '* As with \'-wball\', the factor \'f\' should be between 1 and 100.' + '* You cannot use \'-wball\' and \'-wmask\' together!', + argstr='-wmask %s %f') + out_weight_file = traits.File( + argstr='-wtprefix %s', + desc='Write the weight volume to disk as a dataset') + blur = traits.List( + traits.Float(), + desc='Gaussian blur the input images by \'bb\' (FWHM) voxels before' + 'doing the alignment (the output dataset will not be blurred).' + 'The default is 2.345 (for no good reason).' + '* Optionally, you can provide 2 values for \'bb\', and then' + 'the first one is applied to the base volume, the second' + 'to the source volume.' + '-->>* e.g., \'-blur 0 3\' to skip blurring the base image' + '(if the base is a blurry template, for example).' + '* A negative blur radius means to use 3D median filtering,' + 'rather than Gaussian blurring. This type of filtering will' + 'better preserve edges, which can be important in alignment.' + '* If the base is a template volume that is already blurry,' + 'you probably don\'t want to blur it again, but blurring' + 'the source volume a little is probably a good idea, to' + 'help the program avoid trying to match tiny features.' + '* Note that -duplo will blur the volumes some extra' + 'amount for the initial small-scale warping, to make' + 'that phase of the program converge more rapidly.', + argstr='-blur %s', + minlen=1, + maxlen=2) + pblur = traits.List( + traits.Float(), + desc='Use progressive blurring; that is, for larger patch sizes,' + 'the amount of blurring is larger. The general idea is to' + 'avoid trying to match finer details when the patch size' + 'and incremental warps are coarse. When \'-blur\' is used' + 'as well, it sets a minimum amount of blurring that will' + 'be used. [06 Aug 2014 -- \'-pblur\' may become the default someday].'
+ '* You can optionally give the fraction of the patch size that' + 'is used for the progressive blur by providing a value between' + '0 and 0.25 after \'-pblur\'. If you provide TWO values, the' + 'the first fraction is used for progressively blurring the' + 'base image and the second for the source image. The default' + 'parameters when just \'-pblur\' is given is the same as giving' + 'the options as \'-pblur 0.09 0.09\'.' + '* \'-pblur\' is useful when trying to match 2 volumes with high' + 'amounts of detail; e.g, warping one subject\'s brain image to' + 'match another\'s, or trying to warp to match a detailed template.' + '* Note that using negative values with \'-blur\' means that the' + 'progressive blurring will be done with median filters, rather' + 'than Gaussian linear blurring.' + '-->>*** The combination of the -allineate and -pblur options will make' + 'the results of using 3dQwarp to align to a template somewhat' + 'less sensitive to initial head position and scaling.', + argstr='-pblur %s', + minlen=1, + maxlen=2) + emask = File( + desc='Here, \'ee\' is a dataset to specify a mask of voxels' + 'to EXCLUDE from the analysis -- all voxels in \'ee\'' + 'that are NONZERO will not be used in the alignment.' + '* The base image always automasked -- the emask is' + 'extra, to indicate voxels you definitely DON\'T want' + 'included in the matching process, even if they are' + 'inside the brain.', + argstr='-emask %s', + exists=True, + copyfile=False) + noXdis = traits.Bool( + desc='Warp will not displace in x directoin', + argstr='-noXdis') + noYdis = traits.Bool( + desc='Warp will not displace in y directoin', + argstr='-noYdis') + noZdis = traits.Bool( + desc='Warp will not displace in z directoin', + argstr='-noZdis') + iniwarp = traits.List( + File(exists=True, copyfile=False), + desc='A dataset with an initial nonlinear warp to use.' + '* If this option is not used, the initial warp is the identity.' 
+ '* You can specify a catenation of warps (in quotes) here, as in' + 'program 3dNwarpApply.' + '* As a special case, if you just input an affine matrix in a .1D' + 'file, that will work also -- it is treated as giving the initial' + 'warp via the string "IDENT(base_dataset) matrix_file.aff12.1D".' + '* You CANNOT use this option with -duplo !!' + '* -iniwarp is usually used with -inilev to re-start 3dQwarp from' + 'a previous stopping point.', + argstr='-iniwarp %s', + xor=['duplo']) + inilev = traits.Int( + desc='The initial refinement \'level\' at which to start.' + '* Usually used with -iniwarp; CANNOT be used with -duplo.' + '* The combination of -inilev and -iniwarp lets you take the' + 'results of a previous 3dQwarp run and refine them further:' + 'Note that the source dataset in the second run is the SAME as' + 'in the first run. If you don\'t see why this is necessary,' + 'then you probably need to seek help from an AFNI guru.', + argstr='-inilev %d', + xor=['duplo']) + minpatch = traits.Int( + desc='* The value of mm should be an odd integer.' + '* The default value of mm is 25.' + '* For more accurate results than mm=25, try 19 or 13.' + '* The smallest allowed patch size is 5.' + '* You may want stop at a larger patch size (say 7 or 9) and use' + 'the -Qfinal option to run that final level with quintic warps,' + 'which might run faster and provide the same degree of warp detail.' + '* Trying to make two different brain volumes match in fine detail' + 'is usually a waste of time, especially in humans. There is too' + 'much variability in anatomy to match gyrus to gyrus accurately.' + 'For this reason, the default minimum patch size is 25 voxels.' + 'Using a smaller \'-minpatch\' might try to force the warp to' + 'match features that do not match, and the result can be useless' + 'image distortions -- another reason to LOOK AT THE RESULTS.', + argstr='-minpatch %d') + maxlev = traits.Int( + desc='The initial refinement \'level\' at which to start.' 
+ '* Usually used with -iniwarp; CANNOT be used with -duplo.' + '* The combination of -inilev and -iniwarp lets you take the' + 'results of a previous 3dQwarp run and refine them further:' + 'Note that the source dataset in the second run is the SAME as' + 'in the first run. If you don\'t see why this is necessary,' + 'then you probably need to seek help from an AFNI guru.', + argstr='-maxlev %d', + xor=['duplo'], + position=-1) + gridlist = File( + desc='This option provides an alternate way to specify the patch' + 'grid sizes used in the warp optimization process. \'gl\' is' + 'a 1D file with a list of patches to use -- in most cases,' + 'you will want to use it in the following form:' + '-gridlist \'1D: 0 151 101 75 51\'' + '* Here, a 0 patch size means the global domain. Patch sizes' + 'otherwise should be odd integers >= 5.' + '* If you use the \'0\' patch size again after the first position,' + 'you will actually get an iteration at the size of the' + 'default patch level 1, where the patch sizes are 75% of' + 'the volume dimension. There is no way to force the program' + 'to literally repeat the sui generis step of lev=0.' + '* You cannot use -gridlist with -duplo or -plusminus!', + argstr='-gridlist %s', + exists=True, + copyfile=False, + xor=['duplo', 'plusminus']) + allsave = traits.Bool( + desc='This option lets you save the output warps from each level' + 'of the refinement process. Mostly used for experimenting.' + '* Cannot be used with -nopadWARP, -duplo, or -plusminus.' + '* Will only save all the outputs if the program terminates' + 'normally -- if it crashes, or freezes, then all these' + 'warps are lost.', + argstr='-allsave', + xor=['nopadWARP', 'duplo', 'plusminus']) + duplo = traits.Bool( + desc='Start off with 1/2 scale versions of the volumes,' + 'for getting a speedy coarse first alignment.' + '* Then scales back up to register the full volumes.' 
+ 'The goal is greater speed, and it seems to help this' + 'positively piggish program to be more expeditious.' + '* However, accuracy is somewhat lower with \'-duplo\',' + 'for reasons that currenly elude Zhark; for this reason,' + 'the Emperor does not usually use \'-duplo\'.', + argstr='-duplo', + xor=['gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', 'allsave']) + workhard = traits.Bool( + desc='Iterate more times, which can help when the volumes are' + 'hard to align at all, or when you hope to get a more precise' + 'alignment.' + '* Slows the program down (possibly a lot), of course.' + '* When you combine \'-workhard\' with \'-duplo\', only the' + 'full size volumes get the extra iterations.' + '* For finer control over which refinement levels work hard,' + 'you can use this option in the form (for example)' + ' -workhard:4:7' + 'which implies the extra iterations will be done at levels' + '4, 5, 6, and 7, but not otherwise.' + '* You can also use \'-superhard\' to iterate even more, but' + 'this extra option will REALLY slow things down.' + '-->>* Under most circumstances, you should not need to use either' + '-workhard or -superhard.' + '-->>* The fastest way to register to a template image is via the' + '-duplo option, and without the -workhard or -superhard options.' + '-->>* If you use this option in the form \'-Workhard\' (first letter' + 'in upper case), then the second iteration at each level is' + 'done with quintic polynomial warps.', + argstr='-workhard', + xor=['boxopt', 'ballopt']) + Qfinal = traits.Bool( + desc='At the finest patch size (the final level), use Hermite' + 'quintic polynomials for the warp instead of cubic polynomials.' + '* In a 3D \'patch\', there are 2x2x2x3=24 cubic polynomial basis' + 'function parameters over which to optimize (2 polynomials' + 'dependent on each of the x,y,z directions, and 3 different' + 'directions of displacement).' + '* There are 3x3x3x3=81 quintic polynomial parameters per patch.' 
+ '* With -Qfinal, the final level will have more detail in' + 'the allowed warps, at the cost of yet more CPU time.' + '* However, no patch below 7x7x7 in size will be done with quintic' + 'polynomials.' + '* This option is also not usually needed, and is experimental.', + argstr='-Qfinal') + Qonly = traits.Bool( + desc='Use Hermite quintic polynomials at all levels.' + '* Very slow (about 4 times longer). Also experimental.' + '* Will produce a (discrete representation of a) C2 warp.', + argstr='-Qonly') + plusminus = traits.Bool( + desc='Normally, the warp displacements dis(x) are defined to match' + 'base(x) to source(x+dis(x)). With this option, the match' + 'is between base(x-dis(x)) and source(x+dis(x)) -- the two' + 'images \'meet in the middle\'.' + '* One goal is to mimic the warping done to MRI EPI data by' + 'field inhomogeneities, when registering between a \'blip up\'' + 'and a \'blip down\' down volume, which will have opposite' + 'distortions.' + '* Define Wp(x) = x+dis(x) and Wm(x) = x-dis(x). Then since' + 'base(Wm(x)) matches source(Wp(x)), by substituting INV(Wm(x))' + 'wherever we see x, we have base(x) matches source(Wp(INV(Wm(x))));' + 'that is, the warp V(x) that one would get from the \'usual\' way' + 'of running 3dQwarp is V(x) = Wp(INV(Wm(x))).' + '* Conversely, we can calculate Wp(x) in terms of V(x) as follows:' + 'If V(x) = x + dv(x), define Vh(x) = x + dv(x)/2;' + 'then Wp(x) = V(INV(Vh(x)))' + '* With the above formulas, it is possible to compute Wp(x) from' + 'V(x) and vice-versa, using program 3dNwarpCalc. The requisite' + 'commands are left as an exercise for the aspiring AFNI Jedi Master.' + '* You can use the semi-secret \'-pmBASE\' option to get the V(x)' + 'warp and the source dataset warped to base space, in addition to' + 'the Wp(x) \'_PLUS\' and Wm(x) \'_MINUS\' warps.' 
+ '-->>* Alas: -plusminus does not work with -duplo or -allineate :-(' + '* However, you can use -iniwarp with -plusminus :-)' + '-->>* The outputs have _PLUS (from the source dataset) and _MINUS' + '(from the base dataset) in their filenames, in addition to' + 'the prefix. The -iwarp option, if present, will be ignored.', + argstr='-plusminus', + xor=['duplo', 'allsave', 'iwarp']) + nopad = traits.Bool( + desc='Do NOT use zero-padding on the 3D base and source images.' + '[Default == zero-pad, if needed]' + '* The underlying model for deformations goes to zero at the' + 'edge of the volume being warped. However, if there is' + 'significant data near an edge of the volume, then it won\'t' + 'get displaced much, and so the results might not be good.' + '* Zero padding is designed as a way to work around this potential' + 'problem. You should NOT need the \'-nopad\' option for any' + 'reason that Zhark can think of, but it is here to be symmetrical' + 'with 3dAllineate.' + '* Note that the output (warped from source) dataset will be on the' + 'base dataset grid whether or not zero-padding is allowed. However,' + 'unless you use the following option, allowing zero-padding (i.e.,' + 'the default operation) will make the output WARP dataset(s) be' + 'on a larger grid (also see \'-expad\' below).', + argstr='-nopad') + nopadWARP = traits.Bool( + desc='If for some reason you require the warp volume to' + 'match the base volume, then use this option to have the output' + 'WARP dataset(s) truncated.', + argstr='-nopadWARP', + xor=['allsave', 'expad']) + expad = traits.Int( + desc='This option instructs the program to pad the warp by an extra' + '\'EE\' voxels (and then 3dQwarp starts optimizing it).' + '* This option is seldom needed, but can be useful if you' + 'might later catenate the nonlinear warp -- via 3dNwarpCat --' + 'with an affine transformation that contains a large shift.' 
+ 'Under that circumstance, the nonlinear warp might be shifted' + 'partially outside its original grid, so expanding that grid' + 'can avoid this problem.' + '* Note that this option perforce turns off \'-nopadWARP\'.', + argstr='-expad %d', + xor=['nopadWARP']) + ballopt = traits.Bool( + desc='Normally, the incremental warp parameters are optimized inside' + 'a rectangular \'box\' (24 dimensional for cubic patches, 81 for' + 'quintic patches), whose limits define the amount of distortion' + 'allowed at each step. Using \'-ballopt\' switches these limits' + 'to be applied to a \'ball\' (interior of a hypersphere), which' + 'can allow for larger incremental displacements. Use this' + 'option if you think things need to be able to move farther.', + argstr='-ballopt', + xor=['workhard', 'boxopt']) + boxopt = traits.Bool( + desc='Use the \'box\' optimization limits instead of the \'ball\'' + '[this is the default at present].' + '* Note that if \'-workhard\' is used, then ball and box optimization' + 'are alternated in the different iterations at each level, so' + 'these two options have no effect in that case.', + argstr='-boxopt', + xor=['workhard', 'ballopt']) + verb = traits.Bool( + desc='more detailed description of the process', + argstr='-verb', + xor=['quiet']) + quiet = traits.Bool( + desc='Cut out most of the fun fun fun progress messages :-(', + argstr='-quiet', + xor=['verb']) + # Hidden and semi-hidden options + overwrite = traits.Bool( + desc='Overwrite outputs', + argstr='-overwrite') + lpc = traits.Bool( + desc='Local Pearson minimization (i.e., EPI-T1 registration)' + 'This option has not be extensively tested' + 'If you use \'-lpc\', then \'-maxlev 0\' is automatically set.'
+ 'If you want to go to more refined levels, you can set \'-maxlev\'' + 'This should be set up to have lpc as the second to last argument' + 'and maxlev as the second to last argument, as needed by AFNI' + 'Using maxlev > 1 is not recommended for EPI-T1 alignment.', + argstr='-lpc', + xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], + position=-2) + lpa = traits.Bool( + desc='Local Pearson maximization' + 'This option has not be extensively tested', + argstr='-lpa', + xor=['nmi', 'mi', 'lpc', 'hel', 'pear']) + hel = traits.Bool( + desc='Hellinger distance: a matching function for the adventurous' + 'This option has NOT be extensively tested for usefullness' + 'and should be considered experimental at this infundibulum.', + argstr='-hel', + xor=['nmi', 'mi', 'lpc', 'lpa', 'pear']) + mi = traits.Bool( + desc='Mutual Information: a matching function for the adventurous' + 'This option has NOT be extensively tested for usefullness' + 'and should be considered experimental at this infundibulum.', + argstr='-mi', + xor=['mi', 'hel', 'lpc', 'lpa', 'pear']) + nmi = traits.Bool( + desc='Normalized Mutual Information: a matching function for the adventurous' + 'This option has NOT be extensively tested for usefullness' + 'and should be considered experimental at this infundibulum.', + argstr='-nmi', + xor=['nmi', 'hel', 'lpc', 'lpa', 'pear']) + + + +class QwarpOutputSpec(TraitedSpec): + warped_source = File( + desc='Warped source file. If plusminus is used, this is the undistorted' + 'source file.') + warped_base = File(desc='Undistorted base file.') + source_warp = File( + desc="Displacement in mm for the source image." + "If plusminus is used this is the field suceptibility correction" + "warp (in 'mm') for source image.") + base_warp = File( + desc="Displacement in mm for the base image." + "If plus minus is used, this is the field suceptibility correction" + "warp (in 'mm') for base image. 
This is only output if plusminus" + "or iwarp options are passed") + weights = File( + desc="Auto-computed weight volume.") + + +class Qwarp(AFNICommand): + """A version of 3dQwarp + Allineate your images prior to passing them to this workflow. + + For complete details, see the `3dQwarp Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'sub-01_dir-LR_epi.nii.gz' + >>> qwarp.inputs.nopadWARP = True + >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' + >>> qwarp.inputs.plusminus = True + >>> qwarp.cmdline + '3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP -prefix sub-01_dir-LR_epi_QW -plusminus' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'mni.nii' + >>> qwarp.inputs.resample = True + >>> qwarp.cmdline + '3dQwarp -base mni.nii -source structural.nii -prefix structural_QW -resample' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'epi.nii' + >>> qwarp.inputs.out_file = 'anatSSQ.nii.gz' + >>> qwarp.inputs.resample = True + >>> qwarp.inputs.lpc = True + >>> qwarp.inputs.verb = True + >>> qwarp.inputs.iwarp = True + >>> qwarp.inputs.blur = [0,3] + >>> qwarp.cmdline + '3dQwarp -base epi.nii -blur 0.0 3.0 -source structural.nii -iwarp -prefix anatSSQ.nii.gz -resample -verb -lpc' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'mni.nii' + >>> qwarp.inputs.duplo = True + >>> qwarp.inputs.blur = [0,3] + >>> qwarp.cmdline + '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -prefix structural_QW' + >>> res = 
qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'mni.nii' + >>> qwarp.inputs.duplo = True + >>> qwarp.inputs.minpatch = 25 + >>> qwarp.inputs.blur = [0,3] + >>> qwarp.inputs.out_file = 'Q25' + >>> qwarp.cmdline + '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -minpatch 25 -prefix Q25' + >>> res = qwarp.run() # doctest: +SKIP + >>> qwarp2 = afni.Qwarp() + >>> qwarp2.inputs.in_file = 'structural.nii' + >>> qwarp2.inputs.base_file = 'mni.nii' + >>> qwarp2.inputs.blur = [0,2] + >>> qwarp2.inputs.out_file = 'Q11' + >>> qwarp2.inputs.inilev = 7 + >>> qwarp2.inputs.iniwarp = ['Q25_warp+tlrc.HEAD'] + >>> qwarp2.cmdline + '3dQwarp -base mni.nii -blur 0.0 2.0 -source structural.nii -inilev 7 -iniwarp Q25_warp+tlrc.HEAD -prefix Q11' + >>> res2 = qwarp2.run() # doctest: +SKIP + >>> res2 = qwarp2.run() # doctest: +SKIP + >>> qwarp3 = afni.Qwarp() + >>> qwarp3.inputs.in_file = 'structural.nii' + >>> qwarp3.inputs.base_file = 'mni.nii' + >>> qwarp3.inputs.allineate = True + >>> qwarp3.inputs.allineate_opts = '-cose lpa -verb' + >>> qwarp3.cmdline + "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii -prefix structural_QW" + >>> res3 = qwarp3.run() # doctest: +SKIP """ + _cmd = '3dQwarp' + input_spec = QwarpInputSpec + output_spec = QwarpOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'allineate_opts': + return spec.argstr % ("'" + value + "'") + return super(Qwarp, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + + if not isdefined(self.inputs.out_file): + prefix = self._gen_fname(self.inputs.in_file, suffix='_QW') + ext = '.HEAD' + suffix ='+tlrc' + else: + prefix = self.inputs.out_file + ext_ind = max([prefix.lower().rfind('.nii.gz'), + prefix.lower().rfind('.nii.')]) + if ext_ind == -1: + ext = '.HEAD' + suffix = 
'+tlrc' + else: + ext = prefix[ext_ind:] + suffix = '' + outputs['warped_source'] = fname_presuffix(prefix, suffix=suffix, + use_ext=False) + ext + if not self.inputs.nowarp: + outputs['source_warp'] = fname_presuffix(prefix, + suffix='_WARP' + suffix, use_ext=False) + ext + if self.inputs.iwarp: + outputs['base_warp'] = fname_presuffix(prefix, + suffix='_WARPINV' + suffix, use_ext=False) + ext + if isdefined(self.inputs.out_weight_file): + outputs['weights'] = os.path.abspath(self.inputs.out_weight_file) + + if self.inputs.plusminus: + outputs['warped_source'] = fname_presuffix(prefix, + suffix='_PLUS' + suffix, use_ext=False) + ext + outputs['warped_base'] = fname_presuffix(prefix, + suffix='_MINUS' + suffix, use_ext=False) + ext + outputs['source_warp'] = fname_presuffix(prefix, + suffix='_PLUS_WARP' + suffix, use_ext=False) + ext + outputs['base_warp'] = fname_presuffix(prefix, + suffix='_MINUS_WARP' + suffix, use_ext=False) + ext + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname(self.inputs.source_file, suffix='_QW') diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py new file mode 100644 index 0000000000..9baf98d246 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ABoverlap + + +def test_ABoverlap_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file_a=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=-3, + ), + in_file_b=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + no_automask=dict(argstr='-no_automask', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr=' |& tee 
%s', + position=-1, + ), + outputtype=dict(), + quiet=dict(argstr='-quiet', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verb=dict(argstr='-verb', + ), + ) + inputs = ABoverlap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ABoverlap_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = ABoverlap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py index aef42ee585..0f670fa2f4 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -9,7 +9,11 @@ def test_AFNICommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + num_threads=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='-prefix %s', @@ -17,7 +21,8 @@ def test_AFNICommand_inputs(): name_template='%s_afni', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AFNICommand.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py index 37efbcee2d..8ab19a670a 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -9,10 +9,12 @@ def test_AFNICommandBase_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AFNICommandBase.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py new file mode 100644 index 0000000000..664237b4d7 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import AFNIPythonCommand + + +def test_AFNIPythonCommand_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = AFNIPythonCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py index 5fe66e9df7..e01cc9dd28 100644 --- a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -11,7 +11,8 @@ def test_AFNItoNIFTI_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -22,6 +23,9 @@ def test_AFNItoNIFTI_inputs(): newid=dict(argstr='-newid', xor=['oldid'], ), + num_threads=dict(nohash=True, + usedefault=True, + ), oldid=dict(argstr='-oldid', xor=['newid'], ), @@ -33,7 +37,8 @@ def test_AFNItoNIFTI_inputs(): outputtype=dict(), pure=dict(argstr='-pure', ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AFNItoNIFTI.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py new file mode 100644 index 0000000000..adf8750b1c --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -0,0 +1,74 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import AlignEpiAnatPy + + +def test_AlignEpiAnatPy_inputs(): + input_map = dict(anat=dict(argstr='-anat %s', + copyfile=False, + mandatory=True, + ), + anat2epi=dict(argstr='-anat2epi', + ), + args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + epi2anat=dict(argstr='-epi2anat', + ), + epi_base=dict(argstr='-epi_base %s', + mandatory=True, + ), + epi_strip=dict(argstr='-epi_strip %s', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-epi %s', + copyfile=False, + mandatory=True, + ), + outputtype=dict(), + py27_path=dict(usedefault=True, + ), + save_skullstrip=dict(argstr='-save_skullstrip', + ), + suffix=dict(argstr='-suffix %s', + usedefault=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + tshift=dict(argstr='-tshift %s', + usedefault=True, + ), + volreg=dict(argstr='-volreg %s', + usedefault=True, + ), + ) + inputs = AlignEpiAnatPy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_AlignEpiAnatPy_outputs(): + output_map = dict(anat_al_mat=dict(), + anat_al_orig=dict(), + epi_al_mat=dict(), + epi_al_orig=dict(), + epi_al_tlrc_mat=dict(), + epi_reg_al_mat=dict(), + epi_tlrc_al=dict(), + epi_vr_al_mat=dict(), + epi_vr_motion=dict(), + skullstrip=dict(), + ) + outputs = AlignEpiAnatPy.output_spec() + + for key, 
metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py index 0bf37ea8cd..73ecc66414 100644 --- a/nipype/interfaces/afni/tests/test_auto_Allineate.py +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -4,7 +4,11 @@ def test_Allineate_inputs(): - input_map = dict(args=dict(argstr='%s', + input_map = dict(allcostx=dict(argstr='-allcostx |& tee %s', + position=-1, + xor=['out_file', 'out_matrix', 'out_param_file', 'out_weight_file'], + ), + args=dict(argstr='%s', ), autobox=dict(argstr='-autobox', ), @@ -29,23 +33,33 @@ def test_Allineate_inputs(): ), fine_blur=dict(argstr='-fineblur %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-source %s', copyfile=False, mandatory=True, - position=-1, ), in_matrix=dict(argstr='-1Dmatrix_apply %s', position=-3, + xor=['out_matrix'], ), in_param_file=dict(argstr='-1Dparam_apply %s', + xor=['out_param_file'], ), interpolation=dict(argstr='-interp %s', ), master=dict(argstr='-master %s', ), + maxrot=dict(argstr='-maxrot %f', + ), + maxscl=dict(argstr='-maxscl %f', + ), + maxshf=dict(argstr='-maxshf %f', + ), + maxshr=dict(argstr='-maxshr %f', + ), newgrid=dict(argstr='-newgrid %f', ), nmatch=dict(argstr='-nmatch %d', @@ -54,6 +68,9 @@ def test_Allineate_inputs(): ), nomask=dict(argstr='-nomask', ), + num_threads=dict(nohash=True, + usedefault=True, + ), nwarp=dict(argstr='-nwarp %s', ), nwarp_fixdep=dict(argstr='-nwarp_fixdep%s', @@ -64,16 +81,22 @@ def test_Allineate_inputs(): ), out_file=dict(argstr='-prefix %s', genfile=True, - name_source='%s_allineate', - position=-2, + xor=['allcostx'], ), out_matrix=dict(argstr='-1Dmatrix_save %s', + xor=['in_matrix', 'allcostx'], ), out_param_file=dict(argstr='-1Dparam_save %s', + 
xor=['in_param_file', 'allcostx'], ), out_weight_file=dict(argstr='-wtprefix %s', + xor=['allcostx'], ), outputtype=dict(), + overwrite=dict(argstr='-overwrite', + ), + quiet=dict(argstr='-quiet', + ), reference=dict(argstr='-base %s', ), replacebase=dict(argstr='-replacebase', @@ -84,11 +107,12 @@ def test_Allineate_inputs(): ), source_mask=dict(argstr='-source_mask %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), two_best=dict(argstr='-twobest %d', ), - two_blur=dict(argstr='-twoblur', + two_blur=dict(argstr='-twoblur %f', ), two_first=dict(argstr='-twofirst', ), @@ -96,11 +120,17 @@ def test_Allineate_inputs(): ), usetemp=dict(argstr='-usetemp', ), + verbose=dict(argstr='-verb', + ), warp_type=dict(argstr='-warp %s', ), warpfreeze=dict(argstr='-warpfreeze', ), + weight=dict(argstr='-weight %s', + ), weight_file=dict(argstr='-weight %s', + deprecated='1.0.0', + new_name='weight', ), zclip=dict(argstr='-zclip', ), @@ -113,8 +143,11 @@ def test_Allineate_inputs(): def test_Allineate_outputs(): - output_map = dict(matrix=dict(), + output_map = dict(allcostx=dict(), out_file=dict(), + out_matrix=dict(), + out_param_file=dict(), + out_weight_file=dict(), ) outputs = Allineate.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py new file mode 100644 index 0000000000..cc87a813b3 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import AutoTLRC + + +def test_AutoTLRC_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + base=dict(argstr='-base %s', + mandatory=True, + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-input %s', + copyfile=False, + mandatory=True, + ), + 
no_ss=dict(argstr='-no_ss', + ), + outputtype=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = AutoTLRC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_AutoTLRC_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = AutoTLRC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py index f7a3d89278..dab38ec832 100644 --- a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -11,7 +11,8 @@ def test_AutoTcorrelate_inputs(): ), eta2=dict(argstr='-eta2', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -27,6 +28,9 @@ def test_AutoTcorrelate_inputs(): mask_source=dict(argstr='-mask_source %s', xor=['mask_only_targets'], ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_similarity_matrix.1D', @@ -34,7 +38,8 @@ def test_AutoTcorrelate_inputs(): outputtype=dict(), polort=dict(argstr='-polort %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AutoTcorrelate.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py index 91479c241d..968bcf5839 100644 --- a/nipype/interfaces/afni/tests/test_auto_Autobox.py +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -9,7 +9,8 @@ def test_Autobox_inputs(): environ=dict(nohash=True, usedefault=True, ), - 
ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -18,6 +19,9 @@ def test_Autobox_inputs(): ), no_clustering=dict(argstr='-noclust', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_autobox', @@ -25,7 +29,8 @@ def test_Autobox_inputs(): outputtype=dict(), padding=dict(argstr='-npad %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Autobox.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py index f0a76037c2..afc4c7dad5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Automask.py +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -19,7 +19,8 @@ def test_Automask_inputs(): ), erode=dict(argstr='-erode %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -27,12 +28,16 @@ def test_Automask_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_mask', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Automask.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py new file mode 100644 index 0000000000..2b8f1a76be --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Axialize + + +def test_Axialize_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + axial=dict(argstr='-axial', + xor=['coronal', 'sagittal'], + ), + 
coronal=dict(argstr='-coronal', + xor=['sagittal', 'axial'], + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + orientation=dict(argstr='-orient %s', + ), + out_file=dict(argstr='-prefix %s', + name_source='in_file', + name_template='%s_axialize', + ), + outputtype=dict(), + sagittal=dict(argstr='-sagittal', + xor=['coronal', 'axial'], + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verb=dict(argstr='-verb', + ), + ) + inputs = Axialize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Axialize_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Axialize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py index 5310eaa256..289c5abe4f 100644 --- a/nipype/interfaces/afni/tests/test_auto_Bandpass.py +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -19,7 +19,8 @@ def test_Bandpass_inputs(): mandatory=True, position=-3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -44,6 +45,9 @@ def test_Bandpass_inputs(): ), notrans=dict(argstr='-notrans', ), + num_threads=dict(nohash=True, + usedefault=True, + ), orthogonalize_dset=dict(argstr='-dsort %s', ), orthogonalize_file=dict(argstr='-ort %s', @@ -55,7 +59,8 @@ def test_Bandpass_inputs(): position=1, ), outputtype=dict(), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tr=dict(argstr='-dt %f', ), diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py index eb4a571079..1311e237cf 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -16,7 +16,8 @@ def test_BlurInMask_inputs(): fwhm=dict(argstr='-FWHM %f', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -28,6 +29,9 @@ def test_BlurInMask_inputs(): ), multimask=dict(argstr='-Mmask %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), options=dict(argstr='%s', position=2, ), @@ -39,7 +43,8 @@ def test_BlurInMask_inputs(): outputtype=dict(), preserve=dict(argstr='-preserve', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BlurInMask.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py index bf4d2a194c..5b5c2a34b9 100644 --- a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -17,7 +17,8 @@ def test_BlurToFWHM_inputs(): ), fwhmxy=dict(argstr='-FWHMxy %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -25,12 +26,16 @@ def test_BlurToFWHM_inputs(): ), mask=dict(argstr='-blurmaster %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source=['in_file'], name_template='%s_afni', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BlurToFWHM.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py 
b/nipype/interfaces/afni/tests/test_auto_BrickStat.py index fc095e5fa3..55abae4c6d 100644 --- a/nipype/interfaces/afni/tests/test_auto_BrickStat.py +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -9,7 +9,8 @@ def test_BrickStat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -19,10 +20,23 @@ def test_BrickStat_inputs(): mask=dict(argstr='-mask %s', position=2, ), + max=dict(argstr='-max', + ), + mean=dict(argstr='-mean', + ), min=dict(argstr='-min', position=1, ), - terminal_output=dict(nohash=True, + percentile=dict(argstr='-percentile %.3f %.3f %.3f', + ), + slow=dict(argstr='-slow', + ), + sum=dict(argstr='-sum', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + var=dict(argstr='-var', ), ) inputs = BrickStat.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py new file mode 100644 index 0000000000..63ebbc6d5c --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Bucket + + +def test_Bucket_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='%s', + mandatory=True, + position=-1, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_template='buck', + ), + outputtype=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = Bucket.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def 
test_Bucket_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Bucket.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py index af790bd5d2..09fd99c753 100644 --- a/nipype/interfaces/afni/tests/test_auto_Calc.py +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -13,7 +13,8 @@ def test_Calc_inputs(): mandatory=True, position=3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file_a=dict(argstr='-a %s', @@ -26,6 +27,9 @@ def test_Calc_inputs(): in_file_c=dict(argstr='-c %s', position=2, ), + num_threads=dict(nohash=True, + usedefault=True, + ), other=dict(argstr='', ), out_file=dict(argstr='-prefix %s', @@ -33,12 +37,15 @@ def test_Calc_inputs(): name_template='%s_calc', ), outputtype=dict(), + overwrite=dict(argstr='-overwrite', + ), single_idx=dict(), start_idx=dict(requires=['stop_idx'], ), stop_idx=dict(requires=['start_idx'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Calc.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py new file mode 100644 index 0000000000..857c9ade60 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Cat + + +def test_Cat_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s', + mandatory=True, + position=-2, + ), + keepfree=dict(argstr='-nonfixed', + ), + 
num_threads=dict(nohash=True, + usedefault=True, + ), + omitconst=dict(argstr='-nonconst', + ), + out_cint=dict(xor=['out_format', 'out_nice', 'out_double', 'out_fint', 'out_int'], + ), + out_double=dict(argstr='-d', + xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'], + ), + out_file=dict(argstr='> %s', + mandatory=True, + position=-1, + ), + out_fint=dict(argstr='-f', + xor=['out_format', 'out_nice', 'out_double', 'out_int', 'out_cint'], + ), + out_format=dict(argstr='-form %s', + xor=['out_int', 'out_nice', 'out_double', 'out_fint', 'out_cint'], + ), + out_int=dict(argstr='-i', + xor=['out_format', 'out_nice', 'out_double', 'out_fint', 'out_cint'], + ), + out_nice=dict(argstr='-n', + xor=['out_format', 'out_int', 'out_double', 'out_fint', 'out_cint'], + ), + outputtype=dict(), + sel=dict(argstr='-sel %s', + ), + stack=dict(argstr='-stack', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = Cat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Cat_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Cat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py new file mode 100644 index 0000000000..e94fa12df3 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CatMatvec + + +def test_CatMatvec_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + fourxfour=dict(argstr='-4x4', + descr='Output matrix in augmented form (last row is 0 0 0 1)This 
option does not work with -MATRIX or -ONELINE', + xor=['matrix', 'oneline'], + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='%s', + descr='list of tuples of mfiles and associated opkeys', + mandatory=True, + position=-2, + ), + matrix=dict(argstr='-MATRIX', + descr="indicates that the resulting matrix willbe written to outfile in the 'MATRIX(...)' format (FORM 3).This feature could be used, with clever scripting, to inputa matrix directly on the command line to program 3dWarp.", + xor=['oneline', 'fourxfour'], + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + oneline=dict(argstr='-ONELINE', + descr='indicates that the resulting matrixwill simply be written as 12 numbers on one line.', + xor=['matrix', 'fourxfour'], + ), + out_file=dict(argstr=' > %s', + descr='File to write concattenated matvecs to', + mandatory=True, + position=-1, + ), + outputtype=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = CatMatvec.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_CatMatvec_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = CatMatvec.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py new file mode 100644 index 0000000000..34d6d5ec2f --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CenterMass + + +def test_CenterMass_inputs(): + input_map = dict(all_rois=dict(argstr='-all_rois', + ), + args=dict(argstr='%s', + ), + 
automask=dict(argstr='-automask', + ), + cm_file=dict(argstr='> %s', + descr='File to write center of mass to', + hash_files=False, + keep_extension=False, + name_source='in_file', + name_template='%s_cm.out', + position=-1, + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + local_ijk=dict(argstr='-local_ijk', + ), + mask_file=dict(argstr='-mask %s', + ), + roi_vals=dict(argstr='-roi_vals %s', + ), + set_cm=dict(argstr='-set %f %f %f', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = CenterMass.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_CenterMass_outputs(): + output_map = dict(cm=dict(), + cm_file=dict(), + out_file=dict(), + ) + outputs = CenterMass.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py index 8bd4cd346a..815d8530ce 100644 --- a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -17,7 +17,8 @@ def test_ClipLevel_inputs(): position=3, xor='doall', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_ClipLevel_inputs(): mfrac=dict(argstr='-mfrac %s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ClipLevel.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py 
index 80338ccc57..43fa537eb4 100644 --- a/nipype/interfaces/afni/tests/test_auto_Copy.py +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -9,7 +9,8 @@ def test_Copy_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -17,13 +18,19 @@ def test_Copy_inputs(): mandatory=True, position=-2, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='%s', name_source='in_file', name_template='%s_copy', position=-1, ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verbose=dict(argstr='-verb', ), ) inputs = Copy.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py new file mode 100644 index 0000000000..972b9a0128 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -0,0 +1,142 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Deconvolve + + +def test_Deconvolve_inputs(): + input_map = dict(STATmask=dict(argstr='-STATmask %s', + ), + TR_1D=dict(argstr='-TR_1D %f', + ), + allzero_OK=dict(argstr='-allzero_OK', + ), + args=dict(argstr='%s', + ), + automask=dict(argstr='-automask', + ), + cbucket=dict(argstr='-cbucket %s', + ), + censor=dict(argstr='-censor %s', + ), + dmbase=dict(argstr='-dmbase', + ), + dname=dict(argstr='-D%s=%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + force_TR=dict(argstr='-force_TR %d', + ), + fout=dict(argstr='-fout', + ), + global_times=dict(argstr='-global_times', + xor=['local_times'], + ), + glt_label=dict(argstr='-glt_label %d %s...', + position=-1, + requires=['gltsym'], + ), + gltsym=dict(argstr="-gltsym 'SYM: %s'...", + position=-2, + ), + goforit=dict(argstr='-GOFORIT %i', + ), + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='-input %s', + copyfile=False, + position=0, + sep=' ', + ), + input1D=dict(argstr='-input1D %s', + ), + jobs=dict(argstr='-jobs %d', + ), + legendre=dict(argstr='-legendre', + ), + local_times=dict(argstr='-local_times', + xor=['global_times'], + ), + mask=dict(argstr='-mask %s', + ), + noblock=dict(argstr='-noblock', + ), + nocond=dict(argstr='-nocond', + ), + nodmbase=dict(argstr='-nodmbase', + ), + nolegendre=dict(argstr='-nolegendre', + ), + nosvd=dict(argstr='-nosvd', + ), + num_glt=dict(argstr='-num_glt %d', + position=-3, + ), + num_stimts=dict(argstr='-num_stimts %d', + position=-6, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + ortvec=dict(argstr='ortvec %s', + ), + out_file=dict(argstr='-bucket %s', + ), + outputtype=dict(), + polort=dict(argstr='-polort %d', + ), + rmsmin=dict(argstr='-rmsmin %f', + ), + rout=dict(argstr='-rout', + ), + sat=dict(argstr='-sat', + xor=['trans'], + ), + singvals=dict(argstr='-singvals', + ), + stim_label=dict(argstr='-stim_label %d %s...', + position=-4, + requires=['stim_times'], + ), + stim_times=dict(argstr="-stim_times %d %s '%s'...", + position=-5, + ), + stim_times_subtract=dict(argstr='-stim_times_subtract %f', + ), + svd=dict(argstr='-svd', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + tout=dict(argstr='-tout', + ), + trans=dict(argstr='-trans', + xor=['sat'], + ), + vout=dict(argstr='-vout', + ), + x1D=dict(argstr='-x1D %s', + ), + x1D_stop=dict(argstr='-x1D_stop', + ), + ) + inputs = Deconvolve.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Deconvolve_outputs(): + output_map = dict(cbucket=dict(), + out_file=dict(), + reml_script=dict(), + x1D=dict(), + ) + outputs = Deconvolve.output_spec() + + for key, metadata in 
list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py index cd4146a7b9..a9b41f29ad 100644 --- a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -13,7 +13,8 @@ def test_DegreeCentrality_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -23,6 +24,9 @@ def test_DegreeCentrality_inputs(): ), mask=dict(argstr='-mask %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), oned_file=dict(argstr='-out1D %s', ), out_file=dict(argstr='-prefix %s', @@ -34,7 +38,8 @@ def test_DegreeCentrality_inputs(): ), sparsity=dict(argstr='-sparsity %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresh=dict(argstr='-thresh %f', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py index aedb50b684..21891fb546 100644 --- a/nipype/interfaces/afni/tests/test_auto_Despike.py +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -9,7 +9,8 @@ def test_Despike_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -17,12 +18,16 @@ def test_Despike_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_despike', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Despike.input_spec() diff --git 
a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py index 3fb771cbfc..16a1d1fd77 100644 --- a/nipype/interfaces/afni/tests/test_auto_Detrend.py +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -9,7 +9,8 @@ def test_Detrend_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -17,12 +18,16 @@ def test_Detrend_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_detrend', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Detrend.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py new file mode 100644 index 0000000000..9042c7a8f7 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Dot + + +def test_Dot_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + demean=dict(argstr='-demean', + ), + docoef=dict(argstr='-docoef', + ), + docor=dict(argstr='-docor', + ), + dodice=dict(argstr='-dodice', + ), + dodot=dict(argstr='-dodot', + ), + doeta2=dict(argstr='-doeta2', + ), + dosums=dict(argstr='-dosums', + ), + environ=dict(nohash=True, + usedefault=True, + ), + full=dict(argstr='-full', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s ...', + position=-2, + ), + mask=dict(argstr='-mask %s', + ), + mrange=dict(argstr='-mrange %s %s', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr=' |& tee %s', + position=-1, + ), + outputtype=dict(), + 
show_labels=dict(argstr='-show_labels', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + upper=dict(argstr='-upper', + ), + ) + inputs = Dot.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Dot_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Dot.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py index 39bdefe0ba..4c923ea494 100644 --- a/nipype/interfaces/afni/tests/test_auto_ECM.py +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -19,7 +19,8 @@ def test_ECM_inputs(): ), full=dict(argstr='-full', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -33,6 +34,9 @@ def test_ECM_inputs(): ), memory=dict(argstr='-memory %f', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source=['in_file'], name_template='%s_afni', @@ -46,7 +50,8 @@ def test_ECM_inputs(): ), sparsity=dict(argstr='-sparsity %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresh=dict(argstr='-thresh %f', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py new file mode 100644 index 0000000000..7889b82551 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Edge3 + + +def test_Edge3_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + datum=dict(argstr='-datum %s', + ), + environ=dict(nohash=True, 
+ usedefault=True, + ), + fscale=dict(argstr='-fscale', + xor=['gscale', 'nscale', 'scale_floats'], + ), + gscale=dict(argstr='-gscale', + xor=['fscale', 'nscale', 'scale_floats'], + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-input %s', + copyfile=False, + mandatory=True, + position=0, + ), + nscale=dict(argstr='-nscale', + xor=['fscale', 'gscale', 'scale_floats'], + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + position=-1, + ), + outputtype=dict(), + scale_floats=dict(argstr='-scale_floats %f', + xor=['fscale', 'gscale', 'nscale'], + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verbose=dict(argstr='-verbose', + ), + ) + inputs = Edge3.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Edge3_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Edge3.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py index 490b09e486..d317f48627 100644 --- a/nipype/interfaces/afni/tests/test_auto_Eval.py +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -13,7 +13,8 @@ def test_Eval_inputs(): mandatory=True, position=3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file_a=dict(argstr='-a %s', @@ -26,6 +27,9 @@ def test_Eval_inputs(): in_file_c=dict(argstr='-c %s', position=2, ), + num_threads=dict(nohash=True, + usedefault=True, + ), other=dict(argstr='', ), out1D=dict(argstr='-1D', @@ -40,7 +44,8 @@ def test_Eval_inputs(): ), stop_idx=dict(requires=['start_idx'], ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Eval.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py index 527c7fdb22..669ea278e8 100644 --- a/nipype/interfaces/afni/tests/test_auto_FWHMx.py +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -32,7 +32,8 @@ def test_FWHMx_inputs(): geom=dict(argstr='-geom', xor=['arith'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -56,7 +57,8 @@ def test_FWHMx_inputs(): name_source='in_file', name_template='%s_subbricks.out', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unif=dict(argstr='-unif', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py index e80adb6801..e9a60c1d3e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fim.py +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -16,7 +16,8 @@ def test_Fim_inputs(): mandatory=True, position=2, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -24,6 +25,9 @@ def test_Fim_inputs(): mandatory=True, position=1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out=dict(argstr='-out %s', position=4, ), @@ -32,7 +36,8 @@ def test_Fim_inputs(): name_template='%s_fim', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Fim.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py index 0573252de4..02cb81bccc 100644 --- a/nipype/interfaces/afni/tests/test_auto_Fourier.py +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -12,7 +12,8 @@ def 
test_Fourier_inputs(): highpass=dict(argstr='-highpass %f', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -23,6 +24,9 @@ def test_Fourier_inputs(): lowpass=dict(argstr='-lowpass %f', mandatory=True, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_fourier', @@ -30,7 +34,8 @@ def test_Fourier_inputs(): outputtype=dict(), retrend=dict(argstr='-retrend', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Fourier.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py index 5cc9bf390c..b4f2679556 100644 --- a/nipype/interfaces/afni/tests/test_auto_GCOR.py +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -9,7 +9,8 @@ def test_GCOR_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -24,7 +25,8 @@ def test_GCOR_inputs(): ), no_demean=dict(argstr='-no_demean', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GCOR.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py index 91f4238834..64952d334e 100644 --- a/nipype/interfaces/afni/tests/test_auto_Hist.py +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -11,7 +11,8 @@ def test_Hist_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -41,7 +42,8 @@ def test_Hist_inputs(): showhist=dict(argstr='-showhist', usedefault=True, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Hist.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py index c3690b8fd5..7cacf5f728 100644 --- a/nipype/interfaces/afni/tests/test_auto_LFCD.py +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -13,7 +13,8 @@ def test_LFCD_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -23,6 +24,9 @@ def test_LFCD_inputs(): ), mask=dict(argstr='-mask %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source=['in_file'], name_template='%s_afni', @@ -30,7 +34,8 @@ def test_LFCD_inputs(): outputtype=dict(), polort=dict(argstr='-polort %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresh=dict(argstr='-thresh %f', ), diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py index 0121d68d7d..eeaef55562 100644 --- a/nipype/interfaces/afni/tests/test_auto_MaskTool.py +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -25,7 +25,8 @@ def test_MaskTool_inputs(): ), frac=dict(argstr='-frac %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -35,15 +36,21 @@ def test_MaskTool_inputs(): ), inter=dict(argstr='-inter', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_mask', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), union=dict(argstr='-union', ), + verbose=dict(argstr='-verb %s', + ), ) inputs = MaskTool.input_spec() diff 
--git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py index 9c58ea432b..ef7ef9f983 100644 --- a/nipype/interfaces/afni/tests/test_auto_Maskave.py +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -9,7 +9,8 @@ def test_Maskave_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -20,6 +21,9 @@ def test_Maskave_inputs(): mask=dict(argstr='-mask %s', position=1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='> %s', keep_extension=True, name_source='in_file', @@ -30,7 +34,8 @@ def test_Maskave_inputs(): quiet=dict(argstr='-quiet', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Maskave.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py index 5bbbde8c94..b1ab57c9b9 100644 --- a/nipype/interfaces/afni/tests/test_auto_Means.py +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -8,18 +8,21 @@ def test_Means_inputs(): ), count=dict(argstr='-count', ), + datum=dict(argstr='-datum %s', + ), environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file_a=dict(argstr='%s', mandatory=True, - position=0, + position=-2, ), in_file_b=dict(argstr='%s', - position=1, + position=-1, ), mask_inter=dict(argstr='-mask_inter', ), @@ -27,6 +30,9 @@ def test_Means_inputs(): ), non_zero=dict(argstr='-non_zero', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file_a', name_template='%s_mean', @@ -40,7 +46,8 @@ def test_Means_inputs(): ), summ=dict(argstr='-sum', ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Means.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py index f943128da9..efb4a8f301 100644 --- a/nipype/interfaces/afni/tests/test_auto_Merge.py +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -14,7 +14,8 @@ def test_Merge_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -22,12 +23,16 @@ def test_Merge_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_merge', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Merge.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py index ca08111696..1724c1c7a5 100644 --- a/nipype/interfaces/afni/tests/test_auto_Notes.py +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -16,7 +16,8 @@ def test_Notes_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -24,6 +25,9 @@ def test_Notes_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='%s', ), outputtype=dict(), @@ -32,7 +36,8 @@ def test_Notes_inputs(): ), ses=dict(argstr='-ses', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Notes.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py new file mode 100644 index 0000000000..1df8eb30ea --- /dev/null +++ 
b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import NwarpApply + + +def test_NwarpApply_inputs(): + input_map = dict(ainterp=dict(argstr='-ainterp %s', + ), + args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-source %s', + mandatory=True, + ), + interp=dict(argstr='-interp %s', + ), + inv_warp=dict(argstr='-iwarp', + ), + master=dict(argstr='-master %s', + ), + out_file=dict(argstr='-prefix %s', + name_source='in_file', + name_template='%s_Nwarp', + ), + quiet=dict(argstr='-quiet', + xor=['verb'], + ), + short=dict(argstr='-short', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verb=dict(argstr='-verb', + xor=['quiet'], + ), + warp=dict(argstr='-nwarp %s', + mandatory=True, + ), + ) + inputs = NwarpApply.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_NwarpApply_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = NwarpApply.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py new file mode 100644 index 0000000000..fcfae528cf --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import NwarpCat + + +def test_NwarpCat_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + expad=dict(argstr='-expad %d', + 
), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s', + descr='list of tuples of 3D warps and associated functions', + mandatory=True, + position=-1, + ), + interp=dict(argstr='-interp %s', + ), + inv_warp=dict(argstr='-iwarp', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_source='in_files', + name_template='%s_NwarpCat', + ), + outputtype=dict(), + space=dict(argstr='-space %s', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verb=dict(argstr='-verb', + ), + ) + inputs = NwarpCat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_NwarpCat_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = NwarpCat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py new file mode 100644 index 0000000000..ae62f3924a --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -0,0 +1,66 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import OneDToolPy + + +def test_OneDToolPy_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + censor_motion=dict(argstr='-censor_motion %f %s', + ), + censor_prev_TR=dict(argstr='-censor_prev_TR', + ), + demean=dict(argstr='-demean', + ), + derivative=dict(argstr='-derivative', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-infile %s', + mandatory=True, + ), + out_file=dict(argstr='-write %s', + xor=['show_cormat_warnings'], + ), + 
outputtype=dict(), + py27_path=dict(usedefault=True, + ), + set_nruns=dict(argstr='-set_nruns %d', + ), + show_censor_count=dict(argstr='-show_censor_count', + ), + show_cormat_warnings=dict(argstr='-show_cormat_warnings |& tee %s', + position=-1, + usedefault=False, + xor=['out_file'], + ), + show_indices_interest=dict(argstr='-show_indices_interest', + ), + show_trs_run=dict(argstr='-show_trs_run %d', + ), + show_trs_uncensored=dict(argstr='-show_trs_uncensored %s', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = OneDToolPy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_OneDToolPy_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = OneDToolPy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py index 23f768f3dd..7d8b28ab65 100644 --- a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -8,11 +8,11 @@ def test_OutlierCount_inputs(): ), autoclip=dict(argstr='-autoclip', usedefault=True, - xor=['in_file'], + xor=['mask'], ), automask=dict(argstr='-automask', usedefault=True, - xor=['in_file'], + xor=['mask'], ), environ=dict(nohash=True, usedefault=True, @@ -20,7 +20,8 @@ def test_OutlierCount_inputs(): fraction=dict(argstr='-fraction', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -36,11 +37,9 @@ def test_OutlierCount_inputs(): mask=dict(argstr='-mask %s', xor=['autoclip', 'automask'], ), - out_file=dict(argstr='> %s', - keep_extension=False, + 
out_file=dict(keep_extension=False, name_source=['in_file'], name_template='%s_outliers', - position=-1, ), outliers_file=dict(argstr='-save %s', keep_extension=True, @@ -54,7 +53,8 @@ def test_OutlierCount_inputs(): ), save_outliers=dict(usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = OutlierCount.input_spec() @@ -65,12 +65,7 @@ def test_OutlierCount_inputs(): def test_OutlierCount_outputs(): - output_map = dict(out_file=dict(argstr='> %s', - keep_extension=False, - name_source=['in_file'], - name_template='%s_tqual', - position=-1, - ), + output_map = dict(out_file=dict(), out_outliers=dict(), ) outputs = OutlierCount.output_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py index 2659fc8d91..09a3855b92 100644 --- a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -19,7 +19,8 @@ def test_QualityIndex_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -44,7 +45,8 @@ def test_QualityIndex_inputs(): spearman=dict(argstr='-spearman', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = QualityIndex.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py new file mode 100644 index 0000000000..8a40687a10 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -0,0 +1,168 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Qwarp + + +def test_Qwarp_inputs(): + input_map = dict(Qfinal=dict(argstr='-Qfinal', + ), + Qonly=dict(argstr='-Qonly', + ), + 
allineate=dict(argstr='-allineate', + ), + allineate_opts=dict(argstr='-allineate_opts %s', + xand=['allineate'], + ), + allsave=dict(argstr='-allsave', + xor=['nopadWARP', 'duplo', 'plusminus'], + ), + args=dict(argstr='%s', + ), + ballopt=dict(argstr='-ballopt', + xor=['workhard', 'boxopt'], + ), + base_file=dict(argstr='-base %s', + copyfile=False, + mandatory=True, + ), + baxopt=dict(argstr='-boxopt', + xor=['workhard', 'ballopt'], + ), + blur=dict(argstr='-blur %s', + ), + duplo=dict(argstr='-duplo', + xor=['gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', 'allsave'], + ), + emask=dict(argstr='-emask %s', + copyfile=False, + ), + environ=dict(nohash=True, + usedefault=True, + ), + expad=dict(argstr='-expad %d', + xor=['nopadWARP'], + ), + gridlist=dict(argstr='-gridlist %s', + copyfile=False, + xor=['duplo', 'plusminus'], + ), + hel=dict(argstr='-hel', + xor=['nmi', 'mi', 'lpc', 'lpa', 'pear'], + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-source %s', + copyfile=False, + mandatory=True, + ), + inilev=dict(argstr='-inilev %d', + xor=['duplo'], + ), + iniwarp=dict(argstr='-iniwarp %s', + xor=['duplo'], + ), + iwarp=dict(argstr='-iwarp', + xor=['plusminus'], + ), + lpa=dict(argstr='-lpa', + xor=['nmi', 'mi', 'lpc', 'hel', 'pear'], + ), + lpc=dict(argstr='-lpc', + position=-2, + xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], + ), + maxlev=dict(argstr='-maxlev %d', + position=-1, + xor=['duplo'], + ), + mi=dict(argstr='-mi', + xor=['mi', 'hel', 'lpc', 'lpa', 'pear'], + ), + minpatch=dict(argstr='-minpatch %d', + ), + nmi=dict(argstr='-nmi', + xor=['nmi', 'hel', 'lpc', 'lpa', 'pear'], + ), + noXdis=dict(argstr='-noXdis', + ), + noYdis=dict(argstr='-noYdis', + ), + noZdis=dict(argstr='-noZdis', + ), + noneg=dict(argstr='-noneg', + ), + nopad=dict(argstr='-nopad', + ), + nopadWARP=dict(argstr='-nopadWARP', + xor=['allsave', 'expad'], + ), + nopenalty=dict(argstr='-nopenalty', + ), + 
nowarp=dict(argstr='-nowarp', + ), + noweight=dict(argstr='-noweight', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + genfile=True, + name_source=['in_file'], + name_template='%s_QW', + ), + out_weight_file=dict(argstr='-wtprefix %s', + ), + outputtype=dict(), + overwrite=dict(argstr='-overwrite', + ), + pblur=dict(argstr='-pblur %s', + ), + pear=dict(argstr='-pear', + ), + penfac=dict(argstr='-penfac %f', + ), + plusminus=dict(argstr='-plusminus', + xor=['duplo', 'allsave', 'iwarp'], + ), + quiet=dict(argstr='-quiet', + xor=['verb'], + ), + resample=dict(argstr='-resample', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verb=dict(argstr='-verb', + xor=['quiet'], + ), + wball=dict(argstr='-wball %s', + ), + weight=dict(argstr='-weight %s', + ), + wmask=dict(argstr='-wpass %s %f', + ), + workhard=dict(argstr='-workhard', + xor=['boxopt', 'ballopt'], + ), + ) + inputs = Qwarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Qwarp_outputs(): + output_map = dict(base_warp=dict(), + source_warp=dict(), + warped_base=dict(), + warped_source=dict(), + weights=dict(), + ) + outputs = Qwarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py new file mode 100644 index 0000000000..b0c4f00c74 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import QwarpPlusMinus + + +def test_QwarpPlusMinus_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + 
base_file=dict(argstr='-base %s', + copyfile=False, + mandatory=True, + ), + blur=dict(argstr='-blur %s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + minpatch=dict(argstr='-minpatch %d', + ), + nopadWARP=dict(argstr='-nopadWARP', + ), + noweight=dict(argstr='-noweight', + ), + pblur=dict(argstr='-pblur %s', + ), + source_file=dict(argstr='-source %s', + copyfile=False, + mandatory=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = QwarpPlusMinus.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_QwarpPlusMinus_outputs(): + output_map = dict(base_warp=dict(), + source_warp=dict(), + warped_base=dict(), + warped_source=dict(), + ) + outputs = QwarpPlusMinus.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py index 1e5de5806f..2b720fb0c6 100644 --- a/nipype/interfaces/afni/tests/test_auto_ROIStats.py +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -9,7 +9,8 @@ def test_ROIStats_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -25,9 +26,8 @@ def test_ROIStats_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(mandatory=True, + terminal_output=dict(deprecated='1.0.0', nohash=True, - usedefault=True, ), ) inputs = ROIStats.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py index a1416e8d96..81b72ea2cb 100644 --- 
a/nipype/interfaces/afni/tests/test_auto_Refit.py +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -6,12 +6,23 @@ def test_Refit_inputs(): input_map = dict(args=dict(argstr='%s', ), + atrcopy=dict(argstr='-atrcopy %s %s', + ), + atrfloat=dict(argstr='-atrfloat %s %s', + ), + atrint=dict(argstr='-atrint %s %s', + ), + atrstring=dict(argstr='-atrstring %s %s', + ), deoblique=dict(argstr='-deoblique', ), + duporigin_file=dict(argstr='-duporigin %s', + ), environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -19,14 +30,21 @@ def test_Refit_inputs(): mandatory=True, position=-1, ), + nosaveatr=dict(argstr='-nosaveatr', + ), + saveatr=dict(argstr='-saveatr', + ), space=dict(argstr='-space %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xdel=dict(argstr='-xdel %f', ), xorigin=dict(argstr='-xorigin %s', ), + xyzscale=dict(argstr='-xyzscale %f', + ), ydel=dict(argstr='-ydel %f', ), yorigin=dict(argstr='-yorigin %s', diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py new file mode 100644 index 0000000000..3511414f11 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -0,0 +1,132 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Remlfit + + +def test_Remlfit_inputs(): + input_map = dict(STATmask=dict(argstr='-STATmask %s', + ), + addbase=dict(argstr='-addbase %s', + copyfile=False, + sep=' ', + ), + args=dict(argstr='%s', + ), + automask=dict(argstr='-automask', + usedefault=True, + ), + dsort=dict(argstr='-dsort %s', + copyfile=False, + ), + dsort_nods=dict(argstr='-dsort_nods', + requires=['dsort'], + ), + environ=dict(nohash=True, + usedefault=True, + ), + errts_file=dict(argstr='-Rerrts %s', + ), + 
fitts_file=dict(argstr='-Rfitts %s', + ), + fout=dict(argstr='-fout', + ), + glt_file=dict(argstr='-Rglt %s', + ), + gltsym=dict(argstr='-gltsym "%s" %s...', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='-input "%s"', + copyfile=False, + mandatory=True, + sep=' ', + ), + mask=dict(argstr='-mask %s', + ), + matim=dict(argstr='-matim %s', + xor=['matrix'], + ), + matrix=dict(argstr='-matrix %s', + mandatory=True, + ), + nobout=dict(argstr='-nobout', + ), + nodmbase=dict(argstr='-nodmbase', + requires=['addbase', 'dsort'], + ), + nofdr=dict(argstr='-noFDR', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + obeta=dict(argstr='-Obeta %s', + ), + obuck=dict(argstr='-Obuck %s', + ), + oerrts=dict(argstr='-Oerrts %s', + ), + ofitts=dict(argstr='-Ofitts %s', + ), + oglt=dict(argstr='-Oglt %s', + ), + out_file=dict(argstr='-Rbuck %s', + ), + outputtype=dict(), + ovar=dict(argstr='-Ovar %s', + ), + polort=dict(argstr='-polort %d', + xor=['matrix'], + ), + quiet=dict(argstr='-quiet', + ), + rbeta_file=dict(argstr='-Rbeta %s', + ), + rout=dict(argstr='-rout', + ), + slibase=dict(argstr='-slibase %s', + ), + slibase_sm=dict(argstr='-slibase_sm %s', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + tout=dict(argstr='-tout', + ), + usetemp=dict(argstr='-usetemp', + ), + var_file=dict(argstr='-Rvar %s', + ), + verb=dict(argstr='-verb', + ), + wherr_file=dict(argstr='-Rwherr %s', + ), + ) + inputs = Remlfit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Remlfit_outputs(): + output_map = dict(errts_file=dict(), + fitts_file=dict(), + glt_file=dict(), + obeta=dict(), + obuck=dict(), + oerrts=dict(), + ofitts=dict(), + oglt=dict(), + out_file=dict(), + ovar=dict(), + rbeta_file=dict(), + var_file=dict(), + wherr_file=dict(), + ) + outputs = 
Remlfit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py index 4fabc2749c..21215f56f1 100644 --- a/nipype/interfaces/afni/tests/test_auto_Resample.py +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -9,7 +9,8 @@ def test_Resample_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inset %s', @@ -19,6 +20,9 @@ def test_Resample_inputs(): ), master=dict(argstr='-master %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), orientation=dict(argstr='-orient %s', ), out_file=dict(argstr='-prefix %s', @@ -28,7 +32,8 @@ def test_Resample_inputs(): outputtype=dict(), resample_mode=dict(argstr='-rmode %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_size=dict(argstr='-dxyz %f %f %f', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py index 6822425f00..57b23f6a18 100644 --- a/nipype/interfaces/afni/tests/test_auto_Retroicor.py +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -16,7 +16,8 @@ def test_Retroicor_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -24,6 +25,9 @@ def test_Retroicor_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), order=dict(argstr='-order %s', position=-5, ), @@ -40,7 +44,8 @@ def test_Retroicor_inputs(): hash_files=False, position=-7, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + 
nohash=True, ), threshold=dict(argstr='-threshold %d', position=-4, diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py index 496f947a28..dbda1b28c5 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTest.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -11,7 +11,8 @@ def test_SVMTest_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-testvol %s', @@ -26,13 +27,17 @@ def test_SVMTest_inputs(): ), nopredcensord=dict(argstr='-nopredcensord', ), + num_threads=dict(nohash=True, + usedefault=True, + ), options=dict(argstr='%s', ), out_file=dict(argstr='-predictions %s', name_template='%s_predictions', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), testlabels=dict(argstr='-testlabels %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py index 25973372e6..a0ba4dba42 100644 --- a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -16,7 +16,8 @@ def test_SVMTrain_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-trainvol %s', @@ -38,6 +39,9 @@ def test_SVMTrain_inputs(): ), nomodelmask=dict(argstr='-nomodelmask', ), + num_threads=dict(nohash=True, + usedefault=True, + ), options=dict(argstr='%s', ), out_file=dict(argstr='-bucket %s', @@ -46,7 +50,8 @@ def test_SVMTrain_inputs(): suffix='_bucket', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trainlabels=dict(argstr='-trainlabels %s', ), diff --git 
a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py index e8114e5838..1069121f79 100644 --- a/nipype/interfaces/afni/tests/test_auto_Seg.py +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -19,7 +19,8 @@ def test_Seg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-anat %s', @@ -39,7 +40,8 @@ def test_Seg_inputs(): ), prefix=dict(argstr='-prefix %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Seg.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py index 37b24cfb76..97277301c3 100644 --- a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -9,7 +9,8 @@ def test_SkullStrip_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -17,12 +18,16 @@ def test_SkullStrip_inputs(): mandatory=True, position=1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_skullstrip', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SkullStrip.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py new file mode 100644 index 0000000000..bd95eca3a5 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Synthesize + + +def test_Synthesize_inputs(): + 
input_map = dict(TR=dict(argstr='-TR %f', + ), + args=dict(argstr='%s', + ), + cbucket=dict(argstr='-cbucket %s', + copyfile=False, + mandatory=True, + ), + cenfill=dict(argstr='-cenfill %s', + ), + dry_run=dict(argstr='-dry', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + matrix=dict(argstr='-matrix %s', + copyfile=False, + mandatory=True, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_template='syn', + ), + outputtype=dict(), + select=dict(argstr='-select %s', + mandatory=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = Synthesize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Synthesize_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Synthesize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py index 9c72dcd545..f74a122828 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCat.py +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -9,7 +9,8 @@ def test_TCat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr=' %s', @@ -17,6 +18,9 @@ def test_TCat_inputs(): mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_files', name_template='%s_tcat', @@ -25,7 +29,10 @@ def test_TCat_inputs(): rlt=dict(argstr='-rlt%s', position=1, ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verbose=dict(argstr='-verb', ), ) inputs = TCat.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py new file mode 100644 index 0000000000..58fc2108e3 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TCatSubBrick + + +def test_TCatSubBrick_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s%s ...', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + genfile=True, + ), + outputtype=dict(), + rlt=dict(argstr='-rlt%s', + position=1, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = TCatSubBrick.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_TCatSubBrick_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = TCatSubBrick.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py index e42ac2b7d5..a57d8a6a10 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -9,13 +9,17 @@ def test_TCorr1D_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, 
usedefault=True, ), ktaub=dict(argstr=' -ktaub', position=1, xor=['pearson', 'spearman', 'quadrant'], ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', keep_extension=True, name_source='xset', @@ -34,7 +38,8 @@ def test_TCorr1D_inputs(): position=1, xor=['pearson', 'quadrant', 'ktaub'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xset=dict(argstr=' %s', copyfile=False, diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py index 8c80f15080..17f5e4ff19 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -42,7 +42,8 @@ def test_TCorrMap_inputs(): suffix='_hist', ), histogram_bin_numbers=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-input %s', @@ -55,6 +56,9 @@ def test_TCorrMap_inputs(): name_source='in_file', suffix='_mean', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source=['in_file'], name_template='%s_afni', @@ -83,7 +87,8 @@ def test_TCorrMap_inputs(): suffix='_sexpr', xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresholds=dict(), var_absolute_threshold=dict(argstr='-VarThresh %f %f %f %s', diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py index e2e100cdb7..8857a2affe 100644 --- a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -9,7 +9,11 @@ def test_TCorrelate_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + 
), + num_threads=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='-prefix %s', @@ -21,7 +25,8 @@ def test_TCorrelate_inputs(): ), polort=dict(argstr='-polort %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xset=dict(argstr='%s', copyfile=False, diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py new file mode 100644 index 0000000000..fbb11ec746 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TNorm + + +def test_TNorm_inputs(): + input_map = dict(L1fit=dict(argstr='-L1fit', + ), + args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + norm1=dict(argstr='-norm1', + ), + norm2=dict(argstr='-norm2', + ), + normR=dict(argstr='-normR', + ), + normx=dict(argstr='-normx', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_source='in_file', + name_template='%s_tnorm', + ), + outputtype=dict(), + polort=dict(argstr='-polort %s', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = TNorm.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_TNorm_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = TNorm.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py 
index e167205995..ee7663bbd6 100644 --- a/nipype/interfaces/afni/tests/test_auto_TShift.py +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -11,7 +11,8 @@ def test_TShift_inputs(): ), ignore=dict(argstr='-ignore %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -21,6 +22,9 @@ def test_TShift_inputs(): ), interp=dict(argstr='-%s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_tshift', @@ -30,7 +34,8 @@ def test_TShift_inputs(): ), rltplus=dict(argstr='-rlt+', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tpattern=dict(argstr='-tpattern %s', ), diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py index f09fb5b4af..2315d81512 100644 --- a/nipype/interfaces/afni/tests/test_auto_TStat.py +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -9,7 +9,8 @@ def test_TStat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -19,6 +20,9 @@ def test_TStat_inputs(): ), mask=dict(argstr='-mask %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), options=dict(argstr='%s', ), out_file=dict(argstr='-prefix %s', @@ -26,7 +30,8 @@ def test_TStat_inputs(): name_template='%s_tstat', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TStat.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py index 0df075d87f..5f39148167 100644 --- a/nipype/interfaces/afni/tests/test_auto_To3D.py +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -17,13 +17,17 @@ def test_To3D_inputs(): 
), funcparams=dict(argstr='-time:zt %s alt+z2', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_folder=dict(argstr='%s/*.dcm', mandatory=True, position=-1, ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source=['in_folder'], name_template='%s', @@ -31,7 +35,8 @@ def test_To3D_inputs(): outputtype=dict(), skipoutliers=dict(argstr='-skip_outliers', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = To3D.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py new file mode 100644 index 0000000000..b5f1041b60 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Undump + + +def test_Undump_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + coordinates_specification=dict(argstr='-%s', + ), + datatype=dict(argstr='-datum %s', + ), + default_value=dict(argstr='-dval %f', + ), + environ=dict(nohash=True, + usedefault=True, + ), + fill_value=dict(argstr='-fval %f', + ), + head_only=dict(argstr='-head_only', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-master %s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask_file=dict(argstr='-mask %s', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + orient=dict(argstr='-orient %s', + ), + out_file=dict(argstr='-prefix %s', + name_source='in_file', + ), + outputtype=dict(), + srad=dict(argstr='-srad %f', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = Undump.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(inputs.traits()[key], metakey) == value + + +def test_Undump_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Undump.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py new file mode 100644 index 0000000000..1d82e5aed5 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -0,0 +1,70 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Unifize + + +def test_Unifize_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + cl_frac=dict(argstr='-clfrac %f', + ), + environ=dict(nohash=True, + usedefault=True, + ), + epi=dict(argstr='-EPI', + requires=['no_duplo', 't2'], + xor=['gm'], + ), + gm=dict(argstr='-GM', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-input %s', + copyfile=False, + mandatory=True, + position=-1, + ), + no_duplo=dict(argstr='-noduplo', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_source='in_file', + name_template='%s_unifized', + ), + outputtype=dict(), + quiet=dict(argstr='-quiet', + ), + rbt=dict(argstr='-rbt %f %f %f', + ), + scale_file=dict(argstr='-ssave %s', + ), + t2=dict(argstr='-T2', + ), + t2_up=dict(argstr='-T2up %f', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + urad=dict(argstr='-Urad %s', + ), + ) + inputs = Unifize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Unifize_outputs(): + output_map = dict(out_file=dict(), + scale_file=dict(), + ) + outputs = Unifize.output_spec() + + for key, metadata in 
list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py index 915000e5b1..a8aa8a8832 100644 --- a/nipype/interfaces/afni/tests/test_auto_Volreg.py +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -14,7 +14,8 @@ def test_Volreg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -22,12 +23,17 @@ def test_Volreg_inputs(): mandatory=True, position=-1, ), + interp=dict(argstr='-%s', + ), md1d_file=dict(argstr='-maxdisp1D %s', keep_extension=True, name_source='in_file', name_template='%s_md.1D', position=-4, ), + num_threads=dict(nohash=True, + usedefault=True, + ), oned_file=dict(argstr='-1Dfile %s', keep_extension=True, name_source='in_file', @@ -43,7 +49,8 @@ def test_Volreg_inputs(): name_template='%s_volreg', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timeshift=dict(argstr='-tshift 0', ), diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py index e370d32058..ef2d23d460 100644 --- a/nipype/interfaces/afni/tests/test_auto_Warp.py +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -13,7 +13,8 @@ def test_Warp_inputs(): ), gridset=dict(argstr='-gridset %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -29,15 +30,23 @@ def test_Warp_inputs(): ), newgrid=dict(argstr='-newgrid %f', ), + num_threads=dict(nohash=True, + usedefault=True, + ), + oblique_parent=dict(argstr='-oblique_parent %s', + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_warp', ), 
outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tta2mni=dict(argstr='-tta2mni', ), + verbose=dict(argstr='-verb', + ), zpad=dict(argstr='-zpad %d', ), ) diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py index 8019b1dcf8..70cb544bc5 100644 --- a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -9,7 +9,8 @@ def test_ZCutUp_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -19,12 +20,16 @@ def test_ZCutUp_inputs(): ), keep=dict(argstr='-keep %s', ), + num_threads=dict(nohash=True, + usedefault=True, + ), out_file=dict(argstr='-prefix %s', name_source='in_file', name_template='%s_zcutup', ), outputtype=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ZCutUp.input_spec() diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py new file mode 100644 index 0000000000..6de330b5ae --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Zcat + + +def test_Zcat_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + datum=dict(argstr='-datum %s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + fscale=dict(argstr='-fscale', + xor=['nscale'], + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + nscale=dict(argstr='-nscale', + xor=['fscale'], + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + 
name_template='zcat', + ), + outputtype=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + verb=dict(argstr='-verb', + ), + ) + inputs = Zcat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Zcat_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Zcat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py new file mode 100644 index 0000000000..5a23c1a46d --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -0,0 +1,82 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Zeropad + + +def test_Zeropad_inputs(): + input_map = dict(A=dict(argstr='-A %i', + xor=['master'], + ), + AP=dict(argstr='-AP %i', + xor=['master'], + ), + I=dict(argstr='-I %i', + xor=['master'], + ), + IS=dict(argstr='-IS %i', + xor=['master'], + ), + L=dict(argstr='-L %i', + xor=['master'], + ), + P=dict(argstr='-P %i', + xor=['master'], + ), + R=dict(argstr='-R %i', + xor=['master'], + ), + RL=dict(argstr='-RL %i', + xor=['master'], + ), + S=dict(argstr='-S %i', + xor=['master'], + ), + args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + master=dict(argstr='-master %s', + xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm'], + ), + mm=dict(argstr='-mm', + xor=['master'], + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + out_file=dict(argstr='-prefix %s', + name_template='zeropad', + ), + 
outputtype=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + z=dict(argstr='-z %i', + xor=['master'], + ), + ) + inputs = Zeropad.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Zeropad_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = Zeropad.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 0e6455496c..8ddc340858 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -25,9 +25,64 @@ CommandLineInputSpec, CommandLine, Directory, TraitedSpec, traits, isdefined, File, InputMultiPath, Undefined, Str) from ...external.due import BibTeX - from .base import ( - AFNICommandBase, AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec) + AFNICommandBase, AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec, + AFNIPythonCommandInputSpec, AFNIPythonCommand) + +class ABoverlapInputSpec(AFNICommandInputSpec): + in_file_a = File( + desc='input file A', + argstr='%s', + position=-3, + mandatory=True, + exists=True, + copyfile=False) + in_file_b = File( + desc='input file B', + argstr='%s', + position=-2, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + desc='collect output to a file', + argstr=' |& tee %s', + position=-1) + no_automask = traits.Bool( + desc='consider input datasets as masks', + argstr='-no_automask') + quiet = traits.Bool( + desc='be as quiet as possible (without being entirely mute)', + argstr='-quiet') + verb = traits.Bool( + desc='print out some progress reports (to stderr)', + argstr='-verb') + + +class ABoverlap(AFNICommand): + """Output (to screen) is a count of various things about how + the automasks of datasets A and B 
overlap or don't overlap. + + For complete details, see the `3dABoverlap Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> aboverlap = afni.ABoverlap() + >>> aboverlap.inputs.in_file_a = 'functional.nii' + >>> aboverlap.inputs.in_file_b = 'structural.nii' + >>> aboverlap.inputs.out_file = 'out.mask_ae_overlap.txt' + >>> aboverlap.cmdline + '3dABoverlap functional.nii structural.nii |& tee out.mask_ae_overlap.txt' + >>> res = aboverlap.run() # doctest: +SKIP + + """ + + _cmd = '3dABoverlap' + input_spec = ABoverlapInputSpec + output_spec = AFNICommandOutputSpec class AFNItoNIFTIInputSpec(AFNICommandInputSpec): @@ -84,7 +139,7 @@ class AFNItoNIFTI(AFNICommand): >>> a2n = afni.AFNItoNIFTI() >>> a2n.inputs.in_file = 'afni_output.3D' >>> a2n.inputs.out_file = 'afni_output.nii' - >>> a2n.cmdline # doctest: +ALLOW_UNICODE + >>> a2n.cmdline '3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D' >>> res = a2n.run() # doctest: +SKIP @@ -152,7 +207,7 @@ class Autobox(AFNICommand): >>> abox = afni.Autobox() >>> abox.inputs.in_file = 'structural.nii' >>> abox.inputs.padding = 5 - >>> abox.cmdline # doctest: +ALLOW_UNICODE + >>> abox.cmdline '3dAutobox -input structural.nii -prefix structural_autobox -npad 5' >>> res = abox.run() # doctest: +SKIP @@ -191,6 +246,26 @@ class BrickStatInputSpec(CommandLineInputSpec): desc='print the minimum value in dataset', argstr='-min', position=1) + slow = traits.Bool( + desc='read the whole dataset to find the min and max values', + argstr='-slow') + max = traits.Bool( + desc='print the maximum value in the dataset', + argstr='-max') + mean = traits.Bool( + desc='print the mean value in the dataset', + argstr='-mean') + sum = traits.Bool( + desc='print the sum of values in the dataset', + argstr='-sum') + var = traits.Bool( + desc='print the variance in the dataset', + argstr='-var') + percentile = traits.Tuple(traits.Float, traits.Float, traits.Float, + desc='p0 ps p1 write the percentile values 
starting ' + 'at p0% and ending at p1% at a step of ps%. ' + 'only one sub-brick is accepted.', + argstr='-percentile %.3f %.3f %.3f') class BrickStatOutputSpec(TraitedSpec): @@ -213,7 +288,7 @@ class BrickStat(AFNICommandBase): >>> brickstat.inputs.in_file = 'functional.nii' >>> brickstat.inputs.mask = 'skeleton_mask.nii.gz' >>> brickstat.inputs.min = True - >>> brickstat.cmdline # doctest: +ALLOW_UNICODE + >>> brickstat.cmdline '3dBrickStat -min -mask skeleton_mask.nii.gz functional.nii' >>> res = brickstat.run() # doctest: +SKIP @@ -250,6 +325,90 @@ def aggregate_outputs(self, runtime=None, needed_outputs=None): return outputs +class BucketInputSpec(AFNICommandInputSpec): + in_file = traits.List( + traits.Tuple( + (File( + exists=True, + copyfile=False), + traits.Str(argstr="'%s'")), + artstr="%s%s"), + position=-1, + mandatory=True, + argstr="%s", + desc='List of tuples of input datasets and subbrick selection strings' + 'as described in more detail in the following afni help string' + 'Input dataset specified using one of these forms:' + ' \'prefix+view\', \'prefix+view.HEAD\', or \'prefix+view.BRIK\'.' + 'You can also add a sub-brick selection list after the end of the' + 'dataset name. This allows only a subset of the sub-bricks to be' + 'included into the output (by default, all of the input dataset' + 'is copied into the output). A sub-brick selection list looks like' + 'one of the following forms:' + ' fred+orig[5] ==> use only sub-brick #5' + ' fred+orig[5,9,17] ==> use #5, #9, and #17' + ' fred+orig[5..8] or [5-8] ==> use #5, #6, #7, and #8' + ' fred+orig[5..13(2)] or [5-13(2)] ==> use #5, #7, #9, #11, and #13' + 'Sub-brick indexes start at 0. You can use the character \'$\'' + 'to indicate the last sub-brick in a dataset; for example, you' + 'can select every third sub-brick by using the selection list' + ' fred+orig[0..$(3)]' + 'N.B.: The sub-bricks are output in the order specified, which may' + ' not be the order in the original datasets. 
For example, using' + ' fred+orig[0..$(2),1..$(2)]' + ' will cause the sub-bricks in fred+orig to be output into the' + ' new dataset in an interleaved fashion. Using' + ' fred+orig[$..0]' + ' will reverse the order of the sub-bricks in the output.' + 'N.B.: Bucket datasets have multiple sub-bricks, but do NOT have' + ' a time dimension. You can input sub-bricks from a 3D+time dataset' + ' into a bucket dataset. You can use the \'3dinfo\' program to see' + ' how many sub-bricks a 3D+time or a bucket dataset contains.' + 'N.B.: In non-bucket functional datasets (like the \'fico\' datasets' + ' output by FIM, or the \'fitt\' datasets output by 3dttest), sub-brick' + ' [0] is the \'intensity\' and sub-brick [1] is the statistical parameter' + ' used as a threshold. Thus, to create a bucket dataset using the' + ' intensity from dataset A and the threshold from dataset B, and' + ' calling the output dataset C, you would type' + ' 3dbucket -prefix C -fbuc \'A+orig[0]\' -fbuc \'B+orig[1]\'' + 'WARNING: using this program, it is possible to create a dataset that' + ' has different basic datum types for different sub-bricks' + ' (e.g., shorts for brick 0, floats for brick 1).' + ' Do NOT do this! Very few AFNI programs will work correctly' + ' with such datasets!') + out_file = File( + argstr='-prefix %s', + name_template='buck') + + +class Bucket(AFNICommand): + """Concatenate sub-bricks from input datasets into one big + 'bucket' dataset. + + For complete details, see the `3dbucket Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> bucket = afni.Bucket() + >>> bucket.inputs.in_file = [('functional.nii',"{2..$}"), ('functional.nii',"{1}")] + >>> bucket.inputs.out_file = 'vr_base' + >>> bucket.cmdline + "3dbucket -prefix vr_base functional.nii'{2..$}' functional.nii'{1}'" + >>> res = bucket.run() # doctest: +SKIP + + """ + + _cmd = '3dbucket' + input_spec = BucketInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'in_file': + return spec.argstr%(' '.join([i[0]+"'"+i[1]+"'" for i in value])) + return super(Bucket, self)._format_arg(name, spec, value) class CalcInputSpec(AFNICommandInputSpec): in_file_a = File( @@ -286,6 +445,9 @@ class CalcInputSpec(AFNICommandInputSpec): requires=['start_idx']) single_idx = traits.Int( desc='volume index for in_file_a') + overwrite = traits.Bool( + desc='overwrite output', + argstr='-overwrite') other = File( desc='other options', argstr='') @@ -307,10 +469,20 @@ class Calc(AFNICommand): >>> calc.inputs.expr='a*b' >>> calc.inputs.out_file = 'functional_calc.nii.gz' >>> calc.inputs.outputtype = 'NIFTI' - >>> calc.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> calc.cmdline # doctest: +ELLIPSIS '3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz' >>> res = calc.run() # doctest: +SKIP + >>> from nipype.interfaces import afni + >>> calc = afni.Calc() + >>> calc.inputs.in_file_a = 'functional.nii' + >>> calc.inputs.expr = '1' + >>> calc.inputs.out_file = 'rm.epi.all1' + >>> calc.inputs.overwrite = True + >>> calc.cmdline + '3dcalc -a functional.nii -expr "1" -prefix rm.epi.all1 -overwrite' + >>> res = calc.run() # doctest: +SKIP + """ _cmd = '3dcalc' @@ -335,6 +507,236 @@ def _parse_inputs(self, skip=None): skip=('start_idx', 'stop_idx', 'other')) +class CatInputSpec(AFNICommandInputSpec): + in_files = traits.List(File(exists=True), argstr="%s", + mandatory=True, position=-2) + out_file = File( + 
argstr='> %s', + default='catout.1d', + desc='output (concatenated) file name', + position=-1, + mandatory=True) + omitconst = traits.Bool( + desc='Omit columns that are identically constant from output.', + argstr='-nonconst') + keepfree = traits.Bool( + desc='Keep only columns that are marked as \'free\' in the ' + '3dAllineate header from \'-1Dparam_save\'. ' + 'If there is no such header, all columns are kept.', + argstr='-nonfixed') + out_format = traits.Enum( + 'int','nice','double','fint','cint', + argstr='-form %s', + desc='specify data type for output. Valid types are \'int\', ' + '\'nice\', \'double\', \'fint\', and \'cint\'.', + xor=['out_int','out_nice','out_double','out_fint','out_cint']) + stack = traits.Bool( + desc='Stack the columns of the resultant matrix in the output.', + argstr='-stack') + sel = traits.Str( + desc='Apply the same column/row selection string to all filenames ' + 'on the command line.', + argstr='-sel %s') + out_int = traits.Bool( + desc='specifiy int data type for output', + argstr='-i', + xor=['out_format','out_nice','out_double','out_fint','out_cint']) + out_nice = traits.Bool( + desc='specifiy nice data type for output', + argstr='-n', + xor=['out_format','out_int','out_double','out_fint','out_cint']) + out_double = traits.Bool( + desc='specifiy double data type for output', + argstr='-d', + xor=['out_format','out_nice','out_int','out_fint','out_cint']) + out_fint = traits.Bool( + desc='specifiy int, rounded down, data type for output', + argstr='-f', + xor=['out_format','out_nice','out_double','out_int','out_cint']) + out_cint = traits.Bool( + desc='specifiy int, rounded up, data type for output', + xor=['out_format','out_nice','out_double','out_fint','out_int']) + + +class Cat(AFNICommand): + """1dcat takes as input one or more 1D files, and writes out a 1D file + containing the side-by-side concatenation of all or a subset of the + columns from the input files. + + For complete details, see the `1dcat Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> cat1d = afni.Cat() + >>> cat1d.inputs.sel = "'[0,2]'" + >>> cat1d.inputs.in_files = ['f1.1D', 'f2.1D'] + >>> cat1d.inputs.out_file = 'catout.1d' + >>> cat1d.cmdline + "1dcat -sel '[0,2]' f1.1D f2.1D > catout.1d" + >>> res = cat1d.run() # doctest: +SKIP + + """ + + _cmd = '1dcat' + input_spec = CatInputSpec + output_spec = AFNICommandOutputSpec + +class CatMatvecInputSpec(AFNICommandInputSpec): + in_file = traits.List( + traits.Tuple(traits.Str(), traits.Str()), + descr="list of tuples of mfiles and associated opkeys", + mandatory=True, + argstr="%s", + position=-2) + out_file = File( + descr="File to write concattenated matvecs to", + argstr=" > %s", + position=-1, + mandatory=True) + matrix = traits.Bool( + descr="indicates that the resulting matrix will" + "be written to outfile in the 'MATRIX(...)' format (FORM 3)." + "This feature could be used, with clever scripting, to input" + "a matrix directly on the command line to program 3dWarp.", + argstr="-MATRIX", + xor=['oneline', 'fourxfour']) + oneline = traits.Bool( + descr="indicates that the resulting matrix" + "will simply be written as 12 numbers on one line.", + argstr="-ONELINE", + xor=['matrix', 'fourxfour']) + fourxfour = traits.Bool( + descr="Output matrix in augmented form (last row is 0 0 0 1)" + "This option does not work with -MATRIX or -ONELINE", + argstr="-4x4", + xor=['matrix','oneline']) + +class CatMatvec(AFNICommand): + """Catenates 3D rotation+shift matrix+vector transformations. + + For complete details, see the `cat_matvec Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> cmv = afni.CatMatvec() + >>> cmv.inputs.in_file = [('structural.BRIK::WARP_DATA','I')] + >>> cmv.inputs.out_file = 'warp.anat.Xat.1D' + >>> cmv.cmdline + 'cat_matvec structural.BRIK::WARP_DATA -I > warp.anat.Xat.1D' + >>> res = cmv.run() # doctest: +SKIP + + """ + + _cmd = 'cat_matvec' + input_spec = CatMatvecInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'in_file': + return spec.argstr%(' '.join([i[0]+' -'+i[1] for i in value])) + return super(CatMatvec, self)._format_arg(name, spec, value) + + +class CenterMassInputSpec(CommandLineInputSpec): + in_file = File( + desc='input file to 3dCM', + argstr='%s', + position=-2, + mandatory=True, + exists=True, + copyfile=True) + cm_file = File( + name_source='in_file', + name_template='%s_cm.out', + hash_files=False, + keep_extension=False, + descr="File to write center of mass to", + argstr="> %s", + position=-1) + mask_file = File( + desc='Only voxels with nonzero values in the provided mask will be ' + 'averaged.', + argstr='-mask %s', + exists=True) + automask = traits.Bool( + desc='Generate the mask automatically', + argstr='-automask') + set_cm = traits.Tuple( + (traits.Float(), traits.Float(), traits.Float()), + desc='After computing the center of mass, set the origin fields in ' + 'the header so that the center of mass will be at (x,y,z) in ' + 'DICOM coords.', + argstr='-set %f %f %f') + local_ijk = traits.Bool( + desc='Output values as (i,j,k) in local orienation', + argstr='-local_ijk') + roi_vals = traits.List( + traits.Int, + desc='Compute center of mass for each blob with voxel value of v0, ' + 'v1, v2, etc. 
This option is handy for getting ROI centers of ' + 'mass.', + argstr='-roi_vals %s') + all_rois = traits.Bool( + desc='Don\'t bother listing the values of ROIs you want: The program ' + 'will find all of them and produce a full list', + argstr='-all_rois') + + +class CenterMassOutputSpec(TraitedSpec): + out_file = File( + exists=True, + desc='output file') + cm_file = File( + desc='file with the center of mass coordinates') + cm = traits.List( + traits.Tuple(traits.Float(), traits.Float(), traits.Float()), + desc='center of mass') + + +class CenterMass(AFNICommandBase): + """Computes center of mass using 3dCM command + + .. note:: + + By default, the output is (x,y,z) values in DICOM coordinates. But + as of Dec, 2016, there are now command line switches for other options. + + + For complete details, see the `3dCM Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> cm = afni.CenterMass() + >>> cm.inputs.in_file = 'structural.nii' + >>> cm.inputs.cm_file = 'cm.txt' + >>> cm.inputs.roi_vals = [2, 10] + >>> cm.cmdline + '3dCM -roi_vals 2 10 structural.nii > cm.txt' + >>> res = 3dcm.run() # doctest: +SKIP + """ + + _cmd = '3dCM' + input_spec = CenterMassInputSpec + output_spec = CenterMassOutputSpec + + def _list_outputs(self): + outputs = super(CenterMass, self)._list_outputs() + outputs['out_file'] = os.path.abspath(self.inputs.in_file) + outputs['cm_file'] = os.path.abspath(self.inputs.cm_file) + sout = np.loadtxt(outputs['cm_file'], ndmin=2) # pylint: disable=E1101 + outputs['cm'] = [tuple(s) for s in sout] + return outputs + + class CopyInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dcopy', @@ -349,6 +751,9 @@ class CopyInputSpec(AFNICommandInputSpec): argstr='%s', position=-1, name_source='in_file') + verbose = traits.Bool( + desc='print progress reports', + argstr='-verb') class Copy(AFNICommand): @@ -364,26 +769,26 @@ class Copy(AFNICommand): >>> from nipype.interfaces import afni >>> copy3d = 
afni.Copy() >>> copy3d.inputs.in_file = 'functional.nii' - >>> copy3d.cmdline # doctest: +ALLOW_UNICODE + >>> copy3d.cmdline '3dcopy functional.nii functional_copy' >>> res = copy3d.run() # doctest: +SKIP >>> from copy import deepcopy >>> copy3d_2 = deepcopy(copy3d) >>> copy3d_2.inputs.outputtype = 'NIFTI' - >>> copy3d_2.cmdline # doctest: +ALLOW_UNICODE + >>> copy3d_2.cmdline '3dcopy functional.nii functional_copy.nii' >>> res = copy3d_2.run() # doctest: +SKIP >>> copy3d_3 = deepcopy(copy3d) >>> copy3d_3.inputs.outputtype = 'NIFTI_GZ' - >>> copy3d_3.cmdline # doctest: +ALLOW_UNICODE + >>> copy3d_3.cmdline '3dcopy functional.nii functional_copy.nii.gz' >>> res = copy3d_3.run() # doctest: +SKIP >>> copy3d_4 = deepcopy(copy3d) >>> copy3d_4.inputs.out_file = 'new_func.nii' - >>> copy3d_4.cmdline # doctest: +ALLOW_UNICODE + >>> copy3d_4.cmdline '3dcopy functional.nii new_func.nii' >>> res = copy3d_4.run() # doctest: +SKIP @@ -393,6 +798,169 @@ class Copy(AFNICommand): input_spec = CopyInputSpec output_spec = AFNICommandOutputSpec +class DotInputSpec(AFNICommandInputSpec): + in_files = traits.List( + (File()), + desc="list of input files, possibly with subbrick selectors", + argstr="%s ...", + position=-2) + out_file = File( + desc='collect output to a file', + argstr=' |& tee %s', + position=-1) + mask = File( + desc='Use this dataset as a mask', + argstr='-mask %s') + mrange = traits.Tuple((traits.Float(),traits.Float()), + desc='Means to further restrict the voxels from \'mset\' so that' + 'only those mask values within this range (inclusive) willbe used.', + argstr='-mrange %s %s') + demean = traits.Bool( + desc='Remove the mean from each volume prior to computing the correlation', + argstr='-demean') + docor = traits.Bool( + desc='Return the correlation coefficient (default).', + argstr='-docor') + dodot = traits.Bool( + desc='Return the dot product (unscaled).', + argstr='-dodot') + docoef = traits.Bool( + desc='Return the least square fit coefficients {{a,b}} so 
that dset2 is approximately a + b*dset1', + argstr='-docoef') + dosums = traits.Bool( + desc='Return the 6 numbers xbar= ybar= <(x-xbar)^2> <(y-ybar)^2> <(x-xbar)(y-ybar)> and the correlation coefficient.', + argstr='-dosums') + dodice = traits.Bool( + desc='Return the Dice coefficient (the Sorensen-Dice index).', + argstr='-dodice') + doeta2 = traits.Bool( + desc='Return eta-squared (Cohen, NeuroImage 2008).', + argstr='-doeta2') + full = traits.Bool( + desc='Compute the whole matrix. A waste of time, but handy for parsing.', + argstr='-full') + show_labels = traits.Bool( + desc='Print sub-brick labels to help identify what is being correlated. This option is useful when' + 'you have more than 2 sub-bricks at input.', + argstr='-show_labels') + upper = traits.Bool( + desc='Compute upper triangular matrix', + argstr='-upper') + +class Dot(AFNICommand): + """Correlation coefficient between sub-brick pairs. + All datasets in in_files list will be concatenated. + You can use sub-brick selectors in the file specification. + Note: This program is not efficient when more than two subbricks are input. + For complete details, see the `3ddot Documentation. 
+ `_ + + >>> from nipype.interfaces import afni + >>> dot = afni.Dot() + >>> dot.inputs.in_files = ['functional.nii[0]', 'structural.nii'] + >>> dot.inputs.dodice = True + >>> dot.inputs.out_file = 'out.mask_ae_dice.txt' + >>> dot.cmdline + '3dDot -dodice functional.nii[0] structural.nii |& tee out.mask_ae_dice.txt' + >>> res = copy3d.run() # doctest: +SKIP + + """ + _cmd = '3dDot' + input_spec = DotInputSpec + output_spec = AFNICommandOutputSpec + +class Edge3InputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dedge3', + argstr='-input %s', + position=0, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + desc='output image file name', + position=-1, + argstr='-prefix %s') + datum = traits.Enum( + 'byte', 'short', 'float', + argstr='-datum %s', + desc='specify data type for output. Valid types are \'byte\', ' + '\'short\' and \'float\'.') + fscale = traits.Bool( + desc='Force scaling of the output to the maximum integer range.', + argstr='-fscale', + xor=['gscale', 'nscale', 'scale_floats']) + gscale = traits.Bool( + desc='Same as \'-fscale\', but also forces each output sub-brick to ' + 'to get the same scaling factor.', + argstr='-gscale', + xor=['fscale', 'nscale', 'scale_floats']) + nscale = traits.Bool( + desc='Don\'t do any scaling on output to byte or short datasets.', + argstr='-nscale', + xor=['fscale', 'gscale', 'scale_floats']) + scale_floats = traits.Float( + desc='Multiply input by VAL, but only if the input datum is ' + 'float. This is needed when the input dataset ' + 'has a small range, like 0 to 2.0 for instance. ' + 'With such a range, very few edges are detected due to ' + 'what I suspect to be truncation problems. 
' + 'Multiplying such a dataset by 10000 fixes the problem ' + 'and the scaling is undone at the output.', + argstr='-scale_floats %f', + xor=['fscale', 'gscale', 'nscale']) + verbose = traits.Bool( + desc='Print out some information along the way.', + argstr='-verbose') + + +class Edge3(AFNICommand): + """Does 3D Edge detection using the library 3DEdge + by Gregoire Malandain (gregoire.malandain@sophia.inria.fr). + + For complete details, see the `3dedge3 Documentation. + `_ + + references_ = [{'entry': BibTeX('@article{Deriche1987,' + 'author={R. Deriche},' + 'title={Optimal edge detection using recursive filtering},' + 'journal={International Journal of Computer Vision},' + 'volume={2},', + 'pages={167-187},' + 'year={1987},' + '}'), + 'tags': ['method'], + }, + {'entry': BibTeX('@article{MongaDericheMalandainCocquerez1991,' + 'author={O. Monga, R. Deriche, G. Malandain, J.P. Cocquerez},' + 'title={Recursive filtering and edge tracking: two primary tools for 3D edge detection},' + 'journal={Image and vision computing},' + 'volume={9},', + 'pages={203-214},' + 'year={1991},' + '}'), + 'tags': ['method'], + }, + ] + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> edge3 = afni.Edge3() + >>> edge3.inputs.in_file = 'functional.nii' + >>> edge3.inputs.out_file = 'edges.nii' + >>> edge3.inputs.datum = 'byte' + >>> edge3.cmdline + '3dedge3 -input functional.nii -datum byte -prefix edges.nii' + >>> res = edge3.run() # doctest: +SKIP + + """ + + _cmd = '3dedge3' + input_spec = Edge3InputSpec + output_spec = AFNICommandOutputSpec + class EvalInputSpec(AFNICommandInputSpec): in_file_a = File( @@ -454,7 +1022,7 @@ class Eval(AFNICommand): >>> eval.inputs.expr = 'a*b' >>> eval.inputs.out1D = True >>> eval.inputs.out_file = 'data_calc.1D' - >>> eval.cmdline # doctest: +ALLOW_UNICODE + >>> eval.cmdline '1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D' >>> res = eval.run() # doctest: +SKIP @@ -605,7 +1173,7 @@ class 
FWHMx(AFNICommandBase): >>> from nipype.interfaces import afni >>> fwhm = afni.FWHMx() >>> fwhm.inputs.in_file = 'functional.nii' - >>> fwhm.cmdline # doctest: +ALLOW_UNICODE + >>> fwhm.cmdline '3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out' >>> res = fwhm.run() # doctest: +SKIP @@ -812,6 +1380,9 @@ class MaskToolInputSpec(AFNICommandInputSpec): 'or using the labels in {R,L,A,P,I,S}.', argstr='-fill_dirs %s', requires=['fill_holes']) + verbose = traits.Int( + desc='specify verbosity level, for 0 to 3', + argstr='-verb %s') class MaskToolOutputSpec(TraitedSpec): @@ -832,7 +1403,7 @@ class MaskTool(AFNICommand): >>> masktool = afni.MaskTool() >>> masktool.inputs.in_file = 'functional.nii' >>> masktool.inputs.outputtype = 'NIFTI' - >>> masktool.cmdline # doctest: +ALLOW_UNICODE + >>> masktool.cmdline '3dmask_tool -prefix functional_mask.nii -input functional.nii' >>> res = automask.run() # doctest: +SKIP @@ -881,7 +1452,7 @@ class Merge(AFNICommand): >>> merge.inputs.blurfwhm = 4 >>> merge.inputs.doall = True >>> merge.inputs.out_file = 'e7.nii' - >>> merge.cmdline # doctest: +ALLOW_UNICODE + >>> merge.cmdline '3dmerge -1blur_fwhm 4 -doall -prefix e7.nii functional.nii functional2.nii' >>> res = merge.run() # doctest: +SKIP @@ -936,7 +1507,7 @@ class Notes(CommandLine): >>> notes.inputs.in_file = 'functional.HEAD' >>> notes.inputs.add = 'This note is added.' >>> notes.inputs.add_history = 'This note is added to history.' - >>> notes.cmdline # doctest: +ALLOW_UNICODE + >>> notes.cmdline '3dNotes -a "This note is added." -h "This note is added to history." 
functional.HEAD' >>> res = notes.run() # doctest: +SKIP """ @@ -951,6 +1522,277 @@ def _list_outputs(self): return outputs +class NwarpApplyInputSpec(CommandLineInputSpec): + in_file = traits.Either(File(exists=True), traits.List(File(exists=True)), + mandatory=True, + argstr='-source %s', + desc='the name of the dataset to be warped ' + 'can be multiple datasets') + warp = traits.String( + desc='the name of the warp dataset. ' + 'multiple warps can be concatenated (make sure they exist)', + argstr='-nwarp %s', + mandatory=True) + inv_warp = traits.Bool( + desc='After the warp specified in \'-nwarp\' is computed, invert it', + argstr='-iwarp') + master = traits.File(exists=True, + desc='the name of the master dataset, which defines the output grid', + argstr='-master %s') + interp = traits.Enum('NN','nearestneighbour','nearestneighbor','linear', + 'trilinear','cubic','tricubic','quintic','triquintic','wsinc5', + desc='defines interpolation method to use during warp', + argstr='-interp %s', + default='wsinc5') + ainterp = traits.Enum('NN','nearestneighbour','nearestneighbor','linear', + 'trilinear','cubic','tricubic','quintic','triquintic','wsinc5', + desc='specify a different interpolation method than might ' + 'be used for the warp', + argstr='-ainterp %s', + default='wsinc5') + out_file = File( + name_template='%s_Nwarp', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + short = traits.Bool( + desc='Write output dataset using 16-bit short integers, rather than ' + 'the usual 32-bit floats.', + argstr='-short') + quiet = traits.Bool( + desc='don\'t be verbose :(', + argstr='-quiet', + xor=['verb']) + verb = traits.Bool( + desc='be extra verbose :)', + argstr='-verb', + xor=['quiet']) + + +class NwarpApply(AFNICommandBase): + """Program to apply a nonlinear 3D warp saved from 3dQwarp + (or 3dNwarpCat, etc.) to a 3D dataset, to produce a warped + version of the source dataset. 
+ + For complete details, see the `3dNwarpApply Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> nwarp = afni.NwarpApply() + >>> nwarp.inputs.in_file = 'Fred+orig' + >>> nwarp.inputs.master = 'NWARP' + >>> nwarp.inputs.warp = "'Fred_WARP+tlrc Fred.Xaff12.1D'" + >>> nwarp.cmdline + "3dNwarpApply -source Fred+orig -master NWARP -prefix Fred+orig_Nwarp -nwarp \'Fred_WARP+tlrc Fred.Xaff12.1D\'" + >>> res = nwarp.run() # doctest: +SKIP + + """ + _cmd = '3dNwarpApply' + input_spec = NwarpApplyInputSpec + output_spec = AFNICommandOutputSpec + + +class NwarpCatInputSpec(AFNICommandInputSpec): + in_files = traits.List( + traits.Either( + traits.File(), + traits.Tuple(traits.Enum('IDENT', 'INV', 'SQRT', 'SQRTINV'), + traits.File())), + descr="list of tuples of 3D warps and associated functions", + mandatory=True, + argstr="%s", + position=-1) + space = traits.String( + desc='string to attach to the output dataset as its atlas space ' + 'marker.', + argstr='-space %s') + inv_warp = traits.Bool( + desc='invert the final warp before output', + argstr='-iwarp') + interp = traits.Enum( + 'linear', 'quintic', 'wsinc5', + desc='specify a different interpolation method than might ' + 'be used for the warp', + argstr='-interp %s', + default='wsinc5') + expad = traits.Int( + desc='Pad the nonlinear warps by the given number of voxels voxels in ' + 'all directions. The warp displacements are extended by linear ' + 'extrapolation from the faces of the input grid..', + argstr='-expad %d') + out_file = File( + name_template='%s_NwarpCat', + desc='output image file name', + argstr='-prefix %s', + name_source='in_files') + verb = traits.Bool( + desc='be verbose', + argstr='-verb') + + +class NwarpCat(AFNICommand): + """Catenates (composes) 3D warps defined on a grid, OR via a matrix. + + .. note:: + + * All transformations are from DICOM xyz (in mm) to DICOM xyz. + + * Matrix warps are in files that end in '.1D' or in '.txt'. 
A matrix + warp file should have 12 numbers in it, as output (for example), by + '3dAllineate -1Dmatrix_save'. + + * Nonlinear warps are in dataset files (AFNI .HEAD/.BRIK or NIfTI .nii) + with 3 sub-bricks giving the DICOM order xyz grid displacements in mm. + + * If all the input warps are matrices, then the output is a matrix + and will be written to the file 'prefix.aff12.1D'. + Unless the prefix already contains the string '.1D', in which case + the filename is just the prefix. + + * If 'prefix' is just 'stdout', then the output matrix is written + to standard output. + In any of these cases, the output format is 12 numbers in one row. + + * If any of the input warps are datasets, they must all be defined on + the same 3D grid! + And of course, then the output will be a dataset on the same grid. + However, you can expand the grid using the '-expad' option. + + * The order of operations in the final (output) warp is, for the + case of 3 input warps: + + OUTPUT(x) = warp3( warp2( warp1(x) ) ) + + That is, warp1 is applied first, then warp2, et cetera. + The 3D x coordinates are taken from each grid location in the + first dataset defined on a grid. + + For complete details, see the `3dNwarpCat Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> nwarpcat = afni.NwarpCat() + >>> nwarpcat.inputs.in_files = ['Q25_warp+tlrc.HEAD', ('IDENT', 'structural.nii')] + >>> nwarpcat.inputs.out_file = 'Fred_total_WARP' + >>> nwarpcat.cmdline + "3dNwarpCat -prefix Fred_total_WARP Q25_warp+tlrc.HEAD 'IDENT(structural.nii)'" + >>> res = nwarpcat.run() # doctest: +SKIP + + """ + _cmd = '3dNwarpCat' + input_spec = NwarpCatInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'in_files': + return spec.argstr % (' '.join(["'" + v[0] + "(" + v[1] + ")'" + if isinstance(v, tuple) else v + for v in value])) + return super(NwarpCat, self)._format_arg(name, spec, value) + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname(self.inputs.in_files[0][0], + suffix='_NwarpCat') + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_file): + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + else: + outputs['out_file'] = os.path.abspath(self._gen_fname( + self.inputs.in_files[0], suffix='_NwarpCat+tlrc', ext='.HEAD')) + return outputs + + +class OneDToolPyInputSpec(AFNIPythonCommandInputSpec): + in_file = File( + desc='input file to OneDTool', + argstr='-infile %s', + mandatory=True, + exists=True) + set_nruns = traits.Int( + desc='treat the input data as if it has nruns', + argstr='-set_nruns %d') + derivative = traits.Bool( + desc='take the temporal derivative of each vector (done as first backward difference)', + argstr='-derivative') + demean = traits.Bool( + desc='demean each run (new mean of each run = 0.0)', + argstr='-demean') + out_file = File( + desc='write the current 1D data to FILE', + argstr='-write %s', + xor=['show_cormat_warnings']) + show_censor_count = traits.Bool( + desc='display the total number of censored TRs Note : if input is a valid xmat.1D dataset,' + 'then the count will come from the header. 
Otherwise the input is assumed to be a binary censor' + 'file, and zeros are simply counted.', + argstr="-show_censor_count") + censor_motion = traits.Tuple( + (traits.Float(),File()), + desc='Tuple of motion limit and outfile prefix. need to also set set_nruns -r set_run_lengths', + argstr="-censor_motion %f %s") + censor_prev_TR = traits.Bool( + desc='for each censored TR, also censor previous', + argstr='-censor_prev_TR') + show_trs_uncensored = traits.Enum('comma','space','encoded','verbose', + desc='display a list of TRs which were not censored in the specified style', + argstr='-show_trs_uncensored %s') + show_cormat_warnings = traits.File( + desc='Write cormat warnings to a file', + argstr="-show_cormat_warnings |& tee %s", + default="out.cormat_warn.txt", + usedefault=False, + position=-1, + xor=['out_file']) + show_indices_interest = traits.Bool( + desc="display column indices for regs of interest", + argstr="-show_indices_interest") + show_trs_run = traits.Int( + desc="restrict -show_trs_[un]censored to the given 1-based run", + argstr="-show_trs_run %d") + +class OneDToolPyOutputSpec(AFNICommandOutputSpec): + out_file = File(desc='output of 1D_tool.py') + +class OneDToolPy(AFNIPythonCommand): + """This program is meant to read/manipulate/write/diagnose 1D datasets. + Input can be specified using AFNI sub-brick[]/time{} selectors. 
+ + >>> from nipype.interfaces import afni + >>> odt = afni.OneDToolPy() + >>> odt.inputs.in_file = 'f1.1D' + >>> odt.inputs.set_nruns = 3 + >>> odt.inputs.demean = True + >>> odt.inputs.out_file = 'motion_dmean.1D' + >>> odt.cmdline # doctest: +ELLIPSIS + 'python2 ...1d_tool.py -demean -infile f1.1D -write motion_dmean.1D -set_nruns 3' + >>> res = odt.run() # doctest: +SKIP +""" + + _cmd = '1d_tool.py' + + input_spec = OneDToolPyInputSpec + output_spec = OneDToolPyOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + if isdefined(self.inputs.out_file): + outputs['out_file']=os.path.join(os.getcwd(), self.inputs.out_file) + if isdefined(self.inputs.show_cormat_warnings): + outputs['out_file']=os.path.join(os.getcwd(), self.inputs.show_cormat_warnings) + if isdefined(self.inputs.censor_motion): + outputs['out_file']=os.path.join(os.getcwd(), self.inputs.censor_motion[1]) + return outputs + class RefitInputSpec(CommandLineInputSpec): in_file = File( desc='input file to 3drefit', @@ -971,6 +1813,11 @@ class RefitInputSpec(CommandLineInputSpec): zorigin = Str( desc='z distance for edge voxel offset', argstr='-zorigin %s') + duporigin_file = File( + argstr='-duporigin %s', + exists=True, + desc='Copies the xorigin, yorigin, and zorigin values from the header ' + 'of the given dataset') xdel = traits.Float( desc='new x voxel dimension in mm', argstr='-xdel %f') @@ -980,11 +1827,54 @@ class RefitInputSpec(CommandLineInputSpec): zdel = traits.Float( desc='new z voxel dimension in mm', argstr='-zdel %f') + xyzscale = traits.Float( + desc='Scale the size of the dataset voxels by the given factor', + argstr='-xyzscale %f') space = traits.Enum( 'TLRC', 'MNI', 'ORIG', argstr='-space %s', desc='Associates the dataset with a specific template type, e.g. 
' 'TLRC, MNI, ORIG') + atrcopy = traits.Tuple( + traits.File(exists=True), traits.Str(), + argstr='-atrcopy %s %s', + desc='Copy AFNI header attribute from the given file into the header ' + 'of the dataset(s) being modified. For more information on AFNI ' + 'header attributes, see documentation file README.attributes. ' + 'More than one \'-atrcopy\' option can be used. For AFNI ' + 'advanced users only. Do NOT use -atrcopy or -atrstring with ' + 'other modification options. See also -copyaux.') + atrstring = traits.Tuple( + traits.Str(), traits.Str(), + argstr='-atrstring %s %s', + desc='Copy the last given string into the dataset(s) being modified, ' + 'giving it the attribute name given by the last string.' + 'To be safe, the last string should be in quotes.') + atrfloat = traits.Tuple( + traits.Str(), traits.Str(), + argstr='-atrfloat %s %s', + desc='Create or modify floating point attributes. ' + 'The input values may be specified as a single string in quotes ' + 'or as a 1D filename or string, example ' + '\'1 0.2 0 0 -0.2 1 0 0 0 0 1 0\' or ' + 'flipZ.1D or \'1D:1,0.2,2@0,-0.2,1,2@0,2@0,1,0\'') + atrint = traits.Tuple( + traits.Str(), traits.Str(), + argstr='-atrint %s %s', + desc='Create or modify integer attributes. ' + 'The input values may be specified as a single string in quotes ' + 'or as a 1D filename or string, example ' + '\'1 0 0 0 0 1 0 0 0 0 1 0\' or ' + 'flipZ.1D or \'1D:1,0,2@0,-0,1,2@0,2@0,1,0\'') + saveatr = traits.Bool( + argstr='-saveatr', + desc='(default) Copy the attributes that are known to AFNI into ' + 'the dset->dblk structure thereby forcing changes to known ' + 'attributes to be present in the output. 
This option only makes ' + 'sense with -atrcopy.') + nosaveatr = traits.Bool( + argstr='-nosaveatr', + desc='Opposite of -saveatr') class Refit(AFNICommandBase): @@ -1000,10 +1890,16 @@ class Refit(AFNICommandBase): >>> refit = afni.Refit() >>> refit.inputs.in_file = 'structural.nii' >>> refit.inputs.deoblique = True - >>> refit.cmdline # doctest: +ALLOW_UNICODE + >>> refit.cmdline '3drefit -deoblique structural.nii' >>> res = refit.run() # doctest: +SKIP + >>> refit_2 = afni.Refit() + >>> refit_2.inputs.in_file = 'structural.nii' + >>> refit_2.inputs.atrfloat = ("IJK_TO_DICOM_REAL", "'1 0.2 0 0 -0.2 1 0 0 0 0 1 0'") + >>> refit_2.cmdline + "3drefit -atrfloat IJK_TO_DICOM_REAL '1 0.2 0 0 -0.2 1 0 0 0 0 1 0' structural.nii" + >>> res = refit_2.run() # doctest: +SKIP """ _cmd = '3drefit' input_spec = RefitInputSpec @@ -1061,7 +1957,7 @@ class Resample(AFNICommand): >>> resample.inputs.in_file = 'functional.nii' >>> resample.inputs.orientation= 'RPI' >>> resample.inputs.outputtype = 'NIFTI' - >>> resample.cmdline # doctest: +ALLOW_UNICODE + >>> resample.cmdline '3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii' >>> res = resample.run() # doctest: +SKIP @@ -1095,6 +1991,9 @@ class TCatInputSpec(AFNICommandInputSpec): 'dataset mean back in. 
Option -rlt++ adds overall mean of all ' 'dataset timeseries back in.', position=1) + verbose = traits.Bool( + desc='Print out some verbose output as the program', + argstr='-verb') class TCat(AFNICommand): @@ -1114,7 +2013,7 @@ class TCat(AFNICommand): >>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii'] >>> tcat.inputs.out_file= 'functional_tcat.nii' >>> tcat.inputs.rlt = '+' - >>> tcat.cmdline # doctest: +ALLOW_UNICODE +NORMALIZE_WHITESPACE + >>> tcat.cmdline '3dTcat -rlt+ -prefix functional_tcat.nii functional.nii functional2.nii' >>> res = tcat.run() # doctest: +SKIP @@ -1124,6 +2023,60 @@ class TCat(AFNICommand): input_spec = TCatInputSpec output_spec = AFNICommandOutputSpec +class TCatSBInputSpec(AFNICommandInputSpec): + in_files = traits.List( + traits.Tuple(File(exists=True),Str()), + desc='List of tuples of file names and subbrick selectors as strings.' + 'Don\'t forget to protect the single quotes in the subbrick selector' + 'so the contents are protected from the command line interpreter.', + argstr='%s%s ...', + position=-1, + mandatory=True, + copyfile=False) + out_file = File( + desc='output image file name', + argstr='-prefix %s', + genfile=True) + rlt = traits.Enum( + '', '+', '++', + argstr='-rlt%s', + desc='Remove linear trends in each voxel time series loaded from each ' + 'input dataset, SEPARATELY. Option -rlt removes the least squares ' + 'fit of \'a+b*t\' to each voxel time series. Option -rlt+ adds ' + 'dataset mean back in. Option -rlt++ adds overall mean of all ' + 'dataset timeseries back in.', + position=1) + + +class TCatSubBrick(AFNICommand): + """Hopefully a temporary function to allow sub-brick selection until + afni file managment is improved. + + For complete details, see the `3dTcat Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tcsb = afni.TCatSubBrick() + >>> tcsb.inputs.in_files = [('functional.nii', "'{2..$}'"), ('functional2.nii', "'{2..$}'")] + >>> tcsb.inputs.out_file= 'functional_tcat.nii' + >>> tcsb.inputs.rlt = '+' + >>> tcsb.cmdline + "3dTcat -rlt+ -prefix functional_tcat.nii functional.nii'{2..$}' functional2.nii'{2..$}' " + >>> res = tcsb.run() # doctest: +SKIP + + """ + + _cmd = '3dTcat' + input_spec = TCatSBInputSpec + output_spec = AFNICommandOutputSpec + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname(self.inputs.in_files[0][0], suffix='_tcat') + class TStatInputSpec(AFNICommandInputSpec): in_file = File( @@ -1161,7 +2114,7 @@ class TStat(AFNICommand): >>> tstat.inputs.in_file = 'functional.nii' >>> tstat.inputs.args = '-mean' >>> tstat.inputs.out_file = 'stats' - >>> tstat.cmdline # doctest: +ALLOW_UNICODE + >>> tstat.cmdline '3dTstat -mean -prefix stats functional.nii' >>> res = tstat.run() # doctest: +SKIP @@ -1223,7 +2176,7 @@ class To3D(AFNICommand): >>> to3d.inputs.in_folder = '.' >>> to3d.inputs.out_file = 'dicomdir.nii' >>> to3d.inputs.filetype = 'anat' - >>> to3d.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> to3d.cmdline # doctest: +ELLIPSIS 'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm' >>> res = to3d.run() # doctest: +SKIP @@ -1234,6 +2187,110 @@ class To3D(AFNICommand): output_spec = AFNICommandOutputSpec +class UndumpInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dUndump, whose geometry will determine' + 'the geometry of the output', + argstr='-master %s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + mask_file = File( + desc='mask image file name. 
Only voxels that are nonzero in the mask ' + 'can be set.', + argstr='-mask %s') + datatype = traits.Enum( + 'short', 'float', 'byte', + desc='set output file datatype', + argstr='-datum %s') + default_value = traits.Float( + desc='default value stored in each input voxel that does not have ' + 'a value supplied in the input file', + argstr='-dval %f') + fill_value = traits.Float( + desc='value, used for each voxel in the output dataset that is NOT ' + 'listed in the input file', + argstr='-fval %f') + coordinates_specification = traits.Enum( + 'ijk', 'xyz', + desc='Coordinates in the input file as index triples (i, j, k) ' + 'or spatial coordinates (x, y, z) in mm', + argstr='-%s') + srad = traits.Float( + desc='radius in mm of the sphere that will be filled about each input ' + '(x,y,z) or (i,j,k) voxel. If the radius is not given, or is 0, ' + 'then each input data line sets the value in only one voxel.', + argstr='-srad %f') + orient = traits.Tuple( + traits.Enum('R', 'L'), traits.Enum('A', 'P'), traits.Enum('I', 'S'), + desc='Specifies the coordinate order used by -xyz. ' + 'The code must be 3 letters, one each from the pairs ' + '{R,L} {A,P} {I,S}. The first letter gives the ' + 'orientation of the x-axis, the second the orientation ' + 'of the y-axis, the third the z-axis: ' + 'R = right-to-left L = left-to-right ' + 'A = anterior-to-posterior P = posterior-to-anterior ' + 'I = inferior-to-superior S = superior-to-inferior ' + 'If -orient isn\'t used, then the coordinate order of the ' + '-master (in_file) dataset is used to interpret (x,y,z) inputs.', + argstr='-orient %s') + head_only = traits.Bool( + desc='create only the .HEAD file which gets exploited by ' + 'the AFNI matlab library function New_HEAD.m', + argstr='-head_only') + + +class UndumpOutputSpec(TraitedSpec): + out_file = File(desc='assembled file', exists=True) + + +class Undump(AFNICommand): + """3dUndump - Assembles a 3D dataset from an ASCII list of coordinates and + (optionally) values. 
+ + The input file(s) are ASCII files, with one voxel specification per + line. A voxel specification is 3 numbers (-ijk or -xyz coordinates), + with an optional 4th number giving the voxel value. For example: + + 1 2 3 + 3 2 1 5 + 5.3 6.2 3.7 + // this line illustrates a comment + + The first line puts a voxel (with value given by '-dval') at point + (1,2,3). The second line puts a voxel (with value 5) at point (3,2,1). + The third line puts a voxel (with value given by '-dval') at point + (5.3,6.2,3.7). If -ijk is in effect, and fractional coordinates + are given, they will be rounded to the nearest integers; for example, + the third line would be equivalent to (i,j,k) = (5,6,4). + + + For complete details, see the `3dUndump Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> unndump = afni.Undump() + >>> unndump.inputs.in_file = 'structural.nii' + >>> unndump.inputs.out_file = 'structural_undumped.nii' + >>> unndump.cmdline + '3dUndump -prefix structural_undumped.nii -master structural.nii' + >>> res = unndump.run() # doctest: +SKIP + + """ + + _cmd = '3dUndump' + input_spec = UndumpInputSpec + output_spec = UndumpOutputSpec + + class UnifizeInputSpec(AFNICommandInputSpec): in_file = File( desc='input file to 3dUnifize', @@ -1243,6 +2300,7 @@ class UnifizeInputSpec(AFNICommandInputSpec): exists=True, copyfile=False) out_file = File( + name_template='%s_unifized', desc='output image file name', argstr='-prefix %s', name_source='in_file') @@ -1281,6 +2339,30 @@ class UnifizeInputSpec(AFNICommandInputSpec): argstr='-EPI', requires=['no_duplo', 't2'], xor=['gm']) + rbt = traits.Tuple( + traits.Float(), traits.Float(), traits.Float(), + desc='Option for AFNI experts only.' 
+ 'Specify the 3 parameters for the algorithm:\n' + 'R = radius; same as given by option \'-Urad\', [default=18.3]\n' + 'b = bottom percentile of normalizing data range, [default=70.0]\n' + 'r = top percentile of normalizing data range, [default=80.0]\n', + argstr='-rbt %f %f %f') + t2_up = traits.Float( + desc='Option for AFNI experts only.' + 'Set the upper percentile point used for T2-T1 inversion. ' + 'Allowed to be anything between 90 and 100 (inclusive), with ' + 'default to 98.5 (for no good reason).', + argstr='-T2up %f') + cl_frac = traits.Float( + desc='Option for AFNI experts only.' + 'Set the automask \'clip level fraction\'. Must be between ' + '0.1 and 0.9. A small fraction means to make the initial ' + 'threshold for clipping (a la 3dClipLevel) smaller, which ' + 'will tend to make the mask larger. [default=0.1]', + argstr='-clfrac %f') + quiet = traits.Bool( + desc='Don\'t print the progress messages.', + argstr='-quiet') class UnifizeOutputSpec(TraitedSpec): @@ -1326,7 +2408,7 @@ class Unifize(AFNICommand): >>> unifize = afni.Unifize() >>> unifize.inputs.in_file = 'structural.nii' >>> unifize.inputs.out_file = 'structural_unifized.nii' - >>> unifize.cmdline # doctest: +ALLOW_UNICODE + >>> unifize.cmdline '3dUnifize -prefix structural_unifized.nii -input structural.nii' >>> res = unifize.run() # doctest: +SKIP @@ -1369,7 +2451,7 @@ class ZCutUp(AFNICommand): >>> zcutup.inputs.in_file = 'functional.nii' >>> zcutup.inputs.out_file = 'functional_zcutup.nii' >>> zcutup.inputs.keep= '0 10' - >>> zcutup.cmdline # doctest: +ALLOW_UNICODE + >>> zcutup.cmdline '3dZcutup -keep 0 10 -prefix functional_zcutup.nii functional.nii' >>> res = zcutup.run() # doctest: +SKIP @@ -1421,7 +2503,7 @@ class GCOR(CommandLine): >>> gcor = afni.GCOR() >>> gcor.inputs.in_file = 'structural.nii' >>> gcor.inputs.nfirst = 4 - >>> gcor.cmdline # doctest: +ALLOW_UNICODE + >>> gcor.cmdline '@compute_gcor -nfirst 4 -input structural.nii' >>> res = gcor.run() # doctest: +SKIP @@ 
-1441,3 +2523,227 @@ def _run_interface(self, runtime): def _list_outputs(self): return {'out': getattr(self, '_gcor')} + +class AxializeInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3daxialize', + argstr='%s', + position=-2, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_axialize', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + verb = traits.Bool( + desc='Print out a progress report', + argstr='-verb') + sagittal = traits.Bool( + desc='Do sagittal slice order [-orient ASL]', + argstr='-sagittal', + xor=['coronal', 'axial']) + coronal = traits.Bool( + desc='Do coronal slice order [-orient RSA]', + argstr='-coronal', + xor=['sagittal', 'axial']) + axial = traits.Bool( + desc='Do axial slice order [-orient RAI]' + 'This is the default AFNI axial order, and' + 'is the one currently required by the' + 'volume rendering plugin; this is also' + 'the default orientation output by this' + "program (hence the program's name).", + argstr='-axial', + xor=['coronal', 'sagittal']) + orientation = Str( + desc='new orientation code', + argstr='-orient %s') + + +class Axialize(AFNICommand): + """Read in a dataset and write it out as a new dataset + with the data brick oriented as axial slices. + + For complete details, see the `3daxialize Documentation.
+ `_ + + Examples + ======== + >>> from nipype.interfaces import afni + >>> axial3d = afni.Axialize() + >>> axial3d.inputs.in_file = 'functional.nii' + >>> axial3d.inputs.out_file = 'axialized.nii' + >>> axial3d.cmdline + '3daxialize -prefix axialized.nii functional.nii' + >>> res = axial3d.run() # doctest: +SKIP + + """ + + _cmd = '3daxialize' + input_spec = AxializeInputSpec + output_spec = AFNICommandOutputSpec + + +class ZcatInputSpec(AFNICommandInputSpec): + in_files = InputMultiPath( + File( + desc='input files to 3dZcat', + exists=True), + argstr='%s', + position=-1, + mandatory=True, + copyfile=False) + out_file = File( + name_template='zcat', + desc='output dataset prefix name (default \'zcat\')', + argstr='-prefix %s') + datum = traits.Enum( + 'byte','short','float', + argstr='-datum %s', + desc='specify data type for output. Valid types are \'byte\', ' + '\'short\' and \'float\'.') + verb = traits.Bool( + desc='print out some verbosity as the program proceeds.', + argstr='-verb') + fscale = traits.Bool( + desc='Force scaling of the output to the maximum integer ' + 'range. This only has effect if the output datum is ' + 'byte or short (either forced or defaulted). This ' + 'option is sometimes necessary to eliminate ' + 'unpleasant truncation artifacts.', + argstr='-fscale', + xor=['nscale']) + nscale = traits.Bool( + desc='Don\'t do any scaling on output to byte or short ' + 'datasets. This may be especially useful when ' + 'operating on mask datasets whose output values ' + 'are only 0\'s and 1\'s.', + argstr='-nscale', + xor=['fscale']) + +class Zcat(AFNICommand): + """Concatenates multiple datasets in the slice (z) + direction using the 3dZcat command + + For complete details, see the `3dZcat Documentation.
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> zcat = afni.Zcat() + >>> zcat.inputs.in_files = ['functional2.nii', 'functional3.nii'] + >>> zcat.inputs.out_file = 'cat_functional.nii' + >>> zcat.cmdline + '3dZcat -prefix cat_functional.nii functional2.nii functional3.nii' + >>> res = zcat.run() # doctest: +SKIP + """ + + _cmd = '3dZcat' + input_spec = ZcatInputSpec + output_spec = AFNICommandOutputSpec + +class ZeropadInputSpec(AFNICommandInputSpec): + in_files = File( + desc='input dataset', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='zeropad', + desc='output dataset prefix name (default \'zeropad\')', + argstr='-prefix %s') + I = traits.Int( + desc='adds \'n\' planes of zero at the Inferior edge', + argstr='-I %i', + xor=['master']) + S = traits.Int( + desc='adds \'n\' planes of zero at the Superior edge', + argstr='-S %i', + xor=['master']) + A = traits.Int( + desc='adds \'n\' planes of zero at the Anterior edge', + argstr='-A %i', + xor=['master']) + P = traits.Int( + desc='adds \'n\' planes of zero at the Posterior edge', + argstr='-P %i', + xor=['master']) + L = traits.Int( + desc='adds \'n\' planes of zero at the Left edge', + argstr='-L %i', + xor=['master']) + R = traits.Int( + desc='adds \'n\' planes of zero at the Right edge', + argstr='-R %i', + xor=['master']) + z = traits.Int( + desc='adds \'n\' planes of zero on EACH of the ' + 'dataset z-axis (slice-direction) faces', + argstr='-z %i', + xor=['master']) + RL = traits.Int(desc='specify that planes should be added or cut ' + 'symmetrically to make the resulting volume have' + 'N slices in the right-left direction', + argstr='-RL %i', + xor=['master']) + AP = traits.Int(desc='specify that planes should be added or cut ' + 'symmetrically to make the resulting volume have' + 'N slices in the anterior-posterior direction', + argstr='-AP %i', + xor=['master']) + IS = traits.Int(desc='specify that planes 
should be added or cut ' + 'symmetrically to make the resulting volume have' + 'N slices in the inferior-superior direction', + argstr='-IS %i', + xor=['master']) + mm = traits.Bool(desc='pad counts \'n\' are in mm instead of slices, ' + 'where each \'n\' is an integer and at least \'n\' ' + 'mm of slices will be added/removed; e.g., n = 3 ' + 'and slice thickness = 2.5 mm ==> 2 slices added', + argstr='-mm', + xor=['master']) + master = traits.File(desc='match the volume described in dataset ' + '\'mset\', where mset must have the same ' + 'orientation and grid spacing as dataset to be ' + 'padded. the goal of -master is to make the ' + 'output dataset from 3dZeropad match the ' + 'spatial \'extents\' of mset by adding or ' + 'subtracting slices as needed. You can\'t use ' + '-I,-S,..., or -mm with -master', + argstr='-master %s', + xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', + 'RL', 'AP', 'IS', 'mm']) + +class Zeropad(AFNICommand): + """Adds planes of zeros to a dataset (i.e., pads it out). + + For complete details, see the `3dZeropad Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> zeropad = afni.Zeropad() + >>> zeropad.inputs.in_files = 'functional.nii' + >>> zeropad.inputs.out_file = 'pad_functional.nii' + >>> zeropad.inputs.I = 10 + >>> zeropad.inputs.S = 10 + >>> zeropad.inputs.A = 10 + >>> zeropad.inputs.P = 10 + >>> zeropad.inputs.R = 10 + >>> zeropad.inputs.L = 10 + >>> zeropad.cmdline + '3dZeropad -A 10 -I 10 -L 10 -P 10 -R 10 -S 10 -prefix pad_functional.nii functional.nii' + >>> res = zeropad.run() # doctest: +SKIP + """ + + _cmd = '3dZeropad' + input_spec = ZeropadInputSpec + output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/ants/__init__.py b/nipype/interfaces/ants/__init__.py index 11c7ae724a..e8096cc8e0 100644 --- a/nipype/interfaces/ants/__init__.py +++ b/nipype/interfaces/ants/__init__.py @@ -5,7 +5,7 @@ """Top-level namespace for ants.""" # Registraiton programs -from .registration import ANTS, Registration +from .registration import ANTS, Registration, MeasureImageSimilarity # Resampling Programs from .resampling import (ApplyTransforms, ApplyTransformsToPoints, WarpImageMultiTransform, @@ -20,4 +20,5 @@ # Utility Programs from .utils import (AverageAffineTransform, AverageImages, MultiplyImages, - CreateJacobianDeterminantImage, AffineInitializer) + CreateJacobianDeterminantImage, AffineInitializer, + ComposeMultiTransform) diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py index 208cae8c25..00553fd977 100644 --- a/nipype/interfaces/ants/base.py +++ b/nipype/interfaces/ants/base.py @@ -4,10 +4,14 @@ """The ants module provides basic functions for interfacing with ANTS tools.""" from __future__ import print_function, division, unicode_literals, absolute_import from builtins import str + +import os + # Local imports -from ... import logging -from ..base import CommandLine, CommandLineInputSpec, traits, isdefined -logger = logging.getLogger('interface') +from ... 
import logging, LooseVersion +from ..base import (CommandLine, CommandLineInputSpec, traits, isdefined, + PackageInfo) +iflogger = logging.getLogger('interface') # -Using -1 gives primary responsibilty to ITKv4 to do the correct # thread limitings. @@ -25,6 +29,29 @@ ALT_ITKv4_THREAD_LIMIT_VARIABLE = 'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS' +class Info(PackageInfo): + version_cmd = os.path.join(os.getenv('ANTSPATH', ''), + 'antsRegistration') + ' --version' + + @staticmethod + def parse_version(raw_info): + for line in raw_info.splitlines(): + if line.startswith('ANTs Version: '): + v_string = line.split()[2] + break + else: + return None + + # -githash may or may not be appended + v_string = v_string.split('-')[0] + + # 2.2.0-equivalent version string + if 'post' in v_string and LooseVersion(v_string) >= LooseVersion('2.1.0.post789'): + return '2.2.0' + else: + return '.'.join(v_string.split('.')[:3]) + + class ANTSCommandInputSpec(CommandLineInputSpec): """Base Input Specification for all ANTS Commands """ @@ -84,3 +111,7 @@ def set_default_num_threads(cls, num_threads): .inputs.num_threads """ cls._num_threads = num_threads + + @property + def version(self): + return Info.version() diff --git a/nipype/interfaces/ants/legacy.py b/nipype/interfaces/ants/legacy.py index 3019f27c22..7df1731fa1 100644 --- a/nipype/interfaces/ants/legacy.py +++ b/nipype/interfaces/ants/legacy.py @@ -86,7 +86,7 @@ class antsIntroduction(ANTSCommand): >>> warp.inputs.reference_image = 'Template_6.nii' >>> warp.inputs.input_image = 'structural.nii' >>> warp.inputs.max_iterations = [30,90,20] - >>> warp.cmdline # doctest: +ALLOW_UNICODE + >>> warp.cmdline 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' """ @@ -129,8 +129,8 @@ class GenWarpFields(antsIntroduction): class buildtemplateparallelInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum(3, 2, argstr='-d %d', usedefault=True, - desc='image dimension (2 or 3)', position=1) + dimension = 
traits.Enum(3, 2, 4, argstr='-d %d', usedefault=True, + desc='image dimension (2, 3 or 4)', position=1) out_prefix = traits.Str('antsTMPL_', argstr='-o %s', usedefault=True, desc=('Prefix that is prepended to all output ' 'files (default = antsTMPL_)')) @@ -204,7 +204,7 @@ class buildtemplateparallel(ANTSCommand): >>> tmpl = buildtemplateparallel() >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] >>> tmpl.inputs.max_iterations = [30, 90, 20] - >>> tmpl.cmdline # doctest: +ALLOW_UNICODE + >>> tmpl.cmdline 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' """ diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 27e3caa1cc..a5fd0e63da 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -"""The ants module provides basic functions for interfacing with ants functions. +"""The ants module provides basic functions for interfacing with ants + functions. 
Change directory to provide relative paths for doctests >>> import os @@ -7,10 +8,12 @@ >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ -from __future__ import print_function, division, unicode_literals, absolute_import +from __future__ import (print_function, division, unicode_literals, + absolute_import) from builtins import range, str import os +from ...utils.filemanip import filename_to_list from ..base import TraitedSpec, File, Str, traits, InputMultiPath, isdefined from .base import ANTSCommand, ANTSCommandInputSpec @@ -19,17 +22,19 @@ class ANTSInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, position=1, desc='image dimension (2 or 3)') fixed_image = InputMultiPath(File(exists=True), mandatory=True, - desc=('image to which the moving image is warped')) + desc=('image to which the moving image is ' + 'warped')) moving_image = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, - desc=('image to apply transformation to (generally a coregistered ' + desc=('image to apply transformation to ' + '(generally a coregistered' 'functional)')) # Not all metrics are appropriate for all modalities. Also, not all metrics -# are efficeint or appropriate at all resolution levels, Some metrics perform -# well for gross global registraiton, but do poorly for small changes (i.e. -# Mattes), and some metrics do well for small changes but don't work well for -# gross level changes (i.e. 'CC'). +# are efficeint or appropriate at all resolution levels, Some metrics +# perform well for gross global registraiton, but do poorly for small +# changes (i.e. Mattes), and some metrics do well for small changes but +# don't work well for gross level changes (i.e. 'CC'). # # This is a two stage registration. in the first stage # [ 'Mattes', .................] 
@@ -48,12 +53,19 @@ class ANTSInputSpec(ANTSCommandInputSpec): metric = traits.List(traits.Enum('CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ', 'PSE'), mandatory=True, desc='') - metric_weight = traits.List(traits.Float(), requires=['metric'], desc='') - radius = traits.List(traits.Int(), requires=['metric'], desc='') + metric_weight = traits.List(traits.Float(), value=[1.0], usedefault=True, + requires=['metric'], mandatory=True, + desc='the metric weight(s) for each stage. ' + 'The weights must sum to 1 per stage.') + + radius = traits.List(traits.Int(), requires=['metric'], mandatory=True, + desc='radius of the region (i.e. number of layers' + ' around a voxel point)' + ' that is used for computing cross correlation') output_transform_prefix = Str('out', usedefault=True, - argstr='--output-naming %s', - mandatory=True, desc='') + argstr='--output-naming %s', + mandatory=True, desc='') transformation_model = traits.Enum('Diff', 'Elast', 'Exp', 'Greedy Exp', 'SyN', argstr='%s', mandatory=True, desc='') @@ -119,7 +131,7 @@ class ANTS(ANTSCommand): >>> ants.inputs.regularization_gradient_field_sigma = 3 >>> ants.inputs.regularization_deformation_field_sigma = 0 >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] - >>> ants.cmdline # doctest: +ALLOW_UNICODE + >>> ants.cmdline 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations \ 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] \ --transformation-model SyN[0.25] --use-Histogram-Matching 1' @@ -181,7 +193,8 @@ def _affine_gradient_descent_option_constructor(self): defaults[ii] = values[ii] except IndexError: break - parameters = self._format_xarray([('%g' % defaults[index]) for index in range(4)]) + parameters = self._format_xarray( + [('%g' % defaults[index]) for index in range(4)]) retval = ['--affine-gradient-descent-option', parameters] return ' '.join(retval) @@ -218,27 +231,52 @@ 
class RegistrationInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='--dimensionality %d', usedefault=True, desc='image dimension (2 or 3)') fixed_image = InputMultiPath(File(exists=True), mandatory=True, - desc='image to apply transformation to (generally a coregistered functional)') - fixed_image_mask = File(argstr='%s', exists=True, - desc='mask used to limit metric sampling region of the fixed image') + desc='Image to which the moving_image should be transformed' + '(usually a structural image)') + fixed_image_mask = File( + exists=True, argstr='%s', max_ver='2.1.0', xor=['fixed_image_masks'], + desc='Mask used to limit metric sampling region of the fixed image' + 'in all stages') + fixed_image_masks = InputMultiPath( + traits.Either('NULL', File(exists=True)), min_ver='2.2.0', xor=['fixed_image_mask'], + desc='Masks used to limit metric sampling region of the fixed image, defined per registration stage' + '(Use "NULL" to omit a mask at a given stage)') moving_image = InputMultiPath(File(exists=True), mandatory=True, - desc='image to apply transformation to (generally a coregistered functional)') - moving_image_mask = File(requires=['fixed_image_mask'], - exists=True, desc='mask used to limit metric sampling region of the moving image') + desc='Image that will be registered to the space of fixed_image. 
This is the' + 'image on which the transformations will be applied to') + moving_image_mask = File( + exists=True, requires=['fixed_image_mask'], max_ver='2.1.0', xor=['moving_image_masks'], + desc='mask used to limit metric sampling region of the moving image' + 'in all stages') + moving_image_masks = InputMultiPath( + traits.Either('NULL', File(exists=True)), min_ver='2.2.0', xor=['moving_image_mask'], + desc='Masks used to limit metric sampling region of the moving image, defined per registration stage' + '(Use "NULL" to omit a mask at a given stage)') save_state = File(argstr='--save-state %s', exists=False, desc='Filename for saving the internal restorable state of the registration') restore_state = File(argstr='--restore-state %s', exists=True, desc='Filename for restoring the internal restorable state of the registration') - initial_moving_transform = File(argstr='%s', exists=True, desc='', - xor=['initial_moving_transform_com']) - invert_initial_moving_transform = traits.Bool(requires=["initial_moving_transform"], - desc='', xor=['initial_moving_transform_com']) + initial_moving_transform = InputMultiPath(argstr='%s', + exists=True, + desc='A transform or a list of transforms that should be applied' + 'before the registration begins. 
Note that, when a list is given,' + 'the transformations are applied in reverse order.', + xor=['initial_moving_transform_com']) + invert_initial_moving_transform = InputMultiPath(traits.Bool(), + requires=[ + "initial_moving_transform"], + desc='One boolean or a list of booleans that indicate' + 'whether the inverse(s) of the transform(s) defined' + 'in initial_moving_transform should be used.', + xor=['initial_moving_transform_com']) initial_moving_transform_com = traits.Enum(0, 1, 2, argstr='%s', default=0, xor=['initial_moving_transform'], - desc="Use center of mass for moving transform") + desc="Align the moving_image nad fixed_image befor registration using" + "the geometric center of the images (=0), the image intensities (=1)," + "or the origin of the images (=2)") metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", "Mattes") metric_stage_trait = traits.Either( @@ -280,7 +318,8 @@ class RegistrationInputSpec(ANTSCommandInputSpec): use_estimate_learning_rate_once = traits.List(traits.Bool(), desc='') use_histogram_matching = traits.Either( traits.Bool, traits.List(traits.Bool(argstr='%s')), - default=True, usedefault=True) + default=True, usedefault=True, + desc='Histogram match the images before registration.') interpolation = traits.Enum( 'Linear', 'NearestNeighbor', 'CosineWindowedSinc', 'WelchWindowedSinc', 'HammingWindowedSinc', 'LanczosWindowedSinc', 'BSpline', 'MultiLabel', 'Gaussian', @@ -405,13 +444,64 @@ class RegistrationOutputSpec(TraitedSpec): warped_image = File(desc="Outputs warped image") inverse_warped_image = File(desc="Outputs the inverse of the warped image") save_state = File(desc="The saved registration state to be restored") + metric_value = traits.Float(desc='the final value of metric') + elapsed_time = traits.Float(desc='the total elapsed time as reported by ANTs') class Registration(ANTSCommand): """ + `antsRegistration `_ registers a ``moving_image`` to a ``fixed_image``, + using a predefined (sequence of) 
cost function(s) and transformation operations. + The cost function is defined using one or more 'metrics', specifically + local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``), + global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``). + + ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``, + or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``, + ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``, + ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple + *stages*. For example first an Affine, then a Rigid, and ultimately a non-linear + (Syn)-transformation. + + antsRegistration can be initialized using one ore more transforms from moving_image + to fixed_image with the ``initial_moving_transform``-input. For example, when you + already have a warpfield that corrects for geometrical distortions in an EPI (functional) image, + that you want to apply before an Affine registration to a structural image. + You could put this transform into 'intial_moving_transform'. + + The Registration-interface can output the resulting transform(s) that map moving_image to + fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform`` + is set to ``True``), or a list of transforms as ``forwards_transforms``. It can also output + inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using + ``inverse_composite_transform``. Note that the order of ``forward_transforms`` is in 'natural' + order: the first element should be applied first, the last element should be applied last. + + Note, however, that ANTS tools always apply lists of transformations in reverse order (the last + transformation in the list is applied first). 
Therefore, if the output forward_transforms + is a list, one can not directly feed it into, for example, ``ants.ApplyTransforms``. To + make ``ants.ApplyTransforms`` apply the transformations in the same order as ``ants.Registration``, + you have to provide the list of transformations in reverse order from ``forward_transforms``. + ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for + this purpose. Note also that, because ``composite_transform`` is always a single file, this + output is preferred for most use-cases. + + More information can be found in the `ANTS + manual `_. + + See below for some useful examples. + Examples -------- + + Set up a Registation node with some default settings. This Node registers + 'fixed1.nii' to 'moving1.nii' by first fitting a linear 'Affine' transformation, and + then a non-linear 'SyN' transformation, both using the Mutual Information-cost + metric. + + The registration is initailized by first applying the (linear) transform + trans.mat. 
+ >>> import copy, pprint >>> from nipype.interfaces.ants import Registration >>> reg = Registration() @@ -439,7 +529,7 @@ class Registration(ANTSCommand): >>> reg.inputs.use_estimate_learning_rate_once = [True, True] >>> reg.inputs.use_histogram_matching = [True, True] # This is the default >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - >>> reg.cmdline # doctest: +ALLOW_UNICODE + >>> reg.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -450,10 +540,12 @@ class Registration(ANTSCommand): --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' >>> reg.run() # doctest: +SKIP + Same as reg1, but first invert the initial transform ('trans.mat') before applying it. + >>> reg.inputs.invert_initial_moving_transform = True >>> reg1 = copy.deepcopy(reg) >>> reg1.inputs.winsorize_lower_quantile = 0.025 - >>> reg1.cmdline # doctest: +ALLOW_UNICODE + >>> reg1.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -464,9 +556,12 @@ class Registration(ANTSCommand): --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' >>> reg1.run() # doctest: +SKIP + Clip extremely high intensity data points using winsorize_upper_quantile. All data points + higher than the 0.975 quantile are set to the value of the 0.975 quantile. 
+ >>> reg2 = copy.deepcopy(reg) >>> reg2.inputs.winsorize_upper_quantile = 0.975 - >>> reg2.cmdline # doctest: +ALLOW_UNICODE + >>> reg2.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -476,10 +571,14 @@ class Registration(ANTSCommand): --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' + Clip extremely low intensity data points using winsorize_lower_quantile. All data points + lower than the 0.025 quantile are set to the original value at the 0.025 quantile. + + >>> reg3 = copy.deepcopy(reg) >>> reg3.inputs.winsorize_lower_quantile = 0.025 >>> reg3.inputs.winsorize_upper_quantile = 0.975 - >>> reg3.cmdline # doctest: +ALLOW_UNICODE + >>> reg3.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -489,9 +588,11 @@ class Registration(ANTSCommand): --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' + Use float instead of double for computations (saves memory usage) + >>> reg3a = copy.deepcopy(reg) >>> reg3a.inputs.float = True - >>> reg3a.cmdline # doctest: +ALLOW_UNICODE + >>> reg3a.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 \ --initial-moving-transform [ trans.mat, 1 ] 
--initialize-transforms-per-stage 0 --interpolation Linear \ --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ @@ -502,9 +603,11 @@ class Registration(ANTSCommand): --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' + Force to use double instead of float for computations (more precision and memory usage). + >>> reg3b = copy.deepcopy(reg) >>> reg3b.inputs.float = False - >>> reg3b.cmdline # doctest: +ALLOW_UNICODE + >>> reg3b.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 \ --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \ --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ @@ -515,6 +618,9 @@ class Registration(ANTSCommand): --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' + 'collapse_output_transforms' can be used to put all transformation in a single 'composite_transform'- + file. Note that forward_transforms will now be an empty list. 
+ >>> # Test collapse transforms flag >>> reg4 = copy.deepcopy(reg) >>> reg4.inputs.save_state = 'trans.mat' @@ -522,17 +628,19 @@ class Registration(ANTSCommand): >>> reg4.inputs.initialize_transforms_per_stage = True >>> reg4.inputs.collapse_output_transforms = True >>> outputs = reg4._list_outputs() - >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE +ALLOW_UNICODE + >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, {'composite_transform': '.../nipype/testing/data/output_Composite.h5', + 'elapsed_time': , 'forward_invert_flags': [], 'forward_transforms': [], 'inverse_composite_transform': '.../nipype/testing/data/output_InverseComposite.h5', 'inverse_warped_image': , + 'metric_value': , 'reverse_invert_flags': [], 'reverse_transforms': [], 'save_state': '.../nipype/testing/data/trans.mat', 'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'} - >>> reg4.cmdline # doctest: +ALLOW_UNICODE + >>> reg4.cmdline 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \ @@ -543,24 +651,27 @@ class Registration(ANTSCommand): --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' + >>> # Test collapse transforms flag >>> reg4b = copy.deepcopy(reg4) >>> reg4b.inputs.write_composite_transform = False >>> outputs = reg4b._list_outputs() - >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE +ALLOW_UNICODE + >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, {'composite_transform': , + 'elapsed_time': , 'forward_invert_flags': [False, False], 'forward_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', '.../nipype/testing/data/output_1Warp.nii.gz'], 'inverse_composite_transform': , 
'inverse_warped_image': , + 'metric_value': , 'reverse_invert_flags': [True, False], 'reverse_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', \ -'.../nipype/testing/data/output_1InverseWarp.nii.gz'], + '.../nipype/testing/data/output_1InverseWarp.nii.gz'], 'save_state': '.../nipype/testing/data/trans.mat', 'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'} >>> reg4b.aggregate_outputs() # doctest: +SKIP - >>> reg4b.cmdline # doctest: +ALLOW_UNICODE + >>> reg4b.cmdline 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \ @@ -571,6 +682,14 @@ class Registration(ANTSCommand): --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 0' + One can use multiple similarity metrics in a single registration stage.The Node below first + performs a linear registation using only the Mutual Information ('Mattes')-metric. + In a second stage, it performs a non-linear registration ('Syn') using both a + Mutual Information and a local cross-correlation ('CC')-metric. Both metrics are weighted + equally ('metric_weight' is .5 for both). The Mutual Information- metric uses 32 bins. + The local cross-correlations (correlations between every voxel's neighborhoods) is computed + with a radius of 4. 
+ >>> # Test multiple metrics per stage >>> reg5 = copy.deepcopy(reg) >>> reg5.inputs.fixed_image = 'fixed1.nii' @@ -580,7 +699,7 @@ class Registration(ANTSCommand): >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ] >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] - >>> reg5.cmdline # doctest: +ALLOW_UNICODE + >>> reg5.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -591,11 +710,17 @@ class Registration(ANTSCommand): --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' - >>> # Test multiple inputs + ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed + that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and + moving2.nii. First, a linear registration is performed matching fixed1.nii to moving1.nii, + then a non-linear registration is performed to match fixed2.nii to moving2.nii, starting from + the transformation of the first step. 
+ + >>> # Test multiple inputS >>> reg6 = copy.deepcopy(reg5) >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] - >>> reg6.cmdline # doctest: +ALLOW_UNICODE + >>> reg6.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -606,11 +731,13 @@ class Registration(ANTSCommand): --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + Different methods can be used for the interpolation when applying transformations. + >>> # Test Interpolation Parameters (BSpline) >>> reg7a = copy.deepcopy(reg) >>> reg7a.inputs.interpolation = 'BSpline' >>> reg7a.inputs.interpolation_parameters = (3,) - >>> reg7a.cmdline # doctest: +ALLOW_UNICODE + >>> reg7a.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -624,7 +751,7 @@ class Registration(ANTSCommand): >>> reg7b = copy.deepcopy(reg) >>> reg7b.inputs.interpolation = 'Gaussian' >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) - >>> reg7b.cmdline # doctest: +ALLOW_UNICODE + >>> reg7b.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] \ --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ @@ -635,11 +762,13 @@ 
class Registration(ANTSCommand): --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' + BSplineSyN non-linear registration with custom parameters. + >>> # Test Extended Transform Parameters >>> reg8 = copy.deepcopy(reg) >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] - >>> reg8.cmdline # doctest: +ALLOW_UNICODE + >>> reg8.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ @@ -648,6 +777,41 @@ class Registration(ANTSCommand): --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + Mask the fixed image in the second stage of the registration (but not the first). 
+ + >>> # Test masking + >>> reg9 = copy.deepcopy(reg) + >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii'] + >>> reg9.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] \ +--winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + Here we use both a warpfield and a linear transformation, before registration commences. Note that + the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of + 'initial_moving_transform'. 
+ + >>> # Test initialization with multiple transforms matrices (e.g., unwarp and affine transform) + >>> reg10 = copy.deepcopy(reg) + >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz'] + >>> reg10.inputs.invert_initial_moving_transform = [False, False] + >>> reg10.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform \ +[ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear \ +--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 1' """ DEF_SAMPLING_STRATEGY = 'None' """The default sampling strategy argument.""" @@ -656,7 +820,31 @@ class Registration(ANTSCommand): input_spec = RegistrationInputSpec output_spec = RegistrationOutputSpec _quantilesDone = False - _linear_transform_names = ['Rigid', 'Affine', 'Translation', 'CompositeAffine', 'Similarity'] + _linear_transform_names = ['Rigid', 'Affine', + 'Translation', 'CompositeAffine', 'Similarity'] + + def __init__(self, **inputs): + super(Registration, self).__init__(**inputs) + self._elapsed_time = None + self._metric_value = None + + def _run_interface(self, runtime, correct_return_codes=(0,)): + runtime = super(Registration, self)._run_interface(runtime) + + # Parse some profiling info + output = runtime.stdout or runtime.merged + if output: + lines = output.split('\n') + for l in lines[::-1]: + # This should be 
the last line + if l.strip().startswith('Total elapsed time:'): + self._elapsed_time = float(l.strip().replace( + 'Total elapsed time: ', '')) + elif 'DIAGNOSTIC' in l: + self._metric_value = float(l.split(',')[2]) + break + + return runtime def _format_metric(self, index): """ @@ -783,6 +971,23 @@ def _format_registration(self): if isdefined(self.inputs.restrict_deformation): retval.append('--restrict-deformation %s' % self._format_xarray(self.inputs.restrict_deformation[ii])) + if any((isdefined(self.inputs.fixed_image_masks), + isdefined(self.inputs.moving_image_masks))): + if isdefined(self.inputs.fixed_image_masks): + fixed_masks = filename_to_list( + self.inputs.fixed_image_masks) + fixed_mask = fixed_masks[ii if len(fixed_masks) > 1 else 0] + else: + fixed_mask = 'NULL' + + if isdefined(self.inputs.moving_image_masks): + moving_masks = filename_to_list( + self.inputs.moving_image_masks) + moving_mask = moving_masks[ii if len( + moving_masks) > 1 else 0] + else: + moving_mask = 'NULL' + retval.append('--masks [ %s, %s ]' % (fixed_mask, moving_mask)) return " ".join(retval) def _get_outputfilenames(self, inverse=False): @@ -807,7 +1012,8 @@ def _get_outputfilenames(self, inverse=False): return inv_output_filename def _format_convergence(self, ii): - convergence_iter = self._format_xarray(self.inputs.number_of_iterations[ii]) + convergence_iter = self._format_xarray( + self.inputs.number_of_iterations[ii]) if len(self.inputs.convergence_threshold) > ii: convergence_value = self.inputs.convergence_threshold[ii] else: @@ -825,6 +1031,21 @@ def _format_winsorize_image_intensities(self): self._quantilesDone = True return '--winsorize-image-intensities [ %s, %s ]' % (self.inputs.winsorize_lower_quantile, self.inputs.winsorize_upper_quantile) + def _get_initial_transform_filenames(self): + n_transforms = len(self.inputs.initial_moving_transform) + + # Assume transforms should not be inverted by default + invert_flags = [0] * n_transforms + if 
isdefined(self.inputs.invert_initial_moving_transform): + if len(self.inputs.invert_initial_moving_transform) != n_transforms: + raise Exception( + 'Inputs "initial_moving_transform" and "invert_initial_moving_transform"' + 'should have the same length.') + invert_flags = self.inputs.invert_initial_moving_transform + + retval = ["[ %s, %d ]" % (xfm, int(flag)) for xfm, flag in zip( + self.inputs.initial_moving_transform, invert_flags)] + return " ".join(['--initial-moving-transform'] + retval) def _format_arg(self, opt, spec, val): if opt == 'fixed_image_mask': @@ -836,10 +1057,7 @@ def _format_arg(self, opt, spec, val): elif opt == 'transforms': return self._format_registration() elif opt == 'initial_moving_transform': - do_invert_transform = self.inputs.invert_initial_moving_transform \ - if isdefined(self.inputs.invert_initial_moving_transform) else 0 # Just do the default behavior - return '--initial-moving-transform [ %s, %d ]' % (self.inputs.initial_moving_transform, - do_invert_transform) + return self._get_initial_transform_filenames() elif opt == 'initial_moving_transform_com': do_center_of_mass_init = self.inputs.initial_moving_transform_com \ if isdefined(self.inputs.initial_moving_transform_com) else 0 # Just do the default behavior @@ -906,7 +1124,8 @@ def _list_outputs(self): # invert_initial_moving_transform should be always defined, even if # there's no initial transform - invert_initial_moving_transform = False + invert_initial_moving_transform = [ + False] * len(self.inputs.initial_moving_transform) if isdefined(self.inputs.invert_initial_moving_transform): invert_initial_moving_transform = self.inputs.invert_initial_moving_transform @@ -916,15 +1135,18 @@ def _list_outputs(self): filename = self.inputs.output_transform_prefix + \ 'InverseComposite.h5' outputs['inverse_composite_transform'] = os.path.abspath(filename) - else: # If composite transforms are written, then individuals are not written (as of 2014-10-26 + # If composite transforms are 
written, then individuals are not written (as of 2014-10-26 + else: if not self.inputs.collapse_output_transforms: transform_count = 0 if isdefined(self.inputs.initial_moving_transform): - outputs['forward_transforms'].append(self.inputs.initial_moving_transform) - outputs['forward_invert_flags'].append(invert_initial_moving_transform) - outputs['reverse_transforms'].insert(0, self.inputs.initial_moving_transform) - outputs['reverse_invert_flags'].insert(0, not invert_initial_moving_transform) # Prepend - transform_count += 1 + outputs['forward_transforms'] += self.inputs.initial_moving_transform + outputs['forward_invert_flags'] += invert_initial_moving_transform + outputs['reverse_transforms'] = self.inputs.initial_moving_transform + \ + outputs['reverse_transforms'] + outputs['reverse_invert_flags'] = [ + not e for e in invert_initial_moving_transform] + outputs['reverse_invert_flags'] # Prepend + transform_count += len(self.inputs.initial_moving_transform) elif isdefined(self.inputs.initial_moving_transform_com): forward_filename, forward_inversemode = self._output_filenames( self.inputs.output_transform_prefix, @@ -935,7 +1157,8 @@ def _list_outputs(self): transform_count, 'Initial', True) - outputs['forward_transforms'].append(os.path.abspath(forward_filename)) + outputs['forward_transforms'].append( + os.path.abspath(forward_filename)) outputs['forward_invert_flags'].append(False) outputs['reverse_transforms'].insert(0, os.path.abspath(reverse_filename)) @@ -949,14 +1172,18 @@ def _list_outputs(self): reverse_filename, reverse_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, self.inputs.transforms[count], True) - outputs['forward_transforms'].append(os.path.abspath(forward_filename)) + outputs['forward_transforms'].append( + os.path.abspath(forward_filename)) outputs['forward_invert_flags'].append(forward_inversemode) - outputs['reverse_transforms'].insert(0, os.path.abspath(reverse_filename)) - 
outputs['reverse_invert_flags'].insert(0, reverse_inversemode) + outputs['reverse_transforms'].insert( + 0, os.path.abspath(reverse_filename)) + outputs['reverse_invert_flags'].insert( + 0, reverse_inversemode) transform_count += 1 else: transform_count = 0 - is_linear = [t in self._linear_transform_names for t in self.inputs.transforms] + is_linear = [ + t in self._linear_transform_names for t in self.inputs.transforms] collapse_list = [] if isdefined(self.inputs.initial_moving_transform) or \ @@ -980,9 +1207,11 @@ def _list_outputs(self): transform_count, transform, inverse=True) - outputs['forward_transforms'].append(os.path.abspath(forward_filename)) + outputs['forward_transforms'].append( + os.path.abspath(forward_filename)) outputs['forward_invert_flags'].append(forward_inversemode) - outputs['reverse_transforms'].append(os.path.abspath(reverse_filename)) + outputs['reverse_transforms'].append( + os.path.abspath(reverse_filename)) outputs['reverse_invert_flags'].append(reverse_inversemode) transform_count += 1 @@ -994,4 +1223,130 @@ def _list_outputs(self): outputs['inverse_warped_image'] = os.path.abspath(inv_out_filename) if len(self.inputs.save_state): outputs['save_state'] = os.path.abspath(self.inputs.save_state) + if self._metric_value: + outputs['metric_value'] = self._metric_value + if self._elapsed_time: + outputs['elapsed_time'] = self._elapsed_time + return outputs + + +class MeasureImageSimilarityInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 2, 3, 4, + argstr='--dimensionality %d', position=1, + desc='Dimensionality of the fixed/moving image pair', + ) + fixed_image = File( + exists=True, mandatory=True, + desc='Image to which the moving image is warped', + ) + moving_image = File( + exists=True, mandatory=True, + desc='Image to apply transformation to (generally a coregistered functional)', + ) + metric = traits.Enum( + "CC", "MI", "Mattes", "MeanSquares", "Demons", "GC", + argstr="%s", mandatory=True, + ) + metric_weight = 
traits.Float( + requires=['metric'], default=1.0, usedefault=True, + desc='The "metricWeight" variable is not used.', + ) + radius_or_number_of_bins = traits.Int( + requires=['metric'], mandatory=True, + desc='The number of bins in each stage for the MI and Mattes metric, ' + 'or the radius for other metrics', + ) + sampling_strategy = traits.Enum( + "None", "Regular", "Random", + requires=['metric'], default="None", usedefault=True, + desc='Manner of choosing point set over which to optimize the metric. ' + 'Defaults to "None" (i.e. a dense sampling of one sample per voxel).' + ) + sampling_percentage = traits.Either( + traits.Range(low=0.0, high=1.0), + requires=['metric'], mandatory=True, + desc='Percentage of points accessible to the sampling strategy over which ' + 'to optimize the metric.' + ) + fixed_image_mask = File( + exists=True, argstr='%s', + desc='mask used to limit metric sampling region of the fixed image', + ) + moving_image_mask = File( + exists=True, requires=['fixed_image_mask'], + desc='mask used to limit metric sampling region of the moving image', + ) + + +class MeasureImageSimilarityOutputSpec(TraitedSpec): + similarity = traits.Float() + + +class MeasureImageSimilarity(ANTSCommand): + """ + + + Examples + -------- + + >>> from nipype.interfaces.ants import MeasureImageSimilarity + >>> sim = MeasureImageSimilarity() + >>> sim.inputs.dimension = 3 + >>> sim.inputs.metric = 'MI' + >>> sim.inputs.fixed_image = 'T1.nii' + >>> sim.inputs.moving_image = 'resting.nii' + >>> sim.inputs.metric_weight = 1.0 + >>> sim.inputs.radius_or_number_of_bins = 5 + >>> sim.inputs.sampling_strategy = 'Regular' + >>> sim.inputs.sampling_percentage = 1.0 + >>> sim.inputs.fixed_image_mask = 'mask.nii' + >>> sim.inputs.moving_image_mask = 'mask.nii.gz' + >>> sim.cmdline + 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] \ +--metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]' + """ + _cmd = 'MeasureImageSimilarity' + input_spec = 
MeasureImageSimilarityInputSpec + output_spec = MeasureImageSimilarityOutputSpec + + def _metric_constructor(self): + retval = '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},'\ + '{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]'\ + .format( + metric=self.inputs.metric, + fixed_image=self.inputs.fixed_image, + moving_image=self.inputs.moving_image, + metric_weight=self.inputs.metric_weight, + radius_or_number_of_bins=self.inputs.radius_or_number_of_bins, + sampling_strategy=self.inputs.sampling_strategy, + sampling_percentage=self.inputs.sampling_percentage, + ) + return retval + + def _mask_constructor(self): + if self.inputs.moving_image_mask: + retval = '--masks ["{fixed_image_mask}","{moving_image_mask}"]'\ + .format( + fixed_image_mask=self.inputs.fixed_image_mask, + moving_image_mask=self.inputs.moving_image_mask, + ) + else: + retval = '--masks "{fixed_image_mask}"'\ + .format( + fixed_image_mask=self.inputs.fixed_image_mask, + ) + return retval + + def _format_arg(self, opt, spec, val): + if opt == 'metric': + return self._metric_constructor() + elif opt == 'fixed_image_mask': + return self._mask_constructor() + return super(MeasureImageSimilarity, self)._format_arg(opt, spec, val) + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = self._outputs() + stdout = runtime.stdout.split('\n') + outputs.similarity = float(stdout[0]) return outputs diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py index 39393dc0f0..3ed60a51b1 100644 --- a/nipype/interfaces/ants/resampling.py +++ b/nipype/interfaces/ants/resampling.py @@ -43,9 +43,12 @@ class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec): desc='transformation file(s) to be applied', mandatory=True, copyfile=False) invert_affine = traits.List(traits.Int, - desc=('List of Affine transformations to invert. ' + desc=('List of Affine transformations to invert.' 
'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' - 'found in transformation_series')) + 'found in transformation_series. Note that indexing ' + 'starts with 1 and does not include warp fields. Affine ' + 'transformations are distinguished ' + 'from warp fields by the word "affine" included in their filenames.')) class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec): @@ -63,10 +66,18 @@ class WarpTimeSeriesImageMultiTransform(ANTSCommand): >>> wtsimt.inputs.input_image = 'resting.nii' >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] - >>> wtsimt.cmdline # doctest: +ALLOW_UNICODE + >>> wtsimt.cmdline 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \ ants_Affine.txt' + >>> wtsimt = WarpTimeSeriesImageMultiTransform() + >>> wtsimt.inputs.input_image = 'resting.nii' + >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' + >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] + >>> wtsimt.inputs.invert_affine = [1] # # this will invert the 1st Affine file: ants_Affine.txt + >>> wtsimt.cmdline + 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \ +-i ants_Affine.txt' """ _cmd = 'WarpTimeSeriesImageMultiTransform' @@ -81,13 +92,22 @@ def _format_arg(self, opt, spec, val): if opt == 'transformation_series': series = [] affine_counter = 0 + affine_invert = [] for transformation in val: if 'Affine' in transformation and \ isdefined(self.inputs.invert_affine): affine_counter += 1 if affine_counter in self.inputs.invert_affine: - series += ['-i'], + series += ['-i'] + affine_invert.append(affine_counter) series += [transformation] + + if isdefined(self.inputs.invert_affine): + diff_inv = set(self.inputs.invert_affine) - set(affine_invert) + if diff_inv: + raise Exceptions("Review invert_affine, not all indexes from 
invert_affine were used, " + "check the description for the full definition") + return ' '.join(series) return super(WarpTimeSeriesImageMultiTransform, self)._format_arg(opt, spec, val) @@ -159,7 +179,7 @@ class WarpImageMultiTransform(ANTSCommand): >>> wimt.inputs.input_image = 'structural.nii' >>> wimt.inputs.reference_image = 'ants_deformed.nii.gz' >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] - >>> wimt.cmdline # doctest: +ALLOW_UNICODE + >>> wimt.cmdline 'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \ ants_Affine.txt' @@ -168,8 +188,8 @@ class WarpImageMultiTransform(ANTSCommand): >>> wimt.inputs.reference_image = 'functional.nii' >>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', \ 'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt'] - >>> wimt.inputs.invert_affine = [1] - >>> wimt.cmdline # doctest: +ALLOW_UNICODE + >>> wimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: 'func2anat_coreg_Affine.txt' + >>> wimt.cmdline 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ -i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' @@ -190,14 +210,24 @@ def _format_arg(self, opt, spec, val): if opt == 'transformation_series': series = [] affine_counter = 0 + affine_invert = [] for transformation in val: if "affine" in transformation.lower() and \ isdefined(self.inputs.invert_affine): affine_counter += 1 if affine_counter in self.inputs.invert_affine: - series += '-i', + series += ['-i'] + affine_invert.append(affine_counter) series += [transformation] + + if isdefined(self.inputs.invert_affine): + diff_inv = set(self.inputs.invert_affine) - set(affine_invert) + if diff_inv: + raise Exceptions("Review invert_affine, not all indexes from invert_affine were used, " + "check the description for the full definition") + 
return ' '.join(series) + return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val) def _list_outputs(self): @@ -243,13 +273,15 @@ class ApplyTransformsInputSpec(ANTSCommandInputSpec): 'Gaussian', 'BSpline', argstr='%s', usedefault=True) - interpolation_parameters = traits.Either(traits.Tuple(traits.Int()), # BSpline (order) - traits.Tuple(traits.Float(), # Gaussian/MultiLabel (sigma, alpha) - traits.Float()) - ) - transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, - desc='transform files: will be applied in reverse order. For ' - 'example, the last specified transform will be applied first.') + interpolation_parameters = traits.Either( + traits.Tuple(traits.Int()), # BSpline (order) + traits.Tuple(traits.Float(), # Gaussian/MultiLabel (sigma, alpha) + traits.Float()) + ) + transforms = traits.Either( + InputMultiPath(File(exists=True)), 'identity', argstr='%s', mandatory=True, + desc='transform files: will be applied in reverse order. For ' + 'example, the last specified transform will be applied first.') invert_transform_flags = InputMultiPath(traits.Bool()) default_value = traits.Float(0.0, argstr='--default-value %g', usedefault=True) print_out_composite_warp_file = traits.Bool(False, requires=["output_image"], @@ -269,6 +301,15 @@ class ApplyTransforms(ANTSCommand): -------- >>> from nipype.interfaces.ants import ApplyTransforms + >>> at = ApplyTransforms() + >>> at.inputs.input_image = 'moving1.nii' + >>> at.inputs.reference_image = 'fixed1.nii' + >>> at.inputs.transforms = 'identity' + >>> at.cmdline + 'antsApplyTransforms --default-value 0 --input moving1.nii \ +--interpolation Linear --output moving1_trans.nii \ +--reference-image fixed1.nii -t identity' + >>> at = ApplyTransforms() >>> at.inputs.dimension = 3 >>> at.inputs.input_image = 'moving1.nii' @@ -278,7 +319,7 @@ class ApplyTransforms(ANTSCommand): >>> at.inputs.default_value = 0 >>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] >>> 
at.inputs.invert_transform_flags = [False, False] - >>> at.cmdline # doctest: +ALLOW_UNICODE + >>> at.cmdline 'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation Linear \ --output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \ --transform [ trans.mat, 0 ]' @@ -293,7 +334,7 @@ class ApplyTransforms(ANTSCommand): >>> at1.inputs.default_value = 0 >>> at1.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] >>> at1.inputs.invert_transform_flags = [False, False] - >>> at1.cmdline # doctest: +ALLOW_UNICODE + >>> at1.cmdline 'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation BSpline[ 5 ] \ --output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \ --transform [ trans.mat, 0 ]' @@ -338,6 +379,8 @@ def _format_arg(self, opt, spec, val): if opt == "output_image": return self._get_output_warped_filename() elif opt == "transforms": + if val == 'identity': + return '-t identity' return self._get_transform_filenames() elif opt == 'interpolation': if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \ @@ -399,7 +442,7 @@ class ApplyTransformsToPoints(ANTSCommand): >>> at.inputs.input_file = 'moving.csv' >>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz'] >>> at.inputs.invert_transform_flags = [False, False] - >>> at.cmdline # doctest: +ALLOW_UNICODE + >>> at.cmdline 'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv \ --transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]' diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index aff6f2c6c0..6c594b5e24 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -13,9 +13,8 @@ import os from ...external.due import BibTeX -from ...utils.filemanip import split_filename, copyfile -from ..base import 
(TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined, - _exists_in_path) +from ...utils.filemanip import split_filename, copyfile, which +from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined from .base import ANTSCommand, ANTSCommandInputSpec @@ -91,7 +90,7 @@ class Atropos(ANTSCommand): >>> at.inputs.posterior_formulation = 'Socrates' >>> at.inputs.use_mixture_model_proportions = True >>> at.inputs.save_posteriors = True - >>> at.cmdline # doctest: +ALLOW_UNICODE + >>> at.cmdline 'Atropos --image-dimensionality 3 --icm [1,1] \ --initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07] --intensity-image structural.nii \ --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ @@ -209,7 +208,7 @@ class LaplacianThickness(ANTSCommand): >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz' >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz' >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz' - >>> cort_thick.cmdline # doctest: +ALLOW_UNICODE + >>> cort_thick.cmdline 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz' """ @@ -238,9 +237,9 @@ def _list_outputs(self): class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec): - dimension = traits.Enum(3, 2, argstr='-d %d', + dimension = traits.Enum(3, 2, 4, argstr='-d %d', usedefault=True, - desc='image dimension (2 or 3)') + desc='image dimension (2, 3 or 4)') input_image = File(argstr='--input-image %s', mandatory=True, desc=('image to apply transformation to (generally a ' 'coregistered functional)')) @@ -294,7 +293,7 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4.inputs.bspline_fitting_distance = 300 >>> n4.inputs.shrink_factor = 3 >>> n4.inputs.n_iterations = [50,50,30,20] - >>> n4.cmdline # doctest: +ALLOW_UNICODE + >>> n4.cmdline 'N4BiasFieldCorrection --bspline-fitting [ 300 ] \ -d 3 --input-image structural.nii \ --convergence [ 50x50x30x20 ] 
--output structural_corrected.nii \ @@ -302,7 +301,7 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4_2 = copy.deepcopy(n4) >>> n4_2.inputs.convergence_threshold = 1e-6 - >>> n4_2.cmdline # doctest: +ALLOW_UNICODE + >>> n4_2.cmdline 'N4BiasFieldCorrection --bspline-fitting [ 300 ] \ -d 3 --input-image structural.nii \ --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \ @@ -310,7 +309,7 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4_3 = copy.deepcopy(n4_2) >>> n4_3.inputs.bspline_order = 5 - >>> n4_3.cmdline # doctest: +ALLOW_UNICODE + >>> n4_3.cmdline 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \ -d 3 --input-image structural.nii \ --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \ @@ -320,7 +319,7 @@ class N4BiasFieldCorrection(ANTSCommand): >>> n4_4.inputs.input_image = 'structural.nii' >>> n4_4.inputs.save_bias = True >>> n4_4.inputs.dimension = 3 - >>> n4_4.cmdline # doctest: +ALLOW_UNICODE + >>> n4_4.cmdline 'N4BiasFieldCorrection -d 3 --input-image structural.nii \ --output [ structural_corrected.nii, structural_bias.nii ]' """ @@ -530,7 +529,7 @@ class CorticalThickness(ANTSCommand): ... 'BrainSegmentationPrior03.nii.gz', ... 
'BrainSegmentationPrior04.nii.gz'] >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz' - >>> corticalthickness.cmdline # doctest: +ALLOW_UNICODE + >>> corticalthickness.cmdline 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \ -s nii.gz -o antsCT_ -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' @@ -709,7 +708,7 @@ class BrainExtraction(ANTSCommand): >>> brainextraction.inputs.anatomical_image ='T1.nii.gz' >>> brainextraction.inputs.brain_template = 'study_template.nii.gz' >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz' - >>> brainextraction.cmdline # doctest: +ALLOW_UNICODE + >>> brainextraction.cmdline 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \ -s nii.gz -o highres001_' """ @@ -720,17 +719,19 @@ class BrainExtraction(ANTSCommand): def _run_interface(self, runtime, correct_return_codes=(0,)): # antsBrainExtraction.sh requires ANTSPATH to be defined out_environ = self._get_environ() - if out_environ.get('ANTSPATH') is None: - runtime.environ.update(out_environ) - executable_name = self.cmd.split()[0] - exist_val, cmd_path = _exists_in_path(executable_name, runtime.environ) - if not exist_val: - raise IOError("command '%s' could not be found on host %s" % - (self.cmd.split()[0], runtime.hostname)) - - # Set the environment variable if found - runtime.environ.update({'ANTSPATH': os.path.dirname(cmd_path)}) - + ants_path = out_environ.get('ANTSPATH', None) or os.getenv('ANTSPATH', None) + if ants_path is None: + # Check for antsRegistration, which is under bin/ (the $ANTSPATH) instead of + # checking for antsBrainExtraction.sh which is under script/ + cmd_path = which('antsRegistration', env=runtime.environ) + if not cmd_path: + raise RuntimeError( + 'The environment variable $ANTSPATH is not defined in host "%s", ' + 'and Nipype 
could not determine it automatically.' % runtime.hostname) + ants_path = os.path.dirname(cmd_path) + + self.inputs.environ.update({'ANTSPATH': ants_path}) + runtime.environ.update({'ANTSPATH': ants_path}) runtime = super(BrainExtraction, self)._run_interface(runtime) # Still, double-check if it didn't found N4 @@ -740,8 +741,8 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): tool = line.strip().replace('we cant find the', '').split(' ')[0] break - errmsg = ('antsBrainExtraction.sh requires %s the environment variable ' - 'ANTSPATH to be defined' % tool) + errmsg = ('antsBrainExtraction.sh requires "%s" to be found in $ANTSPATH ' + '($ANTSPATH="%s").') % (tool, ants_path) if runtime.stderr is None: runtime.stderr = errmsg else: @@ -900,7 +901,7 @@ class JointFusion(ANTSCommand): ... 'segmentation1.nii.gz', ... 'segmentation1.nii.gz'] >>> at.inputs.target_image = 'T1.nii' - >>> at.cmdline # doctest: +ALLOW_UNICODE + >>> at.cmdline 'jointfusion 3 1 -m Joint[0.1,2] -tg T1.nii -g im1.nii -g im2.nii -g im3.nii -l segmentation0.nii.gz \ -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' @@ -909,7 +910,7 @@ class JointFusion(ANTSCommand): >>> at.inputs.beta = 1 >>> at.inputs.patch_radius = [3,2,1] >>> at.inputs.search_radius = [1,2,3] - >>> at.cmdline # doctest: +ALLOW_UNICODE + >>> at.cmdline 'jointfusion 3 1 -m Joint[0.5,1] -rp 3x2x1 -rs 1x2x3 -tg T1.nii -g im1.nii -g im2.nii -g im3.nii \ -l segmentation0.nii.gz -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' """ @@ -986,20 +987,20 @@ class DenoiseImage(ANTSCommand): >>> denoise = DenoiseImage() >>> denoise.inputs.dimension = 3 >>> denoise.inputs.input_image = 'im1.nii' - >>> denoise.cmdline # doctest: +ALLOW_UNICODE + >>> denoise.cmdline 'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1' >>> denoise_2 = copy.deepcopy(denoise) >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz' >>> denoise_2.inputs.noise_model 
= 'Rician' >>> denoise_2.inputs.shrink_factor = 2 - >>> denoise_2.cmdline # doctest: +ALLOW_UNICODE + >>> denoise_2.cmdline 'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2' >>> denoise_3 = DenoiseImage() >>> denoise_3.inputs.input_image = 'im1.nii' >>> denoise_3.inputs.save_noise = True - >>> denoise_3.cmdline # doctest: +ALLOW_UNICODE + >>> denoise_3.cmdline 'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1' """ input_spec = DenoiseImageInputSpec @@ -1103,12 +1104,12 @@ class AntsJointFusion(ANTSCommand): >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ] >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz'] >>> antsjointfusion.inputs.target_image = ['im1.nii'] - >>> antsjointfusion.cmdline # doctest: +ALLOW_UNICODE + >>> antsjointfusion.cmdline "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \ -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" >>> antsjointfusion.inputs.target_image = [ ['im1.nii', 'im2.nii'] ] - >>> antsjointfusion.cmdline # doctest: +ALLOW_UNICODE + >>> antsjointfusion.cmdline "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \ -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" @@ -1116,7 +1117,7 @@ class AntsJointFusion(ANTSCommand): ... ['rc2s1.nii','rc2s2.nii'] ] >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', ... 
'segmentation1.nii.gz'] - >>> antsjointfusion.cmdline # doctest: +ALLOW_UNICODE + >>> antsjointfusion.cmdline "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii \ -s 3x3x3 -t ['im1.nii', 'im2.nii']" @@ -1126,7 +1127,7 @@ class AntsJointFusion(ANTSCommand): >>> antsjointfusion.inputs.beta = 1.0 >>> antsjointfusion.inputs.patch_radius = [3,2,1] >>> antsjointfusion.inputs.search_radius = [3] - >>> antsjointfusion.cmdline # doctest: +ALLOW_UNICODE + >>> antsjointfusion.cmdline "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii \ -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" @@ -1135,7 +1136,7 @@ class AntsJointFusion(ANTSCommand): >>> antsjointfusion.inputs.verbose = True >>> antsjointfusion.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] >>> antsjointfusion.inputs.exclusion_image_label = ['1','2'] - >>> antsjointfusion.cmdline # doctest: +ALLOW_UNICODE + >>> antsjointfusion.cmdline "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \ -o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" @@ -1144,7 +1145,7 @@ class AntsJointFusion(ANTSCommand): >>> antsjointfusion.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' >>> antsjointfusion.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' >>> antsjointfusion.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz' - >>> antsjointfusion.cmdline # doctest: +ALLOW_UNICODE + >>> antsjointfusion.cmdline "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 
-d 3 -e 1[roi01.nii] -e 2[roi02.nii] \ -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, \ @@ -1323,8 +1324,8 @@ class KellyKapowski(ANTSCommand): >>> #kk.inputs.use_bspline_smoothing = False >>> kk.inputs.number_integration_points = 10 >>> kk.inputs.thickness_prior_estimate = 10 - >>> kk.cmdline # doctest: +ALLOW_UNICODE - u'KellyKapowski --convergence "[45,0.0,10]" \ + >>> kk.cmdline + 'KellyKapowski --convergence "[45,0.0,10]" \ --output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" \ --image-dimensionality 3 --gradient-step 0.025000 --number-of-integration-points 10 \ --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 \ diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py index e7fbe117ae..682d070ee2 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTS.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -21,12 +21,15 @@ def test_ANTS_inputs(): ), gradient_step_length=dict(requires=['transformation_model'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), metric=dict(mandatory=True, ), - metric_weight=dict(requires=['metric'], + metric_weight=dict(mandatory=True, + requires=['metric'], + usedefault=True, ), mi_option=dict(argstr='--MI-option %s', sep='x', @@ -49,7 +52,8 @@ def test_ANTS_inputs(): mandatory=True, usedefault=True, ), - radius=dict(requires=['metric'], + radius=dict(mandatory=True, + requires=['metric'], ), regularization=dict(argstr='%s', ), @@ -65,7 +69,8 @@ def test_ANTS_inputs(): ), symmetry_type=dict(requires=['delta_time'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation_model=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py index 
4f6920645b..c58eeefc67 100644 --- a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -9,13 +9,15 @@ def test_ANTSCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_threads=dict(nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ANTSCommand.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py index 319798e13f..fcdc519aad 100644 --- a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py +++ b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -17,7 +17,8 @@ def test_AffineInitializer_inputs(): mandatory=True, position=1, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), local_search=dict(argstr='%d', @@ -47,7 +48,8 @@ def test_AffineInitializer_inputs(): position=4, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AffineInitializer.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index dcd115429f..09d95a4205 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -31,7 +31,8 @@ def test_AntsJointFusion_inputs(): exclusion_image_label=dict(argstr='-e %s', requires=['exclusion_image'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask_image=dict(argstr='-x %s', @@ -68,7 +69,8 @@ def test_AntsJointFusion_inputs(): target_image=dict(argstr='-t %s', mandatory=True, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-v', ), diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py index 4b27963757..b159e6ee1d 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -16,7 +16,8 @@ def test_ApplyTransforms_inputs(): ), float=dict(argstr='--float %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='--input %s', @@ -43,7 +44,8 @@ def test_ApplyTransforms_inputs(): reference_image=dict(argstr='--reference-image %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transforms=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py index 5a20ac0f43..b9f3f63d54 100644 --- a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -11,7 +11,8 @@ def test_ApplyTransformsToPoints_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='--input %s', @@ -26,7 +27,8 @@ def test_ApplyTransformsToPoints_inputs(): name_source=['input_file'], name_template='%s_transformed.csv', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transforms=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py b/nipype/interfaces/ants/tests/test_auto_Atropos.py index 50fd85477d..9cb7d36844 100644 --- a/nipype/interfaces/ants/tests/test_auto_Atropos.py +++ 
b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -16,7 +16,8 @@ def test_Atropos_inputs(): ), icm_use_synchronous_update=dict(argstr='%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialization=dict(argstr='%s', @@ -57,7 +58,8 @@ def test_Atropos_inputs(): ), prior_weighting=dict(), save_posteriors=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_mixture_model_proportions=dict(requires=['posterior_formulation'], ), diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py index 25a7f0b892..52ff9679ee 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -14,7 +14,8 @@ def test_AverageAffineTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_threads=dict(nohash=True, @@ -24,7 +25,8 @@ def test_AverageAffineTransform_inputs(): mandatory=True, position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transforms=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py index 47accd6758..4c49eb27c0 100644 --- a/nipype/interfaces/ants/tests/test_auto_AverageImages.py +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -13,7 +13,8 @@ def test_AverageImages_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), images=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_AverageImages_inputs(): position=1, usedefault=True, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AverageImages.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py index 86f652cbbe..3f1ade4bc3 100644 --- a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -26,7 +26,8 @@ def test_BrainExtraction_inputs(): ), extraction_registration_mask=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_suffix=dict(argstr='-s %s', @@ -40,7 +41,8 @@ def test_BrainExtraction_inputs(): out_prefix=dict(argstr='-o %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_floatingpoint_precision=dict(argstr='-q %d', ), diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py new file mode 100644 index 0000000000..c9465f6a8e --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ComposeMultiTransform + + +def test_ComposeMultiTransform_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + dimension=dict(argstr='%d', + position=0, + usedefault=True, + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + output_transform=dict(argstr='%s', + keep_ext=True, + name_source=['transforms'], + name_template='%s_composed', + position=1, + ), + reference_image=dict(argstr='%s', + position=2, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + 
transforms=dict(argstr='%s', + mandatory=True, + position=3, + ), + ) + inputs = ComposeMultiTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_ComposeMultiTransform_outputs(): + output_map = dict(output_transform=dict(), + ) + outputs = ComposeMultiTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py index 42d049990b..2afe734522 100644 --- a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -23,7 +23,8 @@ def test_ConvertScalarImageToRGB_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='%s', @@ -57,7 +58,8 @@ def test_ConvertScalarImageToRGB_inputs(): position=2, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ConvertScalarImageToRGB.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py index 5fe224b494..8ad0203370 100644 --- a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -29,7 +29,8 @@ def test_CorticalThickness_inputs(): ), extraction_registration_mask=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_suffix=dict(argstr='-s %s', @@ -61,7 +62,8 @@ def 
test_CorticalThickness_inputs(): t1_registration_template=dict(argstr='-t %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_floatingpoint_precision=dict(argstr='-j %d', ), diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index f7aafb27be..561853e79b 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -16,7 +16,8 @@ def test_CreateJacobianDeterminantImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), imageDimension=dict(argstr='%d', @@ -31,7 +32,8 @@ def test_CreateJacobianDeterminantImage_inputs(): mandatory=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useGeometric=dict(argstr='%d', position=4, diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py index 09340f631f..38f24644a9 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -15,7 +15,8 @@ def test_CreateTiledMosaic_inputs(): ), flip_slice=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='-i %s', @@ -38,7 +39,8 @@ def test_CreateTiledMosaic_inputs(): ), slices=dict(argstr='-s %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tile_geometry=dict(argstr='-t %s', ), diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py 
index 6c28016de6..7d120a2f2e 100644 --- a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -12,7 +12,8 @@ def test_DenoiseImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='-i %s', @@ -42,7 +43,8 @@ def test_DenoiseImage_inputs(): shrink_factor=dict(argstr='-s %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-v', ), diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py index f5d79bd851..c58c8abf20 100644 --- a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -17,7 +17,8 @@ def test_GenWarpFields_inputs(): ), force_proceed=dict(argstr='-f 1', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='-i %s', @@ -43,7 +44,8 @@ def test_GenWarpFields_inputs(): ), similarity_metric=dict(argstr='-s %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation_model=dict(argstr='-t %s', usedefault=True, diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py index cddfb487be..796c7a7e13 100644 --- a/nipype/interfaces/ants/tests/test_auto_JointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -26,7 +26,8 @@ def test_JointFusion_inputs(): ), exclusion_region=dict(argstr='-x %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), method=dict(argstr='-m %s', @@ -56,7 +57,8 @@ def test_JointFusion_inputs(): 
target_image=dict(argstr='-tg %s...', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warped_intensity_images=dict(argstr='-g %s...', mandatory=True, diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py new file mode 100644 index 0000000000..e94cd44b1d --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -0,0 +1,84 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import KellyKapowski + + +def test_KellyKapowski_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + convergence=dict(argstr='--convergence "%s"', + usedefault=True, + ), + cortical_thickness=dict(argstr='--output "%s"', + hash_files=False, + keep_extension=True, + name_source=['segmentation_image'], + name_template='%s_cortical_thickness', + ), + dimension=dict(argstr='--image-dimensionality %d', + usedefault=True, + ), + environ=dict(nohash=True, + usedefault=True, + ), + gradient_step=dict(argstr='--gradient-step %f', + usedefault=True, + ), + gray_matter_label=dict(usedefault=True, + ), + gray_matter_prob_image=dict(argstr='--gray-matter-probability-image "%s"', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + max_invert_displacement_field_iters=dict(argstr='--maximum-number-of-invert-displacement-field-iterations %d', + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + number_integration_points=dict(argstr='--number-of-integration-points %d', + ), + segmentation_image=dict(argstr='--segmentation-image "%s"', + mandatory=True, + ), + smoothing_variance=dict(argstr='--smoothing-variance %f', + ), + smoothing_velocity_field=dict(argstr='--smoothing-velocity-field-parameter %f', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + thickness_prior_estimate=dict(argstr='--thickness-prior-estimate 
%f', + usedefault=True, + ), + thickness_prior_image=dict(argstr='--thickness-prior-image "%s"', + ), + use_bspline_smoothing=dict(argstr='--use-bspline-smoothing 1', + ), + warped_white_matter=dict(hash_files=False, + keep_extension=True, + name_source=['segmentation_image'], + name_template='%s_warped_white_matter', + ), + white_matter_label=dict(usedefault=True, + ), + white_matter_prob_image=dict(argstr='--white-matter-probability-image "%s"', + ), + ) + inputs = KellyKapowski.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_KellyKapowski_outputs(): + output_map = dict(cortical_thickness=dict(), + warped_white_matter=dict(), + ) + outputs = KellyKapowski.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py index 71b0483f92..83b6b37d83 100644 --- a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -12,7 +12,8 @@ def test_LaplacianThickness_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_gm=dict(argstr='%s', @@ -45,7 +46,8 @@ def test_LaplacianThickness_inputs(): sulcus_prior=dict(argstr='use-sulcus-prior', position=7, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = LaplacianThickness.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py new file mode 100644 index 0000000000..fbed924b24 --- /dev/null +++ 
b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import MeasureImageSimilarity + + +def test_MeasureImageSimilarity_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + dimension=dict(argstr='--dimensionality %d', + position=1, + ), + environ=dict(nohash=True, + usedefault=True, + ), + fixed_image=dict(mandatory=True, + ), + fixed_image_mask=dict(argstr='%s', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + metric=dict(argstr='%s', + mandatory=True, + ), + metric_weight=dict(requires=['metric'], + usedefault=True, + ), + moving_image=dict(mandatory=True, + ), + moving_image_mask=dict(requires=['fixed_image_mask'], + ), + num_threads=dict(nohash=True, + usedefault=True, + ), + radius_or_number_of_bins=dict(mandatory=True, + requires=['metric'], + ), + sampling_percentage=dict(mandatory=True, + requires=['metric'], + ), + sampling_strategy=dict(requires=['metric'], + usedefault=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = MeasureImageSimilarity.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_MeasureImageSimilarity_outputs(): + output_map = dict(similarity=dict(), + ) + outputs = MeasureImageSimilarity.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py index 2175db201d..5057cb4ddf 100644 --- a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -18,7 +18,8 @@ def 
test_MultiplyImages_inputs(): mandatory=True, position=1, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_threads=dict(nohash=True, @@ -32,7 +33,8 @@ def test_MultiplyImages_inputs(): mandatory=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MultiplyImages.input_spec() diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py index b863f888d9..044b16ce50 100644 --- a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -23,7 +23,8 @@ def test_N4BiasFieldCorrection_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='--input-image %s', @@ -46,7 +47,8 @@ def test_N4BiasFieldCorrection_inputs(): ), shrink_factor=dict(argstr='--shrink-factor %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), weight_image=dict(argstr='--weight-image %s', ), diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py index dc95deea19..ea9bc02e79 100644 --- a/nipype/interfaces/ants/tests/test_auto_Registration.py +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -24,13 +24,20 @@ def test_Registration_inputs(): fixed_image=dict(mandatory=True, ), fixed_image_mask=dict(argstr='%s', + max_ver='2.1.0', + xor=['fixed_image_masks'], + ), + fixed_image_masks=dict(min_ver='2.2.0', + xor=['fixed_image_mask'], ), float=dict(argstr='--float %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initial_moving_transform=dict(argstr='%s', + 
exists=True, xor=['initial_moving_transform_com'], ), initial_moving_transform_com=dict(argstr='%s', @@ -58,7 +65,12 @@ def test_Registration_inputs(): metric_weight_stage_trait=dict(), moving_image=dict(mandatory=True, ), - moving_image_mask=dict(requires=['fixed_image_mask'], + moving_image_mask=dict(max_ver='2.1.0', + requires=['fixed_image_mask'], + xor=['moving_image_masks'], + ), + moving_image_masks=dict(min_ver='2.2.0', + xor=['moving_image_mask'], ), num_threads=dict(nohash=True, usedefault=True, @@ -96,7 +108,8 @@ def test_Registration_inputs(): ), smoothing_sigmas=dict(mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform_parameters=dict(), transforms=dict(argstr='%s', @@ -126,10 +139,12 @@ def test_Registration_inputs(): def test_Registration_outputs(): output_map = dict(composite_transform=dict(), + elapsed_time=dict(), forward_invert_flags=dict(), forward_transforms=dict(), inverse_composite_transform=dict(), inverse_warped_image=dict(), + metric_value=dict(), reverse_invert_flags=dict(), reverse_transforms=dict(), save_state=dict(), diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py index e016aac163..9678152ba4 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -13,7 +13,8 @@ def test_WarpImageMultiTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='%s', @@ -39,7 +40,8 @@ def test_WarpImageMultiTransform_inputs(): ), reslice_by_header=dict(argstr='--reslice-by-header', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tightest_box=dict(argstr='--tightest-bounding-box', 
xor=['reference_image'], diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py index 79fbf89302..f7f310b5f8 100644 --- a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -13,7 +13,8 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_WarpTimeSeriesImageMultiTransform_inputs(): ), reslice_by_header=dict(argstr='--reslice-by-header', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tightest_box=dict(argstr='--tightest-bounding-box', xor=['reference_image'], diff --git a/nipype/interfaces/ants/tests/test_auto_antsBrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_antsBrainExtraction.py index 230176c856..05e12b9a00 100644 --- a/nipype/interfaces/ants/tests/test_auto_antsBrainExtraction.py +++ b/nipype/interfaces/ants/tests/test_auto_antsBrainExtraction.py @@ -26,7 +26,8 @@ def test_antsBrainExtraction_inputs(): ), extraction_registration_mask=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_suffix=dict(argstr='-s %s', @@ -40,7 +41,8 @@ def test_antsBrainExtraction_inputs(): out_prefix=dict(argstr='-o %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_floatingpoint_precision=dict(argstr='-q %d', ), diff --git a/nipype/interfaces/ants/tests/test_auto_antsCorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_antsCorticalThickness.py index 02f2d46c59..0e7a7ca4ba 100644 --- 
a/nipype/interfaces/ants/tests/test_auto_antsCorticalThickness.py +++ b/nipype/interfaces/ants/tests/test_auto_antsCorticalThickness.py @@ -29,7 +29,8 @@ def test_antsCorticalThickness_inputs(): ), extraction_registration_mask=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_suffix=dict(argstr='-s %s', @@ -61,7 +62,8 @@ def test_antsCorticalThickness_inputs(): t1_registration_template=dict(argstr='-t %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_floatingpoint_precision=dict(argstr='-j %d', ), diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py index 0a9646ae2c..7dfd0f6539 100644 --- a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -17,7 +17,8 @@ def test_antsIntroduction_inputs(): ), force_proceed=dict(argstr='-f 1', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='-i %s', @@ -43,7 +44,8 @@ def test_antsIntroduction_inputs(): ), similarity_metric=dict(argstr='-s %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation_model=dict(argstr='-t %s', usedefault=True, diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py index 9232bb32b1..9b5005e840 100644 --- a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py +++ b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -17,7 +17,8 @@ def test_buildtemplateparallel_inputs(): ), gradient_step_size=dict(argstr='-g %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, 
usedefault=True, ), in_files=dict(argstr='%s', @@ -46,7 +47,8 @@ def test_buildtemplateparallel_inputs(): ), similarity_metric=dict(argstr='-s %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation_model=dict(argstr='-t %s', usedefault=True, diff --git a/nipype/interfaces/ants/tests/test_registration.py b/nipype/interfaces/ants/tests/test_registration.py new file mode 100644 index 0000000000..745b825c65 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_registration.py @@ -0,0 +1,22 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import unicode_literals +from nipype.interfaces.ants import registration +import os +import pytest + + +def test_ants_mand(tmpdir): + tmpdir.chdir() + filepath = os.path.dirname(os.path.realpath(__file__)) + datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + + ants = registration.ANTS() + ants.inputs.transformation_model = "SyN" + ants.inputs.moving_image = [os.path.join(datadir, 'resting.nii')] + ants.inputs.fixed_image = [os.path.join(datadir, 'T1.nii')] + ants.inputs.metric = ['MI'] + + with pytest.raises(ValueError) as er: + ants.run() + assert "ANTS requires a value for input 'radius'" in str(er.value) diff --git a/nipype/interfaces/ants/tests/test_resampling.py b/nipype/interfaces/ants/tests/test_resampling.py new file mode 100644 index 0000000000..509ebfe844 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_resampling.py @@ -0,0 +1,83 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from nipype.interfaces.ants import WarpImageMultiTransform, WarpTimeSeriesImageMultiTransform +import os +import pytest + + +@pytest.fixture() +def change_dir(request): + orig_dir = os.getcwd() + filepath = os.path.dirname( os.path.realpath( __file__ ) ) + datadir = 
os.path.realpath(os.path.join(filepath, '../../../testing/data')) + os.chdir(datadir) + + def move2orig(): + os.chdir(orig_dir) + + request.addfinalizer(move2orig) + + +@pytest.fixture() +def create_wimt(): + wimt = WarpImageMultiTransform() + wimt.inputs.input_image = 'diffusion_weighted.nii' + wimt.inputs.reference_image = 'functional.nii' + wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', \ + 'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt'] + return wimt + +def test_WarpImageMultiTransform(change_dir, create_wimt): + wimt = create_wimt + assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + + +def test_WarpImageMultiTransform_invaffine_1(change_dir, create_wimt): + wimt = create_wimt + wimt.inputs.invert_affine = [1] + assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +-i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + + +def test_WarpImageMultiTransform_invaffine_2(change_dir, create_wimt): + wimt = create_wimt + wimt.inputs.invert_affine = [2] + assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz -i dwi2anat_coreg_Affine.txt' + + +def test_WarpImageMultiTransform_invaffine_wrong(change_dir, create_wimt): + wimt = create_wimt + wimt.inputs.invert_affine = [3] + with pytest.raises(Exception): + assert wimt.cmdline + + +@pytest.fixture() +def create_wtsimt(): + wtsimt = WarpTimeSeriesImageMultiTransform() + wtsimt.inputs.input_image = 'resting.nii' + wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' + wtsimt.inputs.transformation_series = 
['ants_Warp.nii.gz','ants_Affine.txt'] + return wtsimt + + +def test_WarpTimeSeriesImageMultiTransform(change_dir, create_wtsimt): + wtsimt = create_wtsimt + assert wtsimt.cmdline == 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ +-R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' + + +def test_WarpTimeSeriesImageMultiTransform_invaffine(change_dir, create_wtsimt): + wtsimt = create_wtsimt + wtsimt.inputs.invert_affine = [1] + assert wtsimt.cmdline == 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ +-R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt' + + +def test_WarpTimeSeriesImageMultiTransform_invaffine_wrong(change_dir, create_wtsimt): + wtsimt = create_wtsimt + wtsimt.inputs.invert_affine = [0] + with pytest.raises(Exception): + wtsimt.cmdline diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index d356f727dd..0ba918ee27 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -11,9 +11,7 @@ import os -from ...utils.filemanip import split_filename -from ..base import (TraitedSpec, File, traits, isdefined, InputMultiPath, - CommandLine, CommandLineInputSpec) +from ..base import TraitedSpec, File, traits, InputMultiPath from .base import ANTSCommand, ANTSCommandInputSpec @@ -39,7 +37,7 @@ class AverageAffineTransform(ANTSCommand): >>> avg.inputs.dimension = 3 >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat'] >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' - >>> avg.cmdline # doctest: +ALLOW_UNICODE + >>> avg.cmdline 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' """ _cmd = 'AverageAffineTransform' @@ -59,12 +57,16 @@ def _list_outputs(self): class AverageImagesInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', mandatory=True, position=0, desc='image dimension (2 or 3)') - output_average_image = File("average.nii", argstr='%s', position=1, desc='the name of the 
resulting image.', - usedefault=True, hash_files=False) - normalize = traits.Bool(argstr="%d", mandatory=True, position=2, desc='Normalize: if true, the 2nd image' + - 'is divided by its mean. This will select the largest image to average into.') - images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, position=3, - desc='image to apply transformation to (generally a coregistered functional)') + output_average_image = File( + "average.nii", argstr='%s', position=1, usedefault=True, hash_files=False, + desc='the name of the resulting image.') + normalize = traits.Bool( + argstr="%d", mandatory=True, position=2, + desc='Normalize: if true, the 2nd image is divided by its mean. ' + 'This will select the largest image to average into.') + images = InputMultiPath( + File(exists=True), argstr='%s', mandatory=True, position=3, + desc='image to apply transformation to (generally a coregistered functional)') class AverageImagesOutputSpec(TraitedSpec): @@ -81,7 +83,7 @@ class AverageImages(ANTSCommand): >>> avg.inputs.output_average_image = "average.nii.gz" >>> avg.inputs.normalize = True >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii'] - >>> avg.cmdline # doctest: +ALLOW_UNICODE + >>> avg.cmdline 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' """ _cmd = 'AverageImages' @@ -101,9 +103,11 @@ def _list_outputs(self): class MultiplyImagesInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, position=0, desc='image dimension (2 or 3)') - first_input = File(argstr='%s', exists=True, mandatory=True, position=1, desc='image 1') - second_input = traits.Either(File(exists=True), traits.Float, argstr='%s', mandatory=True, position=2, - desc='image 2 or multiplication weight') + first_input = File(argstr='%s', exists=True, + mandatory=True, position=1, desc='image 1') + second_input = traits.Either( + File(exists=True), traits.Float, argstr='%s', mandatory=True, position=2, + desc='image 2 or 
multiplication weight') output_product_image = File(argstr='%s', mandatory=True, position=3, desc='Outputfname.nii.gz: the name of the resulting image.') @@ -122,7 +126,7 @@ class MultiplyImages(ANTSCommand): >>> test.inputs.first_input = 'moving2.nii' >>> test.inputs.second_input = 0.25 >>> test.inputs.output_product_image = "out.nii" - >>> test.cmdline # doctest: +ALLOW_UNICODE + >>> test.cmdline 'MultiplyImages 3 moving2.nii 0.25 out.nii' """ _cmd = 'MultiplyImages' @@ -138,22 +142,25 @@ def _list_outputs(self): self.inputs.output_product_image) return outputs + class CreateJacobianDeterminantImageInputSpec(ANTSCommandInputSpec): imageDimension = traits.Enum(3, 2, argstr='%d', usedefault=False, mandatory=True, - position=0, desc='image dimension (2 or 3)') + position=0, desc='image dimension (2 or 3)') deformationField = File(argstr='%s', exists=True, mandatory=True, - position=1, desc='deformation transformation file') + position=1, desc='deformation transformation file') outputImage = File(argstr='%s', mandatory=True, - position=2, - desc='output filename') + position=2, + desc='output filename') doLogJacobian = traits.Enum(0, 1, argstr='%d', position=3, - desc='return the log jacobian') + desc='return the log jacobian') useGeometric = traits.Enum(0, 1, argstr='%d', position=4, - desc='return the geometric jacobian') + desc='return the geometric jacobian') + class CreateJacobianDeterminantImageOutputSpec(TraitedSpec): jacobian_image = File(exists=True, desc='jacobian image') + class CreateJacobianDeterminantImage(ANTSCommand): """ Examples @@ -163,7 +170,7 @@ class CreateJacobianDeterminantImage(ANTSCommand): >>> jacobian.inputs.imageDimension = 3 >>> jacobian.inputs.deformationField = 'ants_Warp.nii.gz' >>> jacobian.inputs.outputImage = 'out_name.nii.gz' - >>> jacobian.cmdline # doctest: +ALLOW_UNICODE + >>> jacobian.cmdline 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz' """ @@ -203,6 +210,7 @@ class 
AffineInitializerInputSpec(ANTSCommandInputSpec): desc=' determines if a local optimization is run at each search point for the set ' 'number of iterations') + class AffineInitializerOutputSpec(TraitedSpec): out_file = File(desc='output transform file') @@ -215,7 +223,7 @@ class AffineInitializer(ANTSCommand): >>> init = AffineInitializer() >>> init.inputs.fixed_image = 'fixed1.nii' >>> init.inputs.moving_image = 'moving1.nii' - >>> init.cmdline # doctest: +ALLOW_UNICODE + >>> init.cmdline 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10' """ @@ -225,3 +233,38 @@ class AffineInitializer(ANTSCommand): def _list_outputs(self): return {'out_file': os.path.abspath(self.inputs.out_file)} + + +class ComposeMultiTransformInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum(3, 2, argstr='%d', usedefault=True, position=0, + desc='image dimension (2 or 3)') + output_transform = File(argstr='%s', position=1, name_source=['transforms'], + name_template='%s_composed', keep_ext=True, + desc='the name of the resulting transform.') + reference_image = File(argstr='%s', position=2, + desc='Reference image (only necessary when output is warpfield)') + transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, + position=3, desc='transforms to average') + + +class ComposeMultiTransformOutputSpec(TraitedSpec): + output_transform = File(exists=True, desc='Composed transform file') + + +class ComposeMultiTransform(ANTSCommand): + """ + Take a set of transformations and convert them to a single transformation matrix/warpfield. 
+ + Examples + -------- + >>> from nipype.interfaces.ants import ComposeMultiTransform + >>> compose_transform = ComposeMultiTransform() + >>> compose_transform.inputs.dimension = 3 + >>> compose_transform.inputs.transforms = ['struct_to_template.mat', 'func_to_struct.mat'] + >>> compose_transform.cmdline + 'ComposeMultiTransform 3 struct_to_template_composed struct_to_template.mat func_to_struct.mat' + + """ + _cmd = 'ComposeMultiTransform' + input_spec = ComposeMultiTransformInputSpec + output_spec = ComposeMultiTransformOutputSpec diff --git a/nipype/interfaces/ants/visualization.py b/nipype/interfaces/ants/visualization.py index ef51914e6c..07cf8af086 100644 --- a/nipype/interfaces/ants/visualization.py +++ b/nipype/interfaces/ants/visualization.py @@ -57,7 +57,7 @@ class ConvertScalarImageToRGB(ANTSCommand): >>> converter.inputs.colormap = 'jet' >>> converter.inputs.minimum_input = 0 >>> converter.inputs.maximum_input = 6 - >>> converter.cmdline # doctest: +ALLOW_UNICODE + >>> converter.cmdline 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255' """ _cmd = 'ConvertScalarImageToRGB' @@ -143,7 +143,7 @@ class CreateTiledMosaic(ANTSCommand): >>> mosaic_slicer.inputs.direction = 2 >>> mosaic_slicer.inputs.pad_or_crop = '[ -15x -50 , -15x -30 ,0]' >>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]' - >>> mosaic_slicer.cmdline # doctest: +ALLOW_UNICODE + >>> mosaic_slicer.cmdline 'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] \ -r rgb.nii.gz -s [2 ,100 ,160]' """ diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py deleted file mode 100644 index 8d7c53cde1..0000000000 --- a/nipype/interfaces/base.py +++ /dev/null @@ -1,2130 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Package contains interfaces for using existing functionality in other packages - -Exaples FSL, 
matlab/SPM , afni - -Requires Packages to be installed -""" -from __future__ import print_function, division, unicode_literals, absolute_import -from future import standard_library -standard_library.install_aliases() -from builtins import range, object, open, str, bytes - -from configparser import NoOptionError -from copy import deepcopy -import datetime -from datetime import datetime as dt -import errno -import locale -import os -import re -import platform -from string import Template -import select -import subprocess -import sys -import time -from textwrap import wrap -from warnings import warn -import simplejson as json -from dateutil.parser import parse as parseutc -from packaging.version import Version - -from .. import config, logging, LooseVersion, __version__ -from ..utils.provenance import write_provenance -from ..utils.misc import is_container, trim, str2bool -from ..utils.filemanip import (md5, hash_infile, FileNotFoundError, hash_timestamp, - split_filename, to_str) -from .traits_extension import ( - traits, Undefined, TraitDictObject, TraitListObject, TraitError, isdefined, - File, Directory, DictStrStr, has_metadata, ImageFile) -from ..external.due import due - -runtime_profile = str2bool(config.get('execution', 'profile_runtime')) -nipype_version = Version(__version__) -iflogger = logging.getLogger('interface') - -FLOAT_FORMAT = '{:.10f}'.format -PY35 = sys.version_info >= (3, 5) -PY3 = sys.version_info[0] > 2 - -if runtime_profile: - try: - import psutil - except ImportError as exc: - iflogger.info('Unable to import packages needed for runtime profiling. '\ - 'Turning off runtime profiler. 
Reason: %s' % exc) - runtime_profile = False - -__docformat__ = 'restructuredtext' - - -class Str(traits.Unicode): - pass - -traits.Str = Str - -class NipypeInterfaceError(Exception): - def __init__(self, value): - self.value = value - - def __str__(self): - return '{}'.format(self.value) - -def _exists_in_path(cmd, environ): - """ - Based on a code snippet from - http://orip.org/2009/08/python-checking-if-executable-exists-in.html - """ - - if 'PATH' in environ: - input_environ = environ.get("PATH") - else: - input_environ = os.environ.get("PATH", "") - extensions = os.environ.get("PATHEXT", "").split(os.pathsep) - for directory in input_environ.split(os.pathsep): - base = os.path.join(directory, cmd) - options = [base] + [(base + ext) for ext in extensions] - for filename in options: - if os.path.exists(filename): - return True, filename - return False, None - - -def load_template(name): - """Load a template from the script_templates directory - - Parameters - ---------- - name : str - The name of the file to load - - Returns - ------- - template : string.Template - - """ - - full_fname = os.path.join(os.path.dirname(__file__), - 'script_templates', name) - template_file = open(full_fname) - template = Template(template_file.read()) - template_file.close() - return template - - -class Bunch(object): - """Dictionary-like class that provides attribute-style access to it's items. - - A `Bunch` is a simple container that stores it's items as class - attributes. Internally all items are stored in a dictionary and - the class exposes several of the dictionary methods. 
- - Examples - -------- - >>> from nipype.interfaces.base import Bunch - >>> inputs = Bunch(infile='subj.nii', fwhm=6.0, register_to_mean=True) - >>> inputs # doctest: +ALLOW_UNICODE - Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=True) - >>> inputs.register_to_mean = False - >>> inputs # doctest: +ALLOW_UNICODE - Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=False) - - - Notes - ----- - The Bunch pattern came from the Python Cookbook: - - .. [1] A. Martelli, D. Hudgeon, "Collecting a Bunch of Named - Items", Python Cookbook, 2nd Ed, Chapter 4.18, 2005. - - """ - - - def __init__(self, *args, **kwargs): - self.__dict__.update(*args, **kwargs) - - def update(self, *args, **kwargs): - """update existing attribute, or create new attribute - - Note: update is very much like HasTraits.set""" - self.__dict__.update(*args, **kwargs) - - def items(self): - """iterates over bunch attributes as key, value pairs""" - return list(self.__dict__.items()) - - def iteritems(self): - """iterates over bunch attributes as key, value pairs""" - warn('iteritems is deprecated, use items instead') - return list(self.items()) - - def get(self, *args): - """Support dictionary get() functionality - """ - return self.__dict__.get(*args) - - def set(self, **kwargs): - """Support dictionary get() functionality - """ - return self.__dict__.update(**kwargs) - - def dictcopy(self): - """returns a deep copy of existing Bunch as a dictionary""" - return deepcopy(self.__dict__) - - def __repr__(self): - """representation of the sorted Bunch as a string - - Currently, this string representation of the `inputs` Bunch of - interfaces is hashed to determine if the process' dirty-bit - needs setting or not. Till that mechanism changes, only alter - this after careful consideration. 
- """ - outstr = ['Bunch('] - first = True - for k, v in sorted(self.items()): - if not first: - outstr.append(', ') - if isinstance(v, dict): - pairs = [] - for key, value in sorted(v.items()): - pairs.append("'%s': %s" % (key, value)) - v = '{' + ', '.join(pairs) + '}' - outstr.append('%s=%s' % (k, v)) - else: - outstr.append('%s=%r' % (k, v)) - first = False - outstr.append(')') - return ''.join(outstr) - - def _hash_infile(self, adict, key): - # Inject file hashes into adict[key] - stuff = adict[key] - if not is_container(stuff): - stuff = [stuff] - file_list = [] - for afile in stuff: - if os.path.isfile(afile): - md5obj = md5() - with open(afile, 'rb') as fp: - while True: - data = fp.read(8192) - if not data: - break - md5obj.update(data) - md5hex = md5obj.hexdigest() - else: - md5hex = None - file_list.append((afile, md5hex)) - return file_list - - def _get_bunch_hash(self): - """Return a dictionary of our items with hashes for each file. - - Searches through dictionary items and if an item is a file, it - calculates the md5 hash of the file contents and stores the - file name and hash value as the new key value. - - However, the overall bunch hash is calculated only on the hash - value of a file. The path and name of the file are not used in - the overall hash calculation. - - Returns - ------- - dict_withhash : dict - Copy of our dictionary with the new file hashes included - with each file. - hashvalue : str - The md5 hash value of the `dict_withhash` - - """ - - infile_list = [] - for key, val in list(self.items()): - if is_container(val): - # XXX - SG this probably doesn't catch numpy arrays - # containing embedded file names either. 
- if isinstance(val, dict): - # XXX - SG should traverse dicts, but ignoring for now - item = None - else: - if len(val) == 0: - raise AttributeError('%s attribute is empty' % key) - item = val[0] - else: - item = val - try: - if isinstance(item, str) and os.path.isfile(item): - infile_list.append(key) - except TypeError: - # `item` is not a file or string. - continue - dict_withhash = self.dictcopy() - dict_nofilename = self.dictcopy() - for item in infile_list: - dict_withhash[item] = self._hash_infile(dict_withhash, item) - dict_nofilename[item] = [val[1] for val in dict_withhash[item]] - # Sort the items of the dictionary, before hashing the string - # representation so we get a predictable order of the - # dictionary. - sorted_dict = to_str(sorted(dict_nofilename.items())) - return dict_withhash, md5(sorted_dict.encode()).hexdigest() - - def __pretty__(self, p, cycle): - """Support for the pretty module - - pretty is included in ipython.externals for ipython > 0.10""" - if cycle: - p.text('Bunch(...)') - else: - p.begin_group(6, 'Bunch(') - first = True - for k, v in sorted(self.items()): - if not first: - p.text(',') - p.breakable() - p.text(k + '=') - p.pretty(v) - first = False - p.end_group(6, ')') - - -class InterfaceResult(object): - """Object that contains the results of running a particular Interface. - - Attributes - ---------- - version : version of this Interface result object (a readonly property) - interface : class type - A copy of the `Interface` class that was run to generate this result. - inputs : a traits free representation of the inputs - outputs : Bunch - An `Interface` specific Bunch that contains all possible files - that are generated by the interface. The `outputs` are used - as the `inputs` to another node when interfaces are used in - the pipeline. - runtime : Bunch - - Contains attributes that describe the runtime environment when - the `Interface` was run. 
Contains the attributes: - - * cmdline : The command line string that was executed - * cwd : The directory the ``cmdline`` was executed in. - * stdout : The output of running the ``cmdline``. - * stderr : Any error messages output from running ``cmdline``. - * returncode : The code returned from running the ``cmdline``. - - """ - - def __init__(self, interface, runtime, inputs=None, outputs=None, - provenance=None): - self._version = 2.0 - self.interface = interface - self.runtime = runtime - self.inputs = inputs - self.outputs = outputs - self.provenance = provenance - - @property - def version(self): - return self._version - - -class BaseTraitedSpec(traits.HasTraits): - """Provide a few methods necessary to support nipype interface api - - The inputs attribute of interfaces call certain methods that are not - available in traits.HasTraits. These are provided here. - - new metadata: - - * usedefault : set this to True if the default value of the trait should be - used. Unless this is set, the attributes are set to traits.Undefined - - new attribute: - - * get_hashval : returns a tuple containing the state of the trait as a dict - and hashvalue corresponding to dict. - - XXX Reconsider this in the long run, but it seems like the best - solution to move forward on the refactoring. - """ - package_version = nipype_version - - def __init__(self, **kwargs): - """ Initialize handlers and inputs""" - # NOTE: In python 2.6, object.__init__ no longer accepts input - # arguments. HasTraits does not define an __init__ and - # therefore these args were being ignored. 
- # super(TraitedSpec, self).__init__(*args, **kwargs) - super(BaseTraitedSpec, self).__init__(**kwargs) - traits.push_exception_handler(reraise_exceptions=True) - undefined_traits = {} - for trait in self.copyable_trait_names(): - if not self.traits()[trait].usedefault: - undefined_traits[trait] = Undefined - self.trait_set(trait_change_notify=False, **undefined_traits) - self._generate_handlers() - self.trait_set(**kwargs) - - def items(self): - """ Name, trait generator for user modifiable traits - """ - for name in sorted(self.copyable_trait_names()): - yield name, self.traits()[name] - - def __repr__(self): - """ Return a well-formatted representation of the traits """ - outstr = [] - for name, value in sorted(self.trait_get().items()): - outstr.append('%s = %s' % (name, value)) - return '\n{}\n'.format('\n'.join(outstr)) - - def _generate_handlers(self): - """Find all traits with the 'xor' metadata and attach an event - handler to them. - """ - has_xor = dict(xor=lambda t: t is not None) - xors = self.trait_names(**has_xor) - for elem in xors: - self.on_trait_change(self._xor_warn, elem) - has_deprecation = dict(deprecated=lambda t: t is not None) - deprecated = self.trait_names(**has_deprecation) - for elem in deprecated: - self.on_trait_change(self._deprecated_warn, elem) - - def _xor_warn(self, obj, name, old, new): - """ Generates warnings for xor traits - """ - if isdefined(new): - trait_spec = self.traits()[name] - # for each xor, set to default_value - for trait_name in trait_spec.xor: - if trait_name == name: - # skip ourself - continue - if isdefined(getattr(self, trait_name)): - self.trait_set(trait_change_notify=False, - **{'%s' % name: Undefined}) - msg = ('Input "%s" is mutually exclusive with input "%s", ' - 'which is already set') % (name, trait_name) - raise IOError(msg) - - def _requires_warn(self, obj, name, old, new): - """Part of the xor behavior - """ - if isdefined(new): - trait_spec = self.traits()[name] - msg = None - for trait_name in 
trait_spec.requires: - if not isdefined(getattr(self, trait_name)): - if not msg: - msg = 'Input %s requires inputs: %s' \ - % (name, ', '.join(trait_spec.requires)) - if msg: # only one requires warning at a time. - warn(msg) - - def _deprecated_warn(self, obj, name, old, new): - """Checks if a user assigns a value to a deprecated trait - """ - if isdefined(new): - trait_spec = self.traits()[name] - msg1 = ('Input %s in interface %s is deprecated.' % - (name, - self.__class__.__name__.split('InputSpec')[0])) - msg2 = ('Will be removed or raise an error as of release %s' - % trait_spec.deprecated) - if trait_spec.new_name: - if trait_spec.new_name not in self.copyable_trait_names(): - raise TraitError(msg1 + ' Replacement trait %s not found' % - trait_spec.new_name) - msg3 = 'It has been replaced by %s.' % trait_spec.new_name - else: - msg3 = '' - msg = ' '.join((msg1, msg2, msg3)) - if Version(str(trait_spec.deprecated)) < self.package_version: - raise TraitError(msg) - else: - if trait_spec.new_name: - msg += 'Unsetting old value %s; setting new value %s.' 
% ( - name, trait_spec.new_name) - warn(msg) - if trait_spec.new_name: - self.trait_set(trait_change_notify=False, - **{'%s' % name: Undefined, - '%s' % trait_spec.new_name: new}) - - def _hash_infile(self, adict, key): - """ Inject file hashes into adict[key]""" - stuff = adict[key] - if not is_container(stuff): - stuff = [stuff] - file_list = [] - for afile in stuff: - if is_container(afile): - hashlist = self._hash_infile({'infiles': afile}, 'infiles') - hash = [val[1] for val in hashlist] - else: - if config.get('execution', - 'hash_method').lower() == 'timestamp': - hash = hash_timestamp(afile) - elif config.get('execution', - 'hash_method').lower() == 'content': - hash = hash_infile(afile) - else: - raise Exception("Unknown hash method: %s" % - config.get('execution', 'hash_method')) - file_list.append((afile, hash)) - return file_list - - def get(self, **kwargs): - """ Returns traited class as a dict - - Augments the trait get function to return a dictionary without - notification handles - """ - out = super(BaseTraitedSpec, self).get(**kwargs) - out = self._clean_container(out, Undefined) - return out - - def get_traitsfree(self, **kwargs): - """ Returns traited class as a dict - - Augments the trait get function to return a dictionary without - any traits. The dictionary does not contain any attributes that - were Undefined - """ - out = super(BaseTraitedSpec, self).get(**kwargs) - out = self._clean_container(out, skipundefined=True) - return out - - def _clean_container(self, object, undefinedval=None, skipundefined=False): - """Convert a traited obejct into a pure python representation. 
- """ - if isinstance(object, TraitDictObject) or isinstance(object, dict): - out = {} - for key, val in list(object.items()): - if isdefined(val): - out[key] = self._clean_container(val, undefinedval) - else: - if not skipundefined: - out[key] = undefinedval - elif (isinstance(object, TraitListObject) or - isinstance(object, list) or isinstance(object, tuple)): - out = [] - for val in object: - if isdefined(val): - out.append(self._clean_container(val, undefinedval)) - else: - if not skipundefined: - out.append(undefinedval) - else: - out.append(None) - if isinstance(object, tuple): - out = tuple(out) - else: - if isdefined(object): - out = object - else: - if not skipundefined: - out = undefinedval - return out - - def has_metadata(self, name, metadata, value=None, recursive=True): - """ - Return has_metadata for the requested trait name in this - interface - """ - return has_metadata(self.trait(name).trait_type, metadata, value, - recursive) - - def get_hashval(self, hash_method=None): - """Return a dictionary of our items with hashes for each file. - - Searches through dictionary items and if an item is a file, it - calculates the md5 hash of the file contents and stores the - file name and hash value as the new key value. - - However, the overall bunch hash is calculated only on the hash - value of a file. The path and name of the file are not used in - the overall hash calculation. - - Returns - ------- - dict_withhash : dict - Copy of our dictionary with the new file hashes included - with each file. 
- hashvalue : str - The md5 hash value of the traited spec - - """ - - dict_withhash = [] - dict_nofilename = [] - for name, val in sorted(self.get().items()): - if not isdefined(val) or self.has_metadata(name, "nohash", True): - # skip undefined traits and traits with nohash=True - continue - - hash_files = (not self.has_metadata(name, "hash_files", False) and not - self.has_metadata(name, "name_source")) - dict_nofilename.append((name, - self._get_sorteddict(val, hash_method=hash_method, - hash_files=hash_files))) - dict_withhash.append((name, - self._get_sorteddict(val, True, hash_method=hash_method, - hash_files=hash_files))) - return dict_withhash, md5(to_str(dict_nofilename).encode()).hexdigest() - - - def _get_sorteddict(self, objekt, dictwithhash=False, hash_method=None, - hash_files=True): - if isinstance(objekt, dict): - out = [] - for key, val in sorted(objekt.items()): - if isdefined(val): - out.append((key, - self._get_sorteddict(val, dictwithhash, - hash_method=hash_method, - hash_files=hash_files))) - elif isinstance(objekt, (list, tuple)): - out = [] - for val in objekt: - if isdefined(val): - out.append(self._get_sorteddict(val, dictwithhash, - hash_method=hash_method, - hash_files=hash_files)) - if isinstance(objekt, tuple): - out = tuple(out) - else: - if isdefined(objekt): - if (hash_files and isinstance(objekt, (str, bytes)) and - os.path.isfile(objekt)): - if hash_method is None: - hash_method = config.get('execution', 'hash_method') - - if hash_method.lower() == 'timestamp': - hash = hash_timestamp(objekt) - elif hash_method.lower() == 'content': - hash = hash_infile(objekt) - else: - raise Exception("Unknown hash method: %s" % hash_method) - if dictwithhash: - out = (objekt, hash) - else: - out = hash - elif isinstance(objekt, float): - out = FLOAT_FORMAT(objekt) - else: - out = objekt - return out - - -class DynamicTraitedSpec(BaseTraitedSpec): - """ A subclass to handle dynamic traits - - This class is a workaround for add_traits and 
clone_traits not - functioning well together. - """ - - def __deepcopy__(self, memo): - """ bug in deepcopy for HasTraits results in weird cloning behavior for - added traits - """ - id_self = id(self) - if id_self in memo: - return memo[id_self] - dup_dict = deepcopy(self.get(), memo) - # access all keys - for key in self.copyable_trait_names(): - if key in self.__dict__.keys(): - _ = getattr(self, key) - # clone once - dup = self.clone_traits(memo=memo) - for key in self.copyable_trait_names(): - try: - _ = getattr(dup, key) - except: - pass - # clone twice - dup = self.clone_traits(memo=memo) - dup.trait_set(**dup_dict) - return dup - - -class TraitedSpec(BaseTraitedSpec): - """ Create a subclass with strict traits. - - This is used in 90% of the cases. - """ - _ = traits.Disallow - - -class Interface(object): - """This is an abstract definition for Interface objects. - - It provides no functionality. It defines the necessary attributes - and methods all Interface objects should have. - - """ - - input_spec = None # A traited input specification - output_spec = None # A traited output specification - - # defines if the interface can reuse partial results after interruption - _can_resume = False - - @property - def can_resume(self): - return self._can_resume - - # should the interface be always run even if the inputs were not changed? 
- _always_run = False - - @property - def always_run(self): - return self._always_run - - def __init__(self, **inputs): - """Initialize command with given args and inputs.""" - raise NotImplementedError - - @classmethod - def help(cls): - """ Prints class help""" - raise NotImplementedError - - @classmethod - def _inputs_help(cls): - """ Prints inputs help""" - raise NotImplementedError - - @classmethod - def _outputs_help(cls): - """ Prints outputs help""" - raise NotImplementedError - - @classmethod - def _outputs(cls): - """ Initializes outputs""" - raise NotImplementedError - - @property - def version(self): - raise NotImplementedError - - def run(self): - """Execute the command.""" - raise NotImplementedError - - def aggregate_outputs(self, runtime=None, needed_outputs=None): - """Called to populate outputs""" - raise NotImplementedError - - def _list_outputs(self): - """ List expected outputs""" - raise NotImplementedError - - def _get_filecopy_info(self): - """ Provides information about file inputs to copy or link to cwd. - Necessary for pipeline operation - """ - raise NotImplementedError - - -class BaseInterfaceInputSpec(TraitedSpec): - ignore_exception = traits.Bool(False, desc="Print an error message instead \ -of throwing an exception in case the interface fails to run", usedefault=True, - nohash=True) - - -class BaseInterface(Interface): - """Implements common interface functionality. - - Implements - ---------- - - * Initializes inputs/outputs from input_spec/output_spec - * Provides help based on input_spec and output_spec - * Checks for mandatory inputs before running an interface - * Runs an interface and returns results - * Determines which inputs should be copied or linked to cwd - - This class does not implement aggregate_outputs, input_spec or - output_spec. These should be defined by derived classes. - - This class cannot be instantiated. 
- - """ - input_spec = BaseInterfaceInputSpec - _version = None - _additional_metadata = [] - _redirect_x = False - references_ = [] - - def __init__(self, from_file=None, **inputs): - if not self.input_spec: - raise Exception('No input_spec in class: %s' % - self.__class__.__name__) - - self.inputs = self.input_spec(**inputs) - self.estimated_memory_gb = 0.25 - self.num_threads = 1 - - if from_file is not None: - self.load_inputs_from_json(from_file, overwrite=True) - - for name, value in list(inputs.items()): - setattr(self.inputs, name, value) - - - @classmethod - def help(cls, returnhelp=False): - """ Prints class help - """ - - if cls.__doc__: - # docstring = cls.__doc__.split('\n') - # docstring = [trim(line, '') for line in docstring] - docstring = trim(cls.__doc__).split('\n') + [''] - else: - docstring = [''] - - allhelp = '\n'.join(docstring + cls._inputs_help() + [''] + - cls._outputs_help() + [''] + - cls._refs_help() + ['']) - if returnhelp: - return allhelp - else: - print(allhelp) - - @classmethod - def _refs_help(cls): - """ Prints interface references. 
- """ - if not cls.references_: - return [] - - helpstr = ['References::'] - - for r in cls.references_: - helpstr += ['{}'.format(r['entry'])] - - return helpstr - - @classmethod - def _get_trait_desc(self, inputs, name, spec): - desc = spec.desc - xor = spec.xor - requires = spec.requires - argstr = spec.argstr - - manhelpstr = ['\t%s' % name] - - type_info = spec.full_info(inputs, name, None) - - default = '' - if spec.usedefault: - default = ', nipype default value: %s' % str(spec.default_value()[1]) - line = "(%s%s)" % (type_info, default) - - manhelpstr = wrap(line, 70, - initial_indent=manhelpstr[0] + ': ', - subsequent_indent='\t\t ') - - if desc: - for line in desc.split('\n'): - line = re.sub("\s+", " ", line) - manhelpstr += wrap(line, 70, - initial_indent='\t\t', - subsequent_indent='\t\t') - - if argstr: - pos = spec.position - if pos is not None: - manhelpstr += wrap('flag: %s, position: %s' % (argstr, pos), 70, - initial_indent='\t\t', - subsequent_indent='\t\t') - else: - manhelpstr += wrap('flag: %s' % argstr, 70, - initial_indent='\t\t', - subsequent_indent='\t\t') - - if xor: - line = '%s' % ', '.join(xor) - manhelpstr += wrap(line, 70, - initial_indent='\t\tmutually_exclusive: ', - subsequent_indent='\t\t ') - - if requires: - others = [field for field in requires if field != name] - line = '%s' % ', '.join(others) - manhelpstr += wrap(line, 70, - initial_indent='\t\trequires: ', - subsequent_indent='\t\t ') - return manhelpstr - - @classmethod - def _inputs_help(cls): - """ Prints description for input parameters - """ - helpstr = ['Inputs::'] - - inputs = cls.input_spec() - if len(list(inputs.traits(transient=None).items())) == 0: - helpstr += ['', '\tNone'] - return helpstr - - manhelpstr = ['', '\t[Mandatory]'] - mandatory_items = inputs.traits(mandatory=True) - for name, spec in sorted(mandatory_items.items()): - manhelpstr += cls._get_trait_desc(inputs, name, spec) - - opthelpstr = ['', '\t[Optional]'] - for name, spec in 
sorted(inputs.traits(transient=None).items()): - if name in mandatory_items: - continue - opthelpstr += cls._get_trait_desc(inputs, name, spec) - - if manhelpstr: - helpstr += manhelpstr - if opthelpstr: - helpstr += opthelpstr - return helpstr - - @classmethod - def _outputs_help(cls): - """ Prints description for output parameters - """ - helpstr = ['Outputs::', ''] - if cls.output_spec: - outputs = cls.output_spec() #pylint: disable=E1102 - for name, spec in sorted(outputs.traits(transient=None).items()): - helpstr += cls._get_trait_desc(outputs, name, spec) - if len(helpstr) == 2: - helpstr += ['\tNone'] - return helpstr - - def _outputs(self): - """ Returns a bunch containing output fields for the class - """ - outputs = None - if self.output_spec: - outputs = self.output_spec() #pylint: disable=E1102 - - return outputs - - @classmethod - def _get_filecopy_info(cls): - """ Provides information about file inputs to copy or link to cwd. - Necessary for pipeline operation - """ - info = [] - if cls.input_spec is None: - return info - metadata = dict(copyfile=lambda t: t is not None) - for name, spec in sorted(cls.input_spec().traits(**metadata).items()): - info.append(dict(key=name, - copy=spec.copyfile)) - return info - - def _check_requires(self, spec, name, value): - """ check if required inputs are satisfied - """ - if spec.requires: - values = [not isdefined(getattr(self.inputs, field)) - for field in spec.requires] - if any(values) and isdefined(value): - msg = ("%s requires a value for input '%s' because one of %s " - "is set. 
For a list of required inputs, see %s.help()" % - (self.__class__.__name__, name, - ', '.join(spec.requires), self.__class__.__name__)) - raise ValueError(msg) - - def _check_xor(self, spec, name, value): - """ check if mutually exclusive inputs are satisfied - """ - if spec.xor: - values = [isdefined(getattr(self.inputs, field)) - for field in spec.xor] - if not any(values) and not isdefined(value): - msg = ("%s requires a value for one of the inputs '%s'. " - "For a list of required inputs, see %s.help()" % - (self.__class__.__name__, ', '.join(spec.xor), - self.__class__.__name__)) - raise ValueError(msg) - - def _check_mandatory_inputs(self): - """ Raises an exception if a mandatory input is Undefined - """ - for name, spec in list(self.inputs.traits(mandatory=True).items()): - value = getattr(self.inputs, name) - self._check_xor(spec, name, value) - if not isdefined(value) and spec.xor is None: - msg = ("%s requires a value for input '%s'. " - "For a list of required inputs, see %s.help()" % - (self.__class__.__name__, name, self.__class__.__name__)) - raise ValueError(msg) - if isdefined(value): - self._check_requires(spec, name, value) - for name, spec in list(self.inputs.traits(mandatory=None, - transient=None).items()): - self._check_requires(spec, name, getattr(self.inputs, name)) - - def _check_version_requirements(self, trait_object, raise_exception=True): - """ Raises an exception on version mismatch - """ - unavailable_traits = [] - # check minimum version - check = dict(min_ver=lambda t: t is not None) - names = trait_object.trait_names(**check) - - if names and self.version: - version = LooseVersion(str(self.version)) - for name in names: - min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) - if min_ver > version: - unavailable_traits.append(name) - if not isdefined(getattr(trait_object, name)): - continue - if raise_exception: - raise Exception('Trait %s (%s) (version %s < required %s)' % - (name, self.__class__.__name__, - version, 
min_ver)) - check = dict(max_ver=lambda t: t is not None) - names = trait_object.trait_names(**check) - for name in names: - max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) - if max_ver < version: - unavailable_traits.append(name) - if not isdefined(getattr(trait_object, name)): - continue - if raise_exception: - raise Exception('Trait %s (%s) (version %s > required %s)' % - (name, self.__class__.__name__, - version, max_ver)) - return unavailable_traits - - def _run_wrapper(self, runtime): - sysdisplay = os.getenv('DISPLAY') - if self._redirect_x: - try: - from xvfbwrapper import Xvfb - except ImportError: - iflogger.error('Xvfb wrapper could not be imported') - raise - - vdisp = Xvfb(nolisten='tcp') - vdisp.start() - try: - vdisp_num = vdisp.new_display - except AttributeError: # outdated version of xvfbwrapper - vdisp_num = vdisp.vdisplay_num - - iflogger.info('Redirecting X to :%d' % vdisp_num) - runtime.environ['DISPLAY'] = ':%d' % vdisp_num - - runtime = self._run_interface(runtime) - - if self._redirect_x: - vdisp.stop() - - return runtime - - def _run_interface(self, runtime): - """ Core function that executes interface - """ - raise NotImplementedError - - def _duecredit_cite(self): - """ Add the interface references to the duecredit citations - """ - for r in self.references_: - r['path'] = self.__module__ - due.cite(**r) - - def run(self, **inputs): - """Execute this interface. - - This interface will not raise an exception if runtime.returncode is - non-zero. 
- - Parameters - ---------- - inputs : allows the interface settings to be updated - - Returns - ------- - results : an InterfaceResult object containing a copy of the instance - that was executed, provenance information and, if successful, results - """ - self.inputs.trait_set(**inputs) - self._check_mandatory_inputs() - self._check_version_requirements(self.inputs) - interface = self.__class__ - self._duecredit_cite() - - # initialize provenance tracking - env = deepcopy(dict(os.environ)) - runtime = Bunch(cwd=os.getcwd(), - returncode=None, - duration=None, - environ=env, - startTime=dt.isoformat(dt.utcnow()), - endTime=None, - platform=platform.platform(), - hostname=platform.node(), - version=self.version) - try: - runtime = self._run_wrapper(runtime) - outputs = self.aggregate_outputs(runtime) - runtime.endTime = dt.isoformat(dt.utcnow()) - timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) - runtime.duration = (timediff.days * 86400 + timediff.seconds + - timediff.microseconds / 100000.) - results = InterfaceResult(interface, runtime, - inputs=self.inputs.get_traitsfree(), - outputs=outputs) - prov_record = None - if str2bool(config.get('execution', 'write_provenance')): - prov_record = write_provenance(results) - results.provenance = prov_record - except Exception as e: - runtime.endTime = dt.isoformat(dt.utcnow()) - timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) - runtime.duration = (timediff.days * 86400 + timediff.seconds + - timediff.microseconds / 100000.) - if len(e.args) == 0: - e.args = ("") - - message = "\nInterface %s failed to run." 
% self.__class__.__name__ - - if config.has_option('logging', 'interface_level') and \ - config.get('logging', 'interface_level').lower() == 'debug': - inputs_str = "\nInputs:" + str(self.inputs) + "\n" - else: - inputs_str = '' - - if len(e.args) == 1 and isinstance(e.args[0], (str, bytes)): - e.args = (e.args[0] + " ".join([message, inputs_str]),) - else: - e.args += (message, ) - if inputs_str != '': - e.args += (inputs_str, ) - - # exception raising inhibition for special cases - import traceback - runtime.traceback = traceback.format_exc() - runtime.traceback_args = e.args - inputs = None - try: - inputs = self.inputs.get_traitsfree() - except Exception as e: - pass - results = InterfaceResult(interface, runtime, inputs=inputs) - prov_record = None - if str2bool(config.get('execution', 'write_provenance')): - try: - prov_record = write_provenance(results) - except Exception: - prov_record = None - results.provenance = prov_record - if hasattr(self.inputs, 'ignore_exception') and \ - isdefined(self.inputs.ignore_exception) and \ - self.inputs.ignore_exception: - pass - else: - raise - return results - - def _list_outputs(self): - """ List the expected outputs - """ - if self.output_spec: - raise NotImplementedError - else: - return None - - def aggregate_outputs(self, runtime=None, needed_outputs=None): - """ Collate expected outputs and check for existence - """ - predicted_outputs = self._list_outputs() - outputs = self._outputs() - if predicted_outputs: - _unavailable_outputs = [] - if outputs: - _unavailable_outputs = \ - self._check_version_requirements(self._outputs()) - for key, val in list(predicted_outputs.items()): - if needed_outputs and key not in needed_outputs: - continue - if key in _unavailable_outputs: - raise KeyError(('Output trait %s not available in version ' - '%s of interface %s. 
Please inform ' - 'developers.') % (key, self.version, - self.__class__.__name__)) - try: - setattr(outputs, key, val) - _ = getattr(outputs, key) - except TraitError as error: - if hasattr(error, 'info') and \ - error.info.startswith("an existing"): - msg = ("File/Directory '%s' not found for %s output " - "'%s'." % (val, self.__class__.__name__, key)) - raise FileNotFoundError(msg) - else: - raise error - return outputs - - @property - def version(self): - if self._version is None: - if str2bool(config.get('execution', 'stop_on_unknown_version')): - raise ValueError('Interface %s has no version information' % - self.__class__.__name__) - return self._version - - def load_inputs_from_json(self, json_file, overwrite=True): - """ - A convenient way to load pre-set inputs from a JSON file. - """ - - with open(json_file) as fhandle: - inputs_dict = json.load(fhandle) - - def_inputs = [] - if not overwrite: - def_inputs = list(self.inputs.get_traitsfree().keys()) - - new_inputs = list(set(list(inputs_dict.keys())) - set(def_inputs)) - for key in new_inputs: - if hasattr(self.inputs, key): - setattr(self.inputs, key, inputs_dict[key]) - - def save_inputs_to_json(self, json_file): - """ - A convenient way to save current inputs to a JSON file. - """ - inputs = self.inputs.get_traitsfree() - iflogger.debug('saving inputs {}', inputs) - with open(json_file, 'w' if PY3 else 'wb') as fhandle: - json.dump(inputs, fhandle, indent=4, ensure_ascii=False) - - -class Stream(object): - """Function to capture stdout and stderr streams with timestamps - - stackoverflow.com/questions/4984549/merge-and-sync-stdout-and-stderr/5188359 - """ - - def __init__(self, name, impl): - self._name = name - self._impl = impl - self._buf = '' - self._rows = [] - self._lastidx = 0 - self.default_encoding = locale.getdefaultlocale()[1] - if self.default_encoding is None: - self.default_encoding = 'UTF-8' - - def fileno(self): - "Pass-through for file descriptor." 
- return self._impl.fileno() - - def read(self, drain=0): - "Read from the file descriptor. If 'drain' set, read until EOF." - while self._read(drain) is not None: - if not drain: - break - - def _read(self, drain): - "Read from the file descriptor" - fd = self.fileno() - buf = os.read(fd, 4096).decode(self.default_encoding) - if not buf and not self._buf: - return None - if '\n' not in buf: - if not drain: - self._buf += buf - return [] - - # prepend any data previously read, then split into lines and format - buf = self._buf + buf - if '\n' in buf: - tmp, rest = buf.rsplit('\n', 1) - else: - tmp = buf - rest = None - self._buf = rest - now = datetime.datetime.now().isoformat() - rows = tmp.split('\n') - self._rows += [(now, '%s %s:%s' % (self._name, now, r), r) - for r in rows] - for idx in range(self._lastidx, len(self._rows)): - iflogger.info(self._rows[idx][1]) - self._lastidx = len(self._rows) - - -# Get number of threads for process -def _get_num_threads(proc): - """Function to get the number of threads a process is using - NOTE: If - - Parameters - ---------- - proc : psutil.Process instance - the process to evaluate thead usage of - - Returns - ------- - num_threads : int - the number of threads that the process is using - """ - - # Import packages - import psutil - - # If process is running - if proc.status() == psutil.STATUS_RUNNING: - num_threads = proc.num_threads() - elif proc.num_threads() > 1: - tprocs = [psutil.Process(thr.id) for thr in proc.threads()] - alive_tprocs = [tproc for tproc in tprocs if tproc.status() == psutil.STATUS_RUNNING] - num_threads = len(alive_tprocs) - else: - num_threads = 1 - - # Try-block for errors - try: - child_threads = 0 - # Iterate through child processes and get number of their threads - for child in proc.children(recursive=True): - # Leaf process - if len(child.children()) == 0: - # If process is running, get its number of threads - if child.status() == psutil.STATUS_RUNNING: - child_thr = child.num_threads() - # 
If its not necessarily running, but still multi-threaded - elif child.num_threads() > 1: - # Cast each thread as a process and check for only running - tprocs = [psutil.Process(thr.id) for thr in child.threads()] - alive_tprocs = [tproc for tproc in tprocs if tproc.status() == psutil.STATUS_RUNNING] - child_thr = len(alive_tprocs) - # Otherwise, no threads are running - else: - child_thr = 0 - # Increment child threads - child_threads += child_thr - # Catch any NoSuchProcess errors - except psutil.NoSuchProcess: - pass - - # Number of threads is max between found active children and parent - num_threads = max(child_threads, num_threads) - - # Return number of threads found - return num_threads - - -# Get ram usage of process -def _get_ram_mb(pid, pyfunc=False): - """Function to get the RAM usage of a process and its children - - Parameters - ---------- - pid : integer - the PID of the process to get RAM usage of - pyfunc : boolean (optional); default=False - a flag to indicate if the process is a python function; - when Pythons are multithreaded via multiprocess or threading, - children functions include their own memory + parents. 
if this - is set, the parent memory will removed from children memories - - Reference: http://ftp.dev411.com/t/python/python-list/095thexx8g/multiprocessing-forking-memory-usage - - Returns - ------- - mem_mb : float - the memory RAM in MB utilized by the process PID - """ - - # Import packages - import psutil - - # Init variables - _MB = 1024.0**2 - - # Try block to protect against any dying processes in the interim - try: - # Init parent - parent = psutil.Process(pid) - # Get memory of parent - parent_mem = parent.memory_info().rss - mem_mb = parent_mem/_MB - - # Iterate through child processes - for child in parent.children(recursive=True): - child_mem = child.memory_info().rss - if pyfunc: - child_mem -= parent_mem - mem_mb += child_mem/_MB - - # Catch if process dies, return gracefully - except psutil.NoSuchProcess: - pass - - # Return memory - return mem_mb - - -# Get max resources used for process -def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): - """Function to get the RAM and threads usage of a process - - Parameters - ---------- - pid : integer - the process ID of process to profile - mem_mb : float - the high memory watermark so far during process execution (in MB) - num_threads: int - the high thread watermark so far during process execution - - Returns - ------- - mem_mb : float - the new high memory watermark of process (MB) - num_threads : float - the new high thread watermark of process - """ - - # Import packages - import psutil - - try: - mem_mb = max(mem_mb, _get_ram_mb(pid, pyfunc=pyfunc)) - num_threads = max(num_threads, _get_num_threads(psutil.Process(pid))) - except Exception as exc: - iflogger.info('Could not get resources used by process. Error: %s'\ - % exc) - - # Return resources - return mem_mb, num_threads - - -def run_command(runtime, output=None, timeout=0.01, redirect_x=False): - """Run a command, read stdout and stderr, prefix with timestamp. 
- - The returned runtime contains a merged stdout+stderr log with timestamps - """ - - # Init logger - logger = logging.getLogger('workflow') - - # Init variables - PIPE = subprocess.PIPE - cmdline = runtime.cmdline - - if redirect_x: - exist_xvfb, _ = _exists_in_path('xvfb-run', runtime.environ) - if not exist_xvfb: - raise RuntimeError('Xvfb was not found, X redirection aborted') - cmdline = 'xvfb-run -a ' + cmdline - - default_encoding = locale.getdefaultlocale()[1] - if default_encoding is None: - default_encoding = 'UTF-8' - if output == 'file': - errfile = os.path.join(runtime.cwd, 'stderr.nipype') - outfile = os.path.join(runtime.cwd, 'stdout.nipype') - stderr = open(errfile, 'wb') # t=='text'===default - stdout = open(outfile, 'wb') - - proc = subprocess.Popen(cmdline, - stdout=stdout, - stderr=stderr, - shell=True, - cwd=runtime.cwd, - env=runtime.environ) - else: - proc = subprocess.Popen(cmdline, - stdout=PIPE, - stderr=PIPE, - shell=True, - cwd=runtime.cwd, - env=runtime.environ) - result = {} - errfile = os.path.join(runtime.cwd, 'stderr.nipype') - outfile = os.path.join(runtime.cwd, 'stdout.nipype') - - # Init variables for memory profiling - mem_mb = 0 - num_threads = 1 - interval = .5 - - if output == 'stream': - streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] - - def _process(drain=0): - try: - res = select.select(streams, [], [], timeout) - except select.error as e: - iflogger.info(str(e)) - if e[0] == errno.EINTR: - return - else: - raise - else: - for stream in res[0]: - stream.read(drain) - while proc.returncode is None: - if runtime_profile: - mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) - proc.poll() - _process() - time.sleep(interval) - _process(drain=1) - - # collect results, merge and return - result = {} - temp = [] - for stream in streams: - rows = stream._rows - temp += rows - result[stream._name] = [r[2] for r in rows] - temp.sort() - result['merged'] = [r[1] for r in temp] - 
- if output == 'allatonce': - if runtime_profile: - while proc.returncode is None: - mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) - proc.poll() - time.sleep(interval) - stdout, stderr = proc.communicate() - stdout = stdout.decode(default_encoding) - stderr = stderr.decode(default_encoding) - result['stdout'] = stdout.split('\n') - result['stderr'] = stderr.split('\n') - result['merged'] = '' - if output == 'file': - if runtime_profile: - while proc.returncode is None: - mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) - proc.poll() - time.sleep(interval) - ret_code = proc.wait() - stderr.flush() - stdout.flush() - result['stdout'] = [line.decode(default_encoding).strip() for line in open(outfile, 'rb').readlines()] - result['stderr'] = [line.decode(default_encoding).strip() for line in open(errfile, 'rb').readlines()] - result['merged'] = '' - if output == 'none': - if runtime_profile: - while proc.returncode is None: - mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads) - proc.poll() - time.sleep(interval) - proc.communicate() - result['stdout'] = [] - result['stderr'] = [] - result['merged'] = '' - - setattr(runtime, 'runtime_memory_gb', mem_mb/1024.0) - setattr(runtime, 'runtime_threads', num_threads) - runtime.stderr = '\n'.join(result['stderr']) - runtime.stdout = '\n'.join(result['stdout']) - runtime.merged = result['merged'] - runtime.returncode = proc.returncode - return runtime - - -def get_dependencies(name, environ): - """Return library dependencies of a dynamically linked executable - - Uses otool on darwin, ldd on linux. Currently doesn't support windows. 
- - """ - PIPE = subprocess.PIPE - if sys.platform == 'darwin': - proc = subprocess.Popen('otool -L `which %s`' % name, - stdout=PIPE, - stderr=PIPE, - shell=True, - env=environ) - elif 'linux' in sys.platform: - proc = subprocess.Popen('ldd `which %s`' % name, - stdout=PIPE, - stderr=PIPE, - shell=True, - env=environ) - else: - return 'Platform %s not supported' % sys.platform - o, e = proc.communicate() - return o.rstrip() - - -class CommandLineInputSpec(BaseInterfaceInputSpec): - args = Str(argstr='%s', desc='Additional parameters to the command') - environ = DictStrStr(desc='Environment variables', usedefault=True, - nohash=True) - # This input does not have a "usedefault=True" so the set_default_terminal_output() - # method would work - terminal_output = traits.Enum('stream', 'allatonce', 'file', 'none', - desc=('Control terminal output: `stream` - ' - 'displays to terminal immediately (default), ' - '`allatonce` - waits till command is ' - 'finished to display output, `file` - ' - 'writes output to file, `none` - output' - ' is ignored'), - nohash=True) - - -class CommandLine(BaseInterface): - """Implements functionality to interact with command line programs - class must be instantiated with a command argument - - Parameters - ---------- - - command : string - define base immutable `command` you wish to run - - args : string, optional - optional arguments passed to base `command` - - - Examples - -------- - >>> import pprint - >>> from nipype.interfaces.base import CommandLine - >>> cli = CommandLine(command='ls', environ={'DISPLAY': ':1'}) - >>> cli.inputs.args = '-al' - >>> cli.cmdline # doctest: +ALLOW_UNICODE - 'ls -al' - - >>> pprint.pprint(cli.inputs.trait_get()) # doctest: +NORMALIZE_WHITESPACE +ALLOW_UNICODE - {'args': '-al', - 'environ': {'DISPLAY': ':1'}, - 'ignore_exception': False, - 'terminal_output': 'stream'} - - >>> cli.inputs.get_hashval()[0][0] # doctest: +ALLOW_UNICODE - ('args', '-al') - >>> cli.inputs.get_hashval()[1] # doctest: 
+ALLOW_UNICODE - '11c37f97649cd61627f4afe5136af8c0' - - """ - input_spec = CommandLineInputSpec - _cmd = None - _version = None - _terminal_output = 'stream' - - def __init__(self, command=None, **inputs): - super(CommandLine, self).__init__(**inputs) - self._environ = None - if not hasattr(self, '_cmd'): - self._cmd = None - if self.cmd is None and command is None: - raise Exception("Missing command") - if command: - self._cmd = command - self.inputs.on_trait_change(self._terminal_output_update, - 'terminal_output') - if not isdefined(self.inputs.terminal_output): - self.inputs.terminal_output = self._terminal_output - else: - self._terminal_output_update() - - def _terminal_output_update(self): - self._terminal_output = self.inputs.terminal_output - - @classmethod - def set_default_terminal_output(cls, output_type): - """Set the default terminal output for CommandLine Interfaces. - - This method is used to set default terminal output for - CommandLine Interfaces. However, setting this will not - update the output type for any existing instances. For these, - assign the .inputs.terminal_output. 
- """ - - if output_type in ['stream', 'allatonce', 'file', 'none']: - cls._terminal_output = output_type - else: - raise AttributeError('Invalid terminal output_type: %s' % - output_type) - - @property - def cmd(self): - """sets base command, immutable""" - return self._cmd - - @property - def cmdline(self): - """ `command` plus any arguments (args) - validates arguments and generates command line""" - self._check_mandatory_inputs() - allargs = self._parse_inputs() - allargs.insert(0, self.cmd) - return ' '.join(allargs) - - def raise_exception(self, runtime): - raise RuntimeError( - ('Command:\n{cmdline}\nStandard output:\n{stdout}\n' - 'Standard error:\n{stderr}\nReturn code: {returncode}').format( - **runtime.dictcopy())) - - @classmethod - def help(cls, returnhelp=False): - allhelp = super(CommandLine, cls).help(returnhelp=True) - - allhelp = "Wraps command **%s**\n\n" % cls._cmd + allhelp - - if returnhelp: - return allhelp - else: - print(allhelp) - - def _get_environ(self): - out_environ = {} - if not self._redirect_x: - try: - display_var = config.get('execution', 'display_variable') - out_environ = {'DISPLAY': display_var} - except NoOptionError: - pass - iflogger.debug(out_environ) - if isdefined(self.inputs.environ): - out_environ.update(self.inputs.environ) - return out_environ - - def version_from_command(self, flag='-v'): - cmdname = self.cmd.split()[0] - env = dict(os.environ) - if _exists_in_path(cmdname, env): - out_environ = self._get_environ() - env.update(out_environ) - proc = subprocess.Popen(' '.join((cmdname, flag)), - shell=True, - env=env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - o, e = proc.communicate() - return o - - def _run_wrapper(self, runtime): - runtime = self._run_interface(runtime) - return runtime - - def _run_interface(self, runtime, correct_return_codes=(0,)): - """Execute command via subprocess - - Parameters - ---------- - runtime : passed by the run function - - Returns - ------- - runtime : updated 
runtime information - adds stdout, stderr, merged, cmdline, dependencies, command_path - - """ - setattr(runtime, 'stdout', None) - setattr(runtime, 'stderr', None) - setattr(runtime, 'cmdline', self.cmdline) - out_environ = self._get_environ() - runtime.environ.update(out_environ) - executable_name = self.cmd.split()[0] - exist_val, cmd_path = _exists_in_path(executable_name, - runtime.environ) - if not exist_val: - raise IOError("command '%s' could not be found on host %s" % - (self.cmd.split()[0], runtime.hostname)) - setattr(runtime, 'command_path', cmd_path) - setattr(runtime, 'dependencies', get_dependencies(executable_name, - runtime.environ)) - runtime = run_command(runtime, output=self.inputs.terminal_output, - redirect_x=self._redirect_x) - if runtime.returncode is None or \ - runtime.returncode not in correct_return_codes: - self.raise_exception(runtime) - - return runtime - - def _format_arg(self, name, trait_spec, value): - """A helper function for _parse_inputs - - Formats a trait containing argstr metadata - """ - argstr = trait_spec.argstr - iflogger.debug('%s_%s' % (name, str(value))) - if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr: - if value: - # Boolean options have no format string. Just append options - # if True. - return argstr - else: - return None - # traits.Either turns into traits.TraitCompound and does not have any - # inner_traits - elif trait_spec.is_trait_type(traits.List) \ - or (trait_spec.is_trait_type(traits.TraitCompound) and - isinstance(value, list)): - # This is a bit simple-minded at present, and should be - # construed as the default. If more sophisticated behavior - # is needed, it can be accomplished with metadata (e.g. - # format string for list member str'ification, specifying - # the separator, etc.) 
- - # Depending on whether we stick with traitlets, and whether or - # not we beef up traitlets.List, we may want to put some - # type-checking code here as well - sep = trait_spec.sep - if sep is None: - sep = ' ' - if argstr.endswith('...'): - - # repeatable option - # --id %d... will expand to - # --id 1 --id 2 --id 3 etc.,. - argstr = argstr.replace('...', '') - return sep.join([argstr % elt for elt in value]) - else: - return argstr % sep.join(str(elt) for elt in value) - else: - # Append options using format string. - return argstr % value - - def _filename_from_source(self, name, chain=None): - if chain is None: - chain = [] - - trait_spec = self.inputs.trait(name) - retval = getattr(self.inputs, name) - source_ext = None - if not isdefined(retval) or "%s" in retval: - if not trait_spec.name_source: - return retval - if isdefined(retval) and "%s" in retval: - name_template = retval - else: - name_template = trait_spec.name_template - if not name_template: - name_template = "%s_generated" - - ns = trait_spec.name_source - while isinstance(ns, (list, tuple)): - if len(ns) > 1: - iflogger.warn('Only one name_source per trait is allowed') - ns = ns[0] - - if not isinstance(ns, (str, bytes)): - raise ValueError( - 'name_source of \'{}\' trait should be an input trait ' - 'name, but a type {} object was found'.format(name, type(ns))) - - if isdefined(getattr(self.inputs, ns)): - name_source = ns - source = getattr(self.inputs, name_source) - while isinstance(source, list): - source = source[0] - - # special treatment for files - try: - _, base, source_ext = split_filename(source) - except (AttributeError, TypeError): - base = source - else: - if name in chain: - raise NipypeInterfaceError('Mutually pointing name_sources') - - chain.append(name) - base = self._filename_from_source(ns, chain) - if isdefined(base): - _, _, source_ext = split_filename(base) - - chain = None - retval = name_template % base - _, _, ext = split_filename(retval) - if 
trait_spec.keep_extension and (ext or source_ext): - if (ext is None or not ext) and source_ext: - retval = retval + source_ext - else: - retval = self._overload_extension(retval, name) - return retval - - def _gen_filename(self, name): - raise NotImplementedError - - def _overload_extension(self, value, name=None): - return value - - def _list_outputs(self): - metadata = dict(name_source=lambda t: t is not None) - traits = self.inputs.traits(**metadata) - if traits: - outputs = self.output_spec().get() #pylint: disable=E1102 - for name, trait_spec in list(traits.items()): - out_name = name - if trait_spec.output_name is not None: - out_name = trait_spec.output_name - outputs[out_name] = \ - os.path.abspath(self._filename_from_source(name)) - return outputs - - def _parse_inputs(self, skip=None): - """Parse all inputs using the ``argstr`` format string in the Trait. - - Any inputs that are assigned (not the default_value) are formatted - to be added to the command line. - - Returns - ------- - all_args : list - A list of all inputs formatted for the command line. 
- - """ - all_args = [] - initial_args = {} - final_args = {} - metadata = dict(argstr=lambda t: t is not None) - for name, spec in sorted(self.inputs.traits(**metadata).items()): - if skip and name in skip: - continue - value = getattr(self.inputs, name) - if spec.name_source: - value = self._filename_from_source(name) - elif spec.genfile: - if not isdefined(value) or value is None: - value = self._gen_filename(name) - - if not isdefined(value): - continue - arg = self._format_arg(name, spec, value) - if arg is None: - continue - pos = spec.position - if pos is not None: - if int(pos) >= 0: - initial_args[pos] = arg - else: - final_args[pos] = arg - else: - all_args.append(arg) - first_args = [arg for pos, arg in sorted(initial_args.items())] - last_args = [arg for pos, arg in sorted(final_args.items())] - return first_args + all_args + last_args - - -class StdOutCommandLineInputSpec(CommandLineInputSpec): - out_file = File(argstr="> %s", position=-1, genfile=True) - - -class StdOutCommandLine(CommandLine): - input_spec = StdOutCommandLineInputSpec - - def _gen_filename(self, name): - if name == 'out_file': - return self._gen_outfilename() - else: - return None - - def _gen_outfilename(self): - raise NotImplementedError - - -class MpiCommandLineInputSpec(CommandLineInputSpec): - use_mpi = traits.Bool(False, - desc="Whether or not to run the command with mpiexec", - usedefault=True) - n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not " - "specify if this is managed externally (e.g. through " - "SGE)") - - -class MpiCommandLine(CommandLine): - """Implements functionality to interact with command line programs - that can be run with MPI (i.e. using 'mpiexec'). 
- - Examples - -------- - >>> from nipype.interfaces.base import MpiCommandLine - >>> mpi_cli = MpiCommandLine(command='my_mpi_prog') - >>> mpi_cli.inputs.args = '-v' - >>> mpi_cli.cmdline # doctest: +ALLOW_UNICODE - 'my_mpi_prog -v' - - >>> mpi_cli.inputs.use_mpi = True - >>> mpi_cli.inputs.n_procs = 8 - >>> mpi_cli.cmdline # doctest: +ALLOW_UNICODE - 'mpiexec -n 8 my_mpi_prog -v' - """ - input_spec = MpiCommandLineInputSpec - - @property - def cmdline(self): - """Adds 'mpiexec' to begining of command""" - result = [] - if self.inputs.use_mpi: - result.append('mpiexec') - if self.inputs.n_procs: - result.append('-n %d' % self.inputs.n_procs) - result.append(super(MpiCommandLine, self).cmdline) - return ' '.join(result) - - -class SEMLikeCommandLine(CommandLine): - """In SEM derived interface all outputs have corresponding inputs. - However, some SEM commands create outputs that are not defined in the XML. - In those cases one has to create a subclass of the autogenerated one and - overload the _list_outputs method. _outputs_from_inputs should still be - used but only for the reduced (by excluding those that do not have - corresponding inputs list of outputs. 
- """ - - def _list_outputs(self): - outputs = self.output_spec().get() #pylint: disable=E1102 - return self._outputs_from_inputs(outputs) - - def _outputs_from_inputs(self, outputs): - for name in list(outputs.keys()): - corresponding_input = getattr(self.inputs, name) - if isdefined(corresponding_input): - if (isinstance(corresponding_input, bool) and - corresponding_input): - outputs[name] = \ - os.path.abspath(self._outputs_filenames[name]) - else: - if isinstance(corresponding_input, list): - outputs[name] = [os.path.abspath(inp) - for inp in corresponding_input] - else: - outputs[name] = os.path.abspath(corresponding_input) - return outputs - - def _format_arg(self, name, spec, value): - if name in list(self._outputs_filenames.keys()): - if isinstance(value, bool): - if value: - value = os.path.abspath(self._outputs_filenames[name]) - else: - return "" - return super(SEMLikeCommandLine, self)._format_arg(name, spec, value) - - -class MultiPath(traits.List): - """ Abstract class - shared functionality of input and output MultiPath - """ - - def validate(self, object, name, value): - if not isdefined(value) or \ - (isinstance(value, list) and len(value) == 0): - return Undefined - newvalue = value - - if not isinstance(value, list) \ - or (self.inner_traits() and - isinstance(self.inner_traits()[0].trait_type, - traits.List) and not - isinstance(self.inner_traits()[0].trait_type, - InputMultiPath) and - isinstance(value, list) and - value and not - isinstance(value[0], list)): - newvalue = [value] - value = super(MultiPath, self).validate(object, name, newvalue) - - if len(value) > 0: - return value - - self.error(object, name, value) - - -class OutputMultiPath(MultiPath): - """ Implements a user friendly traits that accepts one or more - paths to files or directories. This is the output version which - return a single string whenever possible (when it was set to a - single value or a list of length 1). Default value of this trait - is _Undefined. 
It does not accept empty lists. - - XXX This should only be used as a final resort. We should stick to - established Traits to the extent possible. - - XXX This needs to be vetted by somebody who understands traits - - >>> from nipype.interfaces.base import OutputMultiPath - >>> class A(TraitedSpec): - ... foo = OutputMultiPath(File(exists=False)) - >>> a = A() - >>> a.foo - - - >>> a.foo = '/software/temp/foo.txt' - >>> a.foo # doctest: +ALLOW_UNICODE - '/software/temp/foo.txt' - - >>> a.foo = ['/software/temp/foo.txt'] - >>> a.foo # doctest: +ALLOW_UNICODE - '/software/temp/foo.txt' - - >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] - >>> a.foo # doctest: +ALLOW_UNICODE - ['/software/temp/foo.txt', '/software/temp/goo.txt'] - - """ - - def get(self, object, name): - value = self.get_value(object, name) - if len(value) == 0: - return Undefined - elif len(value) == 1: - return value[0] - else: - return value - - def set(self, object, name, value): - self.set_value(object, name, value) - - -class InputMultiPath(MultiPath): - """ Implements a user friendly traits that accepts one or more - paths to files or directories. This is the input version which - always returns a list. Default value of this trait - is _Undefined. It does not accept empty lists. - - XXX This should only be used as a final resort. We should stick to - established Traits to the extent possible. - - XXX This needs to be vetted by somebody who understands traits - - >>> from nipype.interfaces.base import InputMultiPath - >>> class A(TraitedSpec): - ... 
foo = InputMultiPath(File(exists=False)) - >>> a = A() - >>> a.foo - - - >>> a.foo = '/software/temp/foo.txt' - >>> a.foo # doctest: +ALLOW_UNICODE - ['/software/temp/foo.txt'] - - >>> a.foo = ['/software/temp/foo.txt'] - >>> a.foo # doctest: +ALLOW_UNICODE - ['/software/temp/foo.txt'] - - >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] - >>> a.foo # doctest: +ALLOW_UNICODE - ['/software/temp/foo.txt', '/software/temp/goo.txt'] - - """ - pass diff --git a/nipype/interfaces/base/__init__.py b/nipype/interfaces/base/__init__.py new file mode 100644 index 0000000000..ee0f10fd7a --- /dev/null +++ b/nipype/interfaces/base/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Nipype base interfaces +---------------------- + +This module defines the API of all nipype interfaces. + +""" +from .core import ( + Interface, BaseInterface, SimpleInterface, + CommandLine, StdOutCommandLine, + MpiCommandLine, SEMLikeCommandLine, PackageInfo +) + +from .specs import ( + BaseTraitedSpec, TraitedSpec, DynamicTraitedSpec, + BaseInterfaceInputSpec, CommandLineInputSpec, + StdOutCommandLineInputSpec +) + +from .traits_extension import ( + traits, Undefined, TraitDictObject, TraitListObject, TraitError, isdefined, + File, Directory, Str, DictStrStr, has_metadata, ImageFile, + MultiPath, OutputMultiPath, InputMultiPath) + +from .support import ( + Bunch, InterfaceResult, load_template, + NipypeInterfaceError +) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py new file mode 100644 index 0000000000..bcf2656620 --- /dev/null +++ b/nipype/interfaces/base/core.py @@ -0,0 +1,1248 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Nipype interfaces core +...................... 
+ + +Defines the ``Interface`` API and the body of the +most basic interfaces. +The I/O specifications corresponding to these base +interfaces are found in the ``specs`` module. + +""" +from __future__ import print_function, division, unicode_literals, absolute_import + +from builtins import object, open, str, bytes + +import gc +from copy import deepcopy +from datetime import datetime as dt +import errno +import os +import re +import platform +import select +import subprocess as sp +import sys +from textwrap import wrap +import simplejson as json +from dateutil.parser import parse as parseutc + +from ... import config, logging, LooseVersion +from ...utils.provenance import write_provenance +from ...utils.misc import trim, str2bool +from ...utils.filemanip import ( + FileNotFoundError, split_filename, read_stream, which, + get_dependencies, canonicalize_env as _canonicalize_env) + +from ...external.due import due + +from .traits_extension import traits, isdefined, TraitError +from .specs import ( + BaseInterfaceInputSpec, CommandLineInputSpec, + StdOutCommandLineInputSpec, MpiCommandLineInputSpec +) +from .support import ( + Bunch, Stream, InterfaceResult, NipypeInterfaceError +) + +from future import standard_library +standard_library.install_aliases() + + +iflogger = logging.getLogger('interface') + +PY35 = sys.version_info >= (3, 5) +PY3 = sys.version_info[0] > 2 +VALID_TERMINAL_OUTPUT = ['stream', 'allatonce', 'file', 'file_split', + 'file_stdout', 'file_stderr', 'none'] +__docformat__ = 'restructuredtext' + + +class Interface(object): + """This is an abstract definition for Interface objects. + + It provides no functionality. It defines the necessary attributes + and methods all Interface objects should have. 
+ + """ + + input_spec = None # A traited input specification + output_spec = None # A traited output specification + + # defines if the interface can reuse partial results after interruption + _can_resume = False + + @property + def can_resume(self): + return self._can_resume + + # should the interface be always run even if the inputs were not changed? + _always_run = False + + @property + def always_run(self): + return self._always_run + + def __init__(self, **inputs): + """Initialize command with given args and inputs.""" + raise NotImplementedError + + @classmethod + def help(cls): + """ Prints class help""" + raise NotImplementedError + + @classmethod + def _inputs_help(cls): + """ Prints inputs help""" + raise NotImplementedError + + @classmethod + def _outputs_help(cls): + """ Prints outputs help""" + raise NotImplementedError + + @classmethod + def _outputs(cls): + """ Initializes outputs""" + raise NotImplementedError + + @property + def version(self): + raise NotImplementedError + + def run(self): + """Execute the command.""" + raise NotImplementedError + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """Called to populate outputs""" + raise NotImplementedError + + def _list_outputs(self): + """ List expected outputs""" + raise NotImplementedError + + def _get_filecopy_info(self): + """ Provides information about file inputs to copy or link to cwd. + Necessary for pipeline operation + """ + raise NotImplementedError + + +class BaseInterface(Interface): + """Implements common interface functionality. + + Implements + ---------- + + * Initializes inputs/outputs from input_spec/output_spec + * Provides help based on input_spec and output_spec + * Checks for mandatory inputs before running an interface + * Runs an interface and returns results + * Determines which inputs should be copied or linked to cwd + + This class does not implement aggregate_outputs, input_spec or + output_spec. These should be defined by derived classes. 
+ + This class cannot be instantiated. + + + Relevant Interface attributes + ----------------------------- + + ``input_spec`` points to the traited class for the inputs + ``output_spec`` points to the traited class for the outputs + ``_redirect_x`` should be set to ``True`` when the interface requires + connecting to a ``$DISPLAY`` (default is ``False``). + ``resource_monitor`` if ``False`` prevents resource-monitoring this + interface, if ``True`` monitoring will be enabled IFF the general + Nipype config is set on (``resource_monitor = true``). + + + """ + input_spec = BaseInterfaceInputSpec + _version = None + _additional_metadata = [] + _redirect_x = False + references_ = [] + resource_monitor = True # Enabled for this interface IFF enabled in the config + + def __init__(self, from_file=None, resource_monitor=None, **inputs): + if not self.input_spec: + raise Exception('No input_spec in class: %s' % + self.__class__.__name__) + + self.inputs = self.input_spec(**inputs) + + if resource_monitor is not None: + self.resource_monitor = resource_monitor + + if from_file is not None: + self.load_inputs_from_json(from_file, overwrite=True) + + for name, value in list(inputs.items()): + setattr(self.inputs, name, value) + + @classmethod + def help(cls, returnhelp=False): + """ Prints class help + """ + + if cls.__doc__: + # docstring = cls.__doc__.split('\n') + # docstring = [trim(line, '') for line in docstring] + docstring = trim(cls.__doc__).split('\n') + [''] + else: + docstring = [''] + + allhelp = '\n'.join(docstring + cls._inputs_help() + [''] + + cls._outputs_help() + [''] + + cls._refs_help() + ['']) + if returnhelp: + return allhelp + else: + print(allhelp) + + @classmethod + def _refs_help(cls): + """ Prints interface references. 
+ """ + if not cls.references_: + return [] + + helpstr = ['References::'] + + for r in cls.references_: + helpstr += ['{}'.format(r['entry'])] + + return helpstr + + @classmethod + def _get_trait_desc(self, inputs, name, spec): + desc = spec.desc + xor = spec.xor + requires = spec.requires + argstr = spec.argstr + + manhelpstr = ['\t%s' % name] + + type_info = spec.full_info(inputs, name, None) + + default = '' + if spec.usedefault: + default = ', nipype default value: %s' % str(spec.default_value()[1]) + line = "(%s%s)" % (type_info, default) + + manhelpstr = wrap(line, 70, + initial_indent=manhelpstr[0] + ': ', + subsequent_indent='\t\t ') + + if desc: + for line in desc.split('\n'): + line = re.sub("\s+", " ", line) + manhelpstr += wrap(line, 70, + initial_indent='\t\t', + subsequent_indent='\t\t') + + if argstr: + pos = spec.position + if pos is not None: + manhelpstr += wrap('flag: %s, position: %s' % (argstr, pos), 70, + initial_indent='\t\t', + subsequent_indent='\t\t') + else: + manhelpstr += wrap('flag: %s' % argstr, 70, + initial_indent='\t\t', + subsequent_indent='\t\t') + + if xor: + line = '%s' % ', '.join(xor) + manhelpstr += wrap(line, 70, + initial_indent='\t\tmutually_exclusive: ', + subsequent_indent='\t\t ') + + if requires: + others = [field for field in requires if field != name] + line = '%s' % ', '.join(others) + manhelpstr += wrap(line, 70, + initial_indent='\t\trequires: ', + subsequent_indent='\t\t ') + return manhelpstr + + @classmethod + def _inputs_help(cls): + """ Prints description for input parameters + """ + helpstr = ['Inputs::'] + + inputs = cls.input_spec() + if len(list(inputs.traits(transient=None).items())) == 0: + helpstr += ['', '\tNone'] + return helpstr + + manhelpstr = ['', '\t[Mandatory]'] + mandatory_items = inputs.traits(mandatory=True) + for name, spec in sorted(mandatory_items.items()): + manhelpstr += cls._get_trait_desc(inputs, name, spec) + + opthelpstr = ['', '\t[Optional]'] + for name, spec in 
sorted(inputs.traits(transient=None).items()): + if name in mandatory_items: + continue + opthelpstr += cls._get_trait_desc(inputs, name, spec) + + if manhelpstr: + helpstr += manhelpstr + if opthelpstr: + helpstr += opthelpstr + return helpstr + + @classmethod + def _outputs_help(cls): + """ Prints description for output parameters + """ + helpstr = ['Outputs::', ''] + if cls.output_spec: + outputs = cls.output_spec() # pylint: disable=E1102 + for name, spec in sorted(outputs.traits(transient=None).items()): + helpstr += cls._get_trait_desc(outputs, name, spec) + if len(helpstr) == 2: + helpstr += ['\tNone'] + return helpstr + + def _outputs(self): + """ Returns a bunch containing output fields for the class + """ + outputs = None + if self.output_spec: + outputs = self.output_spec() # pylint: disable=E1102 + + return outputs + + @classmethod + def _get_filecopy_info(cls): + """ Provides information about file inputs to copy or link to cwd. + Necessary for pipeline operation + """ + info = [] + if cls.input_spec is None: + return info + metadata = dict(copyfile=lambda t: t is not None) + for name, spec in sorted(cls.input_spec().traits(**metadata).items()): + info.append(dict(key=name, + copy=spec.copyfile)) + return info + + def _check_requires(self, spec, name, value): + """ check if required inputs are satisfied + """ + if spec.requires: + values = [not isdefined(getattr(self.inputs, field)) + for field in spec.requires] + if any(values) and isdefined(value): + msg = ("%s requires a value for input '%s' because one of %s " + "is set. 
For a list of required inputs, see %s.help()" % + (self.__class__.__name__, name, + ', '.join(spec.requires), self.__class__.__name__)) + raise ValueError(msg) + + def _check_xor(self, spec, name, value): + """ check if mutually exclusive inputs are satisfied + """ + if spec.xor: + values = [isdefined(getattr(self.inputs, field)) + for field in spec.xor] + if not any(values) and not isdefined(value): + msg = ("%s requires a value for one of the inputs '%s'. " + "For a list of required inputs, see %s.help()" % + (self.__class__.__name__, ', '.join(spec.xor), + self.__class__.__name__)) + raise ValueError(msg) + + def _check_mandatory_inputs(self): + """ Raises an exception if a mandatory input is Undefined + """ + for name, spec in list(self.inputs.traits(mandatory=True).items()): + value = getattr(self.inputs, name) + self._check_xor(spec, name, value) + if not isdefined(value) and spec.xor is None: + msg = ("%s requires a value for input '%s'. " + "For a list of required inputs, see %s.help()" % + (self.__class__.__name__, name, self.__class__.__name__)) + raise ValueError(msg) + if isdefined(value): + self._check_requires(spec, name, value) + for name, spec in list(self.inputs.traits(mandatory=None, + transient=None).items()): + self._check_requires(spec, name, getattr(self.inputs, name)) + + def _check_version_requirements(self, trait_object, raise_exception=True): + """ Raises an exception on version mismatch + """ + unavailable_traits = [] + # check minimum version + check = dict(min_ver=lambda t: t is not None) + names = trait_object.trait_names(**check) + + if names and self.version: + version = LooseVersion(str(self.version)) + for name in names: + min_ver = LooseVersion(str(trait_object.traits()[name].min_ver)) + if min_ver > version: + unavailable_traits.append(name) + if not isdefined(getattr(trait_object, name)): + continue + if raise_exception: + raise Exception('Trait %s (%s) (version %s < required %s)' % + (name, self.__class__.__name__, + version, 
min_ver)) + check = dict(max_ver=lambda t: t is not None) + names = trait_object.trait_names(**check) + for name in names: + max_ver = LooseVersion(str(trait_object.traits()[name].max_ver)) + if max_ver < version: + unavailable_traits.append(name) + if not isdefined(getattr(trait_object, name)): + continue + if raise_exception: + raise Exception('Trait %s (%s) (version %s > required %s)' % + (name, self.__class__.__name__, + version, max_ver)) + return unavailable_traits + + def _run_interface(self, runtime): + """ Core function that executes interface + """ + raise NotImplementedError + + def _duecredit_cite(self): + """ Add the interface references to the duecredit citations + """ + for r in self.references_: + r['path'] = self.__module__ + due.cite(**r) + + def run(self, **inputs): + """Execute this interface. + + This interface will not raise an exception if runtime.returncode is + non-zero. + + Parameters + ---------- + inputs : allows the interface settings to be updated + + Returns + ------- + results : an InterfaceResult object containing a copy of the instance + that was executed, provenance information and, if successful, results + """ + from ...utils.profiler import ResourceMonitor + + enable_rm = config.resource_monitor and self.resource_monitor + force_raise = not getattr(self.inputs, 'ignore_exception', False) + self.inputs.trait_set(**inputs) + self._check_mandatory_inputs() + self._check_version_requirements(self.inputs) + interface = self.__class__ + self._duecredit_cite() + + # initialize provenance tracking + store_provenance = str2bool(config.get( + 'execution', 'write_provenance', 'false')) + env = deepcopy(dict(os.environ)) + if self._redirect_x: + env['DISPLAY'] = config.get_display() + + runtime = Bunch(cwd=os.getcwd(), + returncode=None, + duration=None, + environ=env, + startTime=dt.isoformat(dt.utcnow()), + endTime=None, + platform=platform.platform(), + hostname=platform.node(), + version=self.version) + + mon_sp = None + if enable_rm: + 
mon_freq = float(config.get('execution', 'resource_monitor_frequency', 1)) + proc_pid = os.getpid() + iflogger.debug('Creating a ResourceMonitor on a %s interface, PID=%d.', + self.__class__.__name__, proc_pid) + mon_sp = ResourceMonitor(proc_pid, freq=mon_freq) + mon_sp.start() + + # Grab inputs now, as they should not change during execution + inputs = self.inputs.get_traitsfree() + outputs = None + + try: + runtime = self._run_interface(runtime) + outputs = self.aggregate_outputs(runtime) + except Exception as e: + import traceback + # Retrieve the maximum info fast + runtime.traceback = traceback.format_exc() + # Gather up the exception arguments and append nipype info. + exc_args = e.args if getattr(e, 'args') else tuple() + exc_args += ('An exception of type %s occurred while running interface %s.' % + (type(e).__name__, self.__class__.__name__), ) + if config.get('logging', 'interface_level', 'info').lower() == 'debug': + exc_args += ('Inputs: %s' % str(self.inputs),) + + runtime.traceback_args = ('\n'.join(['%s' % arg for arg in exc_args]),) + + if force_raise: + raise + finally: + # This needs to be done always + runtime.endTime = dt.isoformat(dt.utcnow()) + timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) + runtime.duration = (timediff.days * 86400 + timediff.seconds + + timediff.microseconds / 1e6) + results = InterfaceResult(interface, runtime, inputs=inputs, outputs=outputs, + provenance=None) + + # Add provenance (if required) + if store_provenance: + # Provenance will only throw a warning if something went wrong + results.provenance = write_provenance(results) + + # Make sure runtime profiler is shut down + if enable_rm: + import numpy as np + mon_sp.stop() + + runtime.mem_peak_gb = None + runtime.cpu_percent = None + + # Read .prof file in and set runtime values + vals = np.loadtxt(mon_sp.fname, delimiter=',') + if vals.size: + vals = np.atleast_2d(vals) + runtime.mem_peak_gb = vals[:, 1].max() / 1024 + runtime.cpu_percent = 
vals[:, 2].max() + + runtime.prof_dict = { + 'time': vals[:, 0].tolist(), + 'cpus': vals[:, 1].tolist(), + 'rss_GiB': (vals[:, 2] / 1024).tolist(), + 'vms_GiB': (vals[:, 3] / 1024).tolist(), + } + + return results + + def _list_outputs(self): + """ List the expected outputs + """ + if self.output_spec: + raise NotImplementedError + else: + return None + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """ Collate expected outputs and check for existence + """ + + predicted_outputs = self._list_outputs() + outputs = self._outputs() + if predicted_outputs: + _unavailable_outputs = [] + if outputs: + _unavailable_outputs = \ + self._check_version_requirements(self._outputs()) + for key, val in list(predicted_outputs.items()): + if needed_outputs and key not in needed_outputs: + continue + if key in _unavailable_outputs: + raise KeyError(('Output trait %s not available in version ' + '%s of interface %s. Please inform ' + 'developers.') % (key, self.version, + self.__class__.__name__)) + try: + setattr(outputs, key, val) + except TraitError as error: + if getattr(error, 'info', 'default').startswith('an existing'): + msg = ("File/Directory '%s' not found for %s output " + "'%s'." % (val, self.__class__.__name__, key)) + raise FileNotFoundError(msg) + raise error + + return outputs + + @property + def version(self): + if self._version is None: + if str2bool(config.get('execution', 'stop_on_unknown_version')): + raise ValueError('Interface %s has no version information' % + self.__class__.__name__) + return self._version + + def load_inputs_from_json(self, json_file, overwrite=True): + """ + A convenient way to load pre-set inputs from a JSON file. 
+ """ + + with open(json_file) as fhandle: + inputs_dict = json.load(fhandle) + + def_inputs = [] + if not overwrite: + def_inputs = list(self.inputs.get_traitsfree().keys()) + + new_inputs = list(set(list(inputs_dict.keys())) - set(def_inputs)) + for key in new_inputs: + if hasattr(self.inputs, key): + setattr(self.inputs, key, inputs_dict[key]) + + def save_inputs_to_json(self, json_file): + """ + A convenient way to save current inputs to a JSON file. + """ + inputs = self.inputs.get_traitsfree() + iflogger.debug('saving inputs {}', inputs) + with open(json_file, 'w' if PY3 else 'wb') as fhandle: + json.dump(inputs, fhandle, indent=4, ensure_ascii=False) + + +class SimpleInterface(BaseInterface): + """ An interface pattern that allows outputs to be set in a dictionary + called ``_results`` that is automatically interpreted by + ``_list_outputs()`` to find the outputs. + + When implementing ``_run_interface``, set outputs with:: + + self._results[out_name] = out_value + + This can be a way to upgrade a ``Function`` interface to do type checking. + + Examples + -------- + + .. testsetup:: + + >>> from .specs import TraitedSpec + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() # changing to a temporary directory + + .. doctest:: + + >>> def double(x): + ... return 2 * x + ... + >>> class DoubleInputSpec(BaseInterfaceInputSpec): + ... x = traits.Float(mandatory=True) + ... + >>> class DoubleOutputSpec(TraitedSpec): + ... doubled = traits.Float() + ... + >>> class Double(SimpleInterface): + ... input_spec = DoubleInputSpec + ... output_spec = DoubleOutputSpec + ... + ... def _run_interface(self, runtime): + ... self._results['doubled'] = double(self.inputs.x) + ... return runtime + + >>> dbl = Double() + >>> dbl.inputs.x = 2 + >>> dbl.run().outputs.doubled + 4.0 + + .. 
testsetup:: + + >>> os.chdir(old.strpath) + + """ + + def __init__(self, from_file=None, resource_monitor=None, **inputs): + super(SimpleInterface, self).__init__( + from_file=from_file, resource_monitor=resource_monitor, **inputs) + self._results = {} + + def _list_outputs(self): + return self._results + + +def run_command(runtime, output=None, timeout=0.01): + """Run a command, read stdout and stderr, prefix with timestamp. + + The returned runtime contains a merged stdout+stderr log with timestamps + """ + + # Init variables + cmdline = runtime.cmdline + env = _canonicalize_env(runtime.environ) + + errfile = None + outfile = None + stdout = sp.PIPE + stderr = sp.PIPE + + if output == 'file': + outfile = os.path.join(runtime.cwd, 'output.nipype') + stdout = open(outfile, 'wb') # t=='text'===default + stderr = sp.STDOUT + elif output == 'file_split': + outfile = os.path.join(runtime.cwd, 'stdout.nipype') + stdout = open(outfile, 'wb') + errfile = os.path.join(runtime.cwd, 'stderr.nipype') + stderr = open(errfile, 'wb') + elif output == 'file_stdout': + outfile = os.path.join(runtime.cwd, 'stdout.nipype') + stdout = open(outfile, 'wb') + elif output == 'file_stderr': + errfile = os.path.join(runtime.cwd, 'stderr.nipype') + stderr = open(errfile, 'wb') + + proc = sp.Popen(cmdline, + stdout=stdout, + stderr=stderr, + shell=True, + cwd=runtime.cwd, + env=env, + close_fds=True, + ) + + result = { + 'stdout': [], + 'stderr': [], + 'merged': [], + } + + if output == 'stream': + streams = [Stream('stdout', proc.stdout), Stream('stderr', proc.stderr)] + + def _process(drain=0): + try: + res = select.select(streams, [], [], timeout) + except select.error as e: + iflogger.info(e) + if e[0] == errno.EINTR: + return + else: + raise + else: + for stream in res[0]: + stream.read(drain) + + while proc.returncode is None: + proc.poll() + _process() + + _process(drain=1) + + # collect results, merge and return + result = {} + temp = [] + for stream in streams: + rows = stream._rows 
+ temp += rows + result[stream._name] = [r[2] for r in rows] + temp.sort() + result['merged'] = [r[1] for r in temp] + + if output.startswith('file'): + proc.wait() + if outfile is not None: + stdout.flush() + stdout.close() + with open(outfile, 'rb') as ofh: + stdoutstr = ofh.read() + result['stdout'] = read_stream(stdoutstr, logger=iflogger) + del stdoutstr + + if errfile is not None: + stderr.flush() + stderr.close() + with open(errfile, 'rb') as efh: + stderrstr = efh.read() + result['stderr'] = read_stream(stderrstr, logger=iflogger) + del stderrstr + + if output == 'file': + result['merged'] = result['stdout'] + result['stdout'] = [] + else: + stdout, stderr = proc.communicate() + if output == 'allatonce': # Discard stdout and stderr otherwise + result['stdout'] = read_stream(stdout, logger=iflogger) + result['stderr'] = read_stream(stderr, logger=iflogger) + + runtime.returncode = proc.returncode + try: + proc.terminate() # Ensure we are done + except OSError as error: + # Python 2 raises when the process is already gone + if error.errno != errno.ESRCH: + raise + + # Dereference & force GC for a cleanup + del proc + del stdout + del stderr + gc.collect() + + runtime.stderr = '\n'.join(result['stderr']) + runtime.stdout = '\n'.join(result['stdout']) + runtime.merged = '\n'.join(result['merged']) + return runtime + + +class CommandLine(BaseInterface): + """Implements functionality to interact with command line programs + class must be instantiated with a command argument + + Parameters + ---------- + + command : string + define base immutable `command` you wish to run + + args : string, optional + optional arguments passed to base `command` + + + Examples + -------- + >>> import pprint + >>> from nipype.interfaces.base import CommandLine + >>> cli = CommandLine(command='ls', environ={'DISPLAY': ':1'}) + >>> cli.inputs.args = '-al' + >>> cli.cmdline + 'ls -al' + + # Use get_traitsfree() to check all inputs set + >>> pprint.pprint(cli.inputs.get_traitsfree()) # 
doctest: + {'args': '-al', + 'environ': {'DISPLAY': ':1'}, + 'ignore_exception': False} + + >>> cli.inputs.get_hashval()[0][0] + ('args', '-al') + >>> cli.inputs.get_hashval()[1] + '11c37f97649cd61627f4afe5136af8c0' + + """ + input_spec = CommandLineInputSpec + _cmd = None + _version = None + _terminal_output = 'stream' + + @classmethod + def set_default_terminal_output(cls, output_type): + """Set the default terminal output for CommandLine Interfaces. + + This method is used to set default terminal output for + CommandLine Interfaces. However, setting this will not + update the output type for any existing instances. For these, + assign the .terminal_output. + """ + + if output_type in VALID_TERMINAL_OUTPUT: + cls._terminal_output = output_type + else: + raise AttributeError('Invalid terminal output_type: %s' % + output_type) + + @classmethod + def help(cls, returnhelp=False): + allhelp = 'Wraps command **{cmd}**\n\n{help}'.format( + cmd=cls._cmd, help=super(CommandLine, cls).help(returnhelp=True)) + if returnhelp: + return allhelp + print(allhelp) + + def __init__(self, command=None, terminal_output=None, **inputs): + super(CommandLine, self).__init__(**inputs) + self._environ = None + # Set command. 
Input argument takes precedence + self._cmd = command or getattr(self, '_cmd', None) + + # Store dependencies in runtime object + self._ldd = str2bool(config.get('execution', 'get_linked_libs', 'true')) + + if self._cmd is None: + raise Exception("Missing command") + + if terminal_output is not None: + self.terminal_output = terminal_output + + # Attach terminal_output callback for backwards compatibility + self.inputs.on_trait_change(self._terminal_output_update, + 'terminal_output') + + @property + def cmd(self): + """sets base command, immutable""" + return self._cmd + + @property + def cmdline(self): + """ `command` plus any arguments (args) + validates arguments and generates command line""" + self._check_mandatory_inputs() + allargs = [self.cmd] + self._parse_inputs() + return ' '.join(allargs) + + @property + def terminal_output(self): + return self._terminal_output + + @terminal_output.setter + def terminal_output(self, value): + if value not in VALID_TERMINAL_OUTPUT: + raise RuntimeError( + 'Setting invalid value "%s" for terminal_output. Valid values are ' + '%s.' 
% (value, ', '.join(['"%s"' % v for v in VALID_TERMINAL_OUTPUT]))) + self._terminal_output = value + + def _terminal_output_update(self): + self.terminal_output = self.terminal_output + + def raise_exception(self, runtime): + raise RuntimeError( + ('Command:\n{cmdline}\nStandard output:\n{stdout}\n' + 'Standard error:\n{stderr}\nReturn code: {returncode}').format( + **runtime.dictcopy())) + + def _get_environ(self): + return getattr(self.inputs, 'environ', {}) + + def version_from_command(self, flag='-v', cmd=None): + iflogger.warning('version_from_command member of CommandLine was ' + 'Deprecated in nipype-1.0.0 and deleted in 1.1.0') + if cmd is None: + cmd = self.cmd.split()[0] + + env = dict(os.environ) + if which(cmd, env=env): + out_environ = self._get_environ() + env.update(out_environ) + proc = sp.Popen(' '.join((cmd, flag)), + shell=True, + env=env, + stdout=sp.PIPE, + stderr=sp.PIPE, + ) + o, e = proc.communicate() + return o + + def _run_interface(self, runtime, correct_return_codes=(0,)): + """Execute command via subprocess + + Parameters + ---------- + runtime : passed by the run function + + Returns + ------- + runtime : updated runtime information + adds stdout, stderr, merged, cmdline, dependencies, command_path + + """ + + out_environ = self._get_environ() + # Initialize runtime Bunch + runtime.stdout = None + runtime.stderr = None + runtime.cmdline = self.cmdline + runtime.environ.update(out_environ) + + # which $cmd + executable_name = self.cmd.split()[0] + cmd_path = which(executable_name, env=runtime.environ) + + if cmd_path is None: + raise IOError( + 'No command "%s" found on host %s. Please check that the ' + 'corresponding package is installed.' 
% ( + executable_name, runtime.hostname)) + + runtime.command_path = cmd_path + runtime.dependencies = (get_dependencies(executable_name, runtime.environ) + if self._ldd else '') + runtime = run_command(runtime, output=self.terminal_output) + if runtime.returncode is None or \ + runtime.returncode not in correct_return_codes: + self.raise_exception(runtime) + + return runtime + + def _format_arg(self, name, trait_spec, value): + """A helper function for _parse_inputs + + Formats a trait containing argstr metadata + """ + argstr = trait_spec.argstr + iflogger.debug('%s_%s', name, value) + if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr: + # Boolean options have no format string. Just append options if True. + return argstr if value else None + # traits.Either turns into traits.TraitCompound and does not have any + # inner_traits + elif trait_spec.is_trait_type(traits.List) \ + or (trait_spec.is_trait_type(traits.TraitCompound) and + isinstance(value, list)): + # This is a bit simple-minded at present, and should be + # construed as the default. If more sophisticated behavior + # is needed, it can be accomplished with metadata (e.g. + # format string for list member str'ification, specifying + # the separator, etc.) + + # Depending on whether we stick with traitlets, and whether or + # not we beef up traitlets.List, we may want to put some + # type-checking code here as well + sep = trait_spec.sep if trait_spec.sep is not None else ' ' + + if argstr.endswith('...'): + # repeatable option + # --id %d... will expand to + # --id 1 --id 2 --id 3 etc.,. + argstr = argstr.replace('...', '') + return sep.join([argstr % elt for elt in value]) + else: + return argstr % sep.join(str(elt) for elt in value) + else: + # Append options using format string. 
+ return argstr % value + + def _filename_from_source(self, name, chain=None): + if chain is None: + chain = [] + + trait_spec = self.inputs.trait(name) + retval = getattr(self.inputs, name) + source_ext = None + if not isdefined(retval) or "%s" in retval: + if not trait_spec.name_source: + return retval + if isdefined(retval) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning('Only one name_source per trait is allowed') + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + 'name_source of \'{}\' trait should be an input trait ' + 'name, but a type {} object was found'.format(name, type(ns))) + + if isdefined(getattr(self.inputs, ns)): + name_source = ns + source = getattr(self.inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError('Mutually pointing name_sources') + + chain.append(name) + base = self._filename_from_source(ns, chain) + if isdefined(base): + _, _, source_ext = split_filename(base) + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = self._overload_extension(retval, name) + return retval + + def _gen_filename(self, name): + raise NotImplementedError + + def _overload_extension(self, value, name=None): + return value + + def _list_outputs(self): + metadata = dict(name_source=lambda t: t is not None) + traits = self.inputs.traits(**metadata) + if traits: + outputs = self.output_spec().get() # pylint: disable=E1102 + for name, 
trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + outputs[out_name] = \ + os.path.abspath(self._filename_from_source(name)) + return outputs + + def _parse_inputs(self, skip=None): + """Parse all inputs using the ``argstr`` format string in the Trait. + + Any inputs that are assigned (not the default_value) are formatted + to be added to the command line. + + Returns + ------- + all_args : list + A list of all inputs formatted for the command line. + + """ + all_args = [] + initial_args = {} + final_args = {} + metadata = dict(argstr=lambda t: t is not None) + for name, spec in sorted(self.inputs.traits(**metadata).items()): + if skip and name in skip: + continue + value = getattr(self.inputs, name) + if spec.name_source: + value = self._filename_from_source(name) + elif spec.genfile: + if not isdefined(value) or value is None: + value = self._gen_filename(name) + + if not isdefined(value): + continue + arg = self._format_arg(name, spec, value) + if arg is None: + continue + pos = spec.position + if pos is not None: + if int(pos) >= 0: + initial_args[pos] = arg + else: + final_args[pos] = arg + else: + all_args.append(arg) + first_args = [el for _, el in sorted(initial_args.items())] + last_args = [el for _, el in sorted(final_args.items())] + return first_args + all_args + last_args + + +class StdOutCommandLine(CommandLine): + input_spec = StdOutCommandLineInputSpec + + def _gen_filename(self, name): + return self._gen_outfilename() if name == 'out_file' else None + + def _gen_outfilename(self): + raise NotImplementedError + + +class MpiCommandLine(CommandLine): + """Implements functionality to interact with command line programs + that can be run with MPI (i.e. using 'mpiexec'). 
+ + Examples + -------- + >>> from nipype.interfaces.base import MpiCommandLine + >>> mpi_cli = MpiCommandLine(command='my_mpi_prog') + >>> mpi_cli.inputs.args = '-v' + >>> mpi_cli.cmdline + 'my_mpi_prog -v' + + >>> mpi_cli.inputs.use_mpi = True + >>> mpi_cli.inputs.n_procs = 8 + >>> mpi_cli.cmdline + 'mpiexec -n 8 my_mpi_prog -v' + """ + input_spec = MpiCommandLineInputSpec + + @property + def cmdline(self): + """Adds 'mpiexec' to begining of command""" + result = [] + if self.inputs.use_mpi: + result.append('mpiexec') + if self.inputs.n_procs: + result.append('-n %d' % self.inputs.n_procs) + result.append(super(MpiCommandLine, self).cmdline) + return ' '.join(result) + + +class SEMLikeCommandLine(CommandLine): + """In SEM derived interface all outputs have corresponding inputs. + However, some SEM commands create outputs that are not defined in the XML. + In those cases one has to create a subclass of the autogenerated one and + overload the _list_outputs method. _outputs_from_inputs should still be + used but only for the reduced (by excluding those that do not have + corresponding inputs list of outputs. 
+ """ + + def _list_outputs(self): + outputs = self.output_spec().get() # pylint: disable=E1102 + return self._outputs_from_inputs(outputs) + + def _outputs_from_inputs(self, outputs): + for name in list(outputs.keys()): + corresponding_input = getattr(self.inputs, name) + if isdefined(corresponding_input): + if (isinstance(corresponding_input, bool) and + corresponding_input): + outputs[name] = \ + os.path.abspath(self._outputs_filenames[name]) + else: + if isinstance(corresponding_input, list): + outputs[name] = [os.path.abspath(inp) + for inp in corresponding_input] + else: + outputs[name] = os.path.abspath(corresponding_input) + return outputs + + def _format_arg(self, name, spec, value): + if name in list(self._outputs_filenames.keys()): + if isinstance(value, bool): + if value: + value = os.path.abspath(self._outputs_filenames[name]) + else: + return "" + return super(SEMLikeCommandLine, self)._format_arg(name, spec, value) + + +class PackageInfo(object): + _version = None + version_cmd = None + version_file = None + + @classmethod + def version(klass): + if klass._version is None: + if klass.version_cmd is not None: + try: + clout = CommandLine(command=klass.version_cmd, + resource_monitor=False, + terminal_output='allatonce').run() + except IOError: + return None + + raw_info = clout.runtime.stdout + elif klass.version_file is not None: + try: + with open(klass.version_file, 'rt') as fobj: + raw_info = fobj.read() + except OSError: + return None + else: + return None + + klass._version = klass.parse_version(raw_info) + + return klass._version + + @staticmethod + def parse_version(raw_info): + raise NotImplementedError diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py new file mode 100644 index 0000000000..f0d1184d85 --- /dev/null +++ b/nipype/interfaces/base/specs.py @@ -0,0 +1,396 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" 
+ +Base I/O specifications for Nipype interfaces +............................................. + +Define the API for the I/O of interfaces + +""" +from __future__ import print_function, division, unicode_literals, absolute_import + +import os +from copy import deepcopy +from warnings import warn +from builtins import str, bytes +from packaging.version import Version + +from ...utils.misc import is_container +from ...utils.filemanip import md5, hash_infile, hash_timestamp, to_str +from .traits_extension import ( + traits, Undefined, isdefined, TraitError, TraitDictObject, TraitListObject, + has_metadata, +) + +from ... import config, __version__ + +FLOAT_FORMAT = '{:.10f}'.format +nipype_version = Version(__version__) + + +class BaseTraitedSpec(traits.HasTraits): + """ + Provide a few methods necessary to support nipype interface api + + The inputs attribute of interfaces call certain methods that are not + available in traits.HasTraits. These are provided here. + + new metadata: + + * usedefault : set this to True if the default value of the trait should be + used. Unless this is set, the attributes are set to traits.Undefined + + new attribute: + + * get_hashval : returns a tuple containing the state of the trait as a dict + and hashvalue corresponding to dict. + + XXX Reconsider this in the long run, but it seems like the best + solution to move forward on the refactoring. + """ + package_version = nipype_version + + def __init__(self, **kwargs): + """ Initialize handlers and inputs""" + # NOTE: In python 2.6, object.__init__ no longer accepts input + # arguments. HasTraits does not define an __init__ and + # therefore these args were being ignored. 
+ # super(TraitedSpec, self).__init__(*args, **kwargs) + super(BaseTraitedSpec, self).__init__(**kwargs) + traits.push_exception_handler(reraise_exceptions=True) + undefined_traits = {} + for trait in self.copyable_trait_names(): + if not self.traits()[trait].usedefault: + undefined_traits[trait] = Undefined + self.trait_set(trait_change_notify=False, **undefined_traits) + self._generate_handlers() + self.trait_set(**kwargs) + + def items(self): + """ Name, trait generator for user modifiable traits + """ + for name in sorted(self.copyable_trait_names()): + yield name, self.traits()[name] + + def __repr__(self): + """ Return a well-formatted representation of the traits """ + outstr = [] + for name, value in sorted(self.trait_get().items()): + outstr.append('%s = %s' % (name, value)) + return '\n{}\n'.format('\n'.join(outstr)) + + def _generate_handlers(self): + """Find all traits with the 'xor' metadata and attach an event + handler to them. + """ + has_xor = dict(xor=lambda t: t is not None) + xors = self.trait_names(**has_xor) + for elem in xors: + self.on_trait_change(self._xor_warn, elem) + has_deprecation = dict(deprecated=lambda t: t is not None) + deprecated = self.trait_names(**has_deprecation) + for elem in deprecated: + self.on_trait_change(self._deprecated_warn, elem) + + def _xor_warn(self, obj, name, old, new): + """ Generates warnings for xor traits + """ + if isdefined(new): + trait_spec = self.traits()[name] + # for each xor, set to default_value + for trait_name in trait_spec.xor: + if trait_name == name: + # skip ourself + continue + if isdefined(getattr(self, trait_name)): + self.trait_set(trait_change_notify=False, + **{'%s' % name: Undefined}) + msg = ('Input "%s" is mutually exclusive with input "%s", ' + 'which is already set') % (name, trait_name) + raise IOError(msg) + + def _requires_warn(self, obj, name, old, new): + """Part of the xor behavior + """ + if isdefined(new): + trait_spec = self.traits()[name] + msg = None + for trait_name in 
trait_spec.requires: + if not isdefined(getattr(self, trait_name)): + if not msg: + msg = 'Input %s requires inputs: %s' \ + % (name, ', '.join(trait_spec.requires)) + if msg: # only one requires warning at a time. + warn(msg) + + def _deprecated_warn(self, obj, name, old, new): + """Checks if a user assigns a value to a deprecated trait + """ + if isdefined(new): + trait_spec = self.traits()[name] + msg1 = ('Input %s in interface %s is deprecated.' % + (name, + self.__class__.__name__.split('InputSpec')[0])) + msg2 = ('Will be removed or raise an error as of release %s' + % trait_spec.deprecated) + if trait_spec.new_name: + if trait_spec.new_name not in self.copyable_trait_names(): + raise TraitError(msg1 + ' Replacement trait %s not found' % + trait_spec.new_name) + msg3 = 'It has been replaced by %s.' % trait_spec.new_name + else: + msg3 = '' + msg = ' '.join((msg1, msg2, msg3)) + if Version(str(trait_spec.deprecated)) < self.package_version: + raise TraitError(msg) + else: + if trait_spec.new_name: + msg += 'Unsetting old value %s; setting new value %s.' 
% ( + name, trait_spec.new_name) + warn(msg) + if trait_spec.new_name: + self.trait_set(trait_change_notify=False, + **{'%s' % name: Undefined, + '%s' % trait_spec.new_name: new}) + + def _hash_infile(self, adict, key): + """ Inject file hashes into adict[key]""" + stuff = adict[key] + if not is_container(stuff): + stuff = [stuff] + file_list = [] + for afile in stuff: + if is_container(afile): + hashlist = self._hash_infile({'infiles': afile}, 'infiles') + hash = [val[1] for val in hashlist] + else: + if config.get('execution', + 'hash_method').lower() == 'timestamp': + hash = hash_timestamp(afile) + elif config.get('execution', + 'hash_method').lower() == 'content': + hash = hash_infile(afile) + else: + raise Exception("Unknown hash method: %s" % + config.get('execution', 'hash_method')) + file_list.append((afile, hash)) + return file_list + + def get(self, **kwargs): + """ Returns traited class as a dict + + Augments the trait get function to return a dictionary without + notification handles + """ + out = super(BaseTraitedSpec, self).get(**kwargs) + out = self._clean_container(out, Undefined) + return out + + def get_traitsfree(self, **kwargs): + """ Returns traited class as a dict + + Augments the trait get function to return a dictionary without + any traits. The dictionary does not contain any attributes that + were Undefined + """ + out = super(BaseTraitedSpec, self).get(**kwargs) + out = self._clean_container(out, skipundefined=True) + return out + + def _clean_container(self, objekt, undefinedval=None, skipundefined=False): + """Convert a traited obejct into a pure python representation. 
+ """ + if isinstance(objekt, TraitDictObject) or isinstance(objekt, dict): + out = {} + for key, val in list(objekt.items()): + if isdefined(val): + out[key] = self._clean_container(val, undefinedval) + else: + if not skipundefined: + out[key] = undefinedval + elif (isinstance(objekt, TraitListObject) or + isinstance(objekt, list) or isinstance(objekt, tuple)): + out = [] + for val in objekt: + if isdefined(val): + out.append(self._clean_container(val, undefinedval)) + else: + if not skipundefined: + out.append(undefinedval) + else: + out.append(None) + if isinstance(objekt, tuple): + out = tuple(out) + else: + if isdefined(objekt): + out = objekt + else: + if not skipundefined: + out = undefinedval + return out + + def has_metadata(self, name, metadata, value=None, recursive=True): + """ + Return has_metadata for the requested trait name in this + interface + """ + return has_metadata(self.trait(name).trait_type, metadata, value, + recursive) + + def get_hashval(self, hash_method=None): + """Return a dictionary of our items with hashes for each file. + + Searches through dictionary items and if an item is a file, it + calculates the md5 hash of the file contents and stores the + file name and hash value as the new key value. + + However, the overall bunch hash is calculated only on the hash + value of a file. The path and name of the file are not used in + the overall hash calculation. + + Returns + ------- + list_withhash : dict + Copy of our dictionary with the new file hashes included + with each file. 
+ hashvalue : str + The md5 hash value of the traited spec + + """ + + list_withhash = [] + list_nofilename = [] + for name, val in sorted(self.get().items()): + if not isdefined(val) or self.has_metadata(name, "nohash", True): + # skip undefined traits and traits with nohash=True + continue + + hash_files = (not self.has_metadata(name, "hash_files", False) and not + self.has_metadata(name, "name_source")) + list_nofilename.append((name, + self._get_sorteddict(val, hash_method=hash_method, + hash_files=hash_files))) + list_withhash.append((name, + self._get_sorteddict(val, True, hash_method=hash_method, + hash_files=hash_files))) + return list_withhash, md5(to_str(list_nofilename).encode()).hexdigest() + + def _get_sorteddict(self, objekt, dictwithhash=False, hash_method=None, + hash_files=True): + if isinstance(objekt, dict): + out = [] + for key, val in sorted(objekt.items()): + if isdefined(val): + out.append((key, + self._get_sorteddict(val, dictwithhash, + hash_method=hash_method, + hash_files=hash_files))) + elif isinstance(objekt, (list, tuple)): + out = [] + for val in objekt: + if isdefined(val): + out.append(self._get_sorteddict(val, dictwithhash, + hash_method=hash_method, + hash_files=hash_files)) + if isinstance(objekt, tuple): + out = tuple(out) + else: + if isdefined(objekt): + if (hash_files and isinstance(objekt, (str, bytes)) and + os.path.isfile(objekt)): + if hash_method is None: + hash_method = config.get('execution', 'hash_method') + + if hash_method.lower() == 'timestamp': + hash = hash_timestamp(objekt) + elif hash_method.lower() == 'content': + hash = hash_infile(objekt) + else: + raise Exception("Unknown hash method: %s" % hash_method) + if dictwithhash: + out = (objekt, hash) + else: + out = hash + elif isinstance(objekt, float): + out = FLOAT_FORMAT(objekt) + else: + out = objekt + return out + + +class TraitedSpec(BaseTraitedSpec): + """ Create a subclass with strict traits. + + This is used in 90% of the cases. 
+ """ + _ = traits.Disallow + + +class BaseInterfaceInputSpec(TraitedSpec): + ignore_exception = traits.Bool(False, usedefault=True, nohash=True, deprecated='1.0.0', + desc='Print an error message instead of throwing an exception ' + 'in case the interface fails to run') + + +class DynamicTraitedSpec(BaseTraitedSpec): + """ A subclass to handle dynamic traits + + This class is a workaround for add_traits and clone_traits not + functioning well together. + """ + + def __deepcopy__(self, memo): + """ bug in deepcopy for HasTraits results in weird cloning behavior for + added traits + """ + id_self = id(self) + if id_self in memo: + return memo[id_self] + dup_dict = deepcopy(self.get(), memo) + # access all keys + for key in self.copyable_trait_names(): + if key in self.__dict__.keys(): + _ = getattr(self, key) + # clone once + dup = self.clone_traits(memo=memo) + for key in self.copyable_trait_names(): + try: + _ = getattr(dup, key) + except: + pass + # clone twice + dup = self.clone_traits(memo=memo) + dup.trait_set(**dup_dict) + return dup + + +class CommandLineInputSpec(BaseInterfaceInputSpec): + args = traits.Str(argstr='%s', desc='Additional parameters to the command') + environ = traits.DictStrStr(desc='Environment variables', usedefault=True, + nohash=True) + # This input does not have a "usedefault=True" so the set_default_terminal_output() + # method would work + terminal_output = traits.Enum('stream', 'allatonce', 'file', 'none', + deprecated='1.0.0', + desc=('Control terminal output: `stream` - ' + 'displays to terminal immediately (default), ' + '`allatonce` - waits till command is ' + 'finished to display output, `file` - ' + 'writes output to file, `none` - output' + ' is ignored'), + nohash=True) + + +class StdOutCommandLineInputSpec(CommandLineInputSpec): + out_file = traits.File(argstr="> %s", position=-1, genfile=True) + + +class MpiCommandLineInputSpec(CommandLineInputSpec): + use_mpi = traits.Bool(False, + desc="Whether or not to run the command 
with mpiexec", + usedefault=True) + n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not " + "specify if this is managed externally (e.g. through " + "SGE)") diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py new file mode 100644 index 0000000000..f047cd120f --- /dev/null +++ b/nipype/interfaces/base/support.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" + +Miscellaneous tools to support Interface functionality +...................................................... + +""" +from __future__ import print_function, division, unicode_literals, absolute_import +from builtins import range, object, str + +import os +from copy import deepcopy + +import datetime +import locale + +from ... import logging +from ...utils.misc import is_container +from ...utils.filemanip import md5, to_str, hash_infile +iflogger = logging.getLogger('interface') + + +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return '{}'.format(self.value) + + +class Bunch(object): + """Dictionary-like class that provides attribute-style access to it's items. + + A `Bunch` is a simple container that stores it's items as class + attributes. Internally all items are stored in a dictionary and + the class exposes several of the dictionary methods. + + Examples + -------- + >>> from nipype.interfaces.base import Bunch + >>> inputs = Bunch(infile='subj.nii', fwhm=6.0, register_to_mean=True) + >>> inputs + Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=True) + >>> inputs.register_to_mean = False + >>> inputs + Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=False) + + Notes + ----- + The Bunch pattern came from the Python Cookbook: + + .. [1] A. Martelli, D. 
Hudgeon, "Collecting a Bunch of Named + Items", Python Cookbook, 2nd Ed, Chapter 4.18, 2005. + + """ + + def __init__(self, *args, **kwargs): + self.__dict__.update(*args, **kwargs) + + def update(self, *args, **kwargs): + """update existing attribute, or create new attribute + + Note: update is very much like HasTraits.set""" + self.__dict__.update(*args, **kwargs) + + def items(self): + """iterates over bunch attributes as key, value pairs""" + return list(self.__dict__.items()) + + def iteritems(self): + """iterates over bunch attributes as key, value pairs""" + iflogger.warning('iteritems is deprecated, use items instead') + return list(self.items()) + + def get(self, *args): + """Support dictionary get() functionality + """ + return self.__dict__.get(*args) + + def set(self, **kwargs): + """Support dictionary get() functionality + """ + return self.__dict__.update(**kwargs) + + def dictcopy(self): + """returns a deep copy of existing Bunch as a dictionary""" + return deepcopy(self.__dict__) + + def __repr__(self): + """representation of the sorted Bunch as a string + + Currently, this string representation of the `inputs` Bunch of + interfaces is hashed to determine if the process' dirty-bit + needs setting or not. Till that mechanism changes, only alter + this after careful consideration. + """ + outstr = ['Bunch('] + first = True + for k, v in sorted(self.items()): + if not first: + outstr.append(', ') + if isinstance(v, dict): + pairs = [] + for key, value in sorted(v.items()): + pairs.append("'%s': %s" % (key, value)) + v = '{' + ', '.join(pairs) + '}' + outstr.append('%s=%s' % (k, v)) + else: + outstr.append('%s=%r' % (k, v)) + first = False + outstr.append(')') + return ''.join(outstr) + + def _get_bunch_hash(self): + """Return a dictionary of our items with hashes for each file. + + Searches through dictionary items and if an item is a file, it + calculates the md5 hash of the file contents and stores the + file name and hash value as the new key value. 
+ + However, the overall bunch hash is calculated only on the hash + value of a file. The path and name of the file are not used in + the overall hash calculation. + + Returns + ------- + dict_withhash : dict + Copy of our dictionary with the new file hashes included + with each file. + hashvalue : str + The md5 hash value of the `dict_withhash` + + """ + + infile_list = [] + for key, val in list(self.items()): + if is_container(val): + # XXX - SG this probably doesn't catch numpy arrays + # containing embedded file names either. + if isinstance(val, dict): + # XXX - SG should traverse dicts, but ignoring for now + item = None + else: + if len(val) == 0: + raise AttributeError('%s attribute is empty' % key) + item = val[0] + else: + item = val + try: + if isinstance(item, str) and os.path.isfile(item): + infile_list.append(key) + except TypeError: + # `item` is not a file or string. + continue + dict_withhash = self.dictcopy() + dict_nofilename = self.dictcopy() + for item in infile_list: + dict_withhash[item] = _hash_bunch_dict(dict_withhash, item) + dict_nofilename[item] = [val[1] for val in dict_withhash[item]] + # Sort the items of the dictionary, before hashing the string + # representation so we get a predictable order of the + # dictionary. 
+ sorted_dict = to_str(sorted(dict_nofilename.items())) + return dict_withhash, md5(sorted_dict.encode()).hexdigest() + + def __pretty__(self, p, cycle): + """Support for the pretty module + + pretty is included in ipython.externals for ipython > 0.10""" + if cycle: + p.text('Bunch(...)') + else: + p.begin_group(6, 'Bunch(') + first = True + for k, v in sorted(self.items()): + if not first: + p.text(',') + p.breakable() + p.text(k + '=') + p.pretty(v) + first = False + p.end_group(6, ')') + + +def _hash_bunch_dict(adict, key): + """Inject file hashes into adict[key]""" + stuff = adict[key] + if not is_container(stuff): + stuff = [stuff] + return [(afile, hash_infile(afile)) + for afile in stuff] + + +class InterfaceResult(object): + """Object that contains the results of running a particular Interface. + + Attributes + ---------- + version : version of this Interface result object (a readonly property) + interface : class type + A copy of the `Interface` class that was run to generate this result. + inputs : a traits free representation of the inputs + outputs : Bunch + An `Interface` specific Bunch that contains all possible files + that are generated by the interface. The `outputs` are used + as the `inputs` to another node when interfaces are used in + the pipeline. + runtime : Bunch + + Contains attributes that describe the runtime environment when + the `Interface` was run. Contains the attributes: + + * cmdline : The command line string that was executed + * cwd : The directory the ``cmdline`` was executed in. + * stdout : The output of running the ``cmdline``. + * stderr : Any error messages output from running ``cmdline``. + * returncode : The code returned from running the ``cmdline``. 
+ + """ + + def __init__(self, interface, runtime, inputs=None, outputs=None, + provenance=None): + self._version = 2.0 + self.interface = interface + self.runtime = runtime + self.inputs = inputs + self.outputs = outputs + self.provenance = provenance + + @property + def version(self): + return self._version + + +class Stream(object): + """Function to capture stdout and stderr streams with timestamps + + stackoverflow.com/questions/4984549/merge-and-sync-stdout-and-stderr/5188359 + """ + + def __init__(self, name, impl): + self._name = name + self._impl = impl + self._buf = '' + self._rows = [] + self._lastidx = 0 + self.default_encoding = locale.getdefaultlocale()[1] or 'UTF-8' + + def fileno(self): + "Pass-through for file descriptor." + return self._impl.fileno() + + def read(self, drain=0): + "Read from the file descriptor. If 'drain' set, read until EOF." + while self._read(drain) is not None: + if not drain: + break + + def _read(self, drain): + "Read from the file descriptor" + fd = self.fileno() + buf = os.read(fd, 4096).decode(self.default_encoding) + if not buf and not self._buf: + return None + if '\n' not in buf: + if not drain: + self._buf += buf + return [] + + # prepend any data previously read, then split into lines and format + buf = self._buf + buf + if '\n' in buf: + tmp, rest = buf.rsplit('\n', 1) + else: + tmp = buf + rest = None + self._buf = rest + now = datetime.datetime.now().isoformat() + rows = tmp.split('\n') + self._rows += [(now, '%s %s:%s' % (self._name, now, r), r) + for r in rows] + for idx in range(self._lastidx, len(self._rows)): + iflogger.info(self._rows[idx][1]) + self._lastidx = len(self._rows) + + +def load_template(name): + """ + Deprecated stub for backwards compatibility, + please use nipype.interfaces.fsl.model.load_template + + """ + from ..fsl.model import load_template + iflogger.warning( + 'Deprecated in 1.0.0, and will be removed in 1.1.0, ' + 'please use nipype.interfaces.fsl.model.load_template instead.' 
+ ) + return load_template(name) diff --git a/nipype/interfaces/base/tests/__init__.py b/nipype/interfaces/base/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/base/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/tests/test_auto_BaseInterface.py b/nipype/interfaces/base/tests/test_auto_BaseInterface.py similarity index 76% rename from nipype/interfaces/tests/test_auto_BaseInterface.py rename to nipype/interfaces/base/tests/test_auto_BaseInterface.py index 9c1f2cfaa6..33652036c7 100644 --- a/nipype/interfaces/tests/test_auto_BaseInterface.py +++ b/nipype/interfaces/base/tests/test_auto_BaseInterface.py @@ -1,10 +1,11 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..base import BaseInterface +from ..core import BaseInterface def test_BaseInterface_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ) diff --git a/nipype/interfaces/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py similarity index 75% rename from nipype/interfaces/tests/test_auto_CommandLine.py rename to nipype/interfaces/base/tests/test_auto_CommandLine.py index 01f7c8f6fb..8154f73a3d 100644 --- a/nipype/interfaces/tests/test_auto_CommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -1,6 +1,6 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..base import CommandLine +from ..core import CommandLine def test_CommandLine_inputs(): @@ -9,10 +9,12 @@ def test_CommandLine_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), 
) inputs = CommandLine.input_spec() diff --git a/nipype/interfaces/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py similarity index 77% rename from nipype/interfaces/tests/test_auto_MpiCommandLine.py rename to nipype/interfaces/base/tests/test_auto_MpiCommandLine.py index f1bc2486b2..644de736ba 100644 --- a/nipype/interfaces/tests/test_auto_MpiCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -1,6 +1,6 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..base import MpiCommandLine +from ..core import MpiCommandLine def test_MpiCommandLine_inputs(): @@ -9,11 +9,13 @@ def test_MpiCommandLine_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), n_procs=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_mpi=dict(usedefault=True, ), diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py new file mode 100644 index 0000000000..98ee386ee4 --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import SEMLikeCommandLine + + +def test_SEMLikeCommandLine_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = SEMLikeCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + diff --git 
a/nipype/interfaces/base/tests/test_auto_SimpleInterface.py b/nipype/interfaces/base/tests/test_auto_SimpleInterface.py new file mode 100644 index 0000000000..6c19b125f2 --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_SimpleInterface.py @@ -0,0 +1,17 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import SimpleInterface + + +def test_SimpleInterface_inputs(): + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + ) + inputs = SimpleInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + diff --git a/nipype/interfaces/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py similarity index 78% rename from nipype/interfaces/tests/test_auto_StdOutCommandLine.py rename to nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py index 46a0974b34..a876e1b61e 100644 --- a/nipype/interfaces/tests/test_auto_StdOutCommandLine.py +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -1,6 +1,6 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..base import StdOutCommandLine +from ..core import StdOutCommandLine def test_StdOutCommandLine_inputs(): @@ -9,14 +9,16 @@ def test_StdOutCommandLine_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_file=dict(argstr='> %s', genfile=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = StdOutCommandLine.input_spec() diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py new file mode 100644 index 0000000000..1eb2cf4b42 --- /dev/null +++ 
b/nipype/interfaces/base/tests/test_core.py @@ -0,0 +1,473 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +from future import standard_library +from builtins import open +import os +import simplejson as json + +import pytest + +from .... import config +from ....testing import example_data +from ... import base as nib + +standard_library.install_aliases() + + +def check_dict(ref_dict, tst_dict): + """Compare dictionaries of inputs and and those loaded from json files""" + def to_list(x): + if isinstance(x, tuple): + x = list(x) + + if isinstance(x, list): + for i, xel in enumerate(x): + x[i] = to_list(xel) + + return x + + failed_dict = {} + for key, value in list(ref_dict.items()): + newval = to_list(tst_dict[key]) + if newval != value: + failed_dict[key] = (value, newval) + return failed_dict + + +def test_Interface(): + assert nib.Interface.input_spec is None + assert nib.Interface.output_spec is None + with pytest.raises(NotImplementedError): + nib.Interface() + with pytest.raises(NotImplementedError): + nib.Interface.help() + with pytest.raises(NotImplementedError): + nib.Interface._inputs_help() + with pytest.raises(NotImplementedError): + nib.Interface._outputs_help() + with pytest.raises(NotImplementedError): + nib.Interface._outputs() + + class DerivedInterface(nib.Interface): + def __init__(self): + pass + + nif = DerivedInterface() + with pytest.raises(NotImplementedError): + nif.run() + with pytest.raises(NotImplementedError): + nif.aggregate_outputs() + with pytest.raises(NotImplementedError): + nif._list_outputs() + with pytest.raises(NotImplementedError): + nif._get_filecopy_info() + + +def test_BaseInterface(): + config.set('monitoring', 'enable', '0') + + assert nib.BaseInterface.help() is None + assert nib.BaseInterface._get_filecopy_info() == [] + + class InputSpec(nib.TraitedSpec): + foo = 
nib.traits.Int(desc='a random int') + goo = nib.traits.Int(desc='a random int', mandatory=True) + moo = nib.traits.Int(desc='a random int', mandatory=False) + hoo = nib.traits.Int(desc='a random int', usedefault=True) + zoo = nib.File(desc='a file', copyfile=False) + woo = nib.File(desc='a file', copyfile=True) + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class DerivedInterface(nib.BaseInterface): + input_spec = InputSpec + resource_monitor = False + + assert DerivedInterface.help() is None + assert 'moo' in ''.join(DerivedInterface._inputs_help()) + assert DerivedInterface()._outputs() is None + assert DerivedInterface._get_filecopy_info()[0]['key'] == 'woo' + assert DerivedInterface._get_filecopy_info()[0]['copy'] + assert DerivedInterface._get_filecopy_info()[1]['key'] == 'zoo' + assert not DerivedInterface._get_filecopy_info()[1]['copy'] + assert DerivedInterface().inputs.foo == nib.Undefined + with pytest.raises(ValueError): + DerivedInterface()._check_mandatory_inputs() + assert DerivedInterface(goo=1)._check_mandatory_inputs() is None + with pytest.raises(ValueError): + DerivedInterface().run() + with pytest.raises(NotImplementedError): + DerivedInterface(goo=1).run() + + class DerivedInterface2(DerivedInterface): + output_spec = OutputSpec + + def _run_interface(self, runtime): + return runtime + + assert DerivedInterface2.help() is None + assert DerivedInterface2()._outputs().foo == nib.Undefined + with pytest.raises(NotImplementedError): + DerivedInterface2(goo=1).run() + + default_inpu_spec = nib.BaseInterface.input_spec + nib.BaseInterface.input_spec = None + with pytest.raises(Exception): + nib.BaseInterface() + nib.BaseInterface.input_spec = default_inpu_spec + + +def test_BaseInterface_load_save_inputs(tmpdir): + tmp_json = tmpdir.join('settings.json').strpath + + class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int() + input2 = nib.traits.Float() + input3 = nib.traits.Bool() + input4 = 
nib.traits.Str() + + class DerivedInterface(nib.BaseInterface): + input_spec = InputSpec + + def __init__(self, **inputs): + super(DerivedInterface, self).__init__(**inputs) + + inputs_dict = {'input1': 12, 'input3': True, + 'input4': 'some string'} + bif = DerivedInterface(**inputs_dict) + bif.save_inputs_to_json(tmp_json) + bif2 = DerivedInterface() + bif2.load_inputs_from_json(tmp_json) + assert bif2.inputs.get_traitsfree() == inputs_dict + + bif3 = DerivedInterface(from_file=tmp_json) + assert bif3.inputs.get_traitsfree() == inputs_dict + + inputs_dict2 = inputs_dict.copy() + inputs_dict2.update({'input4': 'some other string'}) + bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4']) + assert bif4.inputs.get_traitsfree() == inputs_dict2 + + bif5 = DerivedInterface(input4=inputs_dict2['input4']) + bif5.load_inputs_from_json(tmp_json, overwrite=False) + assert bif5.inputs.get_traitsfree() == inputs_dict2 + + bif6 = DerivedInterface(input4=inputs_dict2['input4']) + bif6.load_inputs_from_json(tmp_json) + assert bif6.inputs.get_traitsfree() == inputs_dict + + # test get hashval in a complex interface + from nipype.interfaces.ants import Registration + settings = example_data(example_data('smri_ants_registration_settings.json')) + with open(settings) as setf: + data_dict = json.load(setf) + + tsthash = Registration() + tsthash.load_inputs_from_json(settings) + assert {} == check_dict(data_dict, tsthash.inputs.get_traitsfree()) + + tsthash2 = Registration(from_file=settings) + assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree()) + + _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp') + assert 'ec5755e07287e04a4b409e03b77a517c' == hashvalue + + +def test_input_version(): + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + obj = DerivedInterface1() + obj._check_version_requirements(obj.inputs) + + 
config.set('execution', 'stop_on_unknown_version', True) + + with pytest.raises(Exception): + obj._check_version_requirements(obj.inputs) + + config.set_default_config() + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + _version = '0.8' + obj = DerivedInterface1() + obj.inputs.foo = 1 + with pytest.raises(Exception): + obj._check_version_requirements() + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + _version = '0.10' + obj = DerivedInterface1() + obj._check_version_requirements(obj.inputs) + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + _version = '0.9' + obj = DerivedInterface1() + obj.inputs.foo = 1 + obj._check_version_requirements(obj.inputs) + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', max_ver='0.7') + + class DerivedInterface2(nib.BaseInterface): + input_spec = InputSpec + _version = '0.8' + obj = DerivedInterface2() + obj.inputs.foo = 1 + with pytest.raises(Exception): + obj._check_version_requirements() + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', max_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + _version = '0.9' + obj = DerivedInterface1() + obj.inputs.foo = 1 + obj._check_version_requirements(obj.inputs) + + +def test_output_version(): + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + _version = '0.10' + resource_monitor = False + + obj = 
DerivedInterface1() + assert obj._check_version_requirements(obj._outputs()) == [] + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.11') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + _version = '0.10' + resource_monitor = False + + obj = DerivedInterface1() + assert obj._check_version_requirements(obj._outputs()) == ['foo'] + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.11') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + _version = '0.10' + resource_monitor = False + + def _run_interface(self, runtime): + return runtime + + def _list_outputs(self): + return {'foo': 1} + obj = DerivedInterface1() + with pytest.raises(KeyError): + obj.run() + + +def test_Commandline(): + with pytest.raises(Exception): + nib.CommandLine() + ci = nib.CommandLine(command='which') + assert ci.cmd == 'which' + assert ci.inputs.args == nib.Undefined + ci2 = nib.CommandLine(command='which', args='ls') + assert ci2.cmdline == 'which ls' + ci3 = nib.CommandLine(command='echo') + ci3.resource_monitor = False + ci3.inputs.environ = {'MYENV': 'foo'} + res = ci3.run() + assert res.runtime.environ['MYENV'] == 'foo' + assert res.outputs is None + + class CommandLineInputSpec1(nib.CommandLineInputSpec): + foo = nib.Str(argstr='%s', desc='a str') + goo = nib.traits.Bool(argstr='-g', desc='a bool', position=0) + hoo = nib.traits.List(argstr='-l %s', desc='a list') + moo = nib.traits.List(argstr='-i %d...', desc='a repeated list', + position=-1) + noo = nib.traits.Int(argstr='-x %d', desc='an int') + roo = nib.traits.Str(desc='not on command line') + soo = nib.traits.Bool(argstr="-soo") + + nib.CommandLine.input_spec = CommandLineInputSpec1 + 
ci4 = nib.CommandLine(command='cmd') + ci4.inputs.foo = 'foo' + ci4.inputs.goo = True + ci4.inputs.hoo = ['a', 'b'] + ci4.inputs.moo = [1, 2, 3] + ci4.inputs.noo = 0 + ci4.inputs.roo = 'hello' + ci4.inputs.soo = False + cmd = ci4._parse_inputs() + assert cmd[0] == '-g' + assert cmd[-1] == '-i 1 -i 2 -i 3' + assert 'hello' not in ' '.join(cmd) + assert '-soo' not in ' '.join(cmd) + ci4.inputs.soo = True + cmd = ci4._parse_inputs() + assert '-soo' in ' '.join(cmd) + + class CommandLineInputSpec2(nib.CommandLineInputSpec): + foo = nib.File(argstr='%s', desc='a str', genfile=True) + nib.CommandLine.input_spec = CommandLineInputSpec2 + ci5 = nib.CommandLine(command='cmd') + with pytest.raises(NotImplementedError): + ci5._parse_inputs() + + class DerivedClass(nib.CommandLine): + input_spec = CommandLineInputSpec2 + + def _gen_filename(self, name): + return 'filename' + + ci6 = DerivedClass(command='cmd') + assert ci6._parse_inputs()[0] == 'filename' + nib.CommandLine.input_spec = nib.CommandLineInputSpec + + +def test_Commandline_environ(monkeypatch, tmpdir): + from nipype import config + config.set_default_config() + + tmpdir.chdir() + monkeypatch.setitem(os.environ, 'DISPLAY', ':1') + # Test environment + ci3 = nib.CommandLine(command='echo') + res = ci3.run() + assert res.runtime.environ['DISPLAY'] == ':1' + + # Test display_variable option + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + config.set('execution', 'display_variable', ':3') + res = ci3.run() + assert 'DISPLAY' not in ci3.inputs.environ + assert 'DISPLAY' not in res.runtime.environ + + # If the interface has _redirect_x then yes, it should be set + ci3._redirect_x = True + res = ci3.run() + assert res.runtime.environ['DISPLAY'] == ':3' + + # Test overwrite + monkeypatch.setitem(os.environ, 'DISPLAY', ':1') + ci3.inputs.environ = {'DISPLAY': ':2'} + res = ci3.run() + assert res.runtime.environ['DISPLAY'] == ':2' + + +def test_CommandLine_output(tmpdir): + # Create one file + tmpdir.chdir() + 
file = tmpdir.join('foo.txt') + file.write('123456\n') + name = os.path.basename(file.strpath) + + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'allatonce' + res = ci.run() + assert res.runtime.merged == '' + assert name in res.runtime.stdout + + # Check stdout is written + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file_stdout' + res = ci.run() + assert os.path.isfile('stdout.nipype') + assert name in res.runtime.stdout + tmpdir.join('stdout.nipype').remove(ignore_errors=True) + + # Check stderr is written + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file_stderr' + res = ci.run() + assert os.path.isfile('stderr.nipype') + tmpdir.join('stderr.nipype').remove(ignore_errors=True) + + # Check outputs are thrown away + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'none' + res = ci.run() + assert res.runtime.stdout == '' and \ + res.runtime.stderr == '' and \ + res.runtime.merged == '' + + # Check that new interfaces are set to default 'stream' + ci = nib.CommandLine(command='ls -l') + res = ci.run() + assert ci.terminal_output == 'stream' + assert name in res.runtime.stdout and \ + res.runtime.stderr == '' + + # Check only one file is generated + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file' + res = ci.run() + assert os.path.isfile('output.nipype') + assert name in res.runtime.merged and \ + res.runtime.stdout == '' and \ + res.runtime.stderr == '' + tmpdir.join('output.nipype').remove(ignore_errors=True) + + # Check split files are generated + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file_split' + res = ci.run() + assert os.path.isfile('stdout.nipype') + assert os.path.isfile('stderr.nipype') + assert name in res.runtime.stdout + + +def test_global_CommandLine_output(tmpdir): + """Ensures CommandLine.set_default_terminal_output works""" + from nipype.interfaces.fsl import BET + + ci = nib.CommandLine(command='ls -l') + assert ci.terminal_output == 'stream' 
# default case + + ci = BET() + assert ci.terminal_output == 'stream' # default case + + nib.CommandLine.set_default_terminal_output('allatonce') + ci = nib.CommandLine(command='ls -l') + assert ci.terminal_output == 'allatonce' + + nib.CommandLine.set_default_terminal_output('file') + ci = nib.CommandLine(command='ls -l') + assert ci.terminal_output == 'file' + + # Check default affects derived interfaces + ci = BET() + assert ci.terminal_output == 'file' diff --git a/nipype/interfaces/base/tests/test_resource_monitor.py b/nipype/interfaces/base/tests/test_resource_monitor.py new file mode 100644 index 0000000000..88e71921c4 --- /dev/null +++ b/nipype/interfaces/base/tests/test_resource_monitor.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Module to unit test the resource_monitor in nipype +""" + +from __future__ import print_function, division, unicode_literals, absolute_import +import os +import pytest + +# Import packages +from .... import config +from ....utils.profiler import _use_resources +from ...base import traits, CommandLine, CommandLineInputSpec +from ... 
import utility as niu + +# Try to enable the resource monitor +config.enable_resource_monitor() +run_profile = config.resource_monitor + + +class UseResourcesInputSpec(CommandLineInputSpec): + mem_gb = traits.Float(desc='Number of GB of RAM to use', + argstr='-g %f', mandatory=True) + n_procs = traits.Int(desc='Number of threads to use', + argstr='-p %d', mandatory=True) + + +class UseResources(CommandLine): + """ + use_resources cmd interface + """ + from nipype import __path__ + # Init attributes + input_spec = UseResourcesInputSpec + + # Get path of executable + exec_dir = os.path.realpath(__path__[0]) + exec_path = os.path.join(exec_dir, 'utils', 'tests', 'use_resources') + + # Init cmd + _cmd = exec_path + _always_run = True + + +@pytest.mark.skip(reason="inconsistent readings") +@pytest.mark.skipif(os.getenv('CI_SKIP_TEST', False), reason='disabled in CI tests') +@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)]) +def test_cmdline_profiling(tmpdir, mem_gb, n_procs): + """ + Test runtime profiler correctly records workflow RAM/CPUs consumption + of a CommandLine-derived interface + """ + from nipype import config + config.set('monitoring', 'sample_frequency', '0.2') # Force sampling fast + + tmpdir.chdir() + iface = UseResources(mem_gb=mem_gb, n_procs=n_procs) + result = iface.run() + + assert abs(mem_gb - result.runtime.mem_peak_gb) < 0.3, 'estimated memory error above .3GB' + assert int(result.runtime.cpu_percent / 100 + 0.2) == n_procs, 'wrong number of threads estimated' + + +@pytest.mark.skipif(True, reason='test disabled temporarily, until funcion profiling works') +@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), (1.5, 1)]) +def test_function_profiling(tmpdir, mem_gb, n_procs): + """ + Test runtime profiler correctly records workflow RAM/CPUs consumption + of a Function interface + """ + from nipype import config + config.set('monitoring', 'sample_frequency', '0.2') # Force sampling fast + + 
tmpdir.chdir() + iface = niu.Function(function=_use_resources) + iface.inputs.mem_gb = mem_gb + iface.inputs.n_procs = n_procs + result = iface.run() + + assert abs(mem_gb - result.runtime.mem_peak_gb) < 0.3, 'estimated memory error above .3GB' + assert int(result.runtime.cpu_percent / 100 + 0.2) >= n_procs diff --git a/nipype/interfaces/base/tests/test_specs.py b/nipype/interfaces/base/tests/test_specs.py new file mode 100644 index 0000000000..168e021339 --- /dev/null +++ b/nipype/interfaces/base/tests/test_specs.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +from future import standard_library +import os +import warnings + +import pytest + +from ....utils.filemanip import split_filename +from ... import base as nib +from ...base import traits, Undefined + +standard_library.install_aliases() + + +@pytest.fixture(scope="module") +def setup_file(request, tmpdir_factory): + tmp_dir = tmpdir_factory.mktemp('files') + tmp_infile = tmp_dir.join('foo.txt') + with tmp_infile.open('w') as fp: + fp.writelines(['123456789']) + + tmp_dir.chdir() + + return tmp_infile.strpath + + +def test_TraitedSpec(): + assert nib.TraitedSpec().get_hashval() + assert nib.TraitedSpec().__repr__() == '\n\n' + + class spec(nib.TraitedSpec): + foo = nib.traits.Int + goo = nib.traits.Float(usedefault=True) + + assert spec().foo == Undefined + assert spec().goo == 0.0 + specfunc = lambda x: spec(hoo=x) + with pytest.raises(nib.traits.TraitError): + specfunc(1) + infields = spec(foo=1) + hashval = ([('foo', 1), ('goo', '0.0000000000')], 'e89433b8c9141aa0fda2f8f4d662c047') + assert infields.get_hashval() == hashval + assert infields.__repr__() == '\nfoo = 1\ngoo = 0.0\n' + + +@pytest.mark.skip +def test_TraitedSpec_dynamic(): + from pickle import dumps, loads + a = nib.BaseTraitedSpec() + a.add_trait('foo', nib.traits.Int) + 
a.foo = 1 + assign_a = lambda: setattr(a, 'foo', 'a') + with pytest.raises(Exception): + assign_a + pkld_a = dumps(a) + unpkld_a = loads(pkld_a) + assign_a_again = lambda: setattr(unpkld_a, 'foo', 'a') + with pytest.raises(Exception): + assign_a_again + + +def test_TraitedSpec_logic(): + class spec3(nib.TraitedSpec): + _xor_inputs = ('foo', 'bar') + + foo = nib.traits.Int(xor=_xor_inputs, + desc='foo or bar, not both') + bar = nib.traits.Int(xor=_xor_inputs, + desc='bar or foo, not both') + kung = nib.traits.Float(requires=('foo',), + position=0, + desc='kung foo') + + class out3(nib.TraitedSpec): + output = nib.traits.Int + + class MyInterface(nib.BaseInterface): + input_spec = spec3 + output_spec = out3 + + myif = MyInterface() + # NOTE_dj, FAIL: I don't get a TypeError, only a UserWarning + # with pytest.raises(TypeError): + # setattr(myif.inputs, 'kung', 10.0) + myif.inputs.foo = 1 + assert myif.inputs.foo == 1 + set_bar = lambda: setattr(myif.inputs, 'bar', 1) + with pytest.raises(IOError): + set_bar() + assert myif.inputs.foo == 1 + myif.inputs.kung = 2 + assert myif.inputs.kung == 2.0 + + +def test_deprecation(): + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec1(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='0.1') + spec_instance = DeprecationSpec1() + set_foo = lambda: setattr(spec_instance, 'foo', 1) + with pytest.raises(nib.TraitError): + set_foo() + assert len(w) == 0, 'no warnings, just errors' + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec2(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='100', new_name='bar') + spec_instance = DeprecationSpec2() + set_foo = lambda: setattr(spec_instance, 'foo', 1) + with pytest.raises(nib.TraitError): + set_foo() + assert len(w) == 0, 'no warnings, just errors' + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', 
'', UserWarning) + + class DeprecationSpec3(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='1000', new_name='bar') + bar = nib.traits.Int() + spec_instance = DeprecationSpec3() + not_raised = True + try: + spec_instance.foo = 1 + except nib.TraitError: + not_raised = False + assert not_raised + assert len(w) == 1, 'deprecated warning 1 %s' % [w1.message for w1 in w] + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec3(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='1000', new_name='bar') + bar = nib.traits.Int() + spec_instance = DeprecationSpec3() + not_raised = True + try: + spec_instance.foo = 1 + except nib.TraitError: + not_raised = False + assert not_raised + assert spec_instance.foo == Undefined + assert spec_instance.bar == 1 + assert len(w) == 1, 'deprecated warning 2 %s' % [w1.message for w1 in w] + + +def test_namesource(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec2(nib.CommandLineInputSpec): + moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", + position=2) + doo = nib.File(exists=True, argstr="%s", position=1) + goo = traits.Int(argstr="%d", position=4) + poo = nib.File(name_source=['goo'], hash_files=False, argstr="%s", + position=3) + + class TestName(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec2 + testobj = TestName() + testobj.inputs.doo = tmp_infile + testobj.inputs.goo = 99 + assert '%s_generated' % nme in testobj.cmdline + assert '%d_generated' % testobj.inputs.goo in testobj.cmdline + testobj.inputs.moo = "my_%s_template" + assert 'my_%s_template' % nme in testobj.cmdline + + +def test_chained_namesource(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec2(nib.CommandLineInputSpec): + doo = nib.File(exists=True, argstr="%s", position=1) + moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", + position=2, 
name_template='%s_mootpl') + poo = nib.File(name_source=['moo'], hash_files=False, + argstr="%s", position=3) + + class TestName(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec2 + + testobj = TestName() + testobj.inputs.doo = tmp_infile + res = testobj.cmdline + assert '%s' % tmp_infile in res + assert '%s_mootpl ' % nme in res + assert '%s_mootpl_generated' % nme in res + + +def test_cycle_namesource1(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec3(nib.CommandLineInputSpec): + moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", + position=1, name_template='%s_mootpl') + poo = nib.File(name_source=['moo'], hash_files=False, + argstr="%s", position=2) + doo = nib.File(name_source=['poo'], hash_files=False, + argstr="%s", position=3) + + class TestCycle(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec3 + + # Check that an exception is raised + to0 = TestCycle() + not_raised = True + try: + to0.cmdline + except nib.NipypeInterfaceError: + not_raised = False + assert not not_raised + + +def test_cycle_namesource2(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec3(nib.CommandLineInputSpec): + moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", + position=1, name_template='%s_mootpl') + poo = nib.File(name_source=['moo'], hash_files=False, + argstr="%s", position=2) + doo = nib.File(name_source=['poo'], hash_files=False, + argstr="%s", position=3) + + class TestCycle(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec3 + + # Check that loop can be broken by setting one of the inputs + to1 = TestCycle() + to1.inputs.poo = tmp_infile + + not_raised = True + try: + res = to1.cmdline + except nib.NipypeInterfaceError: + not_raised = False + print(res) + + assert not_raised + assert '%s' % tmp_infile in res + assert '%s_generated' % nme in res + assert '%s_generated_mootpl' % nme in res + + +def 
test_TraitedSpec_withFile(setup_file): + tmp_infile = setup_file + tmpd, nme = os.path.split(tmp_infile) + assert os.path.exists(tmp_infile) + + class spec2(nib.TraitedSpec): + moo = nib.File(exists=True) + doo = nib.traits.List(nib.File(exists=True)) + infields = spec2(moo=tmp_infile, doo=[tmp_infile]) + hashval = infields.get_hashval(hash_method='content') + assert hashval[1] == 'a00e9ee24f5bfa9545a515b7a759886b' + + +def test_TraitedSpec_withNoFileHashing(setup_file): + tmp_infile = setup_file + tmpd, nme = os.path.split(tmp_infile) + assert os.path.exists(tmp_infile) + + class spec2(nib.TraitedSpec): + moo = nib.File(exists=True, hash_files=False) + doo = nib.traits.List(nib.File(exists=True)) + infields = spec2(moo=nme, doo=[tmp_infile]) + hashval = infields.get_hashval(hash_method='content') + assert hashval[1] == '8da4669ff5d72f670a46ea3e7a203215' + + class spec3(nib.TraitedSpec): + moo = nib.File(exists=True, name_source="doo") + doo = nib.traits.List(nib.File(exists=True)) + infields = spec3(moo=nme, doo=[tmp_infile]) + hashval1 = infields.get_hashval(hash_method='content') + + class spec4(nib.TraitedSpec): + moo = nib.File(exists=True) + doo = nib.traits.List(nib.File(exists=True)) + infields = spec4(moo=nme, doo=[tmp_infile]) + hashval2 = infields.get_hashval(hash_method='content') + assert hashval1[1] != hashval2[1] + + +def test_ImageFile(): + x = nib.BaseInterface().inputs + + # setup traits + x.add_trait('nifti', nib.ImageFile(types=['nifti1', 'dicom'])) + x.add_trait('anytype', nib.ImageFile()) + x.add_trait('newtype', nib.ImageFile(types=['nifti10'])) + x.add_trait('nocompress', nib.ImageFile(types=['mgh'], + allow_compressed=False)) + + with pytest.raises(nib.TraitError): + x.nifti = 'test.mgz' + x.nifti = 'test.nii' + x.anytype = 'test.xml' + with pytest.raises(AttributeError): + x.newtype = 'test.nii' + with pytest.raises(nib.TraitError): + x.nocompress = 'test.nii.gz' + x.nocompress = 'test.mgh' diff --git 
a/nipype/interfaces/base/tests/test_support.py b/nipype/interfaces/base/tests/test_support.py new file mode 100644 index 0000000000..260a5eb882 --- /dev/null +++ b/nipype/interfaces/base/tests/test_support.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +import os +import pytest +from builtins import open +from future import standard_library +from pkg_resources import resource_filename as pkgrf + +from ....utils.filemanip import md5 +from ... import base as nib + +standard_library.install_aliases() + + +@pytest.mark.parametrize("args", [ + {}, + {'a': 1, 'b': [2, 3]} +]) +def test_bunch(args): + b = nib.Bunch(**args) + assert b.__dict__ == args + + +def test_bunch_attribute(): + b = nib.Bunch(a=1, b=[2, 3], c=None) + assert b.a == 1 + assert b.b == [2, 3] + assert b.c is None + + +def test_bunch_repr(): + b = nib.Bunch(b=2, c=3, a=dict(n=1, m=2)) + assert repr(b) == "Bunch(a={'m': 2, 'n': 1}, b=2, c=3)" + + +def test_bunch_methods(): + b = nib.Bunch(a=2) + b.update(a=3) + newb = b.dictcopy() + assert b.a == 3 + assert b.get('a') == 3 + assert b.get('badkey', 'otherthing') == 'otherthing' + assert b != newb + assert type(dict()) == type(newb) + assert newb['a'] == 3 + + +def test_bunch_hash(): + # NOTE: Since the path to the json file is included in the Bunch, + # the hash will be unique to each machine. + json_pth = pkgrf( + 'nipype', os.path.join('testing', 'data', 'realign_json.json')) + + b = nib.Bunch(infile=json_pth, + otherthing='blue', + yat=True) + newbdict, bhash = b._get_bunch_hash() + assert bhash == 'd1f46750044c3de102efc847720fc35f' + # Make sure the hash stored in the json file for `infile` is correct. 
+ jshash = md5() + with open(json_pth, 'r') as fp: + jshash.update(fp.read().encode('utf-8')) + assert newbdict['infile'][0][1] == jshash.hexdigest() + assert newbdict['yat'] is True diff --git a/nipype/interfaces/traits_extension.py b/nipype/interfaces/base/traits_extension.py similarity index 71% rename from nipype/interfaces/traits_extension.py rename to nipype/interfaces/base/traits_extension.py index 0e84c15bce..18bdd003c2 100644 --- a/nipype/interfaces/traits_extension.py +++ b/nipype/interfaces/base/traits_extension.py @@ -1,7 +1,11 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""This module contains Trait classes that we've pulled from the +""" +Traits extension +................ + +This module contains Trait classes that we've pulled from the traits source and fixed due to various bugs. File and Directory are redefined as the release version had dependencies on TraitsUI, which we do not want Nipype to depend on. At least not yet. 
@@ -18,13 +22,12 @@ """ from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import filter, object, str, bytes +from builtins import str, bytes import os +import collections # perform all external trait imports here -import traits -if traits.__version__ < '3.7.0': - raise ImportError('Traits version 3.7.0 or higher must be installed') +from traits import __version__ as traits_version import traits.api as traits from traits.trait_handlers import TraitDictObject, TraitListObject from traits.trait_errors import TraitError @@ -32,9 +35,23 @@ from traits.api import BaseUnicode from traits.api import Unicode +from future import standard_library + +if traits_version < '3.7.0': + raise ImportError('Traits version 3.7.0 or higher must be installed') + +standard_library.install_aliases() + +class Str(Unicode): + """Replacement for the default traits.Str based in bytes""" + + +# Monkeypatch Str and DictStrStr for Python 2 compatibility +traits.Str = Str DictStrStr = traits.Dict((bytes, str), (bytes, str)) -Str = Unicode +traits.DictStrStr = DictStrStr + class BaseFile(BaseUnicode): """ Defines a trait whose value must be the name of a file. 
@@ -227,16 +244,17 @@ def __init__(self, value='', auto_set=False, entries=0, # - uncompressed (tuple[0]) extension # - compressed (tuple[1]) extension img_fmt_types = { - 'nifti1': [('.nii', '.nii.gz'), - (('.hdr', '.img'), ('.hdr', '.img.gz'))], - 'mgh': [('.mgh', '.mgz'), ('.mgh', '.mgh.gz')], - 'nifti2': [('.nii', '.nii.gz')], - 'cifti2': [('.nii', '.nii.gz')], - 'gifti': [('.gii', '.gii.gz')], - 'dicom': [('.dcm', '.dcm'), ('.IMA', '.IMA'), ('.tar', '.tar.gz')], - 'nrrd': [('.nrrd', 'nrrd'), ('nhdr', 'nhdr')], - 'afni': [('.HEAD', '.HEAD'), ('.BRIK', '.BRIK')] - } + 'nifti1': [('.nii', '.nii.gz'), + (('.hdr', '.img'), ('.hdr', '.img.gz'))], + 'mgh': [('.mgh', '.mgz'), ('.mgh', '.mgh.gz')], + 'nifti2': [('.nii', '.nii.gz')], + 'cifti2': [('.nii', '.nii.gz')], + 'gifti': [('.gii', '.gii.gz')], + 'dicom': [('.dcm', '.dcm'), ('.IMA', '.IMA'), ('.tar', '.tar.gz')], + 'nrrd': [('.nrrd', 'nrrd'), ('nhdr', 'nhdr')], + 'afni': [('.HEAD', '.HEAD'), ('.BRIK', '.BRIK')] +} + class ImageFile(File): """ Defines a trait of specific neuroimaging files """ @@ -334,3 +352,117 @@ def has_metadata(trait, metadata, value=None, recursive=True): count += has_metadata(handler, metadata, recursive) return count > 0 + + +class MultiPath(traits.List): + """ Abstract class - shared functionality of input and output MultiPath + """ + + def validate(self, object, name, value): + + # want to treat range and other sequences (except str) as list + if not isinstance(value, (str, bytes)) and isinstance(value, collections.Sequence): + value = list(value) + + if not isdefined(value) or \ + (isinstance(value, list) and len(value) == 0): + return Undefined + + newvalue = value + + if not isinstance(value, list) \ + or (self.inner_traits() and + isinstance(self.inner_traits()[0].trait_type, + traits.List) and not + isinstance(self.inner_traits()[0].trait_type, + InputMultiPath) and + isinstance(value, list) and + value and not + isinstance(value[0], list)): + newvalue = [value] + value = 
super(MultiPath, self).validate(object, name, newvalue) + + if value: + return value + + self.error(object, name, value) + + +class OutputMultiPath(MultiPath): + """ Implements a user friendly traits that accepts one or more + paths to files or directories. This is the output version which + return a single string whenever possible (when it was set to a + single value or a list of length 1). Default value of this trait + is _Undefined. It does not accept empty lists. + + XXX This should only be used as a final resort. We should stick to + established Traits to the extent possible. + + XXX This needs to be vetted by somebody who understands traits + + >>> from nipype.interfaces.base import OutputMultiPath, TraitedSpec + >>> class A(TraitedSpec): + ... foo = OutputMultiPath(File(exists=False)) + >>> a = A() + >>> a.foo + + + >>> a.foo = '/software/temp/foo.txt' + >>> a.foo + '/software/temp/foo.txt' + + >>> a.foo = ['/software/temp/foo.txt'] + >>> a.foo + '/software/temp/foo.txt' + + >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] + >>> a.foo + ['/software/temp/foo.txt', '/software/temp/goo.txt'] + + """ + + def get(self, object, name): + value = self.get_value(object, name) + if len(value) == 0: + return Undefined + elif len(value) == 1: + return value[0] + else: + return value + + def set(self, object, name, value): + self.set_value(object, name, value) + + +class InputMultiPath(MultiPath): + """ Implements a user friendly traits that accepts one or more + paths to files or directories. This is the input version which + always returns a list. Default value of this trait + is _Undefined. It does not accept empty lists. + + XXX This should only be used as a final resort. We should stick to + established Traits to the extent possible. + + XXX This needs to be vetted by somebody who understands traits + + >>> from nipype.interfaces.base import InputMultiPath, TraitedSpec + >>> class A(TraitedSpec): + ... 
foo = InputMultiPath(File(exists=False)) + >>> a = A() + >>> a.foo + + + >>> a.foo = '/software/temp/foo.txt' + >>> a.foo + ['/software/temp/foo.txt'] + + >>> a.foo = ['/software/temp/foo.txt'] + >>> a.foo + ['/software/temp/foo.txt'] + + >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] + >>> a.foo + ['/software/temp/foo.txt', '/software/temp/goo.txt'] + + """ + pass diff --git a/nipype/interfaces/bids_utils.py b/nipype/interfaces/bids_utils.py new file mode 100644 index 0000000000..0259a80352 --- /dev/null +++ b/nipype/interfaces/bids_utils.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Set of interfaces that allow interaction with BIDS data. Currently +available interfaces are: + +BIDSDataGrabber: Query data from BIDS dataset using pybids grabbids. + + + Change directory to provide relative paths for doctests + >>> import os + >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) + >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) + >>> os.chdir(datadir) +""" +from os.path import join, dirname +import json +from .. 
import logging +from .base import (traits, + DynamicTraitedSpec, + Directory, + BaseInterface, + isdefined, + Str, + Undefined) + +have_pybids = True +try: + from bids import grabbids as gb +except ImportError: + have_pybids = False + +LOGGER = logging.getLogger('workflows') + + +class BIDSDataGrabberInputSpec(DynamicTraitedSpec): + base_dir = Directory(exists=True, + desc='Path to BIDS Directory.', + mandatory=True) + output_query = traits.Dict(key_trait=Str, + value_trait=traits.Dict, + desc='Queries for outfield outputs') + raise_on_empty = traits.Bool(True, usedefault=True, + desc='Generate exception if list is empty ' + 'for a given field') + return_type = traits.Enum('file', 'namedtuple', usedefault=True) + + +class BIDSDataGrabber(BaseInterface): + + """ BIDS datagrabber module that wraps around pybids to allow arbitrary + querying of BIDS datasets. + + Examples + -------- + + By default, the BIDSDataGrabber fetches anatomical and functional images + from a project, and makes BIDS entities (e.g. subject) available for + filtering outputs. + + >>> bg = BIDSDataGrabber() + >>> bg.inputs.base_dir = 'ds005/' + >>> bg.inputs.subject = '01' + >>> results = bg.run() # doctest: +SKIP + + + Dynamically created, user-defined output fields can also be defined to + return different types of outputs from the same project. All outputs + are filtered on common entities, which can be explicitly defined as + infields. 
+ + >>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) + >>> bg.inputs.base_dir = 'ds005/' + >>> bg.inputs.subject = '01' + >>> bg.inputs.output_query['dwi'] = dict(modality='dwi') + >>> results = bg.run() # doctest: +SKIP + + """ + input_spec = BIDSDataGrabberInputSpec + output_spec = DynamicTraitedSpec + _always_run = True + + def __init__(self, infields=None, **kwargs): + """ + Parameters + ---------- + infields : list of str + Indicates the input fields to be dynamically created + + outfields: list of str + Indicates output fields to be dynamically created. + If no matching items, returns Undefined. + """ + super(BIDSDataGrabber, self).__init__(**kwargs) + + if not isdefined(self.inputs.output_query): + self.inputs.output_query = {"func": {"modality": "func"}, + "anat": {"modality": "anat"}} + + # If infields is empty, use all BIDS entities + if not infields is None and have_pybids: + bids_config = join(dirname(gb.__file__), 'config', 'bids.json') + bids_config = json.load(open(bids_config, 'r')) + infields = [i['name'] for i in bids_config['entities']] + + self._infields = infields or [] + + # used for mandatory inputs check + undefined_traits = {} + for key in self._infields: + self.inputs.add_trait(key, traits.Any) + undefined_traits[key] = kwargs[key] if key in kwargs else Undefined + + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + def _run_interface(self, runtime): + if not have_pybids: + raise ImportError( + "The BIDSEventsGrabber interface requires pybids." 
+ " Please make sure it is installed.") + return runtime + + def _list_outputs(self): + layout = gb.BIDSLayout(self.inputs.base_dir) + + # If infield is not given nm input value, silently ignore + filters = {} + for key in self._infields: + value = getattr(self.inputs, key) + if isdefined(value): + filters[key] = value + + outputs = {} + for key, query in self.inputs.output_query.items(): + args = query.copy() + args.update(filters) + filelist = layout.get(return_type=self.inputs.return_type, **args) + if len(filelist) == 0: + msg = 'Output key: %s returned no files' % key + if self.inputs.raise_on_empty: + raise IOError(msg) + else: + LOGGER.warning(msg) + filelist = Undefined + + outputs[key] = filelist + return outputs diff --git a/nipype/interfaces/brainsuite/brainsuite.py b/nipype/interfaces/brainsuite/brainsuite.py index 21014f42ea..60141bcb00 100644 --- a/nipype/interfaces/brainsuite/brainsuite.py +++ b/nipype/interfaces/brainsuite/brainsuite.py @@ -5,7 +5,7 @@ import re as regex from ..base import TraitedSpec, CommandLineInputSpec, CommandLine, File, traits, isdefined -from ..traits_extension import str + """This script provides interfaces for BrainSuite command line tools. Please see brainsuite.org for more information. @@ -902,7 +902,7 @@ class SVRegInputSpec(CommandLineInputSpec): 'Cortical Surface Extraction Sequence' ) dataSinkDelay = traits.List( - str, argstr='%s', + traits.Str, argstr='%s', desc='Connect datasink out_file to dataSinkDelay to delay execution of SVReg ' 'until dataSink has finished sinking CSE outputs.' 'For use with parallel processing workflows including Brainsuites Cortical ' @@ -1087,7 +1087,7 @@ class BDPInputSpec(CommandLineInputSpec): 'bvec and .bval files can be used instead (see diffusionGradientFile and bValueFile). 
' ) BVecBValPair = traits.List( - str, minlen=2, maxlen=2, mandatory=True, position=-1, xor=['bMatrixFile'], + traits.Str, minlen=2, maxlen=2, mandatory=True, position=-1, xor=['bMatrixFile'], argstr='--bvec %s --bval %s', desc='Must input a list containing first the BVector file, then the BValue file (both must be absolute paths)\n' 'Example: bdp.inputs.BVecBValPair = [\'/directory/subdir/prefix.dwi.bvec\', \'/directory/subdir/prefix.dwi.bval\'] ' @@ -1100,7 +1100,7 @@ class BDPInputSpec(CommandLineInputSpec): 'usually has an extension of .bvec ' ) dataSinkDelay = traits.List( - str, argstr='%s', + traits.Str, argstr='%s', desc='For use in parallel processing workflows including Brainsuite Cortical ' 'Surface Extraction sequence. Connect datasink out_file to dataSinkDelay ' 'to delay execution of BDP until dataSink has finished sinking outputs. ' diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py index a2cbc2a440..8a3526360c 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -68,7 +68,8 @@ def test_BDP_inputs(): ), ignoreMemory=dict(argstr='--ignore-memory', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputDiffusionData=dict(argstr='--nii %s', @@ -105,7 +106,8 @@ def test_BDP_inputs(): ), t1Mask=dict(argstr='--t1-mask %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threads=dict(argstr='--threads=%d', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py index f24900c6a4..8183daa886 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -31,7 +31,8 @@ def test_Bfc_inputs(): ), histogramType=dict(argstr='%s', ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMRIFile=dict(argstr='-i %s', @@ -62,7 +63,8 @@ def test_Bfc_inputs(): ), splineLambda=dict(argstr='-w %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py index a253bdcafc..d79dc8baa0 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -21,7 +21,8 @@ def test_Bse_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMRIFile=dict(argstr='-i %s', @@ -52,7 +53,8 @@ def test_Bse_inputs(): radius=dict(argstr='-r %f', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py index f219aa82af..ac78853d2b 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -12,7 +12,8 @@ def test_Cerebro_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAtlasLabelFile=dict(argstr='--atlaslabels %s', @@ -46,7 +47,8 @@ def test_Cerebro_inputs(): ), tempDirectoryBase=dict(argstr='--tempdirbase %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useCentroids=dict(argstr='--centroids', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py index 6e0fe3851c..badbcd7738 100644 --- 
a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -14,7 +14,8 @@ def test_Cortex_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), includeAllSubcorticalAreas=dict(argstr='-a', @@ -29,7 +30,8 @@ def test_Cortex_inputs(): outputCerebrumMask=dict(argstr='-o %s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py index be334c7096..b96d456fce 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -9,7 +9,8 @@ def test_Dewisp_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskFile=dict(argstr='-i %s', @@ -22,7 +23,8 @@ def test_Dewisp_inputs(): ), sizeThreshold=dict(argstr='-t %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py index 42887e8883..d9a1752fc7 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -12,7 +12,8 @@ def test_Dfs_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputShadingVolume=dict(argstr='-c %s', @@ -43,7 +44,8 @@ def test_Dfs_inputs(): requires=['tessellationThreshold'], xor=('nonZeroTessellation', 'specialTessellation'), ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tessellationThreshold=dict(argstr='%f', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py index 5bdfa45f0e..3909149567 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -9,7 +9,8 @@ def test_Hemisplit_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputHemisphereLabelFile=dict(argstr='-l %s', @@ -32,7 +33,8 @@ def test_Hemisplit_inputs(): ), pialSurfaceFile=dict(argstr='-p %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py index d4511fee33..d161e2e6c0 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -11,7 +11,8 @@ def test_Pialmesh_inputs(): ), exportPrefix=dict(argstr='--prefix %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskFile=dict(argstr='-m %s', @@ -51,7 +52,8 @@ def test_Pialmesh_inputs(): ), tangentSmoother=dict(argstr='--tc %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py index 08c7f3b894..06695eab51 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -9,7 +9,8 @@ def test_Pvc_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMRIFile=dict(argstr='-i %s', @@ -25,7 +26,8 @@ def test_Pvc_inputs(): ), spatialPrior=dict(argstr='-l %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threeClassFlag=dict(argstr='-3', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py index 305fd26bf8..a5d7408cc6 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -20,7 +20,8 @@ def test_SVReg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), iterations=dict(argstr="'-H %d'", @@ -43,7 +44,8 @@ def test_SVReg_inputs(): mandatory=True, position=0, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useCerebrumMask=dict(argstr="'-C'", ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py index 5a2b0931f8..404ce27a25 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -15,7 +15,8 @@ def test_Scrubmask_inputs(): foregroundTrimThreshold=dict(argstr='-f %d', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskFile=dict(argstr='-i %s', @@ -26,7 +27,8 @@ def test_Scrubmask_inputs(): outputMaskFile=dict(argstr='-o %s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py 
b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py index e96363e4f7..254461d7eb 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -13,7 +13,8 @@ def test_Skullfinder_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMRIFile=dict(argstr='-i %s', @@ -37,7 +38,8 @@ def test_Skullfinder_inputs(): ), surfaceFilePrefix=dict(argstr='-s %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upperThreshold=dict(argstr='-u %d', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py index 498dd56e05..ec25d193ba 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -12,7 +12,8 @@ def test_Tca_inputs(): foregroundDelta=dict(argstr='--delta %d', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskFile=dict(argstr='-i %s', @@ -26,7 +27,8 @@ def test_Tca_inputs(): outputMaskFile=dict(argstr='-o %s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timer=dict(argstr='--timer', ), diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py index 8bd388c36c..8956e36da5 100644 --- a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -9,13 +9,15 @@ def test_ThicknessPVC_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
subjectFilePrefix=dict(argstr='%s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ThicknessPVC.input_spec() diff --git a/nipype/interfaces/bru2nii.py b/nipype/interfaces/bru2nii.py index d469f8bda6..579b5229b9 100644 --- a/nipype/interfaces/bru2nii.py +++ b/nipype/interfaces/bru2nii.py @@ -42,7 +42,7 @@ class Bru2(CommandLine): >>> from nipype.interfaces.bru2nii import Bru2 >>> converter = Bru2() >>> converter.inputs.input_dir = "brukerdir" - >>> converter.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> converter.cmdline # doctest: +ELLIPSIS 'Bru2 -o .../nipype/testing/data/brukerdir brukerdir' """ input_spec = Bru2InputSpec diff --git a/nipype/interfaces/c3.py b/nipype/interfaces/c3.py index 8288ab3b17..334500874c 100644 --- a/nipype/interfaces/c3.py +++ b/nipype/interfaces/c3.py @@ -38,7 +38,7 @@ class C3dAffineTool(SEMLikeCommandLine): >>> c3.inputs.source_file = 'cmatrix.mat' >>> c3.inputs.itk_transform = 'affine.txt' >>> c3.inputs.fsl2ras = True - >>> c3.cmdline # doctest: +ALLOW_UNICODE + >>> c3.cmdline 'c3d_affine_tool -src cmatrix.mat -fsl2ras -oitk affine.txt' """ input_spec = C3dAffineToolInputSpec diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py index 39700f5304..953a2688ac 100644 --- a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -23,7 +23,8 @@ def test_AnalyzeHeader_inputs(): greylevels=dict(argstr='-gl %s', units='NA', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -74,7 +75,8 @@ def test_AnalyzeHeader_inputs(): scheme_file=dict(argstr='%s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_dims=dict(argstr='-voxeldims %s', units='mm', diff 
--git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py index 7016825269..422e6eceeb 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -9,7 +9,8 @@ def test_ComputeEigensystem_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -30,7 +31,8 @@ def test_ComputeEigensystem_inputs(): outputdatatype=dict(argstr='-outputdatatype %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ComputeEigensystem.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py index 6bf41d7b95..81afa66e5f 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -9,7 +9,8 @@ def test_ComputeFractionalAnisotropy_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -29,7 +30,8 @@ def test_ComputeFractionalAnisotropy_inputs(): scheme_file=dict(argstr='%s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ComputeFractionalAnisotropy.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py index 16b3e6f163..9981b8d017 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py +++ 
b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -9,7 +9,8 @@ def test_ComputeMeanDiffusivity_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -29,7 +30,8 @@ def test_ComputeMeanDiffusivity_inputs(): scheme_file=dict(argstr='%s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ComputeMeanDiffusivity.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py index 3adc971f7b..c3a41aa877 100644 --- a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -9,7 +9,8 @@ def test_ComputeTensorTrace_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -29,7 +30,8 @@ def test_ComputeTensorTrace_inputs(): scheme_file=dict(argstr='%s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ComputeTensorTrace.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py index 715db443da..b55c60be67 100644 --- a/nipype/interfaces/camino/tests/test_auto_Conmat.py +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -9,7 +9,8 @@ def test_Conmat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -26,7 +27,8 @@ def test_Conmat_inputs(): ), targetname_file=dict(argstr='-targetnamefile %s', ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tract_prop=dict(argstr='-tractstat %s', units='NA', diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py index f0f1c789c4..1de87ea032 100644 --- a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -13,7 +13,8 @@ def test_DT2NIfTI_inputs(): mandatory=True, position=3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -24,7 +25,8 @@ def test_DT2NIfTI_inputs(): genfile=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DT2NIfTI.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py index e4a0115dc3..2210c1c41c 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -11,7 +11,8 @@ def test_DTIFit_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_DTIFit_inputs(): mandatory=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DTIFit.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py index 285891f0cf..77f8f701dd 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -17,7 +17,8 @@ def test_DTLUTGen_inputs(): position=1, units='NA', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + 
nohash=True, usedefault=True, ), inversion=dict(argstr='-inversion %d', @@ -44,7 +45,8 @@ def test_DTLUTGen_inputs(): step=dict(argstr='-step %f', units='NA', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trace=dict(argstr='-trace %G', units='NA', diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py index ebde9241a1..ffb95b0e89 100644 --- a/nipype/interfaces/camino/tests/test_auto_DTMetric.py +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -14,7 +14,8 @@ def test_DTMetric_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputdatatype=dict(argstr='-inputdatatype %s', @@ -29,7 +30,8 @@ def test_DTMetric_inputs(): outputfile=dict(argstr='-outputfile %s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DTMetric.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py index efbaa1e95f..d3aaf0ec2e 100644 --- a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -29,7 +29,8 @@ def test_FSL2Scheme_inputs(): ), flipz=dict(argstr='-flipz', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interleave=dict(argstr='-interleave', @@ -41,7 +42,8 @@ def test_FSL2Scheme_inputs(): genfile=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), usegradmod=dict(argstr='-usegradmod', ), diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py index 2a17d57bc8..bbb8690129 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -9,7 +9,8 @@ def test_Image2Voxel_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-4dimage %s', @@ -24,7 +25,8 @@ def test_Image2Voxel_inputs(): position=2, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Image2Voxel.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py index cd0aa1380e..be0425cd89 100644 --- a/nipype/interfaces/camino/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -9,7 +9,8 @@ def test_ImageStats_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='-images %s', @@ -26,7 +27,8 @@ def test_ImageStats_inputs(): mandatory=True, units='NA', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ImageStats.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py index a8f03034d3..193bf51422 100644 --- a/nipype/interfaces/camino/tests/test_auto_LinRecon.py +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -11,7 +11,8 @@ def test_LinRecon_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -34,7 +35,8 @@ def test_LinRecon_inputs(): mandatory=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) 
inputs = LinRecon.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py index c9ac46d3d1..ccd2f94a0c 100644 --- a/nipype/interfaces/camino/tests/test_auto_MESD.py +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -14,7 +14,8 @@ def test_MESD_inputs(): fastmesd=dict(argstr='-fastmesd', requires=['mepointset'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -42,7 +43,8 @@ def test_MESD_inputs(): scheme_file=dict(argstr='-schemefile %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MESD.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py index c3555de524..bb88c3032b 100644 --- a/nipype/interfaces/camino/tests/test_auto_ModelFit.py +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -19,7 +19,8 @@ def test_ModelFit_inputs(): ), fixedmodq=dict(argstr='-fixedmod %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -49,7 +50,8 @@ def test_ModelFit_inputs(): ), tau=dict(argstr='-tau %G', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ModelFit.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py index 999db17138..dd3c97bb5f 100644 --- a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -11,7 +11,8 @@ def test_NIfTIDT2Camino_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, 
usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -30,7 +31,8 @@ def test_NIfTIDT2Camino_inputs(): ), scaleslope=dict(argstr='-scaleslope %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uppertriangular=dict(argstr='-uppertriangular %s', ), diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py index 1a64aa285c..db40520152 100644 --- a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -11,7 +11,8 @@ def test_PicoPDFs_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -39,7 +40,8 @@ def test_PicoPDFs_inputs(): position=4, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PicoPDFs.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py index da68661ea7..ad2c4df2a5 100644 --- a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -25,7 +25,8 @@ def test_ProcStreamlines_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -88,7 +89,8 @@ def test_ProcStreamlines_inputs(): ), targetfile=dict(argstr='-targetfile %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), truncateinexclusion=dict(argstr='-truncateinexclusion', ), diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py index d55474e837..bcdf5ba627 100644 --- 
a/nipype/interfaces/camino/tests/test_auto_QBallMX.py +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -12,7 +12,8 @@ def test_QBallMX_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), order=dict(argstr='-order %d', @@ -34,7 +35,8 @@ def test_QBallMX_inputs(): smoothingsigma=dict(argstr='-smoothingsigma %f', units='NA', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = QBallMX.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py index ca5044349d..3bb61363e1 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -14,7 +14,8 @@ def test_SFLUTGen_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -39,7 +40,8 @@ def test_SFLUTGen_inputs(): pdf=dict(argstr='-pdf %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SFLUTGen.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py index ba9993d7bb..2d2cbc6ba5 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -9,7 +9,8 @@ def test_SFPICOCalibData_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), info_file=dict(argstr='-infooutputfile %s', @@ -36,7 +37,8 @@ def test_SFPICOCalibData_inputs(): snr=dict(argstr='-snr %f', units='NA', ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trace=dict(argstr='-trace %f', units='NA', diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py index f95f139256..00bb953015 100644 --- a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -12,7 +12,8 @@ def test_SFPeaks_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -53,7 +54,8 @@ def test_SFPeaks_inputs(): stdsfrommean=dict(argstr='-stdsfrommean %f', units='NA', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SFPeaks.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py index f74dee86b3..695c529d3c 100644 --- a/nipype/interfaces/camino/tests/test_auto_Shredder.py +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -13,7 +13,8 @@ def test_Shredder_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -32,7 +33,8 @@ def test_Shredder_inputs(): position=3, units='NA', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Shredder.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py index 4903bbf163..510a970f39 100644 --- a/nipype/interfaces/camino/tests/test_auto_Track.py +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -23,7 +23,8 @@ def test_Track_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -59,7 +60,8 @@ def test_Track_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py index 94b2abedaf..d422972863 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -23,7 +23,8 @@ def test_TrackBallStick_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -59,7 +60,8 @@ def test_TrackBallStick_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py index 3855f8ecc1..9fd6c6caac 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -33,7 +33,8 @@ def test_TrackBayesDirac_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -79,7 +80,8 @@ def test_TrackBayesDirac_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git 
a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py index e3572430b7..7d23187cd5 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -26,7 +26,8 @@ def test_TrackBedpostxDeter_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -65,7 +66,8 @@ def test_TrackBedpostxDeter_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py index bb4c0ed898..37d2e719dd 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -26,7 +26,8 @@ def test_TrackBedpostxProba_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -68,7 +69,8 @@ def test_TrackBedpostxProba_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py index 30d87816b8..6f340c75bf 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -28,7 +28,8 @@ def test_TrackBootstrap_inputs(): ), 
gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -72,7 +73,8 @@ def test_TrackBootstrap_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py index 1edd055921..6e4e13fd67 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackDT.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -23,7 +23,8 @@ def test_TrackDT_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -59,7 +60,8 @@ def test_TrackDT_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py index b62e25cd93..814aa7d597 100644 --- a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -23,7 +23,8 @@ def test_TrackPICo_inputs(): ), gzip=dict(argstr='-gzip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-inputfile %s', @@ -64,7 +65,8 @@ def test_TrackPICo_inputs(): stepsize=dict(argstr='-stepsize %f', requires=['tracker'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracker=dict(argstr='-tracker %s', usedefault=True, diff --git 
a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py index 5f991d4090..9d53cd246f 100644 --- a/nipype/interfaces/camino/tests/test_auto_TractShredder.py +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -13,7 +13,8 @@ def test_TractShredder_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='< %s', @@ -32,7 +33,8 @@ def test_TractShredder_inputs(): position=3, units='NA', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TractShredder.input_spec() diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py index 775f3eedd9..1f7d8483a3 100644 --- a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -11,7 +11,8 @@ def test_VtkStreamlines_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr=' < %s', @@ -38,7 +39,8 @@ def test_VtkStreamlines_inputs(): target_file=dict(argstr='-targetfile %s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxeldims=dict(argstr='-voxeldims %s', position=4, diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py index 258286bd9d..7c3049e98a 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -14,7 +14,8 @@ def test_Camino2Trackvis_inputs(): environ=dict(nohash=True, usedefault=True, ), - 
ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -32,7 +33,8 @@ def test_Camino2Trackvis_inputs(): genfile=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_dims=dict(argstr='-x %s', mandatory=True, diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py index 9ebd53f272..4caa6e1ab9 100644 --- a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -12,7 +12,8 @@ def test_Trackvis2Camino_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -23,7 +24,8 @@ def test_Trackvis2Camino_inputs(): genfile=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Trackvis2Camino.input_spec() diff --git a/nipype/interfaces/cmtk/cmtk.py b/nipype/interfaces/cmtk/cmtk.py index 7d65af99a7..2f29bbb2e2 100644 --- a/nipype/interfaces/cmtk/cmtk.py +++ b/nipype/interfaces/cmtk/cmtk.py @@ -120,8 +120,11 @@ def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois): connectivity_matrix = get_connectivity_matrix(n_rois, list_of_roi_crossed_lists) dis = n_fib - len(final_fiber_ids) - iflogger.info("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. (orphans)" % (dis, dis * 100.0 / n_fib, n_fib)) - iflogger.info("Valid fibers: %i (%f percent)" % (n_fib - dis, 100 - dis * 100.0 / n_fib)) + iflogger.info('Found %i (%f percent out of %i fibers) fibers that start or ' + 'terminate in a voxel which is not labeled. 
(orphans)', + dis, dis * 100.0 / n_fib, n_fib) + iflogger.info('Valid fibers: %i (%f percent)', n_fib - dis, + 100 - dis * 100.0 / n_fib) iflogger.info('Returning the intersecting point connectivity matrix') return connectivity_matrix, final_fiber_ids @@ -181,7 +184,7 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ en_fname = op.abspath(endpoint_name + '_endpoints.npy') en_fnamemm = op.abspath(endpoint_name + '_endpointsmm.npy') - iflogger.info('Reading Trackvis file {trk}'.format(trk=track_file)) + iflogger.info('Reading Trackvis file %s', track_file) fib, hdr = nb.trackvis.read(track_file, False) stats['orig_n_fib'] = len(fib) @@ -191,13 +194,13 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize) # Output endpoint arrays - iflogger.info('Saving endpoint array: {array}'.format(array=en_fname)) + iflogger.info('Saving endpoint array: %s', en_fname) np.save(en_fname, endpoints) - iflogger.info('Saving endpoint array in mm: {array}'.format(array=en_fnamemm)) + iflogger.info('Saving endpoint array in mm: %s', en_fnamemm) np.save(en_fnamemm, endpointsmm) n = len(fib) - iflogger.info('Number of fibers {num}'.format(num=n)) + iflogger.info('Number of fibers: %i', n) # Create empty fiber label array fiberlabels = np.zeros((n, 2)) @@ -214,16 +217,16 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ nROIs = len(gp.nodes()) # add node information from parcellation - if 'dn_position' in gp.node[gp.nodes()[0]]: + if 'dn_position' in gp.nodes[list(gp.nodes())[0]]: G = gp.copy() else: G = nx.Graph() - for u, d in gp.nodes_iter(data=True): - G.add_node(int(u), d) + for u, d in gp.nodes(data=True): + G.add_node(int(u), **d) # compute a position for the node based on the mean position of the # ROI in voxel coordinates (segmentation volume ) xyz = tuple(np.mean(np.where(np.flipud(roiData) == 
int(d["dn_correspondence_id"])), axis=1)) - G.node[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) + G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) if intersections: iflogger.info("Filtering tractography from intersections") @@ -244,7 +247,8 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], endpoints[i, 0, 2]]) endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], endpoints[i, 1, 2]]) except IndexError: - iflogger.error(("AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. PLEASE CHECK ENDPOINT GENERATION" % i)) + iflogger.error('AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. ' + 'PLEASE CHECK ENDPOINT GENERATION', i) break # Filter @@ -256,7 +260,7 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ if startROI > nROIs or endROI > nROIs: iflogger.error("Start or endpoint of fiber terminate in a voxel which is labeled higher") iflogger.error("than is expected by the parcellation node information.") - iflogger.error("Start ROI: %i, End ROI: %i" % (startROI, endROI)) + iflogger.error("Start ROI: %i, End ROI: %i", startROI, endROI) iflogger.error("This needs bugfixing!") continue @@ -296,15 +300,17 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ # make final fiber labels as array final_fiberlabels_array = np.array(final_fiberlabels, dtype=int) - iflogger.info("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. (orphans)" % (dis, dis * 100.0 / n, n)) - iflogger.info("Valid fibers: %i (%f percent)" % (n - dis, 100 - dis * 100.0 / n)) + iflogger.info('Found %i (%f percent out of %i fibers) fibers that start or ' + 'terminate in a voxel which is not labeled. 
(orphans)', + dis, dis * 100.0 / n, n) + iflogger.info('Valid fibers: %i (%f%%)', n - dis, 100 - dis * 100.0 / n) numfib = nx.Graph() numfib.add_nodes_from(G) fibmean = numfib.copy() fibmedian = numfib.copy() fibdev = numfib.copy() - for u, v, d in G.edges_iter(data=True): + for u, v, d in G.edges(data=True): G.remove_edge(u, v) di = {} if 'fiblist' in d: @@ -319,14 +325,14 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ di['fiber_length_median'] = 0 di['fiber_length_std'] = 0 if not u == v: # Fix for self loop problem - G.add_edge(u, v, di) + G.add_edge(u, v, **di) if 'fiblist' in d: numfib.add_edge(u, v, weight=di['number_of_fibers']) fibmean.add_edge(u, v, weight=di['fiber_length_mean']) fibmedian.add_edge(u, v, weight=di['fiber_length_median']) fibdev.add_edge(u, v, weight=di['fiber_length_std']) - iflogger.info('Writing network as {ntwk}'.format(ntwk=matrix_name)) + iflogger.info('Writing network as %s', matrix_name) nx.write_gpickle(G, op.abspath(matrix_name)) numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int) @@ -341,7 +347,7 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ if intersections: path, name, ext = split_filename(matrix_name) intersection_matrix_name = op.abspath(name + '_intersections') + ext - iflogger.info('Writing intersection network as {ntwk}'.format(ntwk=intersection_matrix_name)) + iflogger.info('Writing intersection network as %s', intersection_matrix_name) nx.write_gpickle(I, intersection_matrix_name) path, name, ext = split_filename(matrix_mat_name) @@ -349,37 +355,41 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ ext = '.mat' matrix_mat_name = matrix_mat_name + ext - iflogger.info('Writing matlab matrix as {mat}'.format(mat=matrix_mat_name)) + iflogger.info('Writing matlab matrix as %s', matrix_mat_name) sio.savemat(matrix_mat_name, numfib_dict) if intersections: intersect_dict = {'intersections': intersection_matrix} 
intersection_matrix_mat_name = op.abspath(name + '_intersections') + ext - iflogger.info('Writing intersection matrix as {mat}'.format(mat=intersection_matrix_mat_name)) + iflogger.info('Writing intersection matrix as %s', intersection_matrix_mat_name) sio.savemat(intersection_matrix_mat_name, intersect_dict) mean_fiber_length_matrix_name = op.abspath(name + '_mean_fiber_length') + ext - iflogger.info('Writing matlab mean fiber length matrix as {mat}'.format(mat=mean_fiber_length_matrix_name)) + iflogger.info('Writing matlab mean fiber length matrix as %s', + mean_fiber_length_matrix_name) sio.savemat(mean_fiber_length_matrix_name, fibmean_dict) median_fiber_length_matrix_name = op.abspath(name + '_median_fiber_length') + ext - iflogger.info('Writing matlab median fiber length matrix as {mat}'.format(mat=median_fiber_length_matrix_name)) + iflogger.info('Writing matlab median fiber length matrix as %s', + median_fiber_length_matrix_name) sio.savemat(median_fiber_length_matrix_name, fibmedian_dict) fiber_length_std_matrix_name = op.abspath(name + '_fiber_length_std') + ext - iflogger.info('Writing matlab fiber length deviation matrix as {mat}'.format(mat=fiber_length_std_matrix_name)) + iflogger.info('Writing matlab fiber length deviation matrix as %s', + fiber_length_std_matrix_name) sio.savemat(fiber_length_std_matrix_name, fibdev_dict) fiberlengths_fname = op.abspath(endpoint_name + '_final_fiberslength.npy') - iflogger.info("Storing final fiber length array as %s" % fiberlengths_fname) + iflogger.info('Storing final fiber length array as %s', fiberlengths_fname) np.save(fiberlengths_fname, final_fiberlength_array) fiberlabels_fname = op.abspath(endpoint_name + '_filtered_fiberslabel.npy') - iflogger.info("Storing all fiber labels (with orphans) as %s" % fiberlabels_fname) + iflogger.info('Storing all fiber labels (with orphans) as %s', fiberlabels_fname) np.save(fiberlabels_fname, np.array(fiberlabels, dtype=np.int32),) fiberlabels_noorphans_fname = 
op.abspath(endpoint_name + '_final_fiberslabels.npy') - iflogger.info("Storing final fiber labels (no orphans) as %s" % fiberlabels_noorphans_fname) + iflogger.info('Storing final fiber labels (no orphans) as %s', + fiberlabels_noorphans_fname) np.save(fiberlabels_noorphans_fname, final_fiberlabels_array) iflogger.info("Filtering tractography - keeping only no orphan fibers") @@ -389,7 +399,7 @@ def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_ stats['intersections_percent'] = float(stats['intersections_n_fib']) / float(stats['orig_n_fib']) * 100 out_stats_file = op.abspath(endpoint_name + '_statistics.mat') - iflogger.info("Saving matrix creation statistics as %s" % out_stats_file) + iflogger.info('Saving matrix creation statistics as %s', out_stats_file) sio.savemat(out_stats_file, stats) @@ -401,7 +411,7 @@ def save_fibers(oldhdr, oldfib, fname, indices): outstreams.append(oldfib[i]) n_fib_out = len(outstreams) hdrnew['n_count'] = n_fib_out - iflogger.info("Writing final non-orphan fibers as %s" % fname) + iflogger.info('Writing final non-orphan fibers as %s', fname) nb.trackvis.write(fname, outstreams, hdrnew) return n_fib_out @@ -620,22 +630,22 @@ class ROIGen(BaseInterface): def _run_interface(self, runtime): aparc_aseg_file = self.inputs.aparc_aseg_file aparcpath, aparcname, aparcext = split_filename(aparc_aseg_file) - iflogger.info('Using Aparc+Aseg file: {name}'.format(name=aparcname + aparcext)) + iflogger.info('Using Aparc+Aseg file: %s', aparcname + aparcext) niiAPARCimg = nb.load(aparc_aseg_file, mmap=NUMPY_MMAP) niiAPARCdata = niiAPARCimg.get_data() niiDataLabels = np.unique(niiAPARCdata) numDataLabels = np.size(niiDataLabels) - iflogger.info('Number of labels in image: {n}'.format(n=numDataLabels)) + iflogger.info('Number of labels in image: %s', numDataLabels) write_dict = True if self.inputs.use_freesurfer_LUT: self.LUT_file = self.inputs.freesurfer_dir + '/FreeSurferColorLUT.txt' - iflogger.info('Using Freesurfer 
LUT: {name}'.format(name=self.LUT_file)) + iflogger.info('Using Freesurfer LUT: %s', self.LUT_file) prefix = 'fsLUT' elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file): self.LUT_file = op.abspath(self.inputs.LUT_file) lutpath, lutname, lutext = split_filename(self.LUT_file) - iflogger.info('Using Custom LUT file: {name}'.format(name=lutname + lutext)) + iflogger.info('Using Custom LUT file: %s', lutname + lutext) prefix = lutname else: prefix = 'hardcoded' @@ -652,14 +662,14 @@ def _run_interface(self, runtime): dict_file = op.abspath(prefix + '_' + aparcname + '.pck') if write_dict: - iflogger.info('Lookup table: {name}'.format(name=op.abspath(self.LUT_file))) + iflogger.info('Lookup table: %s', op.abspath(self.LUT_file)) LUTlabelsRGBA = np.loadtxt(self.LUT_file, skiprows=4, usecols=[0, 1, 2, 3, 4, 5], comments='#', dtype={'names': ('index', 'label', 'R', 'G', 'B', 'A'), 'formats': ('int', '|S30', 'int', 'int', 'int', 'int')}) numLUTLabels = np.size(LUTlabelsRGBA) if numLUTLabels < numDataLabels: iflogger.error('LUT file provided does not contain all of the regions in the image') iflogger.error('Removing unmapped regions') - iflogger.info('Number of labels in LUT: {n}'.format(n=numLUTLabels)) + iflogger.info('Number of labels in LUT: %s', numLUTLabels) LUTlabelDict = {} """ Create dictionary for input LUT table""" @@ -687,7 +697,7 @@ def _run_interface(self, runtime): iflogger.info('Grey matter mask created') greyMaskLabels = np.unique(niiGM) numGMLabels = np.size(greyMaskLabels) - iflogger.info('Number of grey matter labels: {num}'.format(num=numGMLabels)) + iflogger.info('Number of grey matter labels: %s', numGMLabels) labelDict = {} GMlabelDict = {} @@ -697,7 +707,7 @@ def _run_interface(self, runtime): if write_dict: GMlabelDict['originalID'] = mapDict[label] except: - iflogger.info('Label {lbl} not in provided mapping'.format(lbl=label)) + iflogger.info('Label %s not in provided mapping', label) if write_dict: del GMlabelDict 
GMlabelDict = {} @@ -708,11 +718,11 @@ def _run_interface(self, runtime): roi_image = nb.Nifti1Image(niiGM, niiAPARCimg.affine, niiAPARCimg.header) - iflogger.info('Saving ROI File to {path}'.format(path=roi_file)) + iflogger.info('Saving ROI File to %s', roi_file) nb.save(roi_image, roi_file) if write_dict: - iflogger.info('Saving Dictionary File to {path} in Pickle format'.format(path=dict_file)) + iflogger.info('Saving Dictionary File to %s in Pickle format', dict_file) with open(dict_file, 'w') as f: pickle.dump(labelDict, f) return runtime @@ -747,10 +757,10 @@ def create_nodes(roi_file, resolution_network_file, out_filename): roi_image = nb.load(roi_file, mmap=NUMPY_MMAP) roiData = roi_image.get_data() nROIs = len(gp.nodes()) - for u, d in gp.nodes_iter(data=True): - G.add_node(int(u), d) + for u, d in gp.nodes(data=True): + G.add_node(int(u), **d) xyz = tuple(np.mean(np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), axis=1)) - G.node[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) + G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) nx.write_gpickle(G, out_filename) return out_filename @@ -785,7 +795,7 @@ class CreateNodes(BaseInterface): def _run_interface(self, runtime): iflogger.info('Creating nodes...') create_nodes(self.inputs.roi_file, self.inputs.resolution_network_file, self.inputs.out_filename) - iflogger.info('Saving node network to {path}'.format(path=op.abspath(self.inputs.out_filename))) + iflogger.info('Saving node network to %s', op.abspath(self.inputs.out_filename)) return runtime def _list_outputs(self): diff --git a/nipype/interfaces/cmtk/nbs.py b/nipype/interfaces/cmtk/nbs.py index fd4ff0e050..7410227565 100644 --- a/nipype/interfaces/cmtk/nbs.py +++ b/nipype/interfaces/cmtk/nbs.py @@ -32,7 +32,10 @@ def ntwks_to_matrices(in_files, edge_key): for idx, name in enumerate(in_files): graph = nx.read_gpickle(name) for u, v, d in graph.edges(data=True): - graph[u][v]['weight'] = d[edge_key] # Setting the 
edge requested edge value as weight value + try: + graph[u][v]['weight'] = d[edge_key] # Setting the edge requested edge value as weight value + except: + raise KeyError("the graph edges do not have {} attribute".format(edge_key)) matrix[:, :, idx] = nx.to_numpy_matrix(graph) # Retrieve the matrix return matrix @@ -77,6 +80,10 @@ class NetworkBasedStatistic(BaseInterface): output_spec = NetworkBasedStatisticOutputSpec def _run_interface(self, runtime): + + if not have_cv: + raise ImportError("cviewer library is not available") + THRESH = self.inputs.threshold K = self.inputs.number_of_permutations TAIL = self.inputs.t_tail @@ -111,21 +118,22 @@ def _run_interface(self, runtime): node_ntwk_name = self.inputs.in_group1[0] node_network = nx.read_gpickle(node_ntwk_name) - iflogger.info('Populating node dictionaries with attributes from {node}'.format(node=node_ntwk_name)) + iflogger.info('Populating node dictionaries with attributes from %s', + node_ntwk_name) - for nid, ndata in node_network.nodes_iter(data=True): - nbsgraph.node[nid] = ndata - nbs_pval_graph.node[nid] = ndata + for nid, ndata in node_network.nodes(data=True): + nbsgraph.nodes[nid].update(ndata) + nbs_pval_graph.nodes[nid].update(ndata) path = op.abspath('NBS_Result_' + details) iflogger.info(path) nx.write_gpickle(nbsgraph, path) - iflogger.info('Saving output NBS edge network as {out}'.format(out=path)) + iflogger.info('Saving output NBS edge network as %s', path) pval_path = op.abspath('NBS_P_vals_' + details) iflogger.info(pval_path) nx.write_gpickle(nbs_pval_graph, pval_path) - iflogger.info('Saving output p-value network as {out}'.format(out=pval_path)) + iflogger.info('Saving output p-value network as %s', pval_path) return runtime def _list_outputs(self): diff --git a/nipype/interfaces/cmtk/nx.py b/nipype/interfaces/cmtk/nx.py index 48763256f7..ec3c01336c 100644 --- a/nipype/interfaces/cmtk/nx.py +++ b/nipype/interfaces/cmtk/nx.py @@ -38,7 +38,7 @@ def read_unknown_ntwk(ntwk): if not isinstance(ntwk, 
nx.classes.graph.Graph): - path, name, ext = split_filename(ntwk) + _, _, ext = split_filename(ntwk) if ext == '.pck': ntwk = nx.read_gpickle(ntwk) elif ext == '.graphml': @@ -48,7 +48,7 @@ def read_unknown_ntwk(ntwk): def remove_all_edges(ntwk): ntwktmp = ntwk.copy() - edges = ntwktmp.edges_iter() + edges = list(ntwktmp.edges()) for edge in edges: ntwk.remove_edge(edge[0], edge[1]) return ntwk @@ -60,20 +60,20 @@ def fix_keys_for_gexf(orig): """ import networkx as nx ntwk = nx.Graph() - nodes = orig.nodes_iter() - edges = orig.edges_iter() + nodes = list(orig.nodes()) + edges = list(orig.edges()) for node in nodes: newnodedata = {} - newnodedata.update(orig.node[node]) - if 'dn_fsname' in orig.node[node]: - newnodedata['label'] = orig.node[node]['dn_fsname'] - ntwk.add_node(str(node), newnodedata) - if 'dn_position' in ntwk.node[str(node)] and 'dn_position' in newnodedata: - ntwk.node[str(node)]['dn_position'] = str(newnodedata['dn_position']) + newnodedata.update(orig.nodes[node]) + if 'dn_fsname' in orig.nodes[node]: + newnodedata['label'] = orig.nodes[node]['dn_fsname'] + ntwk.add_node(str(node), **newnodedata) + if 'dn_position' in ntwk.nodes[str(node)] and 'dn_position' in newnodedata: + ntwk.nodes[str(node)]['dn_position'] = str(newnodedata['dn_position']) for edge in edges: data = {} data = orig.edge[edge[0]][edge[1]] - ntwk.add_edge(str(edge[0]), str(edge[1]), data) + ntwk.add_edge(str(edge[0]), str(edge[1]), **data) if 'fiber_length_mean' in ntwk.edge[str(edge[0])][str(edge[1])]: ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_mean'] = str(data['fiber_length_mean']) if 'fiber_length_std' in ntwk.edge[str(edge[0])][str(edge[1])]: @@ -104,28 +104,25 @@ def average_networks(in_files, ntwk_res_file, group_id): """ import networkx as nx import os.path as op - iflogger.info(("Creating average network for group: " - "{grp}").format(grp=group_id)) + iflogger.info('Creating average network for group: %s', group_id) matlab_network_list = [] if len(in_files) == 
1: avg_ntwk = read_unknown_ntwk(in_files[0]) else: count_to_keep_edge = np.round(len(in_files) / 2.0) - iflogger.info(("Number of networks: {L}, an edge must occur in at " - "least {c} to remain in the " - "average network").format(L=len(in_files), - c=count_to_keep_edge)) + iflogger.info('Number of networks: %i, an edge must occur in at ' + 'least %i to remain in the average network', + len(in_files), count_to_keep_edge) ntwk_res_file = read_unknown_ntwk(ntwk_res_file) - iflogger.info(("{n} Nodes found in network resolution " - "file").format(n=ntwk_res_file.number_of_nodes())) + iflogger.info('%i nodes found in network resolution file', + ntwk_res_file.number_of_nodes()) ntwk = remove_all_edges(ntwk_res_file) counting_ntwk = ntwk.copy() # Sums all the relevant variables for index, subject in enumerate(in_files): tmp = nx.read_gpickle(subject) - iflogger.info(('File {s} has {n} ' - 'edges').format(s=subject, n=tmp.number_of_edges())) - edges = tmp.edges_iter() + iflogger.info('File %s has %i edges', subject, tmp.number_of_edges()) + edges = list(tmp.edges()) for edge in edges: data = {} data = tmp.edge[edge[0]][edge[1]] @@ -134,29 +131,28 @@ def average_networks(in_files, ntwk_res_file, group_id): current = {} current = ntwk.edge[edge[0]][edge[1]] data = add_dicts_by_key(current, data) - ntwk.add_edge(edge[0], edge[1], data) - nodes = tmp.nodes_iter() + ntwk.add_edge(edge[0], edge[1], **data) + nodes = list(tmp.nodes()) for node in nodes: data = {} - data = ntwk.node[node] - if 'value' in tmp.node[node]: - data['value'] = data['value'] + tmp.node[node]['value'] - ntwk.add_node(node, data) + data = ntwk.nodes[node] + if 'value' in tmp.nodes[node]: + data['value'] = data['value'] + tmp.nodes[node]['value'] + ntwk.add_node(node, **data) # Divides each value by the number of files - nodes = ntwk.nodes_iter() - edges = ntwk.edges_iter() - iflogger.info(('Total network has {n} ' - 'edges').format(n=ntwk.number_of_edges())) + nodes = list(ntwk.nodes()) + edges = 
list(ntwk.edges()) + iflogger.info('Total network has %i edges', ntwk.number_of_edges()) avg_ntwk = nx.Graph() newdata = {} for node in nodes: - data = ntwk.node[node] + data = ntwk.nodes[node] newdata = data if 'value' in data: newdata['value'] = data['value'] / len(in_files) - ntwk.node[node]['value'] = newdata - avg_ntwk.add_node(node, newdata) + ntwk.nodes[node]['value'] = newdata + avg_ntwk.add_node(node, **newdata) edge_dict = {} edge_dict['count'] = np.zeros((avg_ntwk.number_of_nodes(), @@ -168,12 +164,13 @@ def average_networks(in_files, ntwk_res_file, group_id): if not key == 'count': data[key] = data[key] / len(in_files) ntwk.edge[edge[0]][edge[1]] = data - avg_ntwk.add_edge(edge[0], edge[1], data) + avg_ntwk.add_edge(edge[0], edge[1], **data) edge_dict['count'][edge[0] - 1][edge[1] - 1] = ntwk.edge[edge[0]][edge[1]]['count'] - iflogger.info('After thresholding, the average network has has {n} edges'.format(n=avg_ntwk.number_of_edges())) + iflogger.info('After thresholding, the average network has %i edges', + avg_ntwk.number_of_edges()) - avg_edges = avg_ntwk.edges_iter() + avg_edges = avg_ntwk.edges() for edge in avg_edges: data = avg_ntwk.edge[edge[0]][edge[1]] for key in list(data.keys()): @@ -187,16 +184,17 @@ def average_networks(in_files, ntwk_res_file, group_id): matlab_network_list.append(op.abspath(network_name)) tmp[key] = edge_dict[key] sio.savemat(op.abspath(network_name), tmp) - iflogger.info('Saving average network for key: {k} as {out}'.format(k=key, out=op.abspath(network_name))) + iflogger.info('Saving average network for key: %s as %s', key, + op.abspath(network_name)) # Writes the networks and returns the name network_name = group_id + '_average.pck' nx.write_gpickle(avg_ntwk, op.abspath(network_name)) - iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name))) + iflogger.info('Saving average network as %s', op.abspath(network_name)) avg_ntwk = fix_keys_for_gexf(avg_ntwk) network_name = group_id + 
'_average.gexf' nx.write_gexf(avg_ntwk, op.abspath(network_name)) - iflogger.info('Saving average network as {out}'.format(out=op.abspath(network_name))) + iflogger.info('Saving average network as %s', op.abspath(network_name)) return network_name, matlab_network_list @@ -319,11 +317,11 @@ def compute_network_measures(ntwk): def add_node_data(node_array, ntwk): node_ntwk = nx.Graph() newdata = {} - for idx, data in ntwk.nodes_iter(data=True): + for idx, data in ntwk.nodes(data=True): if not int(idx) == 0: newdata['value'] = node_array[int(idx) - 1] data.update(newdata) - node_ntwk.add_node(int(idx), data) + node_ntwk.add_node(int(idx), **data) return node_ntwk @@ -339,7 +337,7 @@ def add_edge_data(edge_array, ntwk, above=0, below=0): old_edge_dict = edge_ntwk.edge[x + 1][y + 1] edge_ntwk.remove_edge(x + 1, y + 1) data.update(old_edge_dict) - edge_ntwk.add_edge(x + 1, y + 1, data) + edge_ntwk.add_edge(x + 1, y + 1, **data) return edge_ntwk @@ -453,12 +451,12 @@ def _run_interface(self, runtime): out_pickled_extra_measures = op.abspath(self._gen_outfilename(self.inputs.out_pickled_extra_measures, 'pck')) dict_measures = compute_dict_measures(ntwk) - iflogger.info('Saving extra measure file to {path} in Pickle format'.format(path=op.abspath(out_pickled_extra_measures))) - file = open(out_pickled_extra_measures, 'w') - pickle.dump(dict_measures, file) - file.close() + iflogger.info('Saving extra measure file to %s in Pickle format', + op.abspath(out_pickled_extra_measures)) + with open(out_pickled_extra_measures, 'w') as fo: + pickle.dump(dict_measures, fo) - iflogger.info('Saving MATLAB measures as {m}'.format(m=matlab)) + iflogger.info('Saving MATLAB measures as %s', matlab) # Loops through the measures which return a dictionary, # converts the keys and values to a Numpy array, diff --git a/nipype/interfaces/cmtk/parcellation.py b/nipype/interfaces/cmtk/parcellation.py index 5a510bcdf7..22214c0036 100644 --- a/nipype/interfaces/cmtk/parcellation.py +++ 
b/nipype/interfaces/cmtk/parcellation.py @@ -213,7 +213,7 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): rois = np.zeros((256, 256, 256), dtype=np.int16) count = 0 - for brk, brv in pg.nodes_iter(data=True): + for brk, brv in pg.nodes(data=True): count = count + 1 iflogger.info(brv) iflogger.info(brk) @@ -223,22 +223,22 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): hemi = 'rh' if brv['dn_region'] == 'subcortical': iflogger.info(brv) - iflogger.info("---------------------") - iflogger.info("Work on brain region: %s" % (brv['dn_region'])) - iflogger.info("Freesurfer Name: %s" % brv['dn_fsname']) - iflogger.info("Region %s of %s " % (count, pg.number_of_nodes())) - iflogger.info("---------------------") + iflogger.info('---------------------') + iflogger.info('Work on brain region: %s', brv['dn_region']) + iflogger.info('Freesurfer Name: %s', brv['dn_fsname']) + iflogger.info('Region %s of %s', count, pg.number_of_nodes()) + iflogger.info('---------------------') # if it is subcortical, retrieve roi from aseg idx = np.where(asegd == int(brv['dn_fs_aseg_val'])) rois[idx] = int(brv['dn_correspondence_id']) elif brv['dn_region'] == 'cortical': iflogger.info(brv) - iflogger.info("---------------------") - iflogger.info("Work on brain region: %s" % (brv['dn_region'])) - iflogger.info("Freesurfer Name: %s" % brv['dn_fsname']) - iflogger.info("Region %s of %s " % (count, pg.number_of_nodes())) - iflogger.info("---------------------") + iflogger.info('---------------------') + iflogger.info('Work on brain region: %s', brv['dn_region']) + iflogger.info('Freesurfer Name: %s', brv['dn_fsname']) + iflogger.info('Region %s of %s', count, pg.number_of_nodes()) + iflogger.info('---------------------') labelpath = op.join( output_dir, parval['fs_label_subdir_name'] % hemi) @@ -294,7 +294,7 @@ def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): # store volume eg in ROIv_scale33.nii.gz 
out_roi = op.abspath('ROIv_%s.nii.gz' % parcellation_name) - iflogger.info("Save output image to %s" % out_roi) + iflogger.info('Save output image to %s', out_roi) img = nb.Nifti1Image(rois, aseg.affine, hdr2) nb.save(img, out_roi) @@ -424,22 +424,23 @@ def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name): wmmask[idx] = 1 # check if we should subtract the cortical rois from this parcellation - iflogger.info("Loading %s to subtract cortical ROIs from white matter mask" % ('ROI_%s.nii.gz' % parcellation_name)) + iflogger.info('Loading ROI_%s.nii.gz to subtract cortical ROIs from white ' + 'matter mask', parcellation_name) roi = nb.load(op.join(op.curdir, 'ROI_%s.nii.gz' % parcellation_name)) roid = roi.get_data() assert roid.shape[0] == wmmask.shape[0] pg = nx.read_graphml(pgpath) - for brk, brv in pg.nodes_iter(data=True): + for brk, brv in pg.nodes(data=True): if brv['dn_region'] == 'cortical': - iflogger.info("Subtracting region %s with intensity value %s" % - (brv['dn_region'], brv['dn_correspondence_id'])) + iflogger.info('Subtracting region %s with intensity value %s', + brv['dn_region'], brv['dn_correspondence_id']) idx = np.where(roid == int(brv['dn_correspondence_id'])) wmmask[idx] = 0 # output white matter mask. 
crop and move it afterwards wm_out = op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz') img = nb.Nifti1Image(wmmask, fsmask.affine, fsmask.header) - iflogger.info("Save white matter mask: %s" % wm_out) + iflogger.info('Save white matter mask: %s', wm_out) nb.save(img, wm_out) @@ -450,7 +451,7 @@ def crop_and_move_datasets(subject_id, subjects_dir, fs_dir, parcellation_name, log = cmp_config.get_logger() output_dir = op.abspath(op.curdir) - iflogger.info("Cropping and moving datasets to %s" % output_dir) + iflogger.info('Cropping and moving datasets to %s', output_dir) ds = [ (op.join(fs_dir, 'mri', 'aseg.nii.gz'), op.abspath('aseg.nii.gz')), @@ -469,7 +470,7 @@ def crop_and_move_datasets(subject_id, subjects_dir, fs_dir, parcellation_name, op.abspath('ROIv_HR_th.nii.gz'))) orig = op.join(fs_dir, 'mri', 'orig', '001.mgz') for d in ds: - iflogger.info("Processing %s:" % d[0]) + iflogger.info('Processing %s:', d[0]) if not op.exists(d[0]): raise Exception('File %s does not exist.' % d[0]) # reslice to original volume because the roi creation with freesurfer diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py index 6252ee9218..567de03ab3 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -6,7 +6,8 @@ def test_AverageNetworks_inputs(): input_map = dict(group_id=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(mandatory=True, diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py index ea97eaecd8..cf62691f3b 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -13,7 +13,8 @@ def test_CFFConverter_inputs(): gifti_surfaces=dict(), gpickled_networks=dict(), 
graphml_networks=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), license=dict(), diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py index 3126066243..42efb72dfd 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -4,7 +4,8 @@ def test_CreateNodes_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_filename=dict(usedefault=True, diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py index 18dd6c1ec6..2323461f35 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -4,7 +4,8 @@ def test_MergeCNetworks_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(mandatory=True, diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py index e031b8cae2..27f607c530 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -6,7 +6,8 @@ def test_NetworkBasedStatistic_inputs(): input_map = dict(edge_key=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_group1=dict(mandatory=True, diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py index 46c077af1b..a8ccc8dd06 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py +++ 
b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -6,7 +6,8 @@ def test_NetworkXMetrics_inputs(): input_map = dict(compute_clique_related_measures=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py index f62f98b51e..8de2e0bf9a 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -7,7 +7,8 @@ def test_Parcellate_inputs(): input_map = dict(dilation=dict(usedefault=True, ), freesurfer_dir=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_roi_file=dict(genfile=True, diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py index 41f99aa5bf..a1c65f4db6 100644 --- a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -10,7 +10,8 @@ def test_ROIGen_inputs(): ), freesurfer_dir=dict(requires=['use_freesurfer_LUT'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_dict_file=dict(genfile=True, diff --git a/nipype/interfaces/cmtk/tests/test_nbs.py b/nipype/interfaces/cmtk/tests/test_nbs.py new file mode 100644 index 0000000000..03a7aa8619 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_nbs.py @@ -0,0 +1,59 @@ +from __future__ import unicode_literals +from ..nbs import NetworkBasedStatistic +from ....utils.misc import package_check +import numpy as np +import networkx as nx +import pytest + +have_cv = True +try: + package_check('cviewer') +except Exception as e: + have_cv = False + +@pytest.fixture() +def creating_graphs(tmpdir): + graphlist = [] + graphnames = ["name"+str(i) for i in 
range(6)] + for idx, name in enumerate(graphnames): + graph = np.random.rand(10,10) + G = nx.from_numpy_matrix(graph) + out_file = tmpdir.strpath + graphnames[idx] + '.pck' + # Save as pck file + nx.write_gpickle(G, out_file) + graphlist.append(out_file) + return graphlist + + +@pytest.mark.skipif(have_cv, reason="tests for import error, cviewer available") +def test_importerror(creating_graphs, tmpdir): + tmpdir.chdir() + graphlist = creating_graphs + group1 = graphlist[:3] + group2 = graphlist[3:] + + nbs = NetworkBasedStatistic() + nbs.inputs.in_group1 = group1 + nbs.inputs.in_group2 = group2 + nbs.inputs.edge_key = "weight" + + with pytest.raises(ImportError) as e: + nbs.run() + assert "cviewer library is not available" == str(e.value) + + +@pytest.mark.skipif(not have_cv, reason="cviewer has to be available") +def test_keyerror(creating_graphs): + graphlist =creating_graphs + + group1 = graphlist[:3] + group2 = graphlist[3:] + + nbs = NetworkBasedStatistic() + nbs.inputs.in_group1 = group1 + nbs.inputs.in_group2 = group2 + nbs.inputs.edge_key = "Your_edge" + + with pytest.raises(KeyError) as e: + nbs.run() + assert "the graph edges do not have Your_edge attribute" in str(e.value) diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py index 62e62b6672..22e9375609 100644 --- a/nipype/interfaces/dcm2nii.py +++ b/nipype/interfaces/dcm2nii.py @@ -70,13 +70,25 @@ class Dcm2nii(CommandLine): Examples ======== + .. testsetup:: + + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() # changing to a temporary directory + + .. doctest:: + >>> from nipype.interfaces.dcm2nii import Dcm2nii >>> converter = Dcm2nii() - >>> converter.inputs.source_names = ['functional_1.dcm', 'functional_2.dcm'] + >>> converter.inputs.source_names = [os.path.join(datadir, 'functional_1.dcm'), os.path.join(datadir, 'functional_2.dcm')] >>> converter.inputs.gzip_output = True >>> converter.inputs.output_dir = '.' 
- >>> converter.cmdline # doctest: +ALLOW_UNICODE - 'dcm2nii -a y -c y -b config.ini -v y -d y -e y -g y -i n -n y -o . -p y -x n -f n functional_1.dcm' + >>> converter.cmdline #doctest: +ELLIPSIS + 'dcm2nii -a y -c y -b config.ini -v y -d y -e y -g y -i n -n y -o . -p y -x n -f n ...functional_1.dcm' + + .. testsetup:: + + >>> os.chdir(old.strpath) + """ input_spec = Dcm2niiInputSpec @@ -250,7 +262,7 @@ class Dcm2niix(CommandLine): 'dcm2niix -b y -z i -x n -t n -m n -f %t%p -o . -s y -v n functional_1.dcm' >>> flags = '-'.join([val.strip() + ' ' for val in sorted(' '.join(converter.cmdline.split()[1:-1]).split('-'))]) - >>> flags # doctest: +ALLOW_UNICODE + >>> flags ' -b y -f %t%p -m n -o . -s y -t n -v n -x n -z i ' """ @@ -304,7 +316,7 @@ def _parse_stdout(self, stdout): bvals.append(out_file + ".bval") find_b = False # next scan will have bvals/bvecs - elif 'DTI gradients' in line or 'DTI gradient directions' in line: + elif 'DTI gradients' in line or 'DTI gradient directions' in line or 'DTI vectors' in line: find_b = True else: pass diff --git a/nipype/interfaces/dcmstack.py b/nipype/interfaces/dcmstack.py index e9dab240f6..9e49f2b326 100644 --- a/nipype/interfaces/dcmstack.py +++ b/nipype/interfaces/dcmstack.py @@ -19,10 +19,12 @@ import nibabel as nb import imghdr -from .base import (TraitedSpec, DynamicTraitedSpec, - InputMultiPath, File, Directory, - traits, BaseInterface) -from .traits_extension import isdefined, Undefined +from .base import ( + TraitedSpec, DynamicTraitedSpec, + InputMultiPath, File, Directory, + traits, BaseInterface, + isdefined, Undefined +) from ..utils import NUMPY_MMAP diff --git a/nipype/interfaces/diffusion_toolkit/dti.py b/nipype/interfaces/diffusion_toolkit/dti.py index 3ba0beeafc..ead0e222c6 100644 --- a/nipype/interfaces/diffusion_toolkit/dti.py +++ b/nipype/interfaces/diffusion_toolkit/dti.py @@ -129,7 +129,7 @@ class DTITrackerInputSpec(CommandLineInputSpec): angle_threshold_weight = traits.Float(desc="set angle threshold 
weighting factor. weighting will be be applied \ on top of the angle_threshold", argstr="-atw %f") random_seed = traits.Int(desc="use random location in a voxel instead of the center of the voxel \ - to seed. can also define number of seed per voxel. default is 1", argstr="-rseed") + to seed. can also define number of seed per voxel. default is 1", argstr="-rseed %d") invert_x = traits.Bool(desc="invert x component of the vector", argstr="-ix") invert_y = traits.Bool(desc="invert y component of the vector", argstr="-iy") invert_z = traits.Bool(desc="invert z component of the vector", argstr="-iz") diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py index 333578742e..569f1ac79f 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py @@ -20,7 +20,8 @@ def test_DTIRecon_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_orientation_vectors=dict(argstr='-iop %f', @@ -36,7 +37,8 @@ def test_DTIRecon_inputs(): output_type=dict(argstr='-ot %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DTIRecon.input_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py index ea20252ae3..0a0a66ffb1 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -13,7 +13,8 @@ def test_DTITracker_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_data_prefix=dict(argstr='%s', @@ -47,7 +48,7 
@@ def test_DTITracker_inputs(): ), primary_vector=dict(argstr='-%s', ), - random_seed=dict(argstr='-rseed', + random_seed=dict(argstr='-rseed %d', ), step_length=dict(argstr='-l %f', ), @@ -58,7 +59,8 @@ def test_DTITracker_inputs(): swap_zx=dict(argstr='-szx', ), tensor_file=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracking_method=dict(argstr='-%s', ), diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py index 699c5c920d..d85c9558de 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -15,7 +15,8 @@ def test_HARDIMat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_info=dict(argstr='-info %s', @@ -34,7 +35,8 @@ def test_HARDIMat_inputs(): ), reference_file=dict(argstr='-ref %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = HARDIMat.input_spec() diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py index 8fa38aab42..f4d116561e 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -17,7 +17,8 @@ def test_ODFRecon_inputs(): ), filter=dict(argstr='-f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_orientation_vectors=dict(argstr='-iop %f', @@ -51,7 +52,8 @@ def test_ODFRecon_inputs(): ), subtract_background=dict(argstr='-bg', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ODFRecon.input_spec() diff --git 
a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py index 647cb3767e..931375abd1 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -17,7 +17,8 @@ def test_ODFTracker_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_orientation_vectors=dict(argstr='-iop %f', @@ -68,7 +69,8 @@ def test_ODFTracker_inputs(): ), swap_zx=dict(argstr='-szx', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_order=dict(argstr='-vorder %s', ), diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py index 0ce7d67281..7db4168340 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -9,7 +9,8 @@ def test_SplineFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_file=dict(argstr='%s', @@ -20,7 +21,8 @@ def test_SplineFilter_inputs(): mandatory=True, position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), track_file=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py index 296c311663..8a9a0d9201 100644 --- a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -9,14 +9,16 @@ def test_TrackMerge_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_file=dict(argstr='%s', position=-1, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), track_files=dict(argstr='%s...', mandatory=True, diff --git a/nipype/interfaces/dipy/anisotropic_power.py b/nipype/interfaces/dipy/anisotropic_power.py index f1d41ab118..2a678dfd1f 100644 --- a/nipype/interfaces/dipy/anisotropic_power.py +++ b/nipype/interfaces/dipy/anisotropic_power.py @@ -67,7 +67,7 @@ def _run_interface(self, runtime): apm = shm.anisotropic_power(peaks.shm_coeff) out_file = self._gen_filename('apm') nb.Nifti1Image(apm.astype("float32"), affine).to_filename(out_file) - IFLOGGER.info('APM qball image saved as {i}'.format(i=out_file)) + IFLOGGER.info('APM qball image saved as %s', out_file) return runtime diff --git a/nipype/interfaces/dipy/preprocess.py b/nipype/interfaces/dipy/preprocess.py index 19b76b800b..bfe197cae2 100644 --- a/nipype/interfaces/dipy/preprocess.py +++ b/nipype/interfaces/dipy/preprocess.py @@ -68,7 +68,7 @@ def _run_interface(self, runtime): resample_proxy(self.inputs.in_file, order=order, new_zooms=vox_size, out_file=out_file) - IFLOGGER.info('Resliced image saved as {i}'.format(i=out_file)) + IFLOGGER.info('Resliced image saved as %s', out_file) return runtime def _list_outputs(self): @@ -159,8 +159,8 @@ def _run_interface(self, runtime): smask=signal_mask, nmask=noise_mask, out_file=out_file) - IFLOGGER.info(('Denoised image saved as {i}, estimated ' - 'SNR={s}').format(i=out_file, s=str(s))) + IFLOGGER.info('Denoised image saved as %s, estimated SNR=%s', + out_file, str(s)) return runtime def _list_outputs(self): diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py index ee3fffce9a..d10e51dede 100644 --- a/nipype/interfaces/dipy/reconstruction.py +++ 
b/nipype/interfaces/dipy/reconstruction.py @@ -123,13 +123,12 @@ def _run_interface(self, runtime): sigma = mean_std * (1 + bias) if sigma == 0: - IFLOGGER.warn( - ('Noise std is 0.0, looks like data was masked and noise' - ' cannot be estimated correctly. Using default tensor ' - 'model instead of RESTORE.')) + IFLOGGER.warn('Noise std is 0.0, looks like data was masked and noise ' + 'cannot be estimated correctly. Using default tensor ' + 'model instead of RESTORE.') dti = TensorModel(gtab) else: - IFLOGGER.info(('Performing RESTORE with noise std=%.4f.') % sigma) + IFLOGGER.info('Performing RESTORE with noise std=%.4f.', sigma) dti = TensorModel(gtab, fit_method='RESTORE', sigma=sigma) try: @@ -252,14 +251,13 @@ def _run_interface(self, runtime): ratio = abs(response[1] / response[0]) if ratio > 0.25: - IFLOGGER.warn(('Estimated response is not prolate enough. ' - 'Ratio=%0.3f.') % ratio) + IFLOGGER.warn('Estimated response is not prolate enough. ' + 'Ratio=%0.3f.', ratio) elif ratio < 1.e-5 or np.any(np.isnan(response)): response = np.array([1.8e-3, 3.6e-4, 3.6e-4, S0]) - IFLOGGER.warn( - ('Estimated response is not valid, using a default one')) + IFLOGGER.warn('Estimated response is not valid, using a default one') else: - IFLOGGER.info(('Estimated response: %s') % str(response[:3])) + IFLOGGER.info('Estimated response: %s', str(response[:3])) np.savetxt(op.abspath(self.inputs.response), response) @@ -343,8 +341,8 @@ def _run_interface(self, runtime): ratio = response[0][1] / response[0][0] if abs(ratio - 0.2) > 0.1: - IFLOGGER.warn(('Estimated response is not prolate enough. ' - 'Ratio=%0.3f.') % ratio) + IFLOGGER.warn('Estimated response is not prolate enough. 
' + 'Ratio=%0.3f.', ratio) csd_model = ConstrainedSphericalDeconvModel( gtab, response, sh_order=self.inputs.sh_order) diff --git a/nipype/interfaces/dipy/simulate.py b/nipype/interfaces/dipy/simulate.py index 0331171811..f008948c97 100644 --- a/nipype/interfaces/dipy/simulate.py +++ b/nipype/interfaces/dipy/simulate.py @@ -10,6 +10,7 @@ import os.path as op from builtins import range +import numpy as np import nibabel as nb from ... import logging @@ -227,8 +228,8 @@ def _run_interface(self, runtime): pool = Pool(processes=n_proc) # Simulate sticks using dipy - IFLOGGER.info(('Starting simulation of %d voxels, %d diffusion' - ' directions.') % (len(args), ndirs)) + IFLOGGER.info('Starting simulation of %d voxels, %d diffusion directions.', + len(args), ndirs) result = np.array(pool.map(_compute_voxel, args)) if np.shape(result)[1] != ndirs: raise RuntimeError(('Computed directions do not match number' @@ -288,7 +289,6 @@ def _compute_voxel(args): angles=args['sticks'], fractions=ffs, snr=snr) except Exception as e: pass - # IFLOGGER.warn('Exception simulating dwi signal: %s' % e) return signal.tolist() diff --git a/nipype/interfaces/dipy/tensors.py b/nipype/interfaces/dipy/tensors.py index 7d9ab3867f..e5518f4ea0 100644 --- a/nipype/interfaces/dipy/tensors.py +++ b/nipype/interfaces/dipy/tensors.py @@ -65,14 +65,14 @@ def _run_interface(self, runtime): img = nifti1_symmat(lower_triangular, affine) out_file = self._gen_filename('dti') nb.save(img, out_file) - IFLOGGER.info('DTI parameters image saved as {i}'.format(i=out_file)) + IFLOGGER.info('DTI parameters image saved as %s', out_file) #FA MD RD and AD for metric in ["fa", "md", "rd", "ad"]: data = getattr(ten_fit,metric).astype("float32") out_name = self._gen_filename(metric) nb.Nifti1Image(data, affine).to_filename(out_name) - IFLOGGER.info('DTI {metric} image saved as {i}'.format(i=out_name, metric=metric)) + IFLOGGER.info('DTI %s image saved as %s', metric, out_name) return runtime @@ -147,7 +147,7 @@ def 
_run_interface(self, runtime): img = nb.Nifti1Image(mode_data, affine) out_file = self._gen_filename('mode') nb.save(img, out_file) - IFLOGGER.info('Tensor mode image saved as {i}'.format(i=out_file)) + IFLOGGER.info('Tensor mode image saved as %s', out_file) return runtime def _list_outputs(self): diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py index 934bc3efff..33a5a512c7 100644 --- a/nipype/interfaces/dipy/tests/test_auto_APMQball.py +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -6,7 +6,8 @@ def test_APMQball_inputs(): input_map = dict(b0_thres=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py index 658294df02..bed97a5660 100644 --- a/nipype/interfaces/dipy/tests/test_auto_CSD.py +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -6,7 +6,8 @@ def test_CSD_inputs(): input_map = dict(b0_thres=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py index fddaeb3bcb..524072e1e5 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DTI.py +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -6,7 +6,8 @@ def test_DTI_inputs(): input_map = dict(b0_thres=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py index 5b98c1353d..3807c48139 100644 --- 
a/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py @@ -4,7 +4,8 @@ def test_DipyBaseInterface_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ) diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py index 6bee2dde83..63fabe8f88 100644 --- a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -6,7 +6,8 @@ def test_DipyDiffusionInterface_inputs(): input_map = dict(b0_thres=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py index d03508c8ce..05f29f6dc3 100644 --- a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -10,7 +10,8 @@ def test_EstimateResponseSH_inputs(): ), fa_thresh=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py index 5458d5bb98..61e48c856a 100644 --- a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -6,7 +6,8 @@ def test_RESTORE_inputs(): input_map = dict(b0_thres=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git 
a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py index a3d708fb71..546627441c 100644 --- a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -13,7 +13,8 @@ def test_SimulateMultiTensor_inputs(): diff_sf=dict(usedefault=True, ), gradients=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(), diff --git a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py index 80ef4ecab4..09c03dafbc 100644 --- a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -7,7 +7,8 @@ def test_StreamlineTractography_inputs(): input_map = dict(gfa_thresh=dict(mandatory=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py index 02000714a1..cb807e7ef8 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -6,7 +6,8 @@ def test_TensorMode_inputs(): input_map = dict(b0_thres=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py index 8308be79e8..2a352d6e36 100644 --- a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -5,7 +5,8 @@ def test_TrackDensityMap_inputs(): 
input_map = dict(data_dims=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py index 4a74b36b53..bd52fe937e 100644 --- a/nipype/interfaces/dipy/tracks.py +++ b/nipype/interfaces/dipy/tracks.py @@ -71,9 +71,8 @@ def _run_interface(self, runtime): data_dims = refnii.shape[:3] kwargs = dict(affine=affine) else: - IFLOGGER.warn( - 'voxel_dims and data_dims are deprecated as of dipy 0.7.1. Please use reference ' - 'input instead') + IFLOGGER.warn('voxel_dims and data_dims are deprecated as of dipy ' + '0.7.1. Please use reference input instead') if not isdefined(self.inputs.data_dims): data_dims = header['dim'] @@ -93,9 +92,8 @@ def _run_interface(self, runtime): out_file = op.abspath(self.inputs.out_filename) nb.save(img, out_file) - IFLOGGER.info( - 'Track density map saved as %s, size=%s, dimensions=%s', - out_file, img.shape, img.header.get_zooms()) + IFLOGGER.info('Track density map saved as %s, size=%s, dimensions=%s', + out_file, img.shape, img.header.get_zooms()) return runtime @@ -238,12 +236,12 @@ def _run_interface(self, runtime): seedps = np.array(np.where(seedmsk == 1), dtype=np.float32).T vseeds = seedps.shape[0] nsperv = (seeds // vseeds) + 1 - IFLOGGER.info(('Seed mask is provided (%d voxels inside ' - 'mask), computing seeds (%d seeds/voxel).') % - (vseeds, nsperv)) + IFLOGGER.info('Seed mask is provided (%d voxels inside ' + 'mask), computing seeds (%d seeds/voxel).', + vseeds, nsperv) if nsperv > 1: - IFLOGGER.info(('Needed %d seeds per selected voxel ' - '(total %d).') % (nsperv, vseeds)) + IFLOGGER.info('Needed %d seeds per selected voxel (total %d).', + nsperv, vseeds) seedps = np.vstack(np.array([seedps] * nsperv)) voxcoord = seedps + np.random.uniform(-1, 1, size=seedps.shape) nseeds = voxcoord.shape[0] diff --git a/nipype/interfaces/dynamic_slicer.py 
b/nipype/interfaces/dynamic_slicer.py index d38f4171f3..4d1df1e136 100644 --- a/nipype/interfaces/dynamic_slicer.py +++ b/nipype/interfaces/dynamic_slicer.py @@ -25,7 +25,8 @@ class SlicerCommandLine(CommandLine): output_spec = DynamicTraitedSpec def _grab_xml(self, module): - cmd = CommandLine(command="Slicer3", args="--launch %s --xml" % module) + cmd = CommandLine(command="Slicer3", resource_monitor=False, + args="--launch %s --xml" % module) ret = cmd.run() if ret.runtime.returncode == 0: return xml.dom.minidom.parseString(ret.runtime.stdout) diff --git a/nipype/interfaces/elastix/base.py b/nipype/interfaces/elastix/base.py index afdb0a1ff4..746e571f3f 100644 --- a/nipype/interfaces/elastix/base.py +++ b/nipype/interfaces/elastix/base.py @@ -14,7 +14,7 @@ from ... import logging from ..base import CommandLineInputSpec, Directory, traits -logger = logging.getLogger('interface') +iflogger = logging.getLogger('interface') class ElastixBaseInputSpec(CommandLineInputSpec): diff --git a/nipype/interfaces/elastix/registration.py b/nipype/interfaces/elastix/registration.py index 205346ed80..5038447465 100644 --- a/nipype/interfaces/elastix/registration.py +++ b/nipype/interfaces/elastix/registration.py @@ -18,7 +18,7 @@ from .base import ElastixBaseInputSpec from ..base import CommandLine, TraitedSpec, File, traits, InputMultiPath -logger = logging.getLogger('interface') +iflogger = logging.getLogger('interface') class RegistrationInputSpec(ElastixBaseInputSpec): @@ -55,7 +55,7 @@ class Registration(CommandLine): >>> reg.inputs.fixed_image = 'fixed1.nii' >>> reg.inputs.moving_image = 'moving1.nii' >>> reg.inputs.parameters = ['elastix.txt'] - >>> reg.cmdline # doctest: +ALLOW_UNICODE + >>> reg.cmdline 'elastix -f fixed1.nii -m moving1.nii -out ./ -p elastix.txt' @@ -147,7 +147,7 @@ class ApplyWarp(CommandLine): >>> reg = ApplyWarp() >>> reg.inputs.moving_image = 'moving1.nii' >>> reg.inputs.transform_file = 'TransformParameters.0.txt' - >>> reg.cmdline # doctest: 
+ALLOW_UNICODE + >>> reg.cmdline 'transformix -in moving1.nii -out ./ -tp TransformParameters.0.txt' @@ -187,7 +187,7 @@ class AnalyzeWarp(CommandLine): >>> from nipype.interfaces.elastix import AnalyzeWarp >>> reg = AnalyzeWarp() >>> reg.inputs.transform_file = 'TransformParameters.0.txt' - >>> reg.cmdline # doctest: +ALLOW_UNICODE + >>> reg.cmdline 'transformix -def all -jac all -jacmat all -out ./ -tp TransformParameters.0.txt' @@ -228,7 +228,7 @@ class PointsWarp(CommandLine): >>> reg = PointsWarp() >>> reg.inputs.points_file = 'surf1.vtk' >>> reg.inputs.transform_file = 'TransformParameters.0.txt' - >>> reg.cmdline # doctest: +ALLOW_UNICODE + >>> reg.cmdline 'transformix -out ./ -def surf1.vtk -tp TransformParameters.0.txt' diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py index 1be5007b28..2f53e07c77 100644 --- a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -9,7 +9,8 @@ def test_AnalyzeWarp_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_threads=dict(argstr='-threads %01d', @@ -19,7 +20,8 @@ def test_AnalyzeWarp_inputs(): mandatory=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform_file=dict(argstr='-tp %s', mandatory=True, diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py index eb88b4c7e5..7b6913f96f 100644 --- a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -9,7 +9,8 @@ def test_ApplyWarp_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, 
usedefault=True, ), moving_image=dict(argstr='-in %s', @@ -22,7 +23,8 @@ def test_ApplyWarp_inputs(): mandatory=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform_file=dict(argstr='-tp %s', mandatory=True, diff --git a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py index cd995b8aa2..58a7d72e01 100644 --- a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -4,7 +4,8 @@ def test_EditTransform_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interpolation=dict(argstr='FinalBSplineInterpolationOrder', diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py index 713f912ef7..3853fe0e8c 100644 --- a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -9,7 +9,8 @@ def test_PointsWarp_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_threads=dict(argstr='-threads %01d', @@ -22,7 +23,8 @@ def test_PointsWarp_inputs(): points_file=dict(argstr='-def %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform_file=dict(argstr='-tp %s', mandatory=True, diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py index b14af447c8..bf4c322e54 100644 --- a/nipype/interfaces/elastix/tests/test_auto_Registration.py +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -14,7 +14,8 @@ def test_Registration_inputs(): ), 
fixed_mask=dict(argstr='-fMask %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initial_transform=dict(argstr='-t0 %s', @@ -34,7 +35,8 @@ def test_Registration_inputs(): parameters=dict(argstr='-p %s...', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Registration.input_spec() diff --git a/nipype/interfaces/elastix/utils.py b/nipype/interfaces/elastix/utils.py index 42fab68377..718f5310fd 100644 --- a/nipype/interfaces/elastix/utils.py +++ b/nipype/interfaces/elastix/utils.py @@ -16,7 +16,7 @@ from ... import logging from ..base import (BaseInterface, BaseInterfaceInputSpec, isdefined, TraitedSpec, File, traits) -logger = logging.getLogger('interface') +iflogger = logging.getLogger('interface') class EditTransformInputSpec(BaseInterfaceInputSpec): diff --git a/nipype/interfaces/freesurfer/__init__.py b/nipype/interfaces/freesurfer/__init__.py index 7f72ff3f6c..46e2dc9c0d 100644 --- a/nipype/interfaces/freesurfer/__init__.py +++ b/nipype/interfaces/freesurfer/__init__.py @@ -24,4 +24,4 @@ RelabelHypointensities, Aparc2Aseg, Apas2Aseg, MRIsExpand, MRIsCombine) from .longitudinal import (RobustTemplate, FuseSegmentations) from .registration import (MPRtoMNI305, RegisterAVItoTalairach, EMRegister, Register, - Paint) + Paint, MRICoreg) diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py index c3107b299c..56a4d5df77 100644 --- a/nipype/interfaces/freesurfer/base.py +++ b/nipype/interfaces/freesurfer/base.py @@ -23,12 +23,13 @@ from ...utils.filemanip import fname_presuffix from ..base import (CommandLine, Directory, CommandLineInputSpec, isdefined, - traits, TraitedSpec, File) + traits, TraitedSpec, File, + PackageInfo) __docformat__ = 'restructuredtext' -class Info(object): +class Info(PackageInfo): """ Freesurfer subject directory and version information. 
Examples @@ -39,32 +40,13 @@ class Info(object): >>> Info.subjectsdir() # doctest: +SKIP """ + if os.getenv('FREESURFER_HOME'): + version_file = os.path.join(os.getenv('FREESURFER_HOME'), + 'build-stamp.txt') @staticmethod - def version(): - """Check for freesurfer version on system - - Find which freesurfer is being used....and get version from - /path/to/freesurfer/build-stamp.txt - - Returns - ------- - - version : string - version number as string - or None if freesurfer version not found - - """ - fs_home = os.getenv('FREESURFER_HOME') - if fs_home is None: - return None - versionfile = os.path.join(fs_home, 'build-stamp.txt') - if not os.path.exists(versionfile): - return None - fid = open(versionfile, 'rt') - version = fid.readline() - fid.close() - return version + def parse_version(raw_info): + return raw_info.splitlines()[0] @classmethod def looseversion(cls): @@ -232,23 +214,19 @@ def _associated_file(in_file, out_name): class FSScriptCommand(FSCommand): - """ Support for Freesurfer script commands with log inputs.terminal_output + """ Support for Freesurfer script commands with log terminal_output """ _terminal_output = 'file' _always_run = False - def __init__(self, **inputs): - super(FSScriptCommand, self).__init__(**inputs) - self.set_default_terminal_output(self._terminal_output) - def _list_outputs(self): outputs = self._outputs().get() - outputs['log_file'] = os.path.abspath('stdout.nipype') + outputs['log_file'] = os.path.abspath('output.nipype') return outputs class FSScriptOutputSpec(TraitedSpec): - log_file = File('stdout.nipype', usedefault=True, + log_file = File('output.nipype', usedefault=True, exists=True, desc="The output log") diff --git a/nipype/interfaces/freesurfer/longitudinal.py b/nipype/interfaces/freesurfer/longitudinal.py index 4b18602ff7..84559c90d9 100644 --- a/nipype/interfaces/freesurfer/longitudinal.py +++ b/nipype/interfaces/freesurfer/longitudinal.py @@ -13,55 +13,67 @@ from __future__ import print_function, division, 
unicode_literals, absolute_import import os -#import itertools from ... import logging from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined) -from .base import FSCommand, FSTraitedSpec +from .base import (FSCommand, FSTraitedSpec, FSCommandOpenMP, + FSTraitedSpecOpenMP) __docformat__ = 'restructuredtext' iflogger = logging.getLogger('interface') -class RobustTemplateInputSpec(FSTraitedSpec): +class RobustTemplateInputSpec(FSTraitedSpecOpenMP): # required - in_files = InputMultiPath(File(exists=True), mandatory=True, argstr='--mov %s', - desc='input movable volumes to be aligned to common mean/median template') + in_files = InputMultiPath( + File(exists=True), mandatory=True, argstr='--mov %s', + desc='input movable volumes to be aligned to common mean/median ' + 'template') out_file = File('mri_robust_template_out.mgz', mandatory=True, usedefault=True, argstr='--template %s', desc='output template volume (final mean/median image)') - auto_detect_sensitivity = traits.Bool(argstr='--satit', xor=['outlier_sensitivity'], mandatory=True, - desc='auto-detect good sensitivity (recommended for head or full brain scans)') - outlier_sensitivity = traits.Float(argstr='--sat %.4f', xor=['auto_detect_sensitivity'], mandatory=True, - desc='set outlier sensitivity manually (e.g. "--sat 4.685" ). Higher values mean ' + - 'less sensitivity.') + auto_detect_sensitivity = traits.Bool( + argstr='--satit', xor=['outlier_sensitivity'], mandatory=True, + desc='auto-detect good sensitivity (recommended for head or full ' + 'brain scans)') + outlier_sensitivity = traits.Float( + argstr='--sat %.4f', xor=['auto_detect_sensitivity'], mandatory=True, + desc='set outlier sensitivity manually (e.g. "--sat 4.685" ). 
Higher ' + 'values mean less sensitivity.') # optional - transform_outputs = InputMultiPath(File(exists=False), - argstr='--lta %s', - desc='output xforms to template (for each input)') - intensity_scaling = traits.Bool(default_value=False, - argstr='--iscale', - desc='allow also intensity scaling (default off)') - scaled_intensity_outputs = InputMultiPath(File(exists=False), - argstr='--iscaleout %s', - desc='final intensity scales (will activate --iscale)') - subsample_threshold = traits.Int(argstr='--subsample %d', - desc='subsample if dim > # on all axes (default no subs.)') - average_metric = traits.Enum('median', 'mean', argstr='--average %d', - desc='construct template from: 0 Mean, 1 Median (default)') - initial_timepoint = traits.Int(argstr='--inittp %d', - desc='use TP# for spacial init (default random), 0: no init') - fixed_timepoint = traits.Bool(default_value=False, argstr='--fixtp', - desc='map everthing to init TP# (init TP is not resampled)') - no_iteration = traits.Bool(default_value=False, argstr='--noit', - desc='do not iterate, just create first template') - initial_transforms = InputMultiPath(File(exists=True), - argstr='--ixforms %s', - desc='use initial transforms (lta) on source') - in_intensity_scales = InputMultiPath(File(exists=True), - argstr='--iscalein %s', - desc='use initial intensity scales') + transform_outputs = traits.Either( + InputMultiPath(File(exists=False)), traits.Bool, argstr='--lta %s', + desc='output xforms to template (for each input)') + intensity_scaling = traits.Bool( + default_value=False, argstr='--iscale', + desc='allow also intensity scaling (default off)') + scaled_intensity_outputs = traits.Either( + InputMultiPath(File(exists=False)), traits.Bool, + argstr='--iscaleout %s', + desc='final intensity scales (will activate --iscale)') + subsample_threshold = traits.Int( + argstr='--subsample %d', + desc='subsample if dim > # on all axes (default no subs.)') + average_metric = traits.Enum( + 'median', 'mean', 
argstr='--average %d', + desc='construct template from: 0 Mean, 1 Median (default)') + initial_timepoint = traits.Int( + argstr='--inittp %d', + desc='use TP# for spacial init (default random), 0: no init') + fixed_timepoint = traits.Bool( + default_value=False, argstr='--fixtp', + desc='map everthing to init TP# (init TP is not resampled)') + no_iteration = traits.Bool( + default_value=False, argstr='--noit', + desc='do not iterate, just create first template') + initial_transforms = InputMultiPath( + File(exists=True), argstr='--ixforms %s', + desc='use initial transforms (lta) on source') + in_intensity_scales = InputMultiPath( + File(exists=True), argstr='--iscalein %s', + desc='use initial intensity scales') + class RobustTemplateOutputSpec(TraitedSpec): out_file = File( @@ -72,7 +84,7 @@ class RobustTemplateOutputSpec(TraitedSpec): File(exists=True), desc="output final intensity scales") -class RobustTemplate(FSCommand): +class RobustTemplate(FSCommandOpenMP): """ construct an unbiased robust template for longitudinal volumes Examples @@ -86,16 +98,23 @@ class RobustTemplate(FSCommand): >>> template.inputs.fixed_timepoint = True >>> template.inputs.no_iteration = True >>> template.inputs.subsample_threshold = 200 - >>> template.cmdline #doctest: +NORMALIZE_WHITESPACE +ALLOW_UNICODE + >>> template.cmdline #doctest: 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template mri_robust_template_out.mgz --subsample 200' >>> template.inputs.out_file = 'T1.nii' - >>> template.cmdline #doctest: +NORMALIZE_WHITESPACE +ALLOW_UNICODE + >>> template.cmdline #doctest: 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --subsample 200' - >>> template.inputs.transform_outputs = ['structural.lta', 'functional.lta'] - >>> template.inputs.scaled_intensity_outputs = ['structural-iscale.txt', 'functional-iscale.txt'] - >>> template.cmdline #doctest: 
+NORMALIZE_WHITESPACE +ALLOW_UNICODE - 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout structural-iscale.txt functional-iscale.txt --subsample 200 --lta structural.lta functional.lta' + >>> template.inputs.transform_outputs = ['structural.lta', + ... 'functional.lta'] + >>> template.inputs.scaled_intensity_outputs = ['structural-iscale.txt', + ... 'functional-iscale.txt'] + >>> template.cmdline #doctest: +ELLIPSIS + 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../structural-iscale.txt .../functional-iscale.txt --subsample 200 --lta .../structural.lta .../functional.lta' + + >>> template.inputs.transform_outputs = True + >>> template.inputs.scaled_intensity_outputs = True + >>> template.cmdline #doctest: +ELLIPSIS + 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../is1.txt .../is2.txt --subsample 200 --lta .../tp1.lta .../tp2.lta' >>> template.run() #doctest: +SKIP @@ -113,18 +132,28 @@ def _format_arg(self, name, spec, value): if name == 'average_metric': # return enumeration value return spec.argstr % {"mean": 0, "median": 1}[value] + if name in ('transform_outputs', 'scaled_intensity_outputs'): + value = self._list_outputs()[name] return super(RobustTemplate, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath( - self.inputs.out_file) + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + n_files = len(self.inputs.in_files) + fmt = '{}{:02d}.{}' if n_files > 9 else '{}{:d}.{}' if isdefined(self.inputs.transform_outputs): - outputs['transform_outputs'] = [os.path.abspath( - x) for x in self.inputs.transform_outputs] + fnames = self.inputs.transform_outputs + if fnames is True: + fnames = [fmt.format('tp', i + 1, 
'lta') + for i in range(n_files)] + outputs['transform_outputs'] = [os.path.abspath(x) for x in fnames] if isdefined(self.inputs.scaled_intensity_outputs): - outputs['scaled_intensity_outputs'] = [os.path.abspath( - x) for x in self.inputs.scaled_intensity_outputs] + fnames = self.inputs.scaled_intensity_outputs + if fnames is True: + fnames = [fmt.format('is', i + 1, 'txt') + for i in range(n_files)] + outputs['scaled_intensity_outputs'] = [os.path.abspath(x) + for x in fnames] return outputs @@ -151,9 +180,11 @@ class FuseSegmentationsInputSpec(FSTraitedSpec): must include the corresponding norm file for all given timepoints \ as well as for the current subject") + class FuseSegmentationsOutputSpec(TraitedSpec): out_file = File(exists=False, desc="output fused segmentation file") + class FuseSegmentations(FSCommand): """ fuse segmentations together from multiple timepoints @@ -168,7 +199,7 @@ class FuseSegmentations(FSCommand): >>> fuse.inputs.in_segmentations = ['aseg.mgz', 'aseg.mgz'] >>> fuse.inputs.in_segmentations_noCC = ['aseg.mgz', 'aseg.mgz'] >>> fuse.inputs.in_norms = ['norm.mgz', 'norm.mgz', 'norm.mgz'] - >>> fuse.cmdline # doctest: +ALLOW_UNICODE + >>> fuse.cmdline 'mri_fuse_segmentations -n norm.mgz -a aseg.mgz -c aseg.mgz tp.long.A.template tp1 tp2' """ diff --git a/nipype/interfaces/freesurfer/model.py b/nipype/interfaces/freesurfer/model.py index 007d30ac3c..e2eba23196 100644 --- a/nipype/interfaces/freesurfer/model.py +++ b/nipype/interfaces/freesurfer/model.py @@ -91,7 +91,7 @@ class MRISPreproc(FSCommand): >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \ ('cont1a.nii', 'register.dat')] >>> preproc.inputs.out_file = 'concatenated_file.mgz' - >>> preproc.cmdline # doctest: +ALLOW_UNICODE + >>> preproc.cmdline 'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' """ @@ -148,7 +148,7 @@ class MRISPreprocReconAll(MRISPreproc): >>> 
preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \ ('cont1a.nii', 'register.dat')] >>> preproc.inputs.out_file = 'concatenated_file.mgz' - >>> preproc.cmdline # doctest: +ALLOW_UNICODE + >>> preproc.cmdline 'mris_preproc --hemi lh --out concatenated_file.mgz --s subject_id --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' """ @@ -486,7 +486,7 @@ class Binarize(FSCommand): -------- >>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii') - >>> binvol.cmdline # doctest: +ALLOW_UNICODE + >>> binvol.cmdline 'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000' """ @@ -595,7 +595,7 @@ class Concatenate(FSCommand): >>> concat = Concatenate() >>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii'] >>> concat.inputs.concatenated_file = 'bar.nii' - >>> concat.cmdline # doctest: +ALLOW_UNICODE + >>> concat.cmdline 'mri_concat --o bar.nii --i cont1.nii --i cont2.nii' """ @@ -719,7 +719,7 @@ class SegStats(FSCommand): >>> ss.inputs.subjects_dir = '.' 
>>> ss.inputs.avgwf_txt_file = 'avgwf.txt' >>> ss.inputs.summary_file = 'summary.stats' - >>> ss.cmdline # doctest: +ALLOW_UNICODE + >>> ss.cmdline 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats' """ @@ -841,7 +841,7 @@ class SegStatsReconAll(SegStats): >>> segstatsreconall.inputs.total_gray = True >>> segstatsreconall.inputs.euler = True >>> segstatsreconall.inputs.exclude_id = 0 - >>> segstatsreconall.cmdline # doctest: +ALLOW_UNICODE + >>> segstatsreconall.cmdline 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray --surf-wm-vol --sum ./summary.stats' """ input_spec = SegStatsReconAllInputSpec @@ -953,7 +953,7 @@ class Label2Vol(FSCommand): -------- >>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii') - >>> binvol.cmdline # doctest: +ALLOW_UNICODE + >>> binvol.cmdline 'mri_label2vol --fillthresh 0 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii' """ @@ -1032,7 +1032,7 @@ class MS_LDA(FSCommand): shift=zero_value, vol_synth_file='synth_out.mgz', \ conform=True, use_weights=True, \ images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz']) - >>> optimalWeights.cmdline # doctest: +ALLOW_UNICODE + >>> optimalWeights.cmdline 'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz' """ @@ -1124,7 +1124,7 @@ class Label2Label(FSCommand): >>> l2l.inputs.source_label = 'lh-pial.stl' >>> l2l.inputs.source_white = 'lh.pial' >>> l2l.inputs.source_sphere_reg = 'lh.pial' - >>> l2l.cmdline # doctest: +ALLOW_UNICODE + >>> l2l.cmdline 'mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage --trgsubject 10335' """ @@ 
-1208,7 +1208,7 @@ class Label2Annot(FSCommand): >>> l2a.inputs.in_labels = ['lh.aparc.label'] >>> l2a.inputs.orig = 'lh.pial' >>> l2a.inputs.out_annot = 'test' - >>> l2a.cmdline # doctest: +ALLOW_UNICODE + >>> l2a.cmdline 'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335' """ @@ -1289,7 +1289,7 @@ class SphericalAverage(FSCommand): >>> sphericalavg.inputs.subject_id = '10335' >>> sphericalavg.inputs.erode = 2 >>> sphericalavg.inputs.threshold = 5 - >>> sphericalavg.cmdline # doctest: +ALLOW_UNICODE + >>> sphericalavg.cmdline 'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out' """ diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 4e164c342d..1d209c3022 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -67,7 +67,7 @@ class ParseDICOMDir(FSCommand): >>> dcminfo.inputs.dicom_dir = '.' >>> dcminfo.inputs.sortbyrun = True >>> dcminfo.inputs.summarize = True - >>> dcminfo.cmdline # doctest: +ALLOW_UNICODE + >>> dcminfo.cmdline 'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize' """ @@ -131,7 +131,7 @@ class UnpackSDICOMDir(FSCommand): >>> unpack.inputs.output_dir = '.' >>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct') >>> unpack.inputs.dir_structure = 'generic' - >>> unpack.cmdline # doctest: +ALLOW_UNICODE + >>> unpack.cmdline 'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .' 
""" _cmd = 'unpacksdcmdir' @@ -353,7 +353,7 @@ class MRIConvert(FSCommand): >>> mc.inputs.in_file = 'structural.nii' >>> mc.inputs.out_file = 'outfile.mgz' >>> mc.inputs.out_type = 'mgz' - >>> mc.cmdline # doctest: +ALLOW_UNICODE + >>> mc.cmdline 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz' """ @@ -579,7 +579,7 @@ class Resample(FSCommand): >>> resampler.inputs.in_file = 'structural.nii' >>> resampler.inputs.resampled_file = 'resampled.nii' >>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1) - >>> resampler.cmdline # doctest: +ALLOW_UNICODE + >>> resampler.cmdline 'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii' """ @@ -628,10 +628,17 @@ class ReconAllInputSpec(CommandLineInputSpec): argstr="-hemi %s") T1_files = InputMultiPath(File(exists=True), argstr='-i %s...', desc='name of T1 file to process') - T2_file = File(exists=True, argstr="-T2 %s", min_ver='5.3.0', + T2_file = File(exists=True, argstr="-T2 %s", + min_ver='5.3.0', desc='Convert T2 image to orig directory') - use_T2 = traits.Bool(argstr="-T2pial", min_ver='5.3.0', - desc='Use converted T2 to refine the cortical surface') + FLAIR_file = File(exists=True, argstr="-FLAIR %s", + min_ver='5.3.0', + desc='Convert FLAIR image to orig directory') + use_T2 = traits.Bool(argstr="-T2pial", min_ver='5.3.0', xor=['use_FLAIR'], + desc='Use T2 image to refine the pial surface') + use_FLAIR = traits.Bool(argstr="-FLAIRpial", + min_ver='5.3.0', xor=['use_T2'], + desc='Use FLAIR image to refine the pial surface') openmp = traits.Int(argstr="-openmp %d", desc="Number of processors to use in parallel") parallel = traits.Bool(argstr="-parallel", @@ -709,27 +716,27 @@ class ReconAll(CommandLine): >>> reconall.inputs.directive = 'all' >>> reconall.inputs.subjects_dir = '.' >>> reconall.inputs.T1_files = 'structural.nii' - >>> reconall.cmdline # doctest: +ALLOW_UNICODE + >>> reconall.cmdline 'recon-all -all -i structural.nii -subjid foo -sd .' 
>>> reconall.inputs.flags = "-qcache" - >>> reconall.cmdline # doctest: +ALLOW_UNICODE + >>> reconall.cmdline 'recon-all -all -i structural.nii -qcache -subjid foo -sd .' >>> reconall.inputs.flags = ["-cw256", "-qcache"] - >>> reconall.cmdline # doctest: +ALLOW_UNICODE + >>> reconall.cmdline 'recon-all -all -i structural.nii -cw256 -qcache -subjid foo -sd .' Hemisphere may be specified regardless of directive: >>> reconall.inputs.flags = [] >>> reconall.inputs.hemi = 'lh' - >>> reconall.cmdline # doctest: +ALLOW_UNICODE + >>> reconall.cmdline 'recon-all -all -i structural.nii -hemi lh -subjid foo -sd .' ``-autorecon-hemi`` uses the ``-hemi`` input to specify the hemisphere to operate upon: >>> reconall.inputs.directive = 'autorecon-hemi' - >>> reconall.cmdline # doctest: +ALLOW_UNICODE + >>> reconall.cmdline 'recon-all -autorecon-hemi lh -i structural.nii -subjid foo -sd .' Hippocampal subfields can accept T1 and T2 images: @@ -740,14 +747,14 @@ class ReconAll(CommandLine): >>> reconall_subfields.inputs.subjects_dir = '.' >>> reconall_subfields.inputs.T1_files = 'structural.nii' >>> reconall_subfields.inputs.hippocampal_subfields_T1 = True - >>> reconall_subfields.cmdline # doctest: +ALLOW_UNICODE + >>> reconall_subfields.cmdline 'recon-all -all -i structural.nii -hippocampal-subfields-T1 -subjid foo -sd .' >>> reconall_subfields.inputs.hippocampal_subfields_T2 = ( ... 'structural.nii', 'test') - >>> reconall_subfields.cmdline # doctest: +ALLOW_UNICODE + >>> reconall_subfields.cmdline 'recon-all -all -i structural.nii -hippocampal-subfields-T1T2 structural.nii test -subjid foo -sd .' >>> reconall_subfields.inputs.hippocampal_subfields_T1 = False - >>> reconall_subfields.cmdline # doctest: +ALLOW_UNICODE + >>> reconall_subfields.cmdline 'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .' 
""" @@ -1079,7 +1086,7 @@ def cmdline(self): return "echo recon-all: nothing to do" cmd += ' ' + ' '.join(flags) - iflogger.info('resume recon-all : %s' % cmd) + iflogger.info('resume recon-all : %s', cmd) return cmd def _prep_expert_file(self): @@ -1159,6 +1166,8 @@ class BBRegisterInputSpec(FSTraitedSpec): desc="write the transformation matrix in LTA format") registered_file = traits.Either(traits.Bool, File, argstr='--o %s', desc='output warped sourcefile either True or filename') + init_cost_file = traits.Either(traits.Bool, File, argstr='--initcost %s', + desc='output initial registration cost file') class BBRegisterInputSpec6(BBRegisterInputSpec): @@ -1172,10 +1181,11 @@ class BBRegisterInputSpec6(BBRegisterInputSpec): class BBRegisterOutputSpec(TraitedSpec): out_reg_file = File(exists=True, desc='Output registration file') - out_fsl_file = File(desc='Output FLIRT-style registration file') - out_lta_file = File(desc='Output LTA-style registration file') + out_fsl_file = File(exists=True, desc='Output FLIRT-style registration file') + out_lta_file = File(exists=True, desc='Output LTA-style registration file') min_cost_file = File(exists=True, desc='Output registration minimum cost file') - registered_file = File(desc='Registered and resampled source file') + init_cost_file = File(exists=True, desc='Output initial registration cost file') + registered_file = File(exists=True, desc='Registered and resampled source file') class BBRegister(FSCommand): @@ -1190,7 +1200,7 @@ class BBRegister(FSCommand): >>> from nipype.interfaces.freesurfer import BBRegister >>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2') - >>> bbreg.cmdline # doctest: +ALLOW_UNICODE + >>> bbreg.cmdline 'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me' """ @@ -1223,7 +1233,7 @@ def _list_outputs(self): outputs['registered_file'] = op.abspath(_in.registered_file) if isdefined(_in.out_lta_file): - if 
isinstance(_in.out_fsl_file, bool): + if isinstance(_in.out_lta_file, bool): suffix = '_bbreg_%s.lta' % _in.subject_id out_lta_file = fname_presuffix(_in.source_file, suffix=suffix, @@ -1242,17 +1252,19 @@ def _list_outputs(self): else: outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file) + if isdefined(_in.init_cost_file): + if isinstance(_in.out_fsl_file, bool): + outputs['init_cost_file'] = outputs['out_reg_file'] + '.initcost' + else: + outputs['init_cost_file'] = op.abspath(_in.init_cost_file) + outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost' return outputs def _format_arg(self, name, spec, value): - - if name in ['registered_file', 'out_fsl_file', 'out_lta_file']: - if isinstance(value, bool): - fname = self._list_outputs()[name] - else: - fname = value - return spec.argstr % fname + if name in ('registered_file', 'out_fsl_file', 'out_lta_file', + 'init_cost_file') and isinstance(value, bool): + value = self._list_outputs()[name] return super(BBRegister, self)._format_arg(name, spec, value) def _gen_filename(self, name): @@ -1277,7 +1289,15 @@ class ApplyVolTransformInputSpec(FSTraitedSpec): fs_target = traits.Bool(argstr='--fstarg', xor=_targ_xor, mandatory=True, requires=['reg_file'], desc='use orig.mgz from subject in regfile as target') - _reg_xor = ('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject') + _reg_xor = ('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', + 'reg_header', 'mni_152_reg', 'subject') + reg_file = File(exists=True, xor=_reg_xor, argstr='--reg %s', + mandatory=True, + desc='tkRAS-to-tkRAS matrix (tkregister2 format)') + lta_file = File(exists=True, xor=_reg_xor, argstr='--lta %s', + mandatory=True, desc='Linear Transform Array file') + lta_inv_file = File(exists=True, xor=_reg_xor, argstr='--lta-inv %s', + mandatory=True, desc='LTA, invert') reg_file = File(exists=True, xor=_reg_xor, argstr='--reg %s', mandatory=True, desc='tkRAS-to-tkRAS matrix (tkregister2 format)') @@ -1290,8 
+1310,9 @@ class ApplyVolTransformInputSpec(FSTraitedSpec): reg_header = traits.Bool(xor=_reg_xor, argstr='--regheader', mandatory=True, desc='ScannerRAS-to-ScannerRAS matrix = identity') - subject = traits.Str(xor=_reg_xor, argstr='--s %s', - mandatory=True, + mni_152_reg = traits.Bool(xor=_reg_xor, argstr='--regheader', mandatory=True, + desc='target MNI152 space') + subject = traits.Str(xor=_reg_xor, argstr='--s %s', mandatory=True, desc='set matrix = identity and use subject for any templates') inverse = traits.Bool(desc='sample from target to source', argstr='--inv') @@ -1337,7 +1358,7 @@ class ApplyVolTransform(FSCommand): >>> applyreg.inputs.reg_file = 'register.dat' >>> applyreg.inputs.transformed_file = 'struct_warped.nii' >>> applyreg.inputs.fs_target = True - >>> applyreg.cmdline # doctest: +ALLOW_UNICODE + >>> applyreg.cmdline 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii' """ @@ -1417,7 +1438,7 @@ class Smooth(FSCommand): >>> from nipype.interfaces.freesurfer import Smooth >>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6) - >>> smoothvol.cmdline # doctest: +ALLOW_UNICODE + >>> smoothvol.cmdline 'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000' """ @@ -1443,76 +1464,102 @@ def _gen_filename(self, name): class RobustRegisterInputSpec(FSTraitedSpec): - source_file = File(mandatory=True, argstr='--mov %s', + source_file = File(exists=True, mandatory=True, argstr='--mov %s', desc='volume to be registered') - target_file = File(mandatory=True, argstr='--dst %s', + target_file = File(exists=True, mandatory=True, argstr='--dst %s', desc='target volume for the registration') - out_reg_file = File(genfile=True, argstr='--lta %s', - desc='registration file to write') - registered_file = traits.Either(traits.Bool, File, argstr='--warp %s', - desc='registered image; either True or 
filename') - weights_file = traits.Either(traits.Bool, File, argstr='--weights %s', - desc='weights image to write; either True or filename') - est_int_scale = traits.Bool(argstr='--iscale', - desc='estimate intensity scale (recommended for unnormalized images)') + out_reg_file = traits.Either( + True, File, default=True, usedefault=True, argstr='--lta %s', + desc='registration file; either True or filename') + registered_file = traits.Either( + traits.Bool, File, argstr='--warp %s', + desc='registered image; either True or filename') + weights_file = traits.Either( + traits.Bool, File, argstr='--weights %s', + desc='weights image to write; either True or filename') + est_int_scale = traits.Bool( + argstr='--iscale', + desc='estimate intensity scale (recommended for unnormalized images)') trans_only = traits.Bool(argstr='--transonly', desc='find 3 parameter translation only') in_xfm_file = File(exists=True, argstr='--transform', desc='use initial transform on source') - half_source = traits.Either(traits.Bool, File, argstr='--halfmov %s', - desc="write source volume mapped to halfway space") - half_targ = traits.Either(traits.Bool, File, argstr="--halfdst %s", - desc="write target volume mapped to halfway space") - half_weights = traits.Either(traits.Bool, File, argstr="--halfweights %s", - desc="write weights volume mapped to halfway space") - half_source_xfm = traits.Either(traits.Bool, File, argstr="--halfmovlta %s", - desc="write transform from source to halfway space") - half_targ_xfm = traits.Either(traits.Bool, File, argstr="--halfdstlta %s", - desc="write transform from target to halfway space") - auto_sens = traits.Bool(argstr='--satit', xor=['outlier_sens'], mandatory=True, - desc='auto-detect good sensitivity') - outlier_sens = traits.Float(argstr='--sat %.4f', xor=['auto_sens'], mandatory=True, - desc='set outlier sensitivity explicitly') - least_squares = traits.Bool(argstr='--leastsquares', - desc='use least squares instead of robust estimator') + 
half_source = traits.Either( + traits.Bool, File, argstr='--halfmov %s', + desc="write source volume mapped to halfway space") + half_targ = traits.Either( + traits.Bool, File, argstr="--halfdst %s", + desc="write target volume mapped to halfway space") + half_weights = traits.Either( + traits.Bool, File, argstr="--halfweights %s", + desc="write weights volume mapped to halfway space") + half_source_xfm = traits.Either( + traits.Bool, File, argstr="--halfmovlta %s", + desc="write transform from source to halfway space") + half_targ_xfm = traits.Either( + traits.Bool, File, argstr="--halfdstlta %s", + desc="write transform from target to halfway space") + auto_sens = traits.Bool( + argstr='--satit', xor=['outlier_sens'], mandatory=True, + desc='auto-detect good sensitivity') + outlier_sens = traits.Float( + argstr='--sat %.4f', xor=['auto_sens'], mandatory=True, + desc='set outlier sensitivity explicitly') + least_squares = traits.Bool( + argstr='--leastsquares', + desc='use least squares instead of robust estimator') no_init = traits.Bool(argstr='--noinit', desc='skip transform init') - init_orient = traits.Bool(argstr='--initorient', - desc='use moments for initial orient (recommended for stripped brains)') + init_orient = traits.Bool( + argstr='--initorient', + desc='use moments for initial orient (recommended for stripped brains)' + ) max_iterations = traits.Int(argstr='--maxit %d', desc='maximum # of times on each resolution') high_iterations = traits.Int(argstr='--highit %d', desc='max # of times on highest resolution') - iteration_thresh = traits.Float(argstr='--epsit %.3f', - desc='stop iterations when below threshold') - subsample_thresh = traits.Int(argstr='--subsample %d', - desc='subsample if dimension is above threshold size') + iteration_thresh = traits.Float( + argstr='--epsit %.3f', desc='stop iterations when below threshold') + subsample_thresh = traits.Int( + argstr='--subsample %d', + desc='subsample if dimension is above threshold size') 
outlier_limit = traits.Float(argstr='--wlimit %.3f', desc='set maximal outlier limit in satit') - write_vo2vox = traits.Bool(argstr='--vox2vox', - desc='output vox2vox matrix (default is RAS2RAS)') - no_multi = traits.Bool(argstr='--nomulti', desc='work on highest resolution') + write_vo2vox = traits.Bool( + argstr='--vox2vox', desc='output vox2vox matrix (default is RAS2RAS)') + no_multi = traits.Bool(argstr='--nomulti', + desc='work on highest resolution') mask_source = File(exists=True, argstr='--maskmov %s', desc='image to mask source volume with') mask_target = File(exists=True, argstr='--maskdst %s', desc='image to mask target volume with') - force_double = traits.Bool(argstr='--doubleprec', desc='use double-precision intensities') - force_float = traits.Bool(argstr='--floattype', desc='use float intensities') + force_double = traits.Bool(argstr='--doubleprec', + desc='use double-precision intensities') + force_float = traits.Bool(argstr='--floattype', + desc='use float intensities') class RobustRegisterOutputSpec(TraitedSpec): out_reg_file = File(exists=True, desc="output registration file") - registered_file = File(desc="output image with registration applied") - weights_file = File(desc="image of weights used") - half_source = File(desc="source image mapped to halfway space") - half_targ = File(desc="target image mapped to halfway space") - half_weights = File(desc="weights image mapped to halfway space") - half_source_xfm = File(desc="transform file to map source image to halfway space") - half_targ_xfm = File(desc="transform file to map target image to halfway space") + registered_file = File(exists=True, + desc="output image with registration applied") + weights_file = File(exists=True, desc="image of weights used") + half_source = File(exists=True, + desc="source image mapped to halfway space") + half_targ = File(exists=True, desc="target image mapped to halfway space") + half_weights = File(exists=True, + desc="weights image mapped to halfway space") 
+ half_source_xfm = File( + exists=True, + desc="transform file to map source image to halfway space") + half_targ_xfm = File( + exists=True, + desc="transform file to map target image to halfway space") class RobustRegister(FSCommand): - """Perform intramodal linear registration (translation and rotation) using robust statistics. + """Perform intramodal linear registration (translation and rotation) using + robust statistics. Examples -------- @@ -1522,13 +1569,13 @@ class RobustRegister(FSCommand): >>> reg.inputs.target_file = 'T1.nii' >>> reg.inputs.auto_sens = True >>> reg.inputs.init_orient = True - >>> reg.cmdline # doctest: +ALLOW_UNICODE - 'mri_robust_register --satit --initorient --lta structural_robustreg.lta --mov structural.nii --dst T1.nii' + >>> reg.cmdline # doctest: +ELLIPSIS + 'mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii' References ---------- - Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse Consistent Registration: - A Robust Approach. Neuroimage 53(4) 1181-96. + Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse + Consistent Registration: A Robust Approach. Neuroimage 53(4) 1181-96. 
""" @@ -1537,24 +1584,20 @@ class RobustRegister(FSCommand): output_spec = RobustRegisterOutputSpec def _format_arg(self, name, spec, value): - for option in ["registered_file", "weights_file", "half_source", "half_targ", - "half_weights", "half_source_xfm", "half_targ_xfm"]: - if name == option: - if isinstance(value, bool): - fname = self._list_outputs()[name] - else: - fname = value - return spec.argstr % fname + options = ("out_reg_file", "registered_file", "weights_file", + "half_source", "half_targ", "half_weights", + "half_source_xfm", "half_targ_xfm") + if name in options and isinstance(value, bool): + value = self._list_outputs()[name] return super(RobustRegister, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_reg_file'] = self.inputs.out_reg_file - if not isdefined(self.inputs.out_reg_file) and self.inputs.source_file: - outputs['out_reg_file'] = fname_presuffix(self.inputs.source_file, - suffix='_robustreg.lta', use_ext=False) - prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file) - suffices = dict(registered_file=("src", "_robustreg", True), + cwd = os.getcwd() + prefices = dict(src=self.inputs.source_file, + trg=self.inputs.target_file) + suffices = dict(out_reg_file=("src", "_robustreg.lta", False), + registered_file=("src", "_robustreg", True), weights_file=("src", "_robustweights", True), half_source=("src", "_halfway", True), half_targ=("trg", "_halfway", True), @@ -1563,21 +1606,16 @@ def _list_outputs(self): half_targ_xfm=("trg", "_robustxfm.lta", False)) for name, sufftup in list(suffices.items()): value = getattr(self.inputs, name) - if isdefined(value): - if isinstance(value, bool): + if value: + if value is True: outputs[name] = fname_presuffix(prefices[sufftup[0]], suffix=sufftup[1], - newpath=os.getcwd(), + newpath=cwd, use_ext=sufftup[2]) else: - outputs[name] = value + outputs[name] = os.path.abspath(value) return outputs - def _gen_filename(self, name): 
- if name == 'out_reg_file': - return self._list_outputs()[name] - return None - class FitMSParamsInputSpec(FSTraitedSpec): @@ -1608,7 +1646,7 @@ class FitMSParams(FSCommand): >>> msfit = FitMSParams() >>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz'] >>> msfit.inputs.out_dir = 'flash_parameters' - >>> msfit.cmdline # doctest: +ALLOW_UNICODE + >>> msfit.cmdline 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters' """ @@ -1681,7 +1719,7 @@ class SynthesizeFLASH(FSCommand): >>> syn.inputs.t1_image = 'T1.mgz' >>> syn.inputs.pd_image = 'PD.mgz' >>> syn.inputs.out_file = 'flash_30syn.mgz' - >>> syn.cmdline # doctest: +ALLOW_UNICODE + >>> syn.cmdline 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz' """ @@ -1754,7 +1792,7 @@ class MNIBiasCorrection(FSCommand): >>> correct.inputs.iterations = 6 >>> correct.inputs.protocol_iterations = 1000 >>> correct.inputs.distance = 50 - >>> correct.cmdline # doctest: +ALLOW_UNICODE + >>> correct.cmdline 'mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000' References: @@ -1811,7 +1849,7 @@ class WatershedSkullStrip(FSCommand): >>> skullstrip.inputs.t1 = True >>> skullstrip.inputs.transform = "transforms/talairach_with_skull.lta" >>> skullstrip.inputs.out_file = "brainmask.auto.mgz" - >>> skullstrip.cmdline # doctest: +ALLOW_UNICODE + >>> skullstrip.cmdline 'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz' """ _cmd = 'mri_watershed' @@ -1859,7 +1897,7 @@ class Normalize(FSCommand): >>> normalize = freesurfer.Normalize() >>> normalize.inputs.in_file = "T1.mgz" >>> normalize.inputs.gradient = 1 - >>> normalize.cmdline # doctest: +ALLOW_UNICODE + >>> normalize.cmdline 'mri_normalize -g 1 T1.mgz T1_norm.mgz' """ _cmd = "mri_normalize" @@ -1911,7 +1949,7 @@ class CANormalize(FSCommand): >>> ca_normalize.inputs.in_file = "T1.mgz" >>> ca_normalize.inputs.atlas = "atlas.nii.gz" # in practice use .gca atlases >>> 
ca_normalize.inputs.transform = "trans.mat" # in practice use .lta transforms - >>> ca_normalize.cmdline # doctest: +ALLOW_UNICODE + >>> ca_normalize.cmdline 'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz' """ _cmd = "mri_ca_normalize" @@ -1969,7 +2007,7 @@ class CARegister(FSCommandOpenMP): >>> ca_register = freesurfer.CARegister() >>> ca_register.inputs.in_file = "norm.mgz" >>> ca_register.inputs.out_file = "talairach.m3z" - >>> ca_register.cmdline # doctest: +ALLOW_UNICODE + >>> ca_register.cmdline 'mri_ca_register norm.mgz talairach.m3z' """ _cmd = "mri_ca_register" @@ -2040,7 +2078,7 @@ class CALabel(FSCommandOpenMP): >>> ca_label.inputs.out_file = "out.mgz" >>> ca_label.inputs.transform = "trans.mat" >>> ca_label.inputs.template = "Template_6.nii" # in practice use .gcs extension - >>> ca_label.cmdline # doctest: +ALLOW_UNICODE + >>> ca_label.cmdline 'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz' """ _cmd = "mri_ca_label" @@ -2114,7 +2152,7 @@ class MRIsCALabel(FSCommandOpenMP): >>> ca_label.inputs.sulc = "lh.pial" >>> ca_label.inputs.classifier = "im1.nii" # in pracice, use .gcs extension >>> ca_label.inputs.smoothwm = "lh.pial" - >>> ca_label.cmdline # doctest: +ALLOW_UNICODE + >>> ca_label.cmdline 'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot' """ _cmd = "mris_ca_label" @@ -2200,7 +2238,7 @@ class SegmentCC(FSCommand): >>> SegmentCC_node.inputs.in_norm = "norm.mgz" >>> SegmentCC_node.inputs.out_rotation = "cc.lta" >>> SegmentCC_node.inputs.subject_id = "test" - >>> SegmentCC_node.cmdline # doctest: +ALLOW_UNICODE + >>> SegmentCC_node.cmdline 'mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test' """ @@ -2291,7 +2329,7 @@ class SegmentWM(FSCommand): >>> SegmentWM_node = freesurfer.SegmentWM() >>> SegmentWM_node.inputs.in_file = "norm.mgz" >>> SegmentWM_node.inputs.out_file = "wm.seg.mgz" - >>> SegmentWM_node.cmdline # doctest: +ALLOW_UNICODE + >>> SegmentWM_node.cmdline 'mri_segment norm.mgz wm.seg.mgz' """ @@ -2335,7 
+2373,7 @@ class EditWMwithAseg(FSCommand): >>> editwm.inputs.seg_file = "aseg.mgz" >>> editwm.inputs.out_file = "wm.asegedit.mgz" >>> editwm.inputs.keep_in = True - >>> editwm.cmdline # doctest: +ALLOW_UNICODE + >>> editwm.cmdline 'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz' """ _cmd = 'mri_edit_wm_with_aseg' @@ -2351,13 +2389,38 @@ def _list_outputs(self): class ConcatenateLTAInputSpec(FSTraitedSpec): # required in_lta1 = File(exists=True, mandatory=True, argstr='%s', position=-3, - desc="maps some src1 to dst1") - in_lta2 = File(exists=True, mandatory=True, argstr='%s', position=-2, - desc="maps dst1(src2) to dst2") - out_file = File(exists=False, position=-1, argstr='%s', - name_source=['in_lta1'], name_template='%s-long', - hash_files=False, keep_extension=True, - desc="the combined LTA maps: src1 to dst2 = LTA2*LTA1") + desc='maps some src1 to dst1') + in_lta2 = traits.Either( + File(exists=True), 'identity.nofile', argstr='%s', position=-2, + mandatory=True, desc='maps dst1(src2) to dst2') + out_file = File( + position=-1, argstr='%s', hash_files=False, name_source=['in_lta1'], + name_template='%s_concat', keep_extension=True, + desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1') + + # Inversion and transform type + invert_1 = traits.Bool(argstr='-invert1', + desc='invert in_lta1 before applying it') + invert_2 = traits.Bool(argstr='-invert2', + desc='invert in_lta2 before applying it') + invert_out = traits.Bool(argstr='-invertout', + desc='invert output LTA') + out_type = traits.Enum('VOX2VOX', 'RAS2RAS', argstr='-out_type %d', + desc='set final LTA type') + + # Talairach options + tal_source_file = traits.File( + exists=True, argstr='-tal %s', position=-5, + requires=['tal_template_file'], + desc='if in_lta2 is talairach.xfm, specify source for talairach') + tal_template_file = traits.File( + exists=True, argstr='%s', position=-4, requires=['tal_source_file'], + desc='if in_lta2 is talairach.xfm, specify template for 
talairach') + + subject = traits.Str(argstr='-subject %s', + desc='set subject in output LTA') + # Note rmsdiff would be xor out_file, and would be most easily dealt with + # in a new interface. -CJM 2017.10.05 class ConcatenateLTAOutputSpec(TraitedSpec): @@ -2366,24 +2429,40 @@ class ConcatenateLTAOutputSpec(TraitedSpec): class ConcatenateLTA(FSCommand): - """concatenates two consecutive LTA transformations - into one overall transformation, Out = LTA2*LTA1 + """ Concatenates two consecutive LTA transformations into one overall + transformation + + Out = LTA2*LTA1 Examples -------- >>> from nipype.interfaces.freesurfer import ConcatenateLTA >>> conc_lta = ConcatenateLTA() - >>> conc_lta.inputs.in_lta1 = 'trans.mat' - >>> conc_lta.inputs.in_lta2 = 'trans.mat' - >>> conc_lta.cmdline # doctest: +ALLOW_UNICODE - 'mri_concatenate_lta trans.mat trans.mat trans-long.mat' + >>> conc_lta.inputs.in_lta1 = 'lta1.lta' + >>> conc_lta.inputs.in_lta2 = 'lta2.lta' + >>> conc_lta.cmdline + 'mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta' + + You can use 'identity.nofile' as the filename for in_lta2, e.g.: + + >>> conc_lta.inputs.in_lta2 = 'identity.nofile' + >>> conc_lta.inputs.invert_1 = True + >>> conc_lta.inputs.out_file = 'inv1.lta' + >>> conc_lta.cmdline + 'mri_concatenate_lta -invert1 lta1.lta identity.nofile inv1.lta' + + To create a RAS2RAS transform: + + >>> conc_lta.inputs.out_type = 'RAS2RAS' + >>> conc_lta.cmdline + 'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta' """ _cmd = 'mri_concatenate_lta' input_spec = ConcatenateLTAInputSpec output_spec = ConcatenateLTAOutputSpec - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = os.path.abspath(self.inputs.out_file) - return outputs + def _format_arg(self, name, spec, value): + if name == 'out_type': + value = {'VOX2VOX': 0, 'RAS2RAS': 1}[value] + return super(ConcatenateLTA, self)._format_arg(name, spec, value) diff --git 
a/nipype/interfaces/freesurfer/registration.py b/nipype/interfaces/freesurfer/registration.py index d3cba1749c..60a10b4c11 100644 --- a/nipype/interfaces/freesurfer/registration.py +++ b/nipype/interfaces/freesurfer/registration.py @@ -204,7 +204,7 @@ class EMRegister(FSCommandOpenMP): >>> register.inputs.out_file = 'norm_transform.lta' >>> register.inputs.skull = True >>> register.inputs.nbrspacing = 9 - >>> register.cmdline # doctest: +ALLOW_UNICODE + >>> register.cmdline 'mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta' """ _cmd = 'mri_em_register' @@ -254,7 +254,7 @@ class Register(FSCommand): >>> register.inputs.target = 'aseg.mgz' >>> register.inputs.out_file = 'lh.pial.reg' >>> register.inputs.curv = True - >>> register.cmdline # doctest: +ALLOW_UNICODE + >>> register.cmdline 'mris_register -curv lh.pial aseg.mgz lh.pial.reg' """ @@ -320,7 +320,7 @@ class Paint(FSCommand): >>> paint.inputs.template = 'aseg.mgz' >>> paint.inputs.averages = 5 >>> paint.inputs.out_file = 'lh.avg_curv' - >>> paint.cmdline # doctest: +ALLOW_UNICODE + >>> paint.cmdline 'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv' """ @@ -338,3 +338,160 @@ def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = os.path.abspath(self.inputs.out_file) return outputs + + +class MRICoregInputSpec(FSTraitedSpec): + source_file = File(argstr='--mov %s', desc='source file to be registered', + mandatory=True, copyfile=False) + reference_file = File(argstr='--ref %s', desc='reference (target) file', + mandatory=True, copyfile=False, xor=['subject_id']) + out_lta_file = traits.Either(True, File, argstr='--lta %s', default=True, + usedefault=True, + desc='output registration file (LTA format)') + out_reg_file = traits.Either(True, File, argstr='--regdat %s', + desc='output registration file (REG format)') + out_params_file = traits.Either(True, File, argstr='--params %s', + desc='output parameters file') + + subjects_dir = Directory(exists=True, argstr='--sd %s', 
+ desc='FreeSurfer SUBJECTS_DIR') + subject_id = traits.Str( + argstr='--s %s', position=1, mandatory=True, xor=['reference_file'], + requires=['subjects_dir'], + desc='freesurfer subject ID (implies ``reference_mask == ' + 'aparc+aseg.mgz`` unless otherwise specified)') + dof = traits.Enum(6, 9, 12, argstr='--dof %d', + desc='number of transform degrees of freedom') + reference_mask = traits.Either( + False, traits.Str, argstr='--ref-mask %s', position=2, + desc='mask reference volume with given mask, or None if ``False``') + source_mask = traits.Str(argstr='--mov-mask', + desc='mask source file with given mask') + num_threads = traits.Int(argstr='--threads %d', + desc='number of OpenMP threads') + no_coord_dithering = traits.Bool(argstr='--no-coord-dither', + desc='turn off coordinate dithering') + no_intensity_dithering = traits.Bool(argstr='--no-intensity-dither', + desc='turn off intensity dithering') + sep = traits.List(argstr='--sep %s...', minlen=1, maxlen=2, + desc='set spatial scales, in voxels (default [2, 4])') + initial_translation = traits.Tuple( + traits.Float, traits.Float, traits.Float, argstr='--trans %g %g %g', + desc='initial translation in mm (implies no_cras0)') + initial_rotation = traits.Tuple( + traits.Float, traits.Float, traits.Float, argstr='--rot %g %g %g', + desc='initial rotation in degrees') + initial_scale = traits.Tuple( + traits.Float, traits.Float, traits.Float, argstr='--scale %g %g %g', + desc='initial scale') + initial_shear = traits.Tuple( + traits.Float, traits.Float, traits.Float, argstr='--shear %g %g %g', + desc='initial shear (Hxy, Hxz, Hyz)') + no_cras0 = traits.Bool(argstr='--no-cras0', + desc='do not set translation parameters to align ' + 'centers of source and reference files') + max_iters = traits.Range(low=1, argstr='--nitersmax %d', + desc='maximum iterations (default: 4)') + ftol = traits.Float(argstr='--ftol %e', + desc='floating-point tolerance (default=1e-7)') + linmintol = traits.Float(argstr='--linmintol 
%e') + saturation_threshold = traits.Range( + low=0.0, high=100.0, argstr='--sat %g', + desc='saturation threshold (default=9.999)') + conform_reference = traits.Bool(argstr='--conf-ref', + desc='conform reference without rescaling') + no_brute_force = traits.Bool(argstr='--no-bf', + desc='do not brute force search') + brute_force_limit = traits.Float( + argstr='--bf-lim %g', xor=['no_brute_force'], + desc='constrain brute force search to +/- lim') + brute_force_samples = traits.Int( + argstr='--bf-nsamp %d', xor=['no_brute_force'], + desc='number of samples in brute force search') + no_smooth = traits.Bool( + argstr='--no-smooth', + desc='do not apply smoothing to either reference or source file') + ref_fwhm = traits.Float(argstr='--ref-fwhm', + desc='apply smoothing to reference file') + source_oob = traits.Bool( + argstr='--mov-oob', + desc='count source voxels that are out-of-bounds as 0') + # Skipping mat2par + + +class MRICoregOutputSpec(TraitedSpec): + out_reg_file = File(exists=True, desc='output registration file') + out_lta_file = File(exists=True, desc='output LTA-style registration file') + out_params_file = File(exists=True, desc='output parameters file') + + +class MRICoreg(FSCommand): + """ This program registers one volume to another + + mri_coreg is a C reimplementation of spm_coreg in FreeSurfer + + Examples + ======== + >>> from nipype.interfaces.freesurfer import MRICoreg + >>> coreg = MRICoreg() + >>> coreg.inputs.source_file = 'moving1.nii' + >>> coreg.inputs.reference_file = 'fixed1.nii' + >>> coreg.inputs.subjects_dir = '.' + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --lta .../registration.lta --ref fixed1.nii --mov moving1.nii --sd .' + + If passing a subject ID, the reference mask may be disabled: + + >>> coreg = MRICoreg() + >>> coreg.inputs.source_file = 'moving1.nii' + >>> coreg.inputs.subjects_dir = '.' 
+ >>> coreg.inputs.subject_id = 'fsaverage' + >>> coreg.inputs.reference_mask = False + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --mov moving1.nii --sd .' + + Spatial scales may be specified as a list of one or two separations: + + >>> coreg.inputs.sep = [4] + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --mov moving1.nii --sd .' + + >>> coreg.inputs.sep = [4, 5] + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --sep 5 --mov moving1.nii --sd .' + """ + + _cmd = 'mri_coreg' + input_spec = MRICoregInputSpec + output_spec = MRICoregOutputSpec + + def _format_arg(self, opt, spec, val): + if opt in ('out_reg_file', 'out_lta_file', + 'out_params_file') and val is True: + val = self._list_outputs()[opt] + elif opt == 'reference_mask' and val is False: + return '--no-ref-mask' + return super(MRICoreg, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self.output_spec().get() + + out_lta_file = self.inputs.out_lta_file + if isdefined(out_lta_file): + if out_lta_file is True: + out_lta_file = 'registration.lta' + outputs['out_lta_file'] = os.path.abspath(out_lta_file) + + out_reg_file = self.inputs.out_reg_file + if isdefined(out_reg_file): + if out_reg_file is True: + out_reg_file = 'registration.dat' + outputs['out_reg_file'] = os.path.abspath(out_reg_file) + + out_params_file = self.inputs.out_params_file + if isdefined(out_params_file): + if out_params_file is True: + out_params_file = 'registration.par' + outputs['out_params_file'] = os.path.abspath(out_params_file) + + return outputs diff --git a/nipype/interfaces/freesurfer/tests/test_BBRegister.py b/nipype/interfaces/freesurfer/tests/test_BBRegister.py index e29ea17b63..9725065fef 100644 --- a/nipype/interfaces/freesurfer/tests/test_BBRegister.py +++ 
b/nipype/interfaces/freesurfer/tests/test_BBRegister.py @@ -12,6 +12,7 @@ def test_BBRegister_inputs(): fsldof=dict(argstr='--fsl-dof %d',), ignore_exception=dict(nohash=True, usedefault=True,), init=dict(argstr='--init-%s', mandatory=True, xor=['init_reg_file'],), + init_cost_file=dict(argstr='--initcost %s',), init_reg_file=dict(argstr='--init-reg %s', mandatory=True, xor=['init'],), intermediate_file=dict(argstr='--int %s',), out_fsl_file=dict(argstr='--fslmat %s',), @@ -36,6 +37,7 @@ def test_BBRegister_inputs(): ignore_exception=dict(nohash=True, usedefault=True,), init=dict(argstr='--init-%s', xor=['init_reg_file'],), init_reg_file=dict(argstr='--init-reg %s', xor=['init'],), + init_cost_file=dict(argstr='--initcost %s',), intermediate_file=dict(argstr='--int %s',), out_fsl_file=dict(argstr='--fslmat %s',), out_lta_file=dict(argstr='--lta %s', min_ver='5.2.0',), @@ -62,7 +64,8 @@ def test_BBRegister_inputs(): def test_BBRegister_outputs(): - output_map = dict(min_cost_file=dict(), + output_map = dict(init_cost_file=dict(), + min_cost_file=dict(), out_fsl_file=dict(), out_lta_file=dict(), out_reg_file=dict(), diff --git a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py index acaa5d466d..70701e5f57 100644 --- a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py @@ -29,9 +29,11 @@ def test_FSSurfaceCommand_inputs(): @pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") -def test_associated_file(): +def test_associated_file(tmpdir): fssrc = FreeSurferSource(subjects_dir=fs.Info.subjectsdir(), subject_id='fsaverage', hemi='lh') + fssrc.base_dir = tmpdir.strpath + fssrc.resource_monitor = False fsavginfo = fssrc.run().outputs.get() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py index 
5961ef84cc..c2d0989c6f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -11,7 +11,8 @@ def test_AddXFormToHeader_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -23,7 +24,8 @@ def test_AddXFormToHeader_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py index bcab8391db..140efd6227 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -19,7 +19,8 @@ def test_Aparc2Aseg_inputs(): filled=dict(), hypo_wm=dict(argstr='--hypo-as-wm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), label_wm=dict(argstr='--labelwm', @@ -52,7 +53,8 @@ def test_Aparc2Aseg_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), volmask=dict(argstr='--volmask', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py index 802ebbc1d3..3ee51e0398 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -9,7 +9,8 @@ def test_Apas2Aseg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -19,7 +20,8 @@ def 
test_Apas2Aseg_inputs(): mandatory=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Apas2Aseg.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py index 2910fbdc62..cd10358bcf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -9,7 +9,8 @@ def test_ApplyMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -34,7 +35,8 @@ def test_ApplyMask_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transfer=dict(argstr='-transfer %d', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py index 6142ae84f1..a96bf413f5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -16,9 +16,10 @@ def test_ApplyVolTransform_inputs(): ), fsl_reg_file=dict(argstr='--fsl %s', mandatory=True, - xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interp=dict(argstr='--interp %s', @@ -28,8 +29,20 @@ def test_ApplyVolTransform_inputs(): invert_morph=dict(argstr='--inv-morph', requires=['m3z_file'], ), + lta_file=dict(argstr='--lta %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 
'reg_header', 'mni_152_reg', 'subject'), + ), + lta_inv_file=dict(argstr='--lta-inv %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), m3z_file=dict(argstr='--m3z %s', ), + mni_152_reg=dict(argstr='--regheader', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), no_ded_m3z_path=dict(argstr='--noDefM3zPath', requires=['m3z_file'], ), @@ -37,11 +50,11 @@ def test_ApplyVolTransform_inputs(): ), reg_file=dict(argstr='--reg %s', mandatory=True, - xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), ), reg_header=dict(argstr='--regheader', mandatory=True, - xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), ), source_file=dict(argstr='--mov %s', copyfile=False, @@ -49,7 +62,7 @@ def test_ApplyVolTransform_inputs(): ), subject=dict(argstr='--s %s', mandatory=True, - xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), ), subjects_dir=dict(), tal=dict(argstr='--tal', @@ -62,14 +75,15 @@ def test_ApplyVolTransform_inputs(): mandatory=True, xor=('target_file', 'tal', 'fs_target'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformed_file=dict(argstr='--o %s', genfile=True, ), xfm_reg_file=dict(argstr='--xfm %s', mandatory=True, - xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'), + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'mni_152_reg', 
'subject'), ), ) inputs = ApplyVolTransform.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py index 4550cb071b..f0f1d8ba9d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -30,7 +30,8 @@ def test_Binarize_inputs(): ), frame_no=dict(argstr='--frame %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -60,7 +61,8 @@ def test_Binarize_inputs(): rmin=dict(argstr='--rmin %f', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ventricles=dict(argstr='--ventricles', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py index 17028f990d..00dc73eb06 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -13,7 +13,8 @@ def test_CALabel_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -42,7 +43,8 @@ def test_CALabel_inputs(): mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py index ab6912accf..bf4bdd612a 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -15,7 +15,8 @@ def test_CANormalize_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -34,7 +35,8 @@ def test_CANormalize_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py index e76437e24d..d50d9d8e6b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -13,7 +13,8 @@ def test_CARegister_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -40,7 +41,8 @@ def test_CARegister_inputs(): template=dict(argstr='%s', position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='-T %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py index 19b38b0273..d59e369b33 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -9,7 +9,8 @@ def test_CheckTalairachAlignment_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-xfm %s', @@ -23,7 +24,8 @@ def test_CheckTalairachAlignment_inputs(): xor=['in_file'], ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='-T %.3f', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py 
b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py index 8f702078ea..ea3dc4b1bb 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -18,7 +18,8 @@ def test_Concatenate_inputs(): ), gmean=dict(argstr='--gmean %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='--i %s...', @@ -47,7 +48,8 @@ def test_Concatenate_inputs(): stats=dict(argstr='--%s', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vote=dict(argstr='--vote', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py index b15dfee307..8dd0b1cb9e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -9,7 +9,8 @@ def test_ConcatenateLTA_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_lta1=dict(argstr='%s', @@ -20,15 +21,34 @@ def test_ConcatenateLTA_inputs(): mandatory=True, position=-2, ), + invert_1=dict(argstr='-invert1', + ), + invert_2=dict(argstr='-invert2', + ), + invert_out=dict(argstr='-invertout', + ), out_file=dict(argstr='%s', hash_files=False, keep_extension=True, name_source=['in_lta1'], - name_template='%s-long', + name_template='%s_concat', position=-1, ), + out_type=dict(argstr='-out_type %d', + ), + subject=dict(argstr='-subject %s', + ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + tal_source_file=dict(argstr='-tal %s', + position=-5, + requires=['tal_template_file'], + ), + tal_template_file=dict(argstr='%s', + position=-4, + requires=['tal_source_file'], + ), + terminal_output=dict(deprecated='1.0.0', + 
nohash=True, ), ) inputs = ConcatenateLTA.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py index 57d56b9726..1cdec290b2 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -17,7 +17,8 @@ def test_Contrast_inputs(): hemisphere=dict(argstr='--%s-only', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), orig=dict(mandatory=True, @@ -29,7 +30,8 @@ def test_Contrast_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thickness=dict(mandatory=True, ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py index 03474551d6..3c6d0d91a3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -14,7 +14,8 @@ def test_Curvature_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_Curvature_inputs(): save=dict(argstr='-w', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='-thresh %.3f', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py index 9bb6f9fc50..06385fc361 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -22,7 +22,8 @@ def test_CurvatureStats_inputs(): mandatory=True, position=-3, ), - 
ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), min_max=dict(argstr='-m', @@ -40,7 +41,8 @@ def test_CurvatureStats_inputs(): subjects_dir=dict(), surface=dict(argstr='-F %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), values=dict(argstr='-G', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py index a0e7b0fbdb..f517b74200 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -15,7 +15,8 @@ def test_DICOMConvert_inputs(): usedefault=True, ), file_mapping=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_single_slice=dict(requires=['dicom_info'], @@ -28,7 +29,8 @@ def test_DICOMConvert_inputs(): ), subject_id=dict(), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DICOMConvert.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py index 97e5910c17..f0aa686853 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -9,7 +9,8 @@ def test_EMRegister_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_EMRegister_inputs(): mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='-t %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py 
b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py index 081856a5fa..aa23199671 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -13,7 +13,8 @@ def test_EditWMwithAseg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_EditWMwithAseg_inputs(): position=-2, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = EditWMwithAseg.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py index d2eba7ed16..910e415852 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -9,7 +9,8 @@ def test_EulerNumber_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -17,7 +18,8 @@ def test_EulerNumber_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = EulerNumber.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py index eb85cba81b..439378afe4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -9,7 +9,8 @@ def test_ExtractMainComponent_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, 
usedefault=True, ), in_file=dict(argstr='%s', @@ -21,7 +22,8 @@ def test_ExtractMainComponent_inputs(): name_template='%s.maincmp', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ExtractMainComponent.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py index e718c1c4cb..bd9d6e0e0f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -9,11 +9,13 @@ def test_FSCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FSCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py index 072161bd52..d9a7a8c6f6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -9,12 +9,14 @@ def test_FSCommandOpenMP_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_threads=dict(), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FSCommandOpenMP.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py index b685a4d82a..6bbb0ed0d7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -9,11 +9,13 @@ def 
test_FSScriptCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FSScriptCommand.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py index 8496ca2ae5..22280e25a9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -10,7 +10,8 @@ def test_FitMSParams_inputs(): usedefault=True, ), flip_list=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -23,7 +24,8 @@ def test_FitMSParams_inputs(): ), subjects_dir=dict(), te_list=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tr_list=dict(), xfm_list=dict(), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py index ec064372eb..2176d18d54 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -17,7 +17,8 @@ def test_FixTopology_inputs(): mandatory=True, position=-1, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_brain=dict(mandatory=True, @@ -40,7 +41,8 @@ def test_FixTopology_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FixTopology.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py 
b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py index 24c8214fba..f2d4e53fa6 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -9,7 +9,8 @@ def test_FuseSegmentations_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_norms=dict(argstr='-n %s', @@ -28,7 +29,8 @@ def test_FuseSegmentations_inputs(): position=-3, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timepoints=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py index e99a1de407..7b81fee22b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -51,7 +51,8 @@ def test_GLMFit_inputs(): genfile=True, ), hemi=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--y %s', @@ -117,7 +118,8 @@ def test_GLMFit_inputs(): ), synth=dict(argstr='--synth', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uniform=dict(argstr='--uniform %f %f', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py index c479c7727a..f49055a47e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -9,14 +9,16 @@ def test_ImageInfo_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', position=1, ), 
subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ImageInfo.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py index 4f986f6a93..fe6998874f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -9,7 +9,8 @@ def test_Jacobian_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_mappedsurf=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_Jacobian_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Jacobian.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py new file mode 100644 index 0000000000..867e01e00d --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -0,0 +1,83 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import LTAConvert + + +def test_LTAConvert_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_fsl=dict(argstr='--infsl %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), + ), + in_itk=dict(argstr='--initk %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), + ), + in_lta=dict(argstr='--inlta %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), + ), + in_mni=dict(argstr='--inmni %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 
'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), + ), + in_niftyreg=dict(argstr='--inniftyreg %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), + ), + in_reg=dict(argstr='--inreg %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk'), + ), + invert=dict(argstr='--invert', + ), + ltavox2vox=dict(argstr='--ltavox2vox', + requires=['out_lta'], + ), + out_fsl=dict(argstr='--outfsl %s', + ), + out_itk=dict(argstr='--outitk %s', + ), + out_lta=dict(argstr='--outlta %s', + ), + out_mni=dict(argstr='--outmni %s', + ), + out_reg=dict(argstr='--outreg %s', + ), + source_file=dict(argstr='--src %s', + ), + target_conform=dict(argstr='--trgconform', + ), + target_file=dict(argstr='--trg %s', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = LTAConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_LTAConvert_outputs(): + output_map = dict(out_fsl=dict(), + out_itk=dict(), + out_lta=dict(), + out_mni=dict(), + out_reg=dict(), + ) + outputs = LTAConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py index bba05d8690..beece1b6d3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -15,7 +15,8 @@ def test_Label2Annot_inputs(): hemisphere=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_labels=dict(argstr='--l %s...', @@ -33,7 +34,8 @@ def test_Label2Annot_inputs(): usedefault=True, 
), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose_off=dict(argstr='--noverbose', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py index ab1a98f286..55ed8f026c 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -13,7 +13,8 @@ def test_Label2Label_inputs(): hemisphere=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_file=dict(argstr='--trglabel %s', @@ -42,7 +43,8 @@ def test_Label2Label_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), white=dict(mandatory=True, ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py index c58fd71532..25b680036e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -26,7 +26,8 @@ def test_Label2Vol_inputs(): identity=dict(argstr='--identity', xor=('reg_file', 'reg_header', 'identity'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), invert_mtx=dict(argstr='--invertmtx', @@ -66,7 +67,8 @@ def test_Label2Vol_inputs(): template_file=dict(argstr='--temp %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vol_label_file=dict(argstr='--o %s', genfile=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py index acf272a603..1cd0eacc4d 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -11,7 +11,8 @@ def test_MNIBiasCorrection_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -36,7 +37,8 @@ def test_MNIBiasCorrection_inputs(): stop=dict(argstr='--stop %f', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='--uchar %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py index b0b1b39a36..1f64be215d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -9,7 +9,8 @@ def test_MPRtoMNI305_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -22,7 +23,8 @@ def test_MPRtoMNI305_inputs(): target=dict(mandatory=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MPRtoMNI305.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py index 9a1f011d77..8b5bc1135d 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -49,7 +49,8 @@ def test_MRIConvert_inputs(): ), fwhm=dict(argstr='--fwhm %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_center=dict(argstr='--in_center %s', @@ -170,7 +171,8 @@ def test_MRIConvert_inputs(): 
template_info=dict(), template_type=dict(argstr='--template_type %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ti=dict(argstr='-ti %d', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py new file mode 100644 index 0000000000..e04b306c69 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -0,0 +1,109 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import MRICoreg + + +def test_MRICoreg_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + brute_force_limit=dict(argstr='--bf-lim %g', + xor=['no_brute_force'], + ), + brute_force_samples=dict(argstr='--bf-nsamp %d', + xor=['no_brute_force'], + ), + conform_reference=dict(argstr='--conf-ref', + ), + dof=dict(argstr='--dof %d', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ftol=dict(argstr='--ftol %e', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + initial_rotation=dict(argstr='--rot %g %g %g', + ), + initial_scale=dict(argstr='--scale %g %g %g', + ), + initial_shear=dict(argstr='--shear %g %g %g', + ), + initial_translation=dict(argstr='--trans %g %g %g', + ), + linmintol=dict(argstr='--linmintol %e', + ), + max_iters=dict(argstr='--nitersmax %d', + ), + no_brute_force=dict(argstr='--no-bf', + ), + no_coord_dithering=dict(argstr='--no-coord-dither', + ), + no_cras0=dict(argstr='--no-cras0', + ), + no_intensity_dithering=dict(argstr='--no-intensity-dither', + ), + no_smooth=dict(argstr='--no-smooth', + ), + num_threads=dict(argstr='--threads %d', + ), + out_lta_file=dict(argstr='--lta %s', + usedefault=True, + ), + out_params_file=dict(argstr='--params %s', + ), + out_reg_file=dict(argstr='--regdat %s', + ), + ref_fwhm=dict(argstr='--ref-fwhm', + ), + reference_file=dict(argstr='--ref %s', + copyfile=False, + mandatory=True, 
+ xor=['subject_id'], + ), + reference_mask=dict(argstr='--ref-mask %s', + position=2, + ), + saturation_threshold=dict(argstr='--sat %g', + ), + sep=dict(argstr='--sep %s...', + ), + source_file=dict(argstr='--mov %s', + copyfile=False, + mandatory=True, + ), + source_mask=dict(argstr='--mov-mask', + ), + source_oob=dict(argstr='--mov-oob', + ), + subject_id=dict(argstr='--s %s', + mandatory=True, + position=1, + requires=['subjects_dir'], + xor=['reference_file'], + ), + subjects_dir=dict(argstr='--sd %s', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = MRICoreg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_MRICoreg_outputs(): + output_map = dict(out_lta_file=dict(), + out_params_file=dict(), + out_reg_file=dict(), + ) + outputs = MRICoreg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py index 1302f7a2dd..f45165af12 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -9,7 +9,8 @@ def test_MRIFill_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_MRIFill_inputs(): segmentation=dict(argstr='-segmentation %s', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='-xform %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py 
b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py index 13c70086df..9f333c2643 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -13,7 +13,8 @@ def test_MRIMarchingCubes_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_MRIMarchingCubes_inputs(): position=-2, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRIMarchingCubes.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py index 85db09eb46..87dc5ce59f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -9,7 +9,8 @@ def test_MRIPretess_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_filled=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_MRIPretess_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), test=dict(argstr='-test', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py index a35c091e04..2e0d137668 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -21,7 +21,8 @@ def test_MRISPreproc_inputs(): hemi=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
num_iters=dict(argstr='--niters %d', @@ -60,7 +61,8 @@ def test_MRISPreproc_inputs(): target=dict(argstr='--target %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vol_measure_file=dict(argstr='--iv %s %s...', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py index e3a266d61a..56860ed6d3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -22,7 +22,8 @@ def test_MRISPreprocReconAll_inputs(): hemi=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), lh_surfreg_target=dict(requires=['surfreg_files'], @@ -72,7 +73,8 @@ def test_MRISPreprocReconAll_inputs(): target=dict(argstr='--target %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vol_measure_file=dict(argstr='--iv %s %s...', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py index 58979a75a7..9829b19326 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -9,7 +9,8 @@ def test_MRITessellate_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_MRITessellate_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tesselate_all_voxels=dict(argstr='-a', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py 
b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py index 50897b18a7..9c70889d59 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -26,7 +26,8 @@ def test_MRIsCALabel_inputs(): mandatory=True, position=-4, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), label=dict(argstr='-l %s', @@ -51,7 +52,8 @@ def test_MRIsCALabel_inputs(): subjects_dir=dict(), sulc=dict(mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRIsCALabel.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py index ad45ba32ed..0844523ac8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -13,7 +13,8 @@ def test_MRIsCalc_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file1=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_MRIsCalc_inputs(): mandatory=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRIsCalc.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py index 2eae71deea..8ac919ad65 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -9,7 +9,8 @@ def test_MRIsCombine_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='--combinesurfs %s', @@ -22,7 +23,8 @@ 
def test_MRIsCombine_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRIsCombine.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py index 6d4501c8ca..5fda94dd63 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -15,7 +15,8 @@ def test_MRIsConvert_inputs(): ), functional_file=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -52,7 +53,8 @@ def test_MRIsConvert_inputs(): subjects_dir=dict(), talairachxfm_subjid=dict(argstr='-t %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), to_scanner=dict(argstr='--to-scanner', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py index c74f31bd59..e292a2b2c5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -15,7 +15,8 @@ def test_MRIsExpand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -40,7 +41,8 @@ def test_MRIsExpand_inputs(): spring=dict(argstr='-S %g', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thickness=dict(argstr='-thickness', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py index a2ea82a4f0..3a93d5f1e1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py 
+++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -9,7 +9,8 @@ def test_MRIsInflate_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -30,7 +31,8 @@ def test_MRIsInflate_inputs(): out_sulc=dict(xor=['no_save_sulc'], ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRIsInflate.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py index cf4f27522e..97321adec8 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -11,7 +11,8 @@ def test_MS_LDA_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), images=dict(argstr='%s', @@ -30,7 +31,8 @@ def test_MS_LDA_inputs(): shift=dict(argstr='-shift %d', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_weights=dict(argstr='-W', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py index a230fac5f4..d0fec86806 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -9,7 +9,8 @@ def test_MakeAverageSubject_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_name=dict(argstr='--out %s', @@ -20,7 +21,8 @@ def test_MakeAverageSubject_inputs(): mandatory=True, sep=' ', ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MakeAverageSubject.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py index ff49e627ba..ed5c9021ab 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -16,7 +16,8 @@ def test_MakeSurfaces_inputs(): mandatory=True, position=-1, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_T1=dict(argstr='-T1 %s', @@ -55,7 +56,8 @@ def test_MakeSurfaces_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), white=dict(argstr='-white %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py index c081e76912..33319e77ed 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -12,7 +12,8 @@ def test_Normalize_inputs(): gradient=dict(argstr='-g %d', usedefault=False, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_Normalize_inputs(): segmentation=dict(argstr='-aseg %s', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(), ) diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py index e5d7c18980..a1bb43f9c5 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -51,7 +51,8 @@ def test_OneSampleTTest_inputs(): genfile=True, 
), hemi=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--y %s', @@ -117,7 +118,8 @@ def test_OneSampleTTest_inputs(): ), synth=dict(argstr='--synth', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uniform=dict(argstr='--uniform %f %f', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py index 3713464c7c..27d836b4f7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -11,7 +11,8 @@ def test_Paint_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_surf=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_Paint_inputs(): position=-3, ), template_param=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Paint.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py index 28de43ee39..57034c4367 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -19,7 +19,8 @@ def test_ParcellationStats_inputs(): mandatory=True, position=-2, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_annotation=dict(argstr='-a %s', @@ -61,7 +62,8 @@ def test_ParcellationStats_inputs(): ), tabular_output=dict(argstr='-b', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), th3=dict(argstr='-th3', requires=['cortex_label'], diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py 
b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py index 54bf4467e5..59c1d931d9 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -15,7 +15,8 @@ def test_ParseDICOMDir_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), sortbyrun=dict(argstr='--sortbyrun', @@ -23,7 +24,8 @@ def test_ParseDICOMDir_inputs(): subjects_dir=dict(), summarize=dict(argstr='--summarize', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ParseDICOMDir.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index 9a84bf9f28..b28d035254 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -4,7 +4,10 @@ def test_ReconAll_inputs(): - input_map = dict(T1_files=dict(argstr='-i %s...', + input_map = dict(FLAIR_file=dict(argstr='-FLAIR %s', + min_ver='5.3.0', + ), + T1_files=dict(argstr='-i %s...', ), T2_file=dict(argstr='-T2 %s', min_ver='5.3.0', @@ -37,7 +40,8 @@ def test_ReconAll_inputs(): hires=dict(argstr='-hires', min_ver='6.0.0', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mprage=dict(argstr='-mprage', @@ -105,10 +109,16 @@ def test_ReconAll_inputs(): ), talairach=dict(xor=['expert'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + use_FLAIR=dict(argstr='-FLAIRpial', + min_ver='5.3.0', + xor=['use_T2'], ), use_T2=dict(argstr='-T2pial', min_ver='5.3.0', + xor=['use_FLAIR'], ), xopts=dict(argstr='-xopts-%s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py 
b/nipype/interfaces/freesurfer/tests/test_auto_Register.py index 33c6e0c941..fca0812be4 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Register.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -12,7 +12,8 @@ def test_Register_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_smoothwm=dict(copyfile=True, @@ -34,7 +35,8 @@ def test_Register_inputs(): mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Register.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py index c10b12911c..452cbb0cea 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -9,7 +9,8 @@ def test_RegisterAVItoTalairach_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_RegisterAVItoTalairach_inputs(): mandatory=True, position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vox2vox=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py index 4e46bbc03d..7e8aa0a3a7 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -13,7 +13,8 @@ def test_RelabelHypointensities_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), lh_white=dict(copyfile=True, @@ -34,7 +35,8 @@ def test_RelabelHypointensities_inputs(): position=-2, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = RelabelHypointensities.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py index 14a9cd8edb..d3b9719662 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -9,7 +9,8 @@ def test_RemoveIntersection_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_RemoveIntersection_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = RemoveIntersection.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py index 023bf6552a..ae697ecaab 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -9,7 +9,8 @@ def test_RemoveNeck_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -30,7 +31,8 @@ def test_RemoveNeck_inputs(): mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py 
b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py index 811fb85cde..eea61f7cbf 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -9,7 +9,8 @@ def test_Resample_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -21,7 +22,8 @@ def test_Resample_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_size=dict(argstr='-vs %.2f %.2f %.2f', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py index c8b7080c26..48f121cd70 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -31,7 +31,8 @@ def test_RobustRegister_inputs(): ), high_iterations=dict(argstr='--highit %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_xfm_file=dict(argstr='--transform', @@ -53,7 +54,7 @@ def test_RobustRegister_inputs(): no_multi=dict(argstr='--nomulti', ), out_reg_file=dict(argstr='--lta %s', - genfile=True, + usedefault=True, ), outlier_limit=dict(argstr='--wlimit %.3f', ), @@ -72,7 +73,8 @@ def test_RobustRegister_inputs(): target_file=dict(argstr='--dst %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trans_only=dict(argstr='--transonly', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py index 579e3a8007..c4fa30271b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -17,7 +17,8 @@ def test_RobustTemplate_inputs(): ), fixed_timepoint=dict(argstr='--fixtp', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='--mov %s', @@ -33,6 +34,7 @@ def test_RobustTemplate_inputs(): ), no_iteration=dict(argstr='--noit', ), + num_threads=dict(), out_file=dict(argstr='--template %s', mandatory=True, usedefault=True, @@ -46,7 +48,8 @@ def test_RobustTemplate_inputs(): subjects_dir=dict(), subsample_threshold=dict(argstr='--subsample %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform_outputs=dict(argstr='--lta %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py index a257fd7e2e..e479518cf3 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -32,7 +32,8 @@ def test_SampleToSurface_inputs(): ico_order=dict(argstr='--icoorder %d', requires=['target_subject'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interp_method=dict(argstr='--interp %s', @@ -92,14 +93,15 @@ def test_SampleToSurface_inputs(): ), subject_id=dict(), subjects_dir=dict(), - surf_reg=dict(argstr='--surfreg', + surf_reg=dict(argstr='--surfreg %s', requires=['target_subject'], ), surface=dict(argstr='--surf %s', ), target_subject=dict(argstr='--trgsubject %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vox_file=dict(argstr='--nvox %s', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py index 04e9f830d1..1f329ac491 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -49,7 +49,8 @@ def test_SegStats_inputs(): gca_color_table=dict(argstr='--ctab-gca %s', xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -97,7 +98,8 @@ def test_SegStats_inputs(): mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), total_gray=dict(argstr='--totalgray', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py index eecc3aa4e5..45f79b2259 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -51,7 +51,8 @@ def test_SegStatsReconAll_inputs(): gca_color_table=dict(argstr='--ctab-gca %s', xor=('color_table_file', 'default_color_table', 'gca_color_table'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -118,7 +119,8 @@ def test_SegStatsReconAll_inputs(): mandatory=True, xor=('segmentation_file', 'annot', 'surf_label'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), total_gray=dict(argstr='--totalgray', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py index f54484b5b7..b6ad0b3891 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -10,7 +10,8 @@ def test_SegmentCC_inputs(): environ=dict(nohash=True, usedefault=True, ), - 
ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-aseg %s', @@ -33,7 +34,8 @@ def test_SegmentCC_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SegmentCC.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py index 450ad4f95b..5109680305 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -9,7 +9,8 @@ def test_SegmentWM_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -21,7 +22,8 @@ def test_SegmentWM_inputs(): position=-1, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SegmentWM.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py index 5720c12975..06035d71d0 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -9,7 +9,8 @@ def test_Smooth_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -37,7 +38,8 @@ def test_Smooth_inputs(): requires=['reg_file'], xor=['num_iters'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vol_fwhm=dict(argstr='--vol-fwhm %f', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py 
b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py index 2419164f5f..eb2d70bec1 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -17,7 +17,8 @@ def test_SmoothTessellation_inputs(): ), gaussian_curvature_smoothing_steps=dict(argstr='%d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -42,7 +43,8 @@ def test_SmoothTessellation_inputs(): snapshot_writing_iterations=dict(argstr='-w %d', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_gaussian_curvature_smoothing=dict(argstr='-g', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py index 8afabb96e6..ca5dd22c17 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -9,7 +9,8 @@ def test_Sphere_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_Sphere_inputs(): seed=dict(argstr='-seed %d', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Sphere.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py index 928a2a5127..54b5aab351 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -19,7 +19,8 @@ def test_SphericalAverage_inputs(): mandatory=True, position=-4, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_average=dict(argstr='%s', @@ -40,7 +41,8 @@ def test_SphericalAverage_inputs(): mandatory=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='-t %.1f', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py index 2590827648..d97095b429 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -12,7 +12,8 @@ def test_Surface2VolTransform_inputs(): hemi=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mkmask=dict(argstr='--mkmask', @@ -38,7 +39,8 @@ def test_Surface2VolTransform_inputs(): ), template_file=dict(argstr='--template %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformed_file=dict(argstr='--outvol %s', hash_files=False, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py index 835d4bc601..a0a18ba287 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -18,7 +18,8 @@ def test_SurfaceSmooth_inputs(): hemi=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--sval %s', @@ -36,7 +37,8 @@ def test_SurfaceSmooth_inputs(): mandatory=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SurfaceSmooth.input_spec() diff --git 
a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py index 2043603124..b0c4a7c482 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -26,7 +26,8 @@ def test_SurfaceSnapshots_inputs(): identity_reg=dict(argstr='-overlay-reg-identity', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), invert_overlay=dict(argstr='-invphaseflag 1', @@ -88,7 +89,8 @@ def test_SurfaceSnapshots_inputs(): tcl_script=dict(argstr='%s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), truncate_overlay=dict(argstr='-truncphaseflag 1', ), diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py index 99c54a8f78..fd3cb37931 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -12,7 +12,8 @@ def test_SurfaceTransform_inputs(): hemi=dict(argstr='--hemi %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_file=dict(argstr='--tval %s', @@ -44,7 +45,8 @@ def test_SurfaceTransform_inputs(): ), target_type=dict(argstr='--tfmt %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SurfaceTransform.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py index bc5fb23eb7..6c96565317 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py +++ 
b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -16,7 +16,8 @@ def test_SynthesizeFLASH_inputs(): mandatory=True, position=3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_SynthesizeFLASH_inputs(): mandatory=True, position=4, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tr=dict(argstr='%.2f', mandatory=True, diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py index f301168b01..f7f3136f77 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -11,7 +11,8 @@ def test_TalairachAVI_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', @@ -21,7 +22,8 @@ def test_TalairachAVI_inputs(): mandatory=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TalairachAVI.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py index a6ae75b3ff..63ff45a3fe 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -9,7 +9,8 @@ def test_TalairachQC_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), log_file=dict(argstr='%s', @@ -17,7 +18,8 @@ def test_TalairachQC_inputs(): position=0, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + 
nohash=True, ), ) inputs = TalairachQC.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py index c5e3d65274..a4f018bd7e 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -14,14 +14,24 @@ def test_Tkregister2_inputs(): fsl_out=dict(argstr='--fslregout %s', ), fstal=dict(argstr='--fstal', - xor=['target_image', 'moving_image'], + xor=['target_image', 'moving_image', 'reg_file'], ), fstarg=dict(argstr='--fstarg', xor=['target_image'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), + invert_lta_in=dict(requires=['lta_in'], + ), + invert_lta_out=dict(argstr='--ltaout-inv', + requires=['lta_in'], + ), + lta_in=dict(argstr='--lta %s', + ), + lta_out=dict(argstr='--ltaout %s', + ), moving_image=dict(argstr='--mov %s', mandatory=True, ), @@ -42,7 +52,8 @@ def test_Tkregister2_inputs(): target_image=dict(argstr='--targ %s', xor=['fstarg'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xfm=dict(argstr='--xfm %s', ), @@ -56,6 +67,7 @@ def test_Tkregister2_inputs(): def test_Tkregister2_outputs(): output_map = dict(fsl_file=dict(), + lta_file=dict(), reg_file=dict(), ) outputs = Tkregister2.output_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py index 8a1aecaa22..991a0e895f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -15,7 +15,8 @@ def test_UnpackSDICOMDir_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), log_file=dict(argstr='-log %s', @@ -42,7 
+43,8 @@ def test_UnpackSDICOMDir_inputs(): spm_zeropad=dict(argstr='-nspmzeropad %d', ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = UnpackSDICOMDir.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py index b1bcaa4e40..1eaed2521b 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -12,7 +12,8 @@ def test_VolumeMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_aseg=dict(argstr='--aseg_name %s', @@ -46,7 +47,8 @@ def test_VolumeMask_inputs(): usedefault=True, ), subjects_dir=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = VolumeMask.input_spec() diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py index 53ff443424..f1f7469161 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -12,7 +12,8 @@ def test_WatershedSkullStrip_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_WatershedSkullStrip_inputs(): subjects_dir=dict(), t1=dict(argstr='-T1', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='%s', position=-3, diff --git a/nipype/interfaces/freesurfer/tests/test_model.py b/nipype/interfaces/freesurfer/tests/test_model.py index 28e49401e0..a30a29b0ac 100644 --- 
a/nipype/interfaces/freesurfer/tests/test_model.py +++ b/nipype/interfaces/freesurfer/tests/test_model.py @@ -15,10 +15,10 @@ @pytest.mark.skipif(no_freesurfer(), reason="freesurfer is not installed") def test_concatenate(tmpdir): - tempdir = str(tmpdir) - os.chdir(tempdir) - in1 = os.path.join(tempdir, 'cont1.nii') - in2 = os.path.join(tempdir, 'cont2.nii') + tmpdir.chdir() + + in1 = tmpdir.join('cont1.nii').strpath + in2 = tmpdir.join('cont2.nii').strpath out = 'bar.nii' data1 = np.zeros((3, 3, 3, 1), dtype=np.float32) @@ -31,24 +31,24 @@ def test_concatenate(tmpdir): # Test default behavior res = model.Concatenate(in_files=[in1, in2]).run() - assert res.outputs.concatenated_file == os.path.join(tempdir, 'concat_output.nii.gz') + assert res.outputs.concatenated_file == tmpdir.join('concat_output.nii.gz').strpath assert np.allclose(nb.load('concat_output.nii.gz').get_data(), out_data) # Test specified concatenated_file res = model.Concatenate(in_files=[in1, in2], concatenated_file=out).run() - assert res.outputs.concatenated_file == os.path.join(tempdir, out) + assert res.outputs.concatenated_file == tmpdir.join(out).strpath assert np.allclose(nb.load(out, mmap=NUMPY_MMAP).get_data(), out_data) # Test in workflow - wf = pe.Workflow('test_concatenate', base_dir=tempdir) + wf = pe.Workflow('test_concatenate', base_dir=tmpdir.strpath) concat = pe.Node(model.Concatenate(in_files=[in1, in2], concatenated_file=out), name='concat') wf.add_nodes([concat]) wf.run() - assert np.allclose(nb.load(os.path.join(tempdir, - 'test_concatenate', - 'concat', out)).get_data(), + assert np.allclose(nb.load(tmpdir.join( + 'test_concatenate', + 'concat', out).strpath).get_data(), out_data) # Test a simple statistic diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py index 2d5e8cfb44..4965329fae 100644 --- a/nipype/interfaces/freesurfer/tests/test_preprocess.py +++ 
b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -17,6 +17,7 @@ def test_robustregister(create_files_in_directory): filelist, outdir = create_files_in_directory reg = freesurfer.RobustRegister() + cwd = os.getcwd() # make sure command gets called assert reg.cmd == 'mri_robust_register' @@ -28,8 +29,9 @@ def test_robustregister(create_files_in_directory): reg.inputs.source_file = filelist[0] reg.inputs.target_file = filelist[1] reg.inputs.auto_sens = True - assert reg.cmdline == ('mri_robust_register ' - '--satit --lta %s_robustreg.lta --mov %s --dst %s' % (filelist[0][:-4], filelist[0], filelist[1])) + assert reg.cmdline == ('mri_robust_register --satit --lta ' + '%s/%s_robustreg.lta --mov %s --dst %s' % + (cwd, filelist[0][:-4], filelist[0], filelist[1])) # constructor based parameter setting reg2 = freesurfer.RobustRegister(source_file=filelist[0], target_file=filelist[1], outlier_sens=3.0, diff --git a/nipype/interfaces/freesurfer/tests/test_utils.py b/nipype/interfaces/freesurfer/tests/test_utils.py index 8e756ab401..c033a1e346 100644 --- a/nipype/interfaces/freesurfer/tests/test_utils.py +++ b/nipype/interfaces/freesurfer/tests/test_utils.py @@ -188,18 +188,16 @@ def test_mrisexpand(tmpdir): assert expand_if.cmdline == orig_cmdline assert expand_nd.interface.cmdline == orig_cmdline - # Run both interfaces - if_res = expand_if.run() + # Run Node interface nd_res = expand_nd.run() # Commandlines differ node_cmdline = 'mris_expand -T 60 -pial {cwd}/lh.pial {cwd}/lh.smoothwm ' \ '1 expandtmp'.format(cwd=nd_res.runtime.cwd) - assert if_res.runtime.cmdline == orig_cmdline assert nd_res.runtime.cmdline == node_cmdline # Check output - if_out_file = if_res.outputs.get()['out_file'] + if_out_file = expand_if._list_outputs()['out_file'] nd_out_file = nd_res.outputs.get()['out_file'] # Same filename assert op.basename(if_out_file) == op.basename(nd_out_file) @@ -207,6 +205,3 @@ def test_mrisexpand(tmpdir): assert op.dirname(if_out_file) == 
op.dirname(fsavginfo['smoothwm']) # Node places output in working directory assert op.dirname(nd_out_file) == nd_res.runtime.cwd - - # Remove test surface - os.unlink(if_out_file) diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py index 85c2bf6779..97ae831b26 100644 --- a/nipype/interfaces/freesurfer/utils.py +++ b/nipype/interfaces/freesurfer/utils.py @@ -139,8 +139,9 @@ class SampleToSurfaceInputSpec(FSTraitedSpec): subject_id = traits.String(desc="subject id") target_subject = traits.String(argstr="--trgsubject %s", desc="sample to surface of different subject than source") - surf_reg = traits.Bool(argstr="--surfreg", requires=["target_subject"], - desc="use surface registration to target subject") + surf_reg = traits.Either(traits.Bool, traits.Str(), + argstr="--surfreg %s", requires=["target_subject"], + desc="use surface registration to target subject") ico_order = traits.Int(argstr="--icoorder %d", requires=["target_subject"], desc="icosahedron order when target_subject is 'ico'") @@ -196,7 +197,7 @@ class SampleToSurface(FSCommand): >>> sampler.inputs.sampling_method = "average" >>> sampler.inputs.sampling_range = 1 >>> sampler.inputs.sampling_units = "frac" - >>> sampler.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> sampler.cmdline # doctest: +ELLIPSIS 'mri_vol2surf --hemi lh --o ...lh.cope1.mgz --reg register.dat --projfrac-avg 1.000 --mov cope1.nii.gz' >>> res = sampler.run() # doctest: +SKIP @@ -233,10 +234,15 @@ def _format_arg(self, name, spec, value): "Cannot create {} file with extension " "{}".format(value, ext)) else: - logger.warn("Creating {} file with extension {}: " - "{}{}".format(value, ext, base, ext)) + logger.warn('Creating %s file with extension %s: %s%s', + value, ext, base, ext) + if value in implicit_filetypes: return "" + if name == 'surf_reg': + if value is True: + return spec.argstr % 'sphere.reg' + return super(SampleToSurface, self)._format_arg(name, spec, value) def 
_get_outfilename(self, opt="out_file"): @@ -326,7 +332,7 @@ class SurfaceSmooth(FSCommand): >>> smoother.inputs.subject_id = "subj_1" >>> smoother.inputs.hemi = "lh" >>> smoother.inputs.fwhm = 5 - >>> smoother.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> smoother.cmdline # doctest: +ELLIPSIS 'mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval lh.cope1.mgz --tval ...lh.cope1_smooth5.mgz --s subj_1' >>> smoother.run() # doctest: +SKIP @@ -423,8 +429,8 @@ def _format_arg(self, name, spec, value): "Cannot create {} file with extension " "{}".format(value, ext)) else: - logger.warn("Creating {} file with extension {}: " - "{}{}".format(value, ext, base, ext)) + logger.warn('Creating %s file with extension %s: %s%s', + value, ext, base, ext) if value in implicit_filetypes: return "" return super(SurfaceTransform, self)._format_arg(name, spec, value) @@ -518,7 +524,7 @@ class Surface2VolTransform(FSCommand): >>> xfm2vol.inputs.hemi = 'lh' >>> xfm2vol.inputs.template_file = 'cope1.nii.gz' >>> xfm2vol.inputs.subjects_dir = '.' - >>> xfm2vol.cmdline # doctest: +ALLOW_UNICODE + >>> xfm2vol.cmdline 'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . 
--template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii' >>> res = xfm2vol.run()# doctest: +SKIP @@ -995,7 +1001,7 @@ class MRIsCombine(FSSurfaceCommand): >>> mris = fs.MRIsCombine() >>> mris.inputs.in_files = ['lh.pial', 'rh.pial'] >>> mris.inputs.out_file = 'bh.pial' - >>> mris.cmdline # doctest: +ALLOW_UNICODE + >>> mris.cmdline 'mris_convert --combinesurfs lh.pial rh.pial bh.pial' >>> mris.run() # doctest: +SKIP """ @@ -1124,7 +1130,7 @@ class MRIPretess(FSCommand): >>> pretess.inputs.in_filled = 'wm.mgz' >>> pretess.inputs.in_norm = 'norm.mgz' >>> pretess.inputs.nocorners = True - >>> pretess.cmdline # doctest: +ALLOW_UNICODE + >>> pretess.cmdline 'mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz' >>> pretess.run() # doctest: +SKIP @@ -1294,7 +1300,7 @@ class MakeAverageSubject(FSCommand): >>> from nipype.interfaces.freesurfer import MakeAverageSubject >>> avg = MakeAverageSubject(subjects_ids=['s1', 's2']) - >>> avg.cmdline # doctest: +ALLOW_UNICODE + >>> avg.cmdline 'make_average_subject --out average --subjects s1 s2' """ @@ -1329,7 +1335,7 @@ class ExtractMainComponent(CommandLine): >>> from nipype.interfaces.freesurfer import ExtractMainComponent >>> mcmp = ExtractMainComponent(in_file='lh.pial') - >>> mcmp.cmdline # doctest: +ALLOW_UNICODE + >>> mcmp.cmdline 'mris_extract_main_component lh.pial lh.maincmp' """ @@ -1349,8 +1355,23 @@ class Tkregister2InputSpec(FSTraitedSpec): moving_image = File(exists=True, mandatory=True, argstr="--mov %s", desc='moving volume') + # Input registration file options fsl_in_matrix = File(exists=True, argstr="--fsl %s", desc='fsl-style registration input matrix') + xfm = File(exists=True, argstr='--xfm %s', + desc='use a matrix in MNI coordinates as initial registration') + lta_in = File(exists=True, argstr='--lta %s', + desc='use a matrix in MNI coordinates as initial registration') + invert_lta_in = traits.Bool(requires=['lta_in'], + desc='Invert input LTA before applying') + # 
Output registration file options + fsl_out = traits.Either(True, File, argstr='--fslregout %s', + desc='compute an FSL-compatible resgitration matrix') + lta_out = traits.Either(True, File, argstr='--ltaout %s', + desc='output registration file (LTA format)') + invert_lta_out = traits.Bool(argstr='--ltaout-inv', requires=['lta_in'], + desc='Invert input LTA before applying') + subject_id = traits.String(argstr="--s %s", desc='freesurfer subject ID') noedit = traits.Bool(True, argstr="--noedit", usedefault=True, @@ -1361,19 +1382,16 @@ class Tkregister2InputSpec(FSTraitedSpec): reg_header = traits.Bool(False, argstr='--regheader', desc='compute regstration from headers') fstal = traits.Bool(False, argstr='--fstal', - xor=['target_image', 'moving_image'], + xor=['target_image', 'moving_image', 'reg_file'], desc='set mov to be tal and reg to be tal xfm') movscale = traits.Float(argstr='--movscale %f', desc='adjust registration matrix to scale mov') - xfm = File(exists=True, argstr='--xfm %s', - desc='use a matrix in MNI coordinates as initial registration') - fsl_out = File(argstr='--fslregout %s', - desc='compute an FSL-compatible resgitration matrix') class Tkregister2OutputSpec(TraitedSpec): reg_file = File(exists=True, desc='freesurfer-style registration file') fsl_file = File(desc='FSL-style registration file') + lta_file = File(desc='LTA-style registration file') class Tkregister2(FSCommand): @@ -1392,7 +1410,7 @@ class Tkregister2(FSCommand): >>> tk2.inputs.moving_image = 'T1.mgz' >>> tk2.inputs.target_image = 'structural.nii' >>> tk2.inputs.reg_header = True - >>> tk2.cmdline # doctest: +ALLOW_UNICODE + >>> tk2.cmdline 'tkregister2 --mov T1.mgz --noedit --reg T1_to_native.dat --regheader \ --targ structural.nii' >>> tk2.run() # doctest: +SKIP @@ -1405,7 +1423,7 @@ class Tkregister2(FSCommand): >>> tk2 = Tkregister2() >>> tk2.inputs.moving_image = 'epi.nii' >>> tk2.inputs.fsl_in_matrix = 'flirt.mat' - >>> tk2.cmdline # doctest: +ALLOW_UNICODE + >>> tk2.cmdline 
'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat' >>> tk2.run() # doctest: +SKIP """ @@ -1413,11 +1431,34 @@ class Tkregister2(FSCommand): input_spec = Tkregister2InputSpec output_spec = Tkregister2OutputSpec + def _format_arg(self, name, spec, value): + if name == 'lta_in' and self.inputs.invert_lta_in: + spec = '--lta-inv %s' + if name in ('fsl_out', 'lta_out') and value is True: + value = self._list_outputs()[name] + return super(Tkregister2, self)._format_arg(name, spec, value) + def _list_outputs(self): outputs = self._outputs().get() - outputs['reg_file'] = os.path.abspath(self.inputs.reg_file) - if isdefined(self.inputs.fsl_out): - outputs['fsl_file'] = os.path.abspath(self.inputs.fsl_out) + reg_file = os.path.abspath(self.inputs.reg_file) + outputs['reg_file'] = reg_file + + cwd = os.getcwd() + fsl_out = self.inputs.fsl_out + if isdefined(fsl_out): + if fsl_out is True: + outputs['fsl_file'] = fname_presuffix( + reg_file, suffix='.mat', newpath=cwd, use_ext=False) + else: + outputs['fsl_file'] = os.path.abspath(self.inputs.fsl_out) + + lta_out = self.inputs.lta_out + if isdefined(lta_out): + if lta_out is True: + outputs['lta_file'] = fname_presuffix( + reg_file, suffix='.lta', newpath=cwd, use_ext=False) + else: + outputs['lta_file'] = os.path.abspath(self.inputs.lta_out) return outputs def _gen_outfilename(self): @@ -1459,11 +1500,11 @@ class AddXFormToHeader(FSCommand): >>> adder = AddXFormToHeader() >>> adder.inputs.in_file = 'norm.mgz' >>> adder.inputs.transform = 'trans.mat' - >>> adder.cmdline # doctest: +ALLOW_UNICODE + >>> adder.cmdline 'mri_add_xform_to_header trans.mat norm.mgz output.mgz' >>> adder.inputs.copy_name = True - >>> adder.cmdline # doctest: +ALLOW_UNICODE + >>> adder.cmdline 'mri_add_xform_to_header -c trans.mat norm.mgz output.mgz' >>> adder.run() # doctest: +SKIP @@ -1517,7 +1558,7 @@ class CheckTalairachAlignment(FSCommand): >>> checker.inputs.in_file = 'trans.mat' >>> checker.inputs.threshold = 0.005 - >>> 
checker.cmdline # doctest: +ALLOW_UNICODE + >>> checker.cmdline 'talairach_afd -T 0.005 -xfm trans.mat' >>> checker.run() # doctest: +SKIP @@ -1566,7 +1607,7 @@ class TalairachAVI(FSCommand): >>> example = TalairachAVI() >>> example.inputs.in_file = 'norm.mgz' >>> example.inputs.out_file = 'trans.mat' - >>> example.cmdline # doctest: +ALLOW_UNICODE + >>> example.cmdline 'talairach_avi --i norm.mgz --xfm trans.mat' >>> example.run() # doctest: +SKIP @@ -1597,7 +1638,7 @@ class TalairachQC(FSScriptCommand): >>> from nipype.interfaces.freesurfer import TalairachQC >>> qc = TalairachQC() >>> qc.inputs.log_file = 'dirs.txt' - >>> qc.cmdline # doctest: +ALLOW_UNICODE + >>> qc.cmdline 'tal_QC_AZS dirs.txt' """ _cmd = "tal_QC_AZS" @@ -1636,7 +1677,7 @@ class RemoveNeck(FSCommand): >>> remove_neck.inputs.in_file = 'norm.mgz' >>> remove_neck.inputs.transform = 'trans.mat' >>> remove_neck.inputs.template = 'trans.mat' - >>> remove_neck.cmdline # doctest: +ALLOW_UNICODE + >>> remove_neck.cmdline 'mri_remove_neck norm.mgz trans.mat trans.mat norm_noneck.mgz' """ _cmd = "mri_remove_neck" @@ -1776,7 +1817,7 @@ class Sphere(FSCommandOpenMP): >>> from nipype.interfaces.freesurfer import Sphere >>> sphere = Sphere() >>> sphere.inputs.in_file = 'lh.pial' - >>> sphere.cmdline # doctest: +ALLOW_UNICODE + >>> sphere.cmdline 'mris_sphere lh.pial lh.sphere' """ _cmd = 'mris_sphere' @@ -1900,7 +1941,7 @@ class EulerNumber(FSCommand): >>> from nipype.interfaces.freesurfer import EulerNumber >>> ft = EulerNumber() >>> ft.inputs.in_file = 'lh.pial' - >>> ft.cmdline # doctest: +ALLOW_UNICODE + >>> ft.cmdline 'mris_euler_number lh.pial' """ _cmd = 'mris_euler_number' @@ -1936,7 +1977,7 @@ class RemoveIntersection(FSCommand): >>> from nipype.interfaces.freesurfer import RemoveIntersection >>> ri = RemoveIntersection() >>> ri.inputs.in_file = 'lh.pial' - >>> ri.cmdline # doctest: +ALLOW_UNICODE + >>> ri.cmdline 'mris_remove_intersection lh.pial lh.pial' """ @@ -2032,7 +2073,7 @@ class 
MakeSurfaces(FSCommand): >>> makesurfaces.inputs.in_label = 'aparc+aseg.nii' >>> makesurfaces.inputs.in_T1 = 'T1.mgz' >>> makesurfaces.inputs.orig_pial = 'lh.pial' - >>> makesurfaces.cmdline # doctest: +ALLOW_UNICODE + >>> makesurfaces.cmdline 'mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh' """ @@ -2165,7 +2206,7 @@ class Curvature(FSCommand): >>> curv = Curvature() >>> curv.inputs.in_file = 'lh.pial' >>> curv.inputs.save = True - >>> curv.cmdline # doctest: +ALLOW_UNICODE + >>> curv.cmdline 'mris_curvature -w lh.pial' """ @@ -2259,7 +2300,7 @@ class CurvatureStats(FSCommand): >>> curvstats.inputs.values = True >>> curvstats.inputs.min_max = True >>> curvstats.inputs.write = True - >>> curvstats.cmdline # doctest: +ALLOW_UNICODE + >>> curvstats.cmdline 'mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial' """ @@ -2316,7 +2357,7 @@ class Jacobian(FSCommand): >>> jacobian = Jacobian() >>> jacobian.inputs.in_origsurf = 'lh.pial' >>> jacobian.inputs.in_mappedsurf = 'lh.pial' - >>> jacobian.cmdline # doctest: +ALLOW_UNICODE + >>> jacobian.cmdline 'mris_jacobian lh.pial lh.pial lh.jacobian' """ @@ -2453,7 +2494,7 @@ class VolumeMask(FSCommand): >>> volmask.inputs.rh_white = 'lh.pial' >>> volmask.inputs.subject_id = '10335' >>> volmask.inputs.save_ribbon = True - >>> volmask.cmdline # doctest: +ALLOW_UNICODE + >>> volmask.cmdline 'mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335' """ @@ -2793,7 +2834,7 @@ class RelabelHypointensities(FSCommand): >>> relabelhypos.inputs.rh_white = 'lh.pial' >>> relabelhypos.inputs.surf_directory = '.' >>> relabelhypos.inputs.aseg = 'aseg.mgz' - >>> relabelhypos.cmdline # doctest: +ALLOW_UNICODE + >>> relabelhypos.cmdline 'mri_relabel_hypointensities aseg.mgz . 
aseg.hypos.mgz' """ @@ -2964,7 +3005,7 @@ class Apas2Aseg(FSCommand): >>> apas2aseg = Apas2Aseg() >>> apas2aseg.inputs.in_file = 'aseg.mgz' >>> apas2aseg.inputs.out_file = 'output.mgz' - >>> apas2aseg.cmdline # doctest: +ALLOW_UNICODE + >>> apas2aseg.cmdline 'apas2aseg --i aseg.mgz --o output.mgz' """ @@ -3046,10 +3087,10 @@ class MRIsExpand(FSSurfaceCommand): >>> from nipype.interfaces.freesurfer import MRIsExpand >>> mris_expand = MRIsExpand(thickness=True, distance=0.5) >>> mris_expand.inputs.in_file = 'lh.white' - >>> mris_expand.cmdline # doctest: +ALLOW_UNICODE + >>> mris_expand.cmdline 'mris_expand -thickness lh.white 0.5 expanded' >>> mris_expand.inputs.out_name = 'graymid' - >>> mris_expand.cmdline # doctest: +ALLOW_UNICODE + >>> mris_expand.cmdline 'mris_expand -thickness lh.white 0.5 graymid' """ _cmd = 'mris_expand' @@ -3080,3 +3121,83 @@ def _normalize_filenames(self): thickness_name) self.inputs.sphere = self._associated_file(in_file, self.inputs.sphere) + + +class LTAConvertInputSpec(CommandLineInputSpec): + # Inputs + _in_xor = ('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk') + in_lta = traits.Either( + File(exists=True), 'identity.nofile', argstr='--inlta %s', + mandatory=True, xor=_in_xor, desc='input transform of LTA type') + in_fsl = File( + exists=True, argstr='--infsl %s', mandatory=True, xor=_in_xor, + desc='input transform of FSL type') + in_mni = File( + exists=True, argstr='--inmni %s', mandatory=True, xor=_in_xor, + desc='input transform of MNI/XFM type') + in_reg = File( + exists=True, argstr='--inreg %s', mandatory=True, xor=_in_xor, + desc='input transform of TK REG type (deprecated format)') + in_niftyreg = File( + exists=True, argstr='--inniftyreg %s', mandatory=True, xor=_in_xor, + desc='input transform of Nifty Reg type (inverse RAS2RAS)') + in_itk = File( + exists=True, argstr='--initk %s', mandatory=True, xor=_in_xor, + desc='input transform of ITK type') + # Outputs + out_lta = traits.Either( + traits.Bool, 
File, argstr='--outlta %s', + desc='output linear transform (LTA Freesurfer format)') + out_fsl = traits.Either(traits.Bool, File, argstr='--outfsl %s', + desc='output transform in FSL format') + out_mni = traits.Either(traits.Bool, File, argstr='--outmni %s', + desc='output transform in MNI/XFM format') + out_reg = traits.Either(traits.Bool, File, argstr='--outreg %s', + desc='output transform in reg dat format') + out_itk = traits.Either(traits.Bool, File, argstr='--outitk %s', + desc='output transform in ITK format') + # Optional flags + invert = traits.Bool(argstr='--invert') + ltavox2vox = traits.Bool(argstr='--ltavox2vox', requires=['out_lta']) + source_file = File(exists=True, argstr='--src %s') + target_file = File(exists=True, argstr='--trg %s') + target_conform = traits.Bool(argstr='--trgconform') + + +class LTAConvertOutputSpec(TraitedSpec): + out_lta = File(exists=True, + desc='output linear transform (LTA Freesurfer format)') + out_fsl = File(exists=True, desc='output transform in FSL format') + out_mni = File(exists=True, desc='output transform in MNI/XFM format') + out_reg = File(exists=True, desc='output transform in reg dat format') + out_itk = File(exists=True, desc='output transform in ITK format') + + +class LTAConvert(CommandLine): + """Convert different transformation formats. + Some formats may require you to pass an image if the geometry information + is missing form the transform file format. + + For complete details, see the `lta_convert documentation. 
+ `_ + """ + input_spec = LTAConvertInputSpec + output_spec = LTAConvertOutputSpec + _cmd = 'lta_convert' + + def _format_arg(self, name, spec, value): + if name.startswith('out_') and value is True: + value = self._list_outputs()[name] + return super(LTAConvert, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + for name, default in (('out_lta', 'out.lta'), ('out_fsl', 'out.mat'), + ('out_mni', 'out.xfm'), ('out_reg', 'out.dat'), + ('out_itk', 'out.txt')): + attr = getattr(self.inputs, name) + if attr: + fname = default if attr is True else attr + outputs[name] = os.path.abspath(fname) + + return outputs diff --git a/nipype/interfaces/fsl/__init__.py b/nipype/interfaces/fsl/__init__.py index 29b99ecb1b..9524a731d6 100644 --- a/nipype/interfaces/fsl/__init__.py +++ b/nipype/interfaces/fsl/__init__.py @@ -18,7 +18,9 @@ PlotTimeSeries, PlotMotionParams, ConvertXFM, SwapDimensions, PowerSpectrum, Reorient2Std, Complex, InvWarp, WarpUtils, ConvertWarp, WarpPoints, - WarpPointsToStd, RobustFOV, CopyGeom, MotionOutliers) + WarpPointsToStd, WarpPointsFromStd, RobustFOV, + CopyGeom, MotionOutliers) + from .epi import (PrepareFieldmap, TOPUP, ApplyTOPUP, Eddy, EPIDeWarp, SigLoss, EddyCorrect, EpiReg) from .dti import (BEDPOSTX, XFibres, DTIFit, @@ -33,3 +35,4 @@ from .possum import B0Calc from .fix import (AccuracyTester, Classifier, Cleaner, FeatureExtractor, Training, TrainingSetCreator) +from .aroma import ICA_AROMA diff --git a/nipype/interfaces/fsl/ICA_AROMA.py b/nipype/interfaces/fsl/aroma.py similarity index 96% rename from nipype/interfaces/fsl/ICA_AROMA.py rename to nipype/interfaces/fsl/aroma.py index 7129602d65..fb8dc82bd8 100644 --- a/nipype/interfaces/fsl/ICA_AROMA.py +++ b/nipype/interfaces/fsl/aroma.py @@ -13,7 +13,7 @@ from __future__ import print_function, division, unicode_literals, absolute_import from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, - File, Directory, traits) + File, 
Directory, traits, isdefined) import os @@ -55,7 +55,7 @@ class ICA_AROMAInputSpec(CommandLineInputSpec): denoise_type = traits.Enum('nonaggr', 'aggr', 'both', 'no', usedefault=True, mandatory=True, argstr='-den %s', desc='Type of denoising strategy:\n' - '-none: only classification, no denoising\n' + '-no: only classification, no denoising\n' '-nonaggr (default): non-aggresssive denoising, i.e. partial component regression\n' '-aggr: aggressive denoising, i.e. full component regression\n' '-both: both aggressive and non-aggressive denoising (two outputs)') @@ -87,7 +87,7 @@ class ICA_AROMA(CommandLine): >>> from nipype.interfaces.fsl import ICA_AROMA >>> from nipype.testing import example_data - >>> AROMA_obj = ICA_AROMA.ICA_AROMA() + >>> AROMA_obj = ICA_AROMA() >>> AROMA_obj.inputs.in_file = 'functional.nii' >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat' >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii' @@ -95,7 +95,7 @@ class ICA_AROMA(CommandLine): >>> AROMA_obj.inputs.mask = 'mask.nii.gz' >>> AROMA_obj.inputs.denoise_type = 'both' >>> AROMA_obj.inputs.out_dir = 'ICA_testout' - >>> AROMA_obj.cmdline # doctest: +ALLOW_UNICODE + >>> AROMA_obj.cmdline 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o ICA_testout' """ _cmd = 'ICA_AROMA.py' @@ -109,7 +109,7 @@ def _list_outputs(self): else: outputs['out_dir'] = self._gen_filename('out_dir') out_dir = outputs['out_dir'] - + if self.inputs.denoise_type in ('aggr', 'both'): outputs['aggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_aggr.nii.gz') if self.inputs.denoise_type in ('nonaggr', 'both'): @@ -119,5 +119,3 @@ def _list_outputs(self): def _gen_filename(self, name): if name == 'out_dir': return os.getcwd() - - diff --git a/nipype/interfaces/fsl/base.py b/nipype/interfaces/fsl/base.py index f5353f2b06..2bb120e097 100644 --- a/nipype/interfaces/fsl/base.py +++ b/nipype/interfaces/fsl/base.py @@ -26,27 +26,33 
@@ """ from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import open, object from glob import glob import os from ... import logging from ...utils.filemanip import fname_presuffix -from ..base import traits, isdefined, CommandLine, CommandLineInputSpec +from ..base import traits, isdefined, CommandLine, CommandLineInputSpec, PackageInfo from ...external.due import BibTeX -LOGGER = logging.getLogger('interface') +IFLOGGER = logging.getLogger('interface') -class Info(object): - """Handle fsl output type and version information. - - version refers to the version of fsl on the system +class Info(PackageInfo): + """ + Handle FSL ``output_type`` and version information. output type refers to the type of file fsl defaults to writing eg, NIFTI, NIFTI_GZ + Examples + -------- + + >>> from nipype.interfaces.fsl import Info + >>> Info.version() # doctest: +SKIP + >>> Info.output_type() # doctest: +SKIP + + """ ftypes = {'NIFTI': '.nii', @@ -54,28 +60,13 @@ class Info(object): 'NIFTI_GZ': '.nii.gz', 'NIFTI_PAIR_GZ': '.img.gz'} - @staticmethod - def version(): - """Check for fsl version on system - - Parameters - ---------- - None - - Returns - ------- - version : str - Version number as string or None if FSL not found + if os.getenv('FSLDIR'): + version_file = os.path.join( + os.getenv('FSLDIR'), 'etc', 'fslversion') - """ - # find which fsl being used....and get version from - # /path/to/fsl/etc/fslversion - try: - basedir = os.environ['FSLDIR'] - except KeyError: - return None - out = open('%s/etc/fslversion' % (basedir)).read() - return out.strip('\n') + @staticmethod + def parse_version(raw_info): + return raw_info.splitlines()[0] @classmethod def output_type_to_ext(cls, output_type): @@ -113,8 +104,8 @@ def output_type(cls): try: return os.environ['FSLOUTPUTTYPE'] except KeyError: - LOGGER.warn('FSLOUTPUTTYPE environment variable is not set. 
' - 'Setting FSLOUTPUTTYPE=NIFTI') + IFLOGGER.warn('FSLOUTPUTTYPE environment variable is not set. ' + 'Setting FSLOUTPUTTYPE=NIFTI') return 'NIFTI' @staticmethod diff --git a/nipype/interfaces/fsl/dti.py b/nipype/interfaces/fsl/dti.py index c514bd95f0..812515fcda 100644 --- a/nipype/interfaces/fsl/dti.py +++ b/nipype/interfaces/fsl/dti.py @@ -85,7 +85,7 @@ class DTIFit(FSLCommand): >>> dti.inputs.bvals = 'bvals' >>> dti.inputs.base_name = 'TP' >>> dti.inputs.mask = 'mask.nii' - >>> dti.cmdline # doctest: +ALLOW_UNICODE + >>> dti.cmdline 'dtifit -k diffusion.nii -o TP -m mask.nii -r bvecs -b bvals' """ @@ -327,7 +327,7 @@ class BEDPOSTX5(FSLXCommand): >>> from nipype.interfaces import fsl >>> bedp = fsl.BEDPOSTX5(bvecs='bvecs', bvals='bvals', dwi='diffusion.nii', ... mask='mask.nii', n_fibres=1) - >>> bedp.cmdline # doctest: +ALLOW_UNICODE + >>> bedp.cmdline 'bedpostx bedpostx --forcedir -n 1' """ @@ -583,7 +583,7 @@ class ProbTrackX(FSLCommand): target_masks = ['targets_MASK1.nii', 'targets_MASK2.nii'], \ thsamples='merged_thsamples.nii', fsamples='merged_fsamples.nii', phsamples='merged_phsamples.nii', \ out_dir='.') - >>> pbx.cmdline # doctest: +ALLOW_UNICODE + >>> pbx.cmdline 'probtrackx --forcedir -m mask.nii --mode=seedmask --nsamples=3 --nsteps=10 --opd --os2t --dir=. --samples=merged --seed=MASK_average_thal_right.nii --targetmasks=targets.txt --xfm=trans.mat' """ @@ -780,7 +780,7 @@ class ProbTrackX2(ProbTrackX): >>> pbx2.inputs.out_dir = '.' >>> pbx2.inputs.n_samples = 3 >>> pbx2.inputs.n_steps = 10 - >>> pbx2.cmdline # doctest: +ALLOW_UNICODE + >>> pbx2.cmdline 'probtrackx2 --forcedir -m nodif_brain_mask.nii.gz --nsamples=3 --nsteps=10 --opd --dir=. 
--samples=merged --seed=seed_source.nii.gz' """ _cmd = 'probtrackx2' @@ -795,9 +795,11 @@ def _list_outputs(self): else: out_dir = self.inputs.out_dir + outputs['way_total'] = os.path.abspath(os.path.join(out_dir, 'waytotal')) + if isdefined(self.inputs.omatrix1): outputs['network_matrix'] = os.path.abspath( - os.path.join(out_dir, 'fdt_network_matrix')) + os.path.join(out_dir, 'matrix_seeds_to_all_targets')) outputs['matrix1_dot'] = os.path.abspath( os.path.join(out_dir, 'fdt_matrix1.dot')) @@ -869,7 +871,7 @@ class VecReg(FSLCommand): affine_mat='trans.mat', \ ref_vol='mni.nii', \ out_file='diffusion_vreg.nii') - >>> vreg.cmdline # doctest: +ALLOW_UNICODE + >>> vreg.cmdline 'vecreg -t trans.mat -i diffusion.nii -o diffusion_vreg.nii -r mni.nii' """ @@ -930,7 +932,7 @@ class ProjThresh(FSLCommand): >>> from nipype.interfaces import fsl >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] >>> pThresh = fsl.ProjThresh(in_files=ldir, threshold=3) - >>> pThresh.cmdline # doctest: +ALLOW_UNICODE + >>> pThresh.cmdline 'proj_thresh seeds_to_M1.nii seeds_to_M2.nii 3' """ @@ -978,7 +980,7 @@ class FindTheBiggest(FSLCommand): >>> from nipype.interfaces import fsl >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] >>> fBig = fsl.FindTheBiggest(in_files=ldir, out_file='biggestSegmentation') - >>> fBig.cmdline # doctest: +ALLOW_UNICODE + >>> fBig.cmdline 'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation' """ diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 1f4a7ded1a..0beb60b3c0 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -71,7 +71,7 @@ class PrepareFieldmap(FSLCommand): >>> prepare.inputs.in_phase = "phase.nii" >>> prepare.inputs.in_magnitude = "magnitude.nii" >>> prepare.inputs.output_type = "NIFTI_GZ" - >>> prepare.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> prepare.cmdline # doctest: +ELLIPSIS 'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii \ .../phase_fslprepared.nii.gz 2.460000' 
>>> res = prepare.run() # doctest: +SKIP @@ -143,6 +143,9 @@ class TOPUPInputSpec(FSLCommandInputSpec): out_warp_prefix = traits.Str("warpfield", argstr='--dfout=%s', hash_files=False, desc='prefix for the warpfield images (in mm)', usedefault=True) + out_mat_prefix = traits.Str("xfm", argstr='--rbmout=%s', hash_files=False, + desc='prefix for the realignment matrices', + usedefault=True) out_jac_prefix = traits.Str("jac", argstr='--jacout=%s', hash_files=False, desc='prefix for the warpfield images', @@ -221,6 +224,7 @@ class TOPUPOutputSpec(TraitedSpec): out_field = File(desc='name of image file with field (Hz)') out_warps = traits.List(File(exists=True), desc='warpfield images') out_jacs = traits.List(File(exists=True), desc='Jacobian images') + out_mats = traits.List(File(exists=True), desc='realignment matrices') out_corrected = File(desc='name of 4D image file with unwarped images') out_logfile = File(desc='name of log-file') @@ -243,11 +247,11 @@ class TOPUP(FSLCommand): >>> topup.inputs.in_file = "b0_b0rev.nii" >>> topup.inputs.encoding_file = "topup_encoding.txt" >>> topup.inputs.output_type = "NIFTI_GZ" - >>> topup.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> topup.cmdline # doctest: +ELLIPSIS 'topup --config=b02b0.cnf --datain=topup_encoding.txt \ --imain=b0_b0rev.nii --out=b0_b0rev_base --iout=b0_b0rev_corrected.nii.gz \ --fout=b0_b0rev_field.nii.gz --jacout=jac --logout=b0_b0rev_topup.log \ ---dfout=warpfield' +--rbmout=xfm --dfout=warpfield' >>> res = topup.run() # doctest: +SKIP """ @@ -289,6 +293,9 @@ def _list_outputs(self): outputs['out_jacs'] = [ fmt(prefix=self.inputs.out_jac_prefix, i=i, ext=ext) for i in range(1, n_vols + 1)] + outputs['out_mats'] = [ + fmt(prefix=self.inputs.out_mat_prefix, i=i, ext=".mat") + for i in range(1, n_vols + 1)] if isdefined(self.inputs.encoding_direction): outputs['out_enc_file'] = self._get_encfilename() @@ -382,7 +389,7 @@ class ApplyTOPUP(FSLCommand): >>> applytopup.inputs.in_topup_fieldcoef = 
"topup_fieldcoef.nii.gz" >>> applytopup.inputs.in_topup_movpar = "topup_movpar.txt" >>> applytopup.inputs.output_type = "NIFTI_GZ" - >>> applytopup.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> applytopup.cmdline # doctest: +ELLIPSIS 'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii \ --inindex=1,2 --topup=topup --out=epi_corrected.nii.gz' >>> res = applytopup.run() # doctest: +SKIP @@ -538,12 +545,12 @@ class Eddy(FSLCommand): >>> eddy.inputs.in_bvec = 'bvecs.scheme' >>> eddy.inputs.in_bval = 'bvals.scheme' >>> eddy.inputs.use_cuda = True - >>> eddy.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> eddy.cmdline # doctest: +ELLIPSIS 'eddy_cuda --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme \ --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii \ --out=.../eddy_corrected' >>> eddy.inputs.use_cuda = False - >>> eddy.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> eddy.cmdline # doctest: +ELLIPSIS 'eddy_openmp --acqp=epi_acqp.txt --bvals=bvals.scheme \ --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ --mask=epi_mask.nii --out=.../eddy_corrected' @@ -585,7 +592,7 @@ def _run_interface(self, runtime): cmd = self._cmd if all((FSLDIR != '', cmd == 'eddy_openmp', - not os.path.exists(os.path.join(FSLDIR, cmd)))): + not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))): self._cmd = 'eddy' runtime = super(Eddy, self)._run_interface(runtime) @@ -672,7 +679,7 @@ class SigLoss(FSLCommand): >>> sigloss.inputs.in_file = "phase.nii" >>> sigloss.inputs.echo_time = 0.03 >>> sigloss.inputs.output_type = "NIFTI_GZ" - >>> sigloss.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> sigloss.cmdline # doctest: +ELLIPSIS 'sigloss --te=0.030000 -i phase.nii -s .../phase_sigloss.nii.gz' >>> res = sigloss.run() # doctest: +SKIP @@ -777,7 +784,7 @@ class EpiReg(FSLCommand): >>> epireg.inputs.fmapmagbrain='fieldmap_mag_brain.nii' >>> epireg.inputs.echospacing=0.00067 >>> epireg.inputs.pedir='y' - >>> epireg.cmdline # doctest: +ELLIPSIS 
+ALLOW_UNICODE + >>> epireg.cmdline # doctest: +ELLIPSIS 'epi_reg --echospacing=0.000670 --fmap=fieldmap_phase_fslprepared.nii \ --fmapmag=fieldmap_mag.nii --fmapmagbrain=fieldmap_mag_brain.nii --noclean \ --pedir=y --epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct' @@ -888,7 +895,7 @@ class EPIDeWarp(FSLCommand): >>> dewarp.inputs.mag_file = "magnitude.nii" >>> dewarp.inputs.dph_file = "phase.nii" >>> dewarp.inputs.output_type = "NIFTI_GZ" - >>> dewarp.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> dewarp.cmdline # doctest: +ELLIPSIS 'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii \ --esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 \ --tmpdir .../temp --vsm .../vsm.nii.gz' @@ -981,7 +988,7 @@ class EddyCorrect(FSLCommand): >>> from nipype.interfaces.fsl import EddyCorrect >>> eddyc = EddyCorrect(in_file='diffusion.nii', ... out_file="diffusion_edc.nii", ref_num=0) - >>> eddyc.cmdline # doctest: +ALLOW_UNICODE + >>> eddyc.cmdline 'eddy_correct diffusion.nii diffusion_edc.nii 0' """ diff --git a/nipype/interfaces/fsl/fix.py b/nipype/interfaces/fsl/fix.py index 0775cf62c5..cd4aacfedb 100644 --- a/nipype/interfaces/fsl/fix.py +++ b/nipype/interfaces/fsl/fix.py @@ -64,21 +64,24 @@ OutputMultiPath, BaseInterface, BaseInterfaceInputSpec, - traits + traits, + Directory, + File, + isdefined ) -from ..traits_extension import Directory, File, isdefined import os class TrainingSetCreatorInputSpec(BaseInterfaceInputSpec): mel_icas_in = InputMultiPath(Directory(exists=True), copyfile=False, - desc='Melodic output directories', - argstr='%s', position=-1) + desc='Melodic output directories', + argstr='%s', position=-1) + class TrainingSetCreatorOutputSpec(TraitedSpec): mel_icas_out = OutputMultiPath(Directory(exists=True), copyfile=False, - desc='Hand labels for noise vs signal', - argstr='%s', position=-1) + desc='Hand labels for noise vs signal', + argstr='%s', position=-1) class 
TrainingSetCreator(BaseInterface): diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py index 588f7caf95..f403c5c402 100644 --- a/nipype/interfaces/fsl/maths.py +++ b/nipype/interfaces/fsl/maths.py @@ -439,7 +439,7 @@ class MultiImageMaths(MathsCommand): >>> maths.inputs.op_string = "-add %s -mul -1 -div %s" >>> maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] >>> maths.inputs.out_file = "functional4.nii" - >>> maths.cmdline # doctest: +ALLOW_UNICODE + >>> maths.cmdline 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii' """ diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index f45f6d62fb..0dbf7c6bd1 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -17,6 +17,7 @@ import os from glob import glob from shutil import rmtree +from string import Template import numpy as np from nibabel import load @@ -25,12 +26,13 @@ from ...utils.filemanip import list_to_filename, filename_to_list from ...utils.misc import human_order_sorted from ...external.due import BibTeX -from ..base import (load_template, File, traits, isdefined, +from ..base import (File, traits, isdefined, TraitedSpec, BaseInterface, Directory, InputMultiPath, OutputMultiPath, BaseInterfaceInputSpec) from .base import FSLCommand, FSLCommandInputSpec, Info + class Level1DesignInputSpec(BaseInterfaceInputSpec): interscan_interval = traits.Float(mandatory=True, desc='Interscan interval (in secs)') @@ -934,7 +936,7 @@ class FLAMEO(FSLCommand): >>> flameo.inputs.t_con_file = 'design.con' >>> flameo.inputs.mask_file = 'mask.nii' >>> flameo.inputs.run_mode = 'fe' - >>> flameo.cmdline # doctest: +ALLOW_UNICODE + >>> flameo.cmdline 'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz' """ @@ -1601,7 +1603,7 @@ class MELODIC(FSLCommand): >>> 
melodic_setup.inputs.s_des = 'subjectDesign.mat' >>> melodic_setup.inputs.s_con = 'subjectDesign.con' >>> melodic_setup.inputs.out_dir = 'groupICA.out' - >>> melodic_setup.cmdline # doctest: +ALLOW_UNICODE + >>> melodic_setup.cmdline 'melodic -i functional.nii,functional2.nii,functional3.nii -a tica --bgthreshold=10.000000 --mmthresh=0.500000 --nobet -o groupICA.out --Ostats --Scon=subjectDesign.con --Sdes=subjectDesign.mat --Tcon=timeDesign.con --Tdes=timeDesign.mat --tr=1.500000' >>> melodic_setup.run() # doctest: +SKIP @@ -1657,7 +1659,7 @@ class SmoothEstimate(FSLCommand): >>> est = SmoothEstimate() >>> est.inputs.zstat_file = 'zstat1.nii.gz' >>> est.inputs.mask_file = 'mask.nii' - >>> est.cmdline # doctest: +ALLOW_UNICODE + >>> est.cmdline 'smoothest --mask=mask.nii --zstat=zstat1.nii.gz' """ @@ -1773,7 +1775,7 @@ class Cluster(FSLCommand): >>> cl.inputs.in_file = 'zstat1.nii.gz' >>> cl.inputs.out_localmax_txt_file = 'stats.txt' >>> cl.inputs.use_mm = True - >>> cl.cmdline # doctest: +ALLOW_UNICODE + >>> cl.cmdline 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000 --mm' """ @@ -1815,11 +1817,77 @@ def _format_arg(self, name, spec, value): return super(Cluster, self)._format_arg(name, spec, value) +class DualRegressionInputSpec(FSLCommandInputSpec): + in_files = InputMultiPath(File(exists=True), argstr="%s", mandatory=True, + position=-1, sep=" ", + desc="List all subjects' preprocessed, standard-space 4D datasets",) + group_IC_maps_4D = File(exists=True, argstr="%s", mandatory=True, position=1, + desc="4D image containing spatial IC maps (melodic_IC) from the " + "whole-group ICA analysis") + des_norm = traits.Bool(True, argstr="%i", position=2, usedefault=True, + desc="Whether to variance-normalise the timecourses used as the " + "stage-2 regressors; True is default and recommended") + one_sample_group_mean = traits.Bool(argstr="-1", position=3, + desc="perform 1-sample group-mean test instead of generic " + "permutation test") + design_file 
= File(exists=True, argstr="%s", position=3, + desc="Design matrix for final cross-subject modelling with " + "randomise") + con_file = File(exists=True, argstr="%s", position=4, + desc="Design contrasts for final cross-subject modelling with " + "randomise") + n_perm = traits.Int(argstr="%i", mandatory=True, position=5, + desc="Number of permutations for randomise; set to 1 for just raw " + "tstat output, set to 0 to not run randomise at all.") + out_dir = Directory("output", argstr="%s", usedefault=True, position=6, + desc="This directory will be created to hold all output and logfiles", + genfile=True) + + +class DualRegressionOutputSpec(TraitedSpec): + out_dir = Directory(exists=True) + + +class DualRegression(FSLCommand): + """Wrapper Script for Dual Regression Workflow + + Examples + -------- + + >>> dual_regression = DualRegression() + >>> dual_regression.inputs.in_files = ["functional.nii", "functional2.nii", "functional3.nii"] + >>> dual_regression.inputs.group_IC_maps_4D = "allFA.nii" + >>> dual_regression.inputs.des_norm = False + >>> dual_regression.inputs.one_sample_group_mean = True + >>> dual_regression.inputs.n_perm = 10 + >>> dual_regression.inputs.out_dir = "my_output_directory" + >>> dual_regression.cmdline + 'dual_regression allFA.nii 0 -1 10 my_output_directory functional.nii functional2.nii functional3.nii' + >>> dual_regression.run() # doctest: +SKIP + + """ + input_spec = DualRegressionInputSpec + output_spec = DualRegressionOutputSpec + _cmd = 'dual_regression' + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_dir): + outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) + else: + outputs['out_dir'] = self._gen_filename("out_dir") + return outputs + + def _gen_filename(self, name): + if name == "out_dir": + return os.getcwd() + + class RandomiseInputSpec(FSLCommandInputSpec): in_file = File(exists=True, desc='4D input file', argstr='-i %s', position=0, mandatory=True) base_name = 
traits.Str( - 'tbss_', desc='the rootname that all generated files will have', + 'randomise', desc='the rootname that all generated files will have', argstr='-o "%s"', position=1, usedefault=True) design_mat = File( exists=True, desc='design matrix file', argstr='-d %s', position=2) @@ -1866,9 +1934,9 @@ class RandomiseInputSpec(FSLCommandInputSpec): var_smooth = traits.Int( argstr='-v %d', desc='use variance smoothing (std is in mm)') c_thresh = traits.Float( - argstr='-c %.2f', desc='carry out cluster-based thresholding') + argstr='-c %.1f', desc='carry out cluster-based thresholding') cm_thresh = traits.Float( - argstr='-C %.2f', desc='carry out cluster-mass-based thresholding') + argstr='-C %.1f', desc='carry out cluster-mass-based thresholding') f_c_thresh = traits.Float( argstr='-F %.2f', desc='carry out f cluster thresholding') f_cm_thresh = traits.Float( @@ -1911,8 +1979,8 @@ class Randomise(FSLCommand): ------- >>> import nipype.interfaces.fsl as fsl >>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat') - >>> rand.cmdline # doctest: +ALLOW_UNICODE - 'randomise -i allFA.nii -o "tbss_" -d design.mat -t design.con -m mask.nii' + >>> rand.cmdline + 'randomise -i allFA.nii -o "randomise" -d design.mat -t design.con -m mask.nii' """ @@ -2056,7 +2124,7 @@ class GLM(FSLCommand): ------- >>> import nipype.interfaces.fsl as fsl >>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI') - >>> glm.cmdline # doctest: +ALLOW_UNICODE + >>> glm.cmdline 'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii' """ @@ -2102,3 +2170,25 @@ def _list_outputs(self): self.inputs.out_vnscales_name) return outputs + + +def load_template(name): + """Load a template from the model_templates directory + + Parameters + ---------- + name : str + The name of the file to load + + Returns + ------- + template : string.Template + + """ + from pkg_resources import resource_filename as pkgrf + full_fname = 
pkgrf( + 'nipype', os.path.join('interfaces', 'fsl', 'model_templates', name)) + with open(full_fname) as template_file: + template = Template(template_file.read()) + + return template diff --git a/nipype/interfaces/script_templates/feat_contrast_element.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_element.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrast_element.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrast_element.tcl diff --git a/nipype/interfaces/script_templates/feat_contrast_ftest_element.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_ftest_element.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrast_ftest_element.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrast_ftest_element.tcl diff --git a/nipype/interfaces/script_templates/feat_contrast_header.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_header.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrast_header.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrast_header.tcl diff --git a/nipype/interfaces/script_templates/feat_contrast_prolog.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_prolog.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrast_prolog.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrast_prolog.tcl diff --git a/nipype/interfaces/script_templates/feat_contrastmask_element.tcl b/nipype/interfaces/fsl/model_templates/feat_contrastmask_element.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrastmask_element.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrastmask_element.tcl diff --git a/nipype/interfaces/script_templates/feat_contrastmask_footer.tcl b/nipype/interfaces/fsl/model_templates/feat_contrastmask_footer.tcl similarity index 100% rename from 
nipype/interfaces/script_templates/feat_contrastmask_footer.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrastmask_footer.tcl diff --git a/nipype/interfaces/script_templates/feat_contrastmask_header.tcl b/nipype/interfaces/fsl/model_templates/feat_contrastmask_header.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrastmask_header.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrastmask_header.tcl diff --git a/nipype/interfaces/script_templates/feat_contrasts.tcl b/nipype/interfaces/fsl/model_templates/feat_contrasts.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_contrasts.tcl rename to nipype/interfaces/fsl/model_templates/feat_contrasts.tcl diff --git a/nipype/interfaces/script_templates/feat_ev_custom.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_custom.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_ev_custom.tcl rename to nipype/interfaces/fsl/model_templates/feat_ev_custom.tcl diff --git a/nipype/interfaces/script_templates/feat_ev_gamma.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_gamma.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_ev_gamma.tcl rename to nipype/interfaces/fsl/model_templates/feat_ev_gamma.tcl diff --git a/nipype/interfaces/script_templates/feat_ev_hrf.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_hrf.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_ev_hrf.tcl rename to nipype/interfaces/fsl/model_templates/feat_ev_hrf.tcl diff --git a/nipype/interfaces/script_templates/feat_ev_none.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_none.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_ev_none.tcl rename to nipype/interfaces/fsl/model_templates/feat_ev_none.tcl diff --git a/nipype/interfaces/script_templates/feat_ev_ortho.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_ortho.tcl similarity index 100% rename 
from nipype/interfaces/script_templates/feat_ev_ortho.tcl rename to nipype/interfaces/fsl/model_templates/feat_ev_ortho.tcl diff --git a/nipype/interfaces/script_templates/feat_fe_copes.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_copes.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_fe_copes.tcl rename to nipype/interfaces/fsl/model_templates/feat_fe_copes.tcl diff --git a/nipype/interfaces/script_templates/feat_fe_ev_element.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_ev_element.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_fe_ev_element.tcl rename to nipype/interfaces/fsl/model_templates/feat_fe_ev_element.tcl diff --git a/nipype/interfaces/script_templates/feat_fe_ev_header.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_ev_header.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_fe_ev_header.tcl rename to nipype/interfaces/fsl/model_templates/feat_fe_ev_header.tcl diff --git a/nipype/interfaces/script_templates/feat_fe_featdirs.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_featdirs.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_fe_featdirs.tcl rename to nipype/interfaces/fsl/model_templates/feat_fe_featdirs.tcl diff --git a/nipype/interfaces/script_templates/feat_fe_footer.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_footer.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_fe_footer.tcl rename to nipype/interfaces/fsl/model_templates/feat_fe_footer.tcl diff --git a/nipype/interfaces/script_templates/feat_fe_header.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_header.tcl similarity index 99% rename from nipype/interfaces/script_templates/feat_fe_header.tcl rename to nipype/interfaces/fsl/model_templates/feat_fe_header.tcl index 1caee22a46..4d4d1939fa 100644 --- a/nipype/interfaces/script_templates/feat_fe_header.tcl +++ b/nipype/interfaces/fsl/model_templates/feat_fe_header.tcl 
@@ -259,7 +259,7 @@ set fmri(regstandard_dof) 12 set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution -set fmri(regstandard_nonlinear_warpres) 10 +set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) 100 diff --git a/nipype/interfaces/script_templates/feat_header.tcl b/nipype/interfaces/fsl/model_templates/feat_header.tcl similarity index 97% rename from nipype/interfaces/script_templates/feat_header.tcl rename to nipype/interfaces/fsl/model_templates/feat_header.tcl index 6f5d895129..806d50c517 100644 --- a/nipype/interfaces/script_templates/feat_header.tcl +++ b/nipype/interfaces/fsl/model_templates/feat_header.tcl @@ -17,7 +17,7 @@ set fmri(level) 1 # 2 : Stats # 6 : Stats + Post-stats # 4 : Post-stats -set fmri(analysis) $analysis_stages +set fmri(analysis) $analysis_stages # Use relative filenames set fmri(relative_yn) 0 @@ -57,7 +57,7 @@ set fmri(inputtype) 1 # Carry out pre-stats processing? set fmri(filtering_yn) 0 -# Brain/background threshold, +# Brain/background threshold, set fmri(brain_thresh) 10 # Critical z for design efficiency calculation @@ -157,7 +157,7 @@ set fmri(evs_real) $num_evs set fmri(evs_vox) 0 # Number of contrasts -set fmri(ncon_orig) $num_contrasts +set fmri(ncon_orig) $num_contrasts set fmri(ncon_real) $num_contrasts # Number of F-tests @@ -259,7 +259,7 @@ set fmri(regstandard_dof) 12 set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution -set fmri(regstandard_nonlinear_warpres) 10 +set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) 100 diff --git a/nipype/interfaces/script_templates/feat_header_l1.tcl b/nipype/interfaces/fsl/model_templates/feat_header_l1.tcl similarity index 98% rename from nipype/interfaces/script_templates/feat_header_l1.tcl rename to nipype/interfaces/fsl/model_templates/feat_header_l1.tcl index b810b272bf..fc63166cd5 100644 --- a/nipype/interfaces/script_templates/feat_header_l1.tcl 
+++ b/nipype/interfaces/fsl/model_templates/feat_header_l1.tcl @@ -57,7 +57,7 @@ set fmri(inputtype) 2 # Carry out pre-stats processing? set fmri(filtering_yn) 0 -# Brain/background threshold, +# Brain/background threshold, set fmri(brain_thresh) 10 # Critical z for design efficiency calculation @@ -259,7 +259,7 @@ set fmri(regstandard_dof) 0 set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution -set fmri(regstandard_nonlinear_warpres) 10 +set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) $high_pass_filter_cutoff diff --git a/nipype/interfaces/script_templates/feat_nongui.tcl b/nipype/interfaces/fsl/model_templates/feat_nongui.tcl similarity index 100% rename from nipype/interfaces/script_templates/feat_nongui.tcl rename to nipype/interfaces/fsl/model_templates/feat_nongui.tcl diff --git a/nipype/interfaces/script_templates/featreg_header.tcl b/nipype/interfaces/fsl/model_templates/featreg_header.tcl similarity index 99% rename from nipype/interfaces/script_templates/featreg_header.tcl rename to nipype/interfaces/fsl/model_templates/featreg_header.tcl index c0b0170819..a73b17bb44 100644 --- a/nipype/interfaces/script_templates/featreg_header.tcl +++ b/nipype/interfaces/fsl/model_templates/featreg_header.tcl @@ -259,7 +259,7 @@ set fmri(regstandard_dof) $regdof set fmri(regstandard_nonlinear_yn) 0 # Control nonlinear warp field resolution -set fmri(regstandard_nonlinear_warpres) 10 +set fmri(regstandard_nonlinear_warpres) 10 # High pass filter cutoff set fmri(paradigm_hp) 100 diff --git a/nipype/interfaces/fsl/possum.py b/nipype/interfaces/fsl/possum.py index 20efefbf2c..1c2b10e1d2 100644 --- a/nipype/interfaces/fsl/possum.py +++ b/nipype/interfaces/fsl/possum.py @@ -80,7 +80,7 @@ class B0Calc(FSLCommand): >>> b0calc.inputs.in_file = 'tissue+air_map.nii' >>> b0calc.inputs.z_b0 = 3.0 >>> b0calc.inputs.output_type = "NIFTI_GZ" - >>> b0calc.cmdline # doctest: +ALLOW_UNICODE + >>> b0calc.cmdline 'b0calc 
-i tissue+air_map.nii -o tissue+air_map_b0field.nii.gz --b0=3.00' """ diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index 882611738e..4ffeead842 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -127,7 +127,7 @@ class BET(FSLCommand): >>> btr.inputs.in_file = 'structural.nii' >>> btr.inputs.frac = 0.7 >>> btr.inputs.out_file = 'brain_anat.nii' - >>> btr.cmdline # doctest: +ALLOW_UNICODE + >>> btr.cmdline 'bet structural.nii brain_anat.nii -f 0.70' >>> res = btr.run() # doctest: +SKIP @@ -298,7 +298,7 @@ class FAST(FSLCommand): >>> fastr = fsl.FAST() >>> fastr.inputs.in_files = 'structural.nii' >>> fastr.inputs.out_basename = 'fast_' - >>> fastr.cmdline # doctest: +ALLOW_UNICODE + >>> fastr.cmdline 'fast -o fast_ -S 1 structural.nii' >>> out = fastr.run() # doctest: +SKIP @@ -541,7 +541,7 @@ class FLIRT(FSLCommand): >>> flt.inputs.in_file = 'structural.nii' >>> flt.inputs.reference = 'mni.nii' >>> flt.inputs.output_type = "NIFTI_GZ" - >>> flt.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> flt.cmdline # doctest: +ELLIPSIS 'flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo' >>> res = flt.run() #doctest: +SKIP @@ -549,27 +549,30 @@ class FLIRT(FSLCommand): _cmd = 'flirt' input_spec = FLIRTInputSpec output_spec = FLIRTOutputSpec + _log_written = False def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = super(FLIRT, self).aggregate_outputs( runtime=runtime, needed_outputs=needed_outputs) - if isdefined(self.inputs.save_log) and self.inputs.save_log: + if self.inputs.save_log and not self._log_written: with open(outputs.out_log, "a") as text_file: text_file.write(runtime.stdout + '\n') + self._log_written = True return outputs def _parse_inputs(self, skip=None): - skip = [] - if isdefined(self.inputs.save_log) and self.inputs.save_log: - if not isdefined(self.inputs.verbose) or 
self.inputs.verbose == 0: - self.inputs.verbose = 1 - if isdefined(self.inputs.apply_xfm) and self.inputs.apply_xfm: - if not self.inputs.in_matrix_file and not self.inputs.uses_qform: - raise RuntimeError('Argument apply_xfm requires in_matrix_file ' - 'or uses_qform arguments to run') + if skip is None: + skip = [] + if self.inputs.save_log and not self.inputs.verbose: + self.inputs.verbose = 1 + if self.inputs.apply_xfm and not (self.inputs.in_matrix_file or + self.inputs.uses_qform): + raise RuntimeError('Argument apply_xfm requires in_matrix_file or ' + 'uses_qform arguments to run') skip.append('save_log') return super(FLIRT, self)._parse_inputs(skip=skip) + class ApplyXFMInputSpec(FLIRTInputSpec): apply_xfm = traits.Bool( True, argstr='-applyxfm', @@ -671,7 +674,7 @@ class MCFLIRT(FSLCommand): >>> mcflt.inputs.in_file = 'functional.nii' >>> mcflt.inputs.cost = 'mutualinfo' >>> mcflt.inputs.out_file = 'moco.nii' - >>> mcflt.cmdline # doctest: +ALLOW_UNICODE + >>> mcflt.cmdline 'mcflirt -in functional.nii -cost mutualinfo -out moco.nii' >>> res = mcflt.run() # doctest: +SKIP @@ -874,7 +877,7 @@ class FNIRTInputSpec(FSLCommandInputSpec): desc=('If true, ref image is used to calculate derivatives. 
' 'Default false')) intensity_mapping_model = traits.Enum( - 'none', 'global_linear', 'global_non_linear' + 'none', 'global_linear', 'global_non_linear', 'local_linear', 'global_non_linear_with_bias', 'local_non_linear', argstr='--intmod=%s', desc='Model for intensity-mapping') @@ -1394,7 +1397,7 @@ class FUGUE(FSLCommand): >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well >>> fugue.inputs.unwarp_direction = 'y' >>> fugue.inputs.output_type = "NIFTI_GZ" - >>> fugue.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> fugue.cmdline # doctest: +ELLIPSIS 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --unwarp=epi_unwarped.nii.gz' >>> fugue.run() #doctest: +SKIP @@ -1409,7 +1412,7 @@ class FUGUE(FSLCommand): >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well >>> fugue.inputs.unwarp_direction = 'y' >>> fugue.inputs.output_type = "NIFTI_GZ" - >>> fugue.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> fugue.cmdline # doctest: +ELLIPSIS 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --warp=epi_warped.nii.gz' >>> fugue.run() #doctest: +SKIP @@ -1424,7 +1427,7 @@ class FUGUE(FSLCommand): >>> fugue.inputs.unwarp_direction = 'y' >>> fugue.inputs.save_shift = True >>> fugue.inputs.output_type = "NIFTI_GZ" - >>> fugue.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> fugue.cmdline # doctest: +ELLIPSIS 'fugue --dwelltoasym=0.9390243902 --mask=epi_mask.nii --phasemap=epi_phasediff.nii --saveshift=epi_phasediff_vsm.nii.gz --unwarpdir=y' >>> fugue.run() #doctest: +SKIP diff --git a/nipype/interfaces/fsl/tests/test_Level1Design_functions.py b/nipype/interfaces/fsl/tests/test_Level1Design_functions.py index 56fdecd0b4..b7573f7454 100644 --- a/nipype/interfaces/fsl/tests/test_Level1Design_functions.py +++ b/nipype/interfaces/fsl/tests/test_Level1Design_functions.py @@ -4,7 +4,8 @@ from ..model import Level1Design -def test_level1design(): +def 
test_level1design(tmpdir): + old = tmpdir.chdir() l = Level1Design() runinfo = dict(cond=[{'name': 'test_condition', 'onset': [0, 10], 'duration':[10, 10]}],regress=[]) diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py index 2c3eda86cb..e21844346a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -13,7 +13,8 @@ def test_AR1Image_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_AR1Image_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AR1Image.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py new file mode 100644 index 0000000000..dc5daa76c5 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import AccuracyTester + + +def test_AccuracyTester_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + mel_icas=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=3, + ), + output_directory=dict(argstr='%s', + mandatory=True, + position=2, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + trained_wts_file=dict(argstr='%s', + mandatory=True, + position=1, + ), + ) + inputs = AccuracyTester.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + 
+def test_AccuracyTester_outputs(): + output_map = dict(output_directory=dict(argstr='%s', + position=1, + ), + ) + outputs = AccuracyTester.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py index 0a74d811c3..f5bad0d6f2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -9,7 +9,8 @@ def test_ApplyMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_ApplyMask_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ApplyMask.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py index 7b08c18c28..c60fc9f5de 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -14,7 +14,8 @@ def test_ApplyTOPUP_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='--imain=%s', @@ -40,7 +41,8 @@ def test_ApplyTOPUP_inputs(): name_template='%s_corrected', ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ApplyTOPUP.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py index bcdcc670ac..7056d1d363 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -16,7 +16,8 @@ def test_ApplyWarp_inputs(): ), field_file=dict(argstr='--warp=%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -50,7 +51,8 @@ def test_ApplyWarp_inputs(): ), supersample=dict(argstr='--super', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ApplyWarp.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py index 438f4ce486..9e8b4d0877 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -54,7 +54,8 @@ def test_ApplyXFM_inputs(): ), force_scaling=dict(argstr='-forcescaling', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -128,7 +129,8 @@ def test_ApplyXFM_inputs(): ), sinc_window=dict(argstr='-sincwindow %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uses_qform=dict(argstr='-usesqform', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py index e19577b71f..eb582cc783 100644 --- a/nipype/interfaces/fsl/tests/test_auto_AvScale.py +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -11,7 +11,8 @@ def test_AvScale_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mat_file=dict(argstr='%s', @@ -20,7 +21,8 @@ def test_AvScale_inputs(): ref_file=dict(argstr='%s', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AvScale.input_spec() diff --git 
a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py index ee6b749d7e..a03dba1061 100644 --- a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -19,7 +19,8 @@ def test_B0Calc_inputs(): ), extendboundary=dict(argstr='--extendboundary=%0.2f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -33,7 +34,8 @@ def test_B0Calc_inputs(): position=1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), x_b0=dict(argstr='--b0x=%0.2f', xor=['xyz_b0'], diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py index 782c1a9317..7e57bdea55 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -39,7 +39,8 @@ def test_BEDPOSTX5_inputs(): grad_dev=dict(), gradnonlin=dict(argstr='-g', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), logdir=dict(argstr='--logdir=%s', @@ -75,7 +76,8 @@ def test_BEDPOSTX5_inputs(): ), seed=dict(argstr='--seed=%d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), update_proposal_every=dict(argstr='--updateproposalevery=%d', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py index 98af74707d..a0ba58c88a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BET.py +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -17,7 +17,8 @@ def test_BET_inputs(): functional=dict(argstr='-F', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, 
), in_file=dict(argstr='%s', @@ -61,7 +62,8 @@ def test_BET_inputs(): t2_guided=dict(argstr='-A2 %s', xor=('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='-t', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py index aae4a436dd..d4c8eed2f9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -9,7 +9,8 @@ def test_BinaryMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -45,7 +46,8 @@ def test_BinaryMaths_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BinaryMaths.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py index 6d2952c073..2142994a08 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -9,7 +9,8 @@ def test_ChangeDataType_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_ChangeDataType_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ChangeDataType.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py new file mode 100644 index 0000000000..d9e8180f87 --- /dev/null +++ 
b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import Classifier + + +def test_Classifier_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + artifacts_list_file=dict(), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + mel_ica=dict(argstr='%s', + copyfile=False, + position=1, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + thresh=dict(argstr='%d', + mandatory=True, + position=-1, + ), + trained_wts_file=dict(argstr='%s', + copyfile=False, + mandatory=True, + position=2, + ), + ) + inputs = Classifier.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Classifier_outputs(): + output_map = dict(artifacts_list_file=dict(), + ) + outputs = Classifier.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py new file mode 100644 index 0000000000..08f40263b5 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import Cleaner + + +def test_Cleaner_inputs(): + input_map = dict(aggressive=dict(argstr='-A', + position=3, + ), + args=dict(argstr='%s', + ), + artifacts_list_file=dict(argstr='%s', + mandatory=True, + position=1, + ), + cleanup_motion=dict(argstr='-m', + position=2, + ), + confound_file=dict(argstr='-x %s', + position=4, + ), + confound_file_1=dict(argstr='-x %s', + position=5, + ), + confound_file_2=dict(argstr='-x %s', + 
position=6, + ), + environ=dict(nohash=True, + usedefault=True, + ), + highpass=dict(argstr='-m -h %f', + position=2, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = Cleaner.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Cleaner_outputs(): + output_map = dict(cleaned_functional_file=dict(), + ) + outputs = Cleaner.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py index 886ef8885b..8085989aad 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Cluster.py +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -21,7 +21,8 @@ def test_Cluster_inputs(): fractional=dict(argstr='--fractional', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -67,7 +68,8 @@ def test_Cluster_inputs(): ), std_space_file=dict(argstr='--stdvol=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--thresh=%.10f', mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py index c0544c799d..0a2133bb6f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Complex.py +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -39,7 +39,8 @@ def test_Complex_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
imaginary_in_file=dict(argstr='%s', @@ -86,7 +87,8 @@ def test_Complex_inputs(): start_vol=dict(argstr='%d', position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Complex.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py index 5fa6e7828c..de4296e751 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -20,7 +20,8 @@ def test_ContrastMgr_inputs(): ), fcon_file=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_type=dict(), @@ -39,7 +40,8 @@ def test_ContrastMgr_inputs(): mandatory=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ContrastMgr.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py index 97c0a06315..4ea9b536f9 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -14,7 +14,8 @@ def test_ConvertWarp_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), jacobian_max=dict(argstr='--jmax=%f', @@ -52,7 +53,8 @@ def test_ConvertWarp_inputs(): ), shift_in_file=dict(argstr='--shiftmap=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp1=dict(argstr='--warp1=%s', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py index 5146d1f718..1dad2d7d63 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -19,7 
+19,8 @@ def test_ConvertXFM_inputs(): requires=['in_file2'], xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -39,7 +40,8 @@ def test_ConvertXFM_inputs(): position=1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ConvertXFM.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py index 70922a9da9..8374c8c93d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -20,7 +20,8 @@ def test_CopyGeom_inputs(): ignore_dims=dict(argstr='-d', position='-1', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_CopyGeom_inputs(): position=0, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = CopyGeom.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py index 4badbfb2dc..02e031519e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -29,7 +29,8 @@ def test_DTIFit_inputs(): ), gradnonlin=dict(argstr='--gradnonlin=%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), little_bit=dict(argstr='--littlebit', @@ -55,7 +56,8 @@ def test_DTIFit_inputs(): ), sse=dict(argstr='--sse', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DTIFit.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py 
b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py index 40da7affbe..2b8be886a6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -9,7 +9,8 @@ def test_DilateImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -46,7 +47,8 @@ def test_DilateImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DilateImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py index 87cde59644..1b063503dc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -13,7 +13,8 @@ def test_DistanceMap_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -27,7 +28,8 @@ def test_DistanceMap_inputs(): mask_file=dict(argstr='--mask=%s', ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DistanceMap.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py new file mode 100644 index 0000000000..894a3a3ad4 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -0,0 +1,66 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import DualRegression + + +def test_DualRegression_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + con_file=dict(argstr='%s', + position=4, + ), + des_norm=dict(argstr='%i', + position=2, + 
usedefault=True, + ), + design_file=dict(argstr='%s', + position=3, + ), + environ=dict(nohash=True, + usedefault=True, + ), + group_IC_maps_4D=dict(argstr='%s', + mandatory=True, + position=1, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='%s', + mandatory=True, + position=-1, + sep=' ', + ), + n_perm=dict(argstr='%i', + mandatory=True, + position=5, + ), + one_sample_group_mean=dict(argstr='-1', + position=3, + ), + out_dir=dict(argstr='%s', + genfile=True, + position=6, + usedefault=True, + ), + output_type=dict(), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = DualRegression.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_DualRegression_outputs(): + output_map = dict(out_dir=dict(), + ) + outputs = DualRegression.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py index 5f49d5a89e..969ecfa86d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -27,7 +27,8 @@ def test_EPIDeWarp_inputs(): exfdw=dict(argstr='--exfdw %s', genfile=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mag_file=dict(argstr='--mag %s', @@ -44,7 +45,8 @@ def test_EPIDeWarp_inputs(): tediff=dict(argstr='--tediff %s', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tmpdir=dict(argstr='--tmpdir %s', genfile=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py index 
c5f521045f..df4155472d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Eddy.py +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -25,7 +25,8 @@ def test_Eddy_inputs(): ), fwhm=dict(argstr='--fwhm=%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_acqp=dict(argstr='--acqp=%s', @@ -74,7 +75,8 @@ def test_Eddy_inputs(): ), slm=dict(argstr='--slm=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_cuda=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py index c7606a2cea..57bf91ac79 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -9,7 +9,8 @@ def test_EddyCorrect_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_EddyCorrect_inputs(): position=2, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = EddyCorrect.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index c34014dd57..65e8134a30 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -21,7 +21,8 @@ def test_EpiReg_inputs(): ), fmapmagbrain=dict(argstr='--fmapmagbrain=%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), no_clean=dict(argstr='--noclean', @@ -44,7 +45,8 @@ def test_EpiReg_inputs(): mandatory=True, position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), weight_image=dict(argstr='--weight=%s', ), diff 
--git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py index a64c6f5d9e..015b97e59e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -9,7 +9,8 @@ def test_ErodeImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -46,7 +47,8 @@ def test_ErodeImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ErodeImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py index 77be2edb95..8abad8a656 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -13,7 +13,8 @@ def test_ExtractROI_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_ExtractROI_inputs(): t_size=dict(argstr='%d', position=9, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), x_min=dict(argstr='%d', position=2, diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py index 11e6cec5de..a5e54b0882 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FAST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -16,7 +16,8 @@ def test_FAST_inputs(): ), hyper=dict(argstr='-H %.2f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), img_type=dict(argstr='-t %d', @@ -57,7 +58,8 @@ def test_FAST_inputs(): ), segments=dict(argstr='-g', ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_priors=dict(argstr='-P', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py index f2c5e46e7e..bb024b4c29 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEAT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -13,11 +13,13 @@ def test_FEAT_inputs(): mandatory=True, position=0, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FEAT.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py index e0956ee674..9aa25a02d8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -19,11 +19,13 @@ def test_FEATModel_inputs(): mandatory=True, position=0, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FEATModel.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py index a0f5e09177..9a46bd77c7 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -6,7 +6,8 @@ def test_FEATRegister_inputs(): input_map = dict(feat_dirs=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), reg_dof=dict(usedefault=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py index 7b98ac128c..61f369a0f1 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_FIRST.py +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -15,7 +15,8 @@ def test_FIRST_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -45,7 +46,8 @@ def test_FIRST_inputs(): usedefault=True, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-v', position=1, diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py index ed8093853d..6caf4d6d9a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -26,7 +26,8 @@ def test_FLAMEO_inputs(): ), fix_mean=dict(argstr='--fixmean', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), infer_outliers=dict(argstr='--inferoutliers', @@ -54,7 +55,8 @@ def test_FLAMEO_inputs(): t_con_file=dict(argstr='--tcontrastsfile=%s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), var_cope_file=dict(argstr='--varcopefile=%s', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py index fabfa4054c..bd13a2fd36 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -53,7 +53,8 @@ def test_FLIRT_inputs(): ), force_scaling=dict(argstr='-forcescaling', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -127,7 +128,8 @@ def test_FLIRT_inputs(): ), sinc_window=dict(argstr='-sincwindow %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), 
uses_qform=dict(argstr='-usesqform', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index 8e4cf47fc3..19536f0913 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -38,7 +38,8 @@ def test_FNIRT_inputs(): ), hessian_precision=dict(argstr='--numprec=%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -114,7 +115,8 @@ def test_FNIRT_inputs(): subsampling_scheme=dict(argstr='--subsamp=%s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_resolution=dict(argstr='--warpres=%d,%d,%d', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py index 2ade472bfa..a88daabceb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -9,11 +9,13 @@ def test_FSLCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FSLCommand.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py index 280cf3a588..adabb09143 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -39,7 +39,8 @@ def test_FSLXCommand_inputs(): ), fudge=dict(argstr='--fudge=%d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), logdir=dict(argstr='--logdir=%s', @@ -72,7 +73,8 @@ def test_FSLXCommand_inputs(): ), 
seed=dict(argstr='--seed=%d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), update_proposal_every=dict(argstr='--updateproposalevery=%d', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py index 93b41ea6de..f88ffd7b6f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -33,7 +33,8 @@ def test_FUGUE_inputs(): icorr_only=dict(argstr='--icorronly', requires=['unwarped_file'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -75,7 +76,8 @@ def test_FUGUE_inputs(): ), smooth3d=dict(argstr='--smooth3=%.2f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unwarp_direction=dict(argstr='--unwarpdir=%s', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py new file mode 100644 index 0000000000..69565fa7c8 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import FeatureExtractor + + +def test_FeatureExtractor_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + mel_ica=dict(argstr='%s', + copyfile=False, + position=-1, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = FeatureExtractor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_FeatureExtractor_outputs(): + output_map = dict(mel_ica=dict(argstr='%s', + 
copyfile=False, + position=-1, + ), + ) + outputs = FeatureExtractor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py index ac62586ec5..576b7ea3ee 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -23,7 +23,8 @@ def test_FilterRegressor_inputs(): position=4, xor=['filter_all'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -40,7 +41,8 @@ def test_FilterRegressor_inputs(): out_vnscales=dict(argstr='--out_vnscales', ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), var_norm=dict(argstr='--vn', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py index ef7e14fcbf..cb5b925f15 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -9,7 +9,8 @@ def test_FindTheBiggest_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -22,7 +23,8 @@ def test_FindTheBiggest_inputs(): position=2, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FindTheBiggest.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py index d9ab6bc168..846e3f4854 100644 --- a/nipype/interfaces/fsl/tests/test_auto_GLM.py +++ 
b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -23,7 +23,8 @@ def test_GLM_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -61,7 +62,8 @@ def test_GLM_inputs(): out_z_name=dict(argstr='--out_z=%s', ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), var_norm=dict(argstr='--vn', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py index 0e28417b05..3a04429b2c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -1,6 +1,6 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..ICA_AROMA import ICA_AROMA +from ..aroma import ICA_AROMA def test_ICA_AROMA_inputs(): @@ -24,7 +24,8 @@ def test_ICA_AROMA_inputs(): fnirt_warp_file=dict(argstr='-warp %s', xor=['feat_dir'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -46,7 +47,8 @@ def test_ICA_AROMA_inputs(): out_dir=dict(argstr='-o %s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ICA_AROMA.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py index 8c1aef8b5c..bbff7e8b42 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -9,7 +9,8 @@ def test_ImageMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -32,7 +33,8 @@ def 
test_ImageMaths_inputs(): ), output_type=dict(), suffix=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ImageMaths.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py index 73deecb7e5..2991a45c9b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -11,7 +11,8 @@ def test_ImageMeants_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -34,7 +35,8 @@ def test_ImageMeants_inputs(): ), spatial_coord=dict(argstr='-c %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transpose=dict(argstr='--transpose', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py index 1a4739a320..0a97eb2e21 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -9,7 +9,8 @@ def test_ImageStats_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_ImageStats_inputs(): split_4d=dict(argstr='-t', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ImageStats.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py index 02624a6d2c..0d96f46d4d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -12,7 +12,8 @@ def test_InvWarp_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inverse_warp=dict(argstr='--out=%s', @@ -37,7 +38,8 @@ def test_InvWarp_inputs(): relative=dict(argstr='--rel', xor=['absolute'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp=dict(argstr='--warp=%s', mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py index 0d33d852a2..bc02253720 100644 --- a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -14,7 +14,8 @@ def test_IsotropicSmooth_inputs(): position=4, xor=['sigma'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -41,7 +42,8 @@ def test_IsotropicSmooth_inputs(): position=4, xor=['fwhm'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = IsotropicSmooth.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py index 81f74cc923..ef86a37e0e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_L2Model.py +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -4,7 +4,8 @@ def test_L2Model_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), num_copes=dict(mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index f5fcfe4093..c15f8e055a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -7,7 +7,8 @@ def test_Level1Design_inputs(): input_map = 
dict(bases=dict(mandatory=True, ), contrasts=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interscan_interval=dict(mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py index 300f829bfa..07ecdc9094 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -15,7 +15,8 @@ def test_MCFLIRT_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -53,7 +54,8 @@ def test_MCFLIRT_inputs(): ), stats_imgs=dict(argstr='-stats', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), use_contour=dict(argstr='-edge', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py index 1c14c441d9..5e009e701d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -27,7 +27,8 @@ def test_MELODIC_inputs(): ), epsilonS=dict(argstr='--epsS=%f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='-i %s', @@ -107,7 +108,8 @@ def test_MELODIC_inputs(): ), t_des=dict(argstr='--Tdes=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tr_sec=dict(argstr='--tr=%f', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py index ed921d092a..447be7025e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -9,7 +9,8 @@ def test_MakeDyadicVectors_inputs(): 
environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_MakeDyadicVectors_inputs(): mandatory=True, position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), theta_vol=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py index 1962cd5ad9..224bc3ee75 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -9,7 +9,8 @@ def test_MathsCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_MathsCommand_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MathsCommand.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py index 22ba2f24ad..536a44bccf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -13,7 +13,8 @@ def test_MaxImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_MaxImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MaxImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py index 
12444f2e3b..09aec304d8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -13,7 +13,8 @@ def test_MaxnImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_MaxnImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MaxnImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py index 86b23eb8b9..c444c296ad 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -13,7 +13,8 @@ def test_MeanImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_MeanImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MeanImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py index c4be8d6687..b398d50975 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -13,7 +13,8 @@ def test_MedianImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_MedianImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, 
), ) inputs = MedianImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py index 32c966edaf..0638326c65 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Merge.py +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -13,7 +13,8 @@ def test_Merge_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_Merge_inputs(): position=1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tr=dict(argstr='%.2f', position=-1, diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py index 973bc9a369..f2216fb083 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MinImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -13,7 +13,8 @@ def test_MinImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_MinImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MinImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py index a4268fd930..7921e1031c 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -11,7 +11,8 @@ def test_MotionOutliers_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -42,7 +43,8 @@ def 
test_MotionOutliers_inputs(): name_template='%s_metrics.txt', ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--thresh=%g', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py index 964605e726..4a67036b55 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -9,7 +9,8 @@ def test_MultiImageMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -37,7 +38,8 @@ def test_MultiImageMaths_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MultiImageMaths.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py index 69ef20f16c..c0bd71f12a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -7,7 +7,8 @@ def test_MultipleRegressDesign_inputs(): input_map = dict(contrasts=dict(mandatory=True, ), groups=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), regressors=dict(mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py index 84885f6c10..91b09fdd7a 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Overlay.py +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -28,7 +28,8 @@ def test_Overlay_inputs(): position=5, xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), out_file=dict(argstr='%s', @@ -60,7 +61,8 @@ def test_Overlay_inputs(): stat_thresh2=dict(argstr='%.2f %.2f', position=10, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transparency=dict(argstr='%s', position=1, diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py index 38d8f7bdf3..98b8d69889 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -15,7 +15,8 @@ def test_PRELUDE_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), label_file=dict(argstr='--labels=%s', @@ -52,7 +53,8 @@ def test_PRELUDE_inputs(): ), start=dict(argstr='--start=%d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--thresh=%.10f', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py index 49f1ed3538..36fd550b23 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -13,7 +13,8 @@ def test_PercentileImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -39,7 +40,8 @@ def test_PercentileImage_inputs(): position=5, usedefault=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PercentileImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py index 9dc7a30fd0..e910f7e173 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -9,7 +9,8 @@ def test_PlotMotionParams_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_PlotMotionParams_inputs(): plot_type=dict(argstr='%s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PlotMotionParams.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py index 03467e1dcf..feaa9d8449 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -9,7 +9,8 @@ def test_PlotTimeSeries_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -38,7 +39,8 @@ def test_PlotTimeSeries_inputs(): ), sci_notation=dict(argstr='--sci', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), title=dict(argstr='%s', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py index 114feac427..409c8bfee6 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -9,7 +9,8 @@ def test_PowerSpectrum_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -22,7 +23,8 @@ def test_PowerSpectrum_inputs(): position=1, ), output_type=dict(), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PowerSpectrum.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py index 8400c376e6..b9bf8c1c94 100644 --- a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -14,7 +14,8 @@ def test_PrepareFieldmap_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_magnitude=dict(argstr='%s', @@ -37,7 +38,8 @@ def test_PrepareFieldmap_inputs(): position=1, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PrepareFieldmap.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py index d88ab0b0b9..12352f4a38 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -24,7 +24,8 @@ def test_ProbTrackX_inputs(): ), fsamples=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inv_xfm=dict(argstr='--invxfm=%s', @@ -83,7 +84,8 @@ def test_ProbTrackX_inputs(): ), target_masks=dict(argstr='--targetmasks=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thsamples=dict(mandatory=True, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py index 770eafe3cb..249bc87777 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -32,7 +32,8 @@ def test_ProbTrackX2_inputs(): ), fsamples=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inv_xfm=dict(argstr='--invxfm=%s', @@ -109,7 +110,8 @@ def test_ProbTrackX2_inputs(): ), target_masks=dict(argstr='--targetmasks=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thsamples=dict(mandatory=True, ), diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py index 318e67c9d9..37648d5a2b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -9,7 +9,8 @@ def test_ProjThresh_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -17,7 +18,8 @@ def test_ProjThresh_inputs(): position=0, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='%d', mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py index 53e999893c..432455e4a8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Randomise.py +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -10,9 +10,9 @@ def test_Randomise_inputs(): position=1, usedefault=True, ), - c_thresh=dict(argstr='-c %.2f', + c_thresh=dict(argstr='-c %.1f', ), - cm_thresh=dict(argstr='-C %.2f', + cm_thresh=dict(argstr='-C %.1f', ), demean=dict(argstr='-D', ), @@ -30,7 +30,8 @@ def test_Randomise_inputs(): ), fcon=dict(argstr='-f %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -57,7 +58,8 @@ def test_Randomise_inputs(): tcon=dict(argstr='-t %s', position=3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, 
), tfce=dict(argstr='-T', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py index fd37a51ecb..0062b7d489 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -9,7 +9,8 @@ def test_Reorient2Std_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -20,7 +21,8 @@ def test_Reorient2Std_inputs(): hash_files=False, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Reorient2Std.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py index 26d3c45c6f..6b547109e1 100644 --- a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -11,7 +11,8 @@ def test_RobustFOV_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -29,7 +30,8 @@ def test_RobustFOV_inputs(): name_template='%s_to_ROI', ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = RobustFOV.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py index 301a5fdd47..f6ed7d4fd2 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SMM.py +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -9,7 +9,8 @@ def test_SMM_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask=dict(argstr='--mask="%s"', @@ -26,7 +27,8 @@ def 
test_SMM_inputs(): mandatory=True, position=0, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SMM.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py index bdaba2cad6..e3da09dadf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -21,7 +21,8 @@ def test_SUSAN_inputs(): mandatory=True, position=3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -34,7 +35,8 @@ def test_SUSAN_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), usans=dict(argstr='', position=6, diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py index c2b645b540..d7caee328e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -11,7 +11,8 @@ def test_SigLoss_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -25,7 +26,8 @@ def test_SigLoss_inputs(): output_type=dict(), slice_direction=dict(argstr='-d %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SigLoss.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py index 681e9157b2..99d0b7215b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -15,7 +15,8 @@ def test_SliceTimer_inputs(): ), global_shift=dict(argstr='--tglobal', ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -33,7 +34,8 @@ def test_SliceTimer_inputs(): output_type=dict(), slice_direction=dict(argstr='--direction=%d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), time_repetition=dict(argstr='--repeat=%f', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py index d00aeafbaf..6108b5f702 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Slicer.py +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -20,7 +20,8 @@ def test_Slicer_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_edges=dict(argstr='%s', @@ -73,7 +74,8 @@ def test_Slicer_inputs(): slice_number=dict(argstr='-%d', position=11, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold_edges=dict(argstr='-e %.3f', position=6, diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py index af09615294..d653e4d7cb 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Smooth.py +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -14,7 +14,8 @@ def test_Smooth_inputs(): position=1, xor=['sigma'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -33,7 +34,8 @@ def test_Smooth_inputs(): name_template='%s_smooth', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Smooth.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py index 066af89a60..c98830c384 100644 --- 
a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -13,7 +13,8 @@ def test_SmoothEstimate_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask_file=dict(argstr='--mask=%s', @@ -23,7 +24,8 @@ def test_SmoothEstimate_inputs(): residual_fit_file=dict(argstr='--res=%s', requires=['dof'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), zstat_file=dict(argstr='--zstat=%s', xor=['dof'], diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py index 949254bdcc..be3772926f 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -9,7 +9,8 @@ def test_SpatialFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -46,7 +47,8 @@ def test_SpatialFilter_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SpatialFilter.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py index 7eb80a9f12..efe176be46 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Split.py +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -13,7 +13,8 @@ def test_Split_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -24,7 +25,8 @@ def test_Split_inputs(): position=1, ), output_type=dict(), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Split.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py index 5fd80d2dc0..8675590d07 100644 --- a/nipype/interfaces/fsl/tests/test_auto_StdImage.py +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -13,7 +13,8 @@ def test_StdImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_StdImage_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = StdImage.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py index 1fe20d3351..c225c37ab0 100644 --- a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -9,7 +9,8 @@ def test_SwapDimensions_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -24,7 +25,8 @@ def test_SwapDimensions_inputs(): hash_files=False, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SwapDimensions.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py index 28083c6dc0..fd04dadcbc 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -25,7 +25,8 @@ def test_TOPUP_inputs(): ), fwhm=dict(argstr='--fwhm=%f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
in_file=dict(argstr='--imain=%s', @@ -64,6 +65,10 @@ def test_TOPUP_inputs(): name_source=['in_file'], name_template='%s_topup.log', ), + out_mat_prefix=dict(argstr='--rbmout=%s', + hash_files=False, + usedefault=True, + ), out_warp_prefix=dict(argstr='--dfout=%s', hash_files=False, usedefault=True, @@ -87,7 +92,8 @@ def test_TOPUP_inputs(): ), subsamp=dict(argstr='--subsamp=%d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_res=dict(argstr='--warpres=%f', ), @@ -106,6 +112,7 @@ def test_TOPUP_outputs(): out_fieldcoef=dict(), out_jacs=dict(), out_logfile=dict(), + out_mats=dict(), out_movpar=dict(), out_warps=dict(), ) diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py index f5a4f5835a..4f5bb4d84e 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -13,7 +13,8 @@ def test_TemporalFilter_inputs(): position=4, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -39,7 +40,8 @@ def test_TemporalFilter_inputs(): position=-1, ), output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TemporalFilter.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py index bd56d6270b..923deff51d 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Threshold.py +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -11,7 +11,8 @@ def test_Threshold_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -33,7 +34,8 @@ def test_Threshold_inputs(): position=-1, ), 
output_type=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresh=dict(argstr='%s', mandatory=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py index 360a5b9b57..0c5634d731 100644 --- a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -15,7 +15,8 @@ def test_TractSkeleton_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -30,7 +31,8 @@ def test_TractSkeleton_inputs(): ), skeleton_file=dict(argstr='-o %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(), use_cingulum_mask=dict(usedefault=True, diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py new file mode 100644 index 0000000000..5d4d965951 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import Training + + +def test_Training_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + loo=dict(argstr='-l', + position=2, + ), + mel_icas=dict(argstr='%s', + copyfile=False, + position=-1, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + trained_wts_filestem=dict(argstr='%s', + position=1, + ), + ) + inputs = Training.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_Training_outputs(): + output_map = 
dict(trained_wts_file=dict(), + ) + outputs = Training.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py new file mode 100644 index 0000000000..370e89fdf4 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import TrainingSetCreator + + +def test_TrainingSetCreator_inputs(): + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + mel_icas_in=dict(argstr='%s', + copyfile=False, + position=-1, + ), + ) + inputs = TrainingSetCreator.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_TrainingSetCreator_outputs(): + output_map = dict(mel_icas_out=dict(argstr='%s', + copyfile=False, + position=-1, + ), + ) + outputs = TrainingSetCreator.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py index 9ac8a42d7f..eb43d00aaf 100644 --- a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -9,7 +9,8 @@ def test_UnaryMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_UnaryMaths_inputs(): position=-1, ), output_type=dict(), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = UnaryMaths.input_spec() diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py index 9ea57c1677..2308c13b04 100644 --- a/nipype/interfaces/fsl/tests/test_auto_VecReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -11,7 +11,8 @@ def test_VecReg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -35,7 +36,8 @@ def test_VecReg_inputs(): ), rotation_warp=dict(argstr='--rotwarp=%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_field=dict(argstr='-w %s', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py index 3c5e999c51..cc8f9f9ce8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -18,7 +18,8 @@ def test_WarpPoints_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_coords=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_WarpPoints_inputs(): src_file=dict(argstr='-src %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_file=dict(argstr='-warp %s', xor=['xfm_file'], diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py new file mode 100644 index 0000000000..0824beae24 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import 
WarpPointsFromStd + + +def test_WarpPointsFromStd_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + coord_mm=dict(argstr='-mm', + xor=['coord_vox'], + ), + coord_vox=dict(argstr='-vox', + xor=['coord_mm'], + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + img_file=dict(argstr='-img %s', + mandatory=True, + ), + in_coords=dict(argstr='%s', + mandatory=True, + position=-2, + ), + std_file=dict(argstr='-std %s', + mandatory=True, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + warp_file=dict(argstr='-warp %s', + xor=['xfm_file'], + ), + xfm_file=dict(argstr='-xfm %s', + xor=['warp_file'], + ), + ) + inputs = WarpPointsFromStd.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_WarpPointsFromStd_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = WarpPointsFromStd.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py index aa9d63ceca..ac7f73031b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -15,7 +15,8 @@ def test_WarpPointsToStd_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), img_file=dict(argstr='-img %s', @@ -34,7 +35,8 @@ def test_WarpPointsToStd_inputs(): std_file=dict(argstr='-std %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_file=dict(argstr='-warp %s', xor=['xfm_file'], 
diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py index a32d067588..2361a8f7e4 100644 --- a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -9,7 +9,8 @@ def test_WarpUtils_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='--in=%s', @@ -30,7 +31,8 @@ def test_WarpUtils_inputs(): reference=dict(argstr='--ref=%s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_resolution=dict(argstr='--warpres=%0.4f,%0.4f,%0.4f', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py index 6a3022ed2d..f84b0b9ae8 100644 --- a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -41,7 +41,8 @@ def test_XFibres5_inputs(): ), gradnonlin=dict(argstr='--gradnonlin=%s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), logdir=dict(argstr='--logdir=%s', @@ -74,7 +75,8 @@ def test_XFibres5_inputs(): ), seed=dict(argstr='--seed=%d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), update_proposal_every=dict(argstr='--updateproposalevery=%d', ), diff --git a/nipype/interfaces/fsl/tests/test_model.py b/nipype/interfaces/fsl/tests/test_model.py index 667e9033c9..b2e3f8571c 100644 --- a/nipype/interfaces/fsl/tests/test_model.py +++ b/nipype/interfaces/fsl/tests/test_model.py @@ -13,7 +13,7 @@ @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") def test_MultipleRegressDesign(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() foo = fsl.MultipleRegressDesign() foo.inputs.regressors = dict(voice_stenght=[1, 1, 1], 
age=[0.2, 0.4, 0.5], BMI=[1, -1, 2]) con1 = ['voice_and_age', 'T', ['age', 'voice_stenght'], [0.5, 0.5]] @@ -22,7 +22,7 @@ def test_MultipleRegressDesign(tmpdir): res = foo.run() for ii in ["mat", "con", "fts", "grp"]: - assert getattr(res.outputs, "design_"+ii) == os.path.join(os.getcwd(), 'design.'+ii) + assert getattr(res.outputs, "design_"+ii) == tmpdir.join('design.'+ii).strpath design_mat_expected_content = """/NumWaves 3 /NumPoints 3 @@ -62,6 +62,6 @@ def test_MultipleRegressDesign(tmpdir): 1 """ for ii in ["mat", "con", "fts", "grp"]: - assert open(os.path.join(os.getcwd(), 'design.'+ii), 'r').read() == eval("design_"+ii+"_expected_content") + assert tmpdir.join('design.'+ii).read() == eval("design_"+ii+"_expected_content") diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index a4c8f2640f..3d75d514a6 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -6,10 +6,9 @@ from builtins import open, open import os -import tempfile from copy import deepcopy -import pytest +import pytest, pdb from nipype.utils.filemanip import split_filename, filename_to_list from .. 
import preprocess as fsl from nipype.interfaces.fsl import Info @@ -27,11 +26,9 @@ def fsl_name(obj, fname): @pytest.fixture() def setup_infile(tmpdir): ext = Info.output_type_to_ext(Info.output_type()) - tmp_dir = str(tmpdir) - tmp_infile = os.path.join(tmp_dir, 'foo' + ext) - open(tmp_infile, 'w') - - return (tmp_infile, tmp_dir) + tmp_infile = tmpdir.join('foo' + ext) + tmp_infile.open("w") + return (tmp_infile.strpath, tmpdir.strpath) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -153,7 +150,7 @@ def test_fast(setup_infile): @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -def test_fast_list_outputs(setup_infile): +def test_fast_list_outputs(setup_infile, tmpdir): ''' By default (no -o), FSL's fast command outputs files into the same directory as the input files. If the flag -o is set, it outputs files into the cwd ''' @@ -166,9 +163,9 @@ def _run_and_test(opts, output_base): # set up tmp_infile, indir = setup_infile - cwd = tempfile.mkdtemp() - os.chdir(cwd) - assert indir != cwd + cwd = tmpdir.mkdir("new") + cwd.chdir() + assert indir != cwd.strpath out_basename = 'a_basename' # run and test @@ -177,17 +174,17 @@ def _run_and_test(opts, output_base): _run_and_test(opts, os.path.join(input_path, input_filename)) opts['out_basename'] = out_basename - _run_and_test(opts, os.path.join(cwd, out_basename)) + _run_and_test(opts, os.path.join(cwd.strpath, out_basename)) @pytest.fixture() def setup_flirt(tmpdir): ext = Info.output_type_to_ext(Info.output_type()) - tmp_dir = str(tmpdir) - _, infile = tempfile.mkstemp(suffix=ext, dir=tmp_dir) - _, reffile = tempfile.mkstemp(suffix=ext, dir=tmp_dir) - - return (tmp_dir, infile, reffile) + infile = tmpdir.join("infile"+ext) + infile.open("w") + reffile = tmpdir.join("reffile"+ext) + reffile.open("w") + return (tmpdir, infile.strpath, reffile.strpath) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @@ -205,6 +202,7 @@ def test_flirt(setup_flirt): out_file='outfile', 
out_matrix_file='outmat.mat', bins=256, cost='mutualinfo') + flirt_est = fsl.FLIRT(in_file=infile, reference=reffile, out_matrix_file='outmat.mat', bins=256, @@ -249,8 +247,8 @@ def test_flirt(setup_flirt): axfm2.inputs.in_matrix_file = reffile assert axfm2.cmdline == (realcmd + ' -applyxfm -init %s' % reffile) - - _, tmpfile = tempfile.mkstemp(suffix='.nii', dir=tmpdir) + tmpfile = tmpdir.join("file4test.nii") + tmpfile.open("w") # Loop over all inputs, set a reasonable value and make sure the # cmdline is updated correctly. for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()): @@ -258,7 +256,8 @@ def test_flirt(setup_flirt): if key in ('trait_added', 'trait_modified', 'in_file', 'reference', 'environ', 'output_type', 'out_file', 'out_matrix_file', 'in_matrix_file', 'apply_xfm', 'ignore_exception', - 'terminal_output', 'out_log', 'save_log'): + 'resource_monitor', 'terminal_output', 'out_log', + 'save_log'): continue param = None value = None @@ -266,7 +265,7 @@ def test_flirt(setup_flirt): param = '-v' value = '-v' elif isinstance(trait_spec.trait_type, File): - value = tmpfile + value = tmpfile.strpath param = trait_spec.argstr % value elif trait_spec.default is False: param = trait_spec.argstr @@ -382,7 +381,7 @@ def test_mcflirt_noinput(): def test_fnirt(setup_flirt): tmpdir, infile, reffile = setup_flirt - os.chdir(tmpdir) + tmpdir.chdir() fnirt = fsl.FNIRT() assert fnirt.cmd == 'fnirt' @@ -393,7 +392,8 @@ def test_fnirt(setup_flirt): ('in_fwhm', '--infwhm', [4, 2, 2, 0], '4,2,2,0'), ('apply_refmask', '--applyrefmask', [0, 0, 1, 1], '0,0,1,1'), ('apply_inmask', '--applyinmask', [0, 0, 0, 1], '0,0,0,1'), - ('regularization_lambda', '--lambda', [0.5, 0.75], '0.5,0.75')] + ('regularization_lambda', '--lambda', [0.5, 0.75], '0.5,0.75'), + ('intensity_mapping_model', '--intmod', 'global_non_linear', 'global_non_linear')] for item, flag, val, strval in params: fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, @@ -406,7 +406,7 @@ def 
test_fnirt(setup_flirt): ' %s=%s --ref=%s'\ ' --iout=%s' % (infile, log, flag, strval, reffile, iout) - elif item in ('in_fwhm'): + elif item in ('in_fwhm', 'intensity_mapping_model'): cmd = 'fnirt --in=%s %s=%s --logout=%s '\ '--ref=%s --iout=%s' % (infile, flag, strval, log, reffile, iout) @@ -547,11 +547,10 @@ def setup_fugue(tmpdir): import os.path as op d = np.ones((80, 80, 80)) - tmp_dir = str(tmpdir) - infile = op.join(tmp_dir, 'dumbfile.nii.gz') + infile = tmpdir.join('dumbfile.nii.gz').strpath nb.Nifti1Image(d, None, None).to_filename(infile) - return (tmp_dir, infile) + return (tmpdir, infile) @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") diff --git a/nipype/interfaces/fsl/tests/test_utils.py b/nipype/interfaces/fsl/tests/test_utils.py index 9196d6d8d9..66b91cf96f 100644 --- a/nipype/interfaces/fsl/tests/test_utils.py +++ b/nipype/interfaces/fsl/tests/test_utils.py @@ -3,8 +3,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: import os -from tempfile import mkdtemp -from shutil import rmtree import numpy as np diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py index 7bb95f49eb..b28a4df425 100644 --- a/nipype/interfaces/fsl/utils.py +++ b/nipype/interfaces/fsl/utils.py @@ -191,7 +191,7 @@ class Smooth(FSLCommand): >>> sm.inputs.output_type = 'NIFTI_GZ' >>> sm.inputs.in_file = 'functional2.nii' >>> sm.inputs.sigma = 8.0 - >>> sm.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> sm.cmdline # doctest: +ELLIPSIS 'fslmaths functional2.nii -kernel gauss 8.000 -fmean functional2_smooth.nii.gz' Setting the kernel width using fwhm: @@ -200,7 +200,7 @@ class Smooth(FSLCommand): >>> sm.inputs.output_type = 'NIFTI_GZ' >>> sm.inputs.in_file = 'functional2.nii' >>> sm.inputs.fwhm = 8.0 - >>> sm.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> sm.cmdline # doctest: +ELLIPSIS 'fslmaths functional2.nii -kernel gauss 3.397 -fmean functional2_smooth.nii.gz' One of sigma or fwhm must be set: @@ -263,10 +263,10 @@ class Merge(FSLCommand): 
>>> merger.inputs.in_files = ['functional2.nii', 'functional3.nii'] >>> merger.inputs.dimension = 't' >>> merger.inputs.output_type = 'NIFTI_GZ' - >>> merger.cmdline # doctest: +ALLOW_UNICODE + >>> merger.cmdline 'fslmerge -t functional2_merged.nii.gz functional2.nii functional3.nii' >>> merger.inputs.tr = 2.25 - >>> merger.cmdline # doctest: +ALLOW_UNICODE + >>> merger.cmdline 'fslmerge -tr functional2_merged.nii.gz functional2.nii functional3.nii 2.25' @@ -1187,7 +1187,7 @@ class ConvertXFM(FSLCommand): >>> invt.inputs.in_file = "flirt.mat" >>> invt.inputs.invert_xfm = True >>> invt.inputs.out_file = 'flirt_inv.mat' - >>> invt.cmdline # doctest: +ALLOW_UNICODE + >>> invt.cmdline 'convert_xfm -omat flirt_inv.mat -inverse flirt.mat' @@ -1492,7 +1492,7 @@ class InvWarp(FSLCommand): >>> invwarp.inputs.warp = "struct2mni.nii" >>> invwarp.inputs.reference = "anatomical.nii" >>> invwarp.inputs.output_type = "NIFTI_GZ" - >>> invwarp.cmdline # doctest: +ALLOW_UNICODE + >>> invwarp.cmdline 'invwarp --out=struct2mni_inverse.nii.gz --ref=anatomical.nii --warp=struct2mni.nii' >>> res = invwarp.run() # doctest: +SKIP @@ -1728,7 +1728,7 @@ class WarpUtils(FSLCommand): >>> warputils.inputs.out_format = 'spline' >>> warputils.inputs.warp_resolution = (10,10,10) >>> warputils.inputs.output_type = "NIFTI_GZ" - >>> warputils.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> warputils.cmdline # doctest: +ELLIPSIS 'fnirtfileutils --in=warpfield.nii --outformat=spline --ref=T1.nii --warpres=10.0000,10.0000,10.0000 --out=warpfield_coeffs.nii.gz' >>> res = invwarp.run() # doctest: +SKIP @@ -1880,7 +1880,7 @@ class ConvertWarp(FSLCommand): >>> warputils.inputs.reference = "T1.nii" >>> warputils.inputs.relwarp = True >>> warputils.inputs.output_type = "NIFTI_GZ" - >>> warputils.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> warputils.cmdline # doctest: +ELLIPSIS 'convertwarp --ref=T1.nii --rel --warp1=warpfield.nii --out=T1_concatwarp.nii.gz' >>> res = warputils.run() # doctest: +SKIP 
@@ -1940,7 +1940,7 @@ class WarpPoints(CommandLine): >>> warppoints.inputs.dest_file = 'T1.nii' >>> warppoints.inputs.warp_file = 'warpfield.nii' >>> warppoints.inputs.coord_mm = True - >>> warppoints.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> warppoints.cmdline # doctest: +ELLIPSIS 'img2imgcoord -mm -dest T1.nii -src epi.nii -warp warpfield.nii surf.txt' >>> res = warppoints.run() # doctest: +SKIP @@ -2100,7 +2100,7 @@ class WarpPointsToStd(WarpPoints): >>> warppoints.inputs.std_file = 'mni.nii' >>> warppoints.inputs.warp_file = 'warpfield.nii' >>> warppoints.inputs.coord_mm = True - >>> warppoints.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> warppoints.cmdline # doctest: +ELLIPSIS 'img2stdcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt' >>> res = warppoints.run() # doctest: +SKIP @@ -2110,6 +2110,59 @@ class WarpPointsToStd(WarpPoints): input_spec = WarpPointsToStdInputSpec output_spec = WarpPointsOutputSpec _cmd = 'img2stdcoord' + _terminal_output = 'file_split' + + +class WarpPointsFromStdInputSpec(CommandLineInputSpec): + img_file = File(exists=True, argstr='-img %s', mandatory=True, + desc='filename of a destination image') + std_file = File(exists=True, argstr='-std %s', mandatory=True, + desc='filename of the image in standard space') + in_coords = File(exists=True, position=-2, argstr='%s', mandatory=True, + desc='filename of file containing coordinates') + xfm_file = File(exists=True, argstr='-xfm %s', xor=['warp_file'], + desc='filename of affine transform (e.g. source2dest.mat)') + warp_file = File(exists=True, argstr='-warp %s', xor=['xfm_file'], + desc='filename of warpfield (e.g. 
' + 'intermediate2dest_warp.nii.gz)') + coord_vox = traits.Bool(True, argstr='-vox', xor=['coord_mm'], + desc='all coordinates in voxels - default') + coord_mm = traits.Bool(False, argstr='-mm', xor=['coord_vox'], + desc='all coordinates in mm') + + +class WarpPointsFromStd(CommandLine): + """ + Use FSL `std2imgcoord `_ + to transform point sets to standard space coordinates. Accepts plain text coordinates + files. + + + Examples + -------- + + >>> from nipype.interfaces.fsl import WarpPointsFromStd + >>> warppoints = WarpPointsFromStd() + >>> warppoints.inputs.in_coords = 'surf.txt' + >>> warppoints.inputs.img_file = 'T1.nii' + >>> warppoints.inputs.std_file = 'mni.nii' + >>> warppoints.inputs.warp_file = 'warpfield.nii' + >>> warppoints.inputs.coord_mm = True + >>> warppoints.cmdline # doctest: +ELLIPSIS + 'std2imgcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt' + >>> res = warppoints.run() # doctest: +SKIP + + + """ + + input_spec = WarpPointsFromStdInputSpec + output_spec = WarpPointsOutputSpec + _cmd = 'std2imgcoord' + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath('stdout.nipype') + return outputs class MotionOutliersInputSpec(FSLCommandInputSpec): @@ -2164,7 +2217,7 @@ class MotionOutliers(FSLCommand): >>> from nipype.interfaces.fsl import MotionOutliers >>> mo = MotionOutliers() >>> mo.inputs.in_file = "epi.nii" - >>> mo.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> mo.cmdline # doctest: +ELLIPSIS 'fsl_motion_outliers -i epi.nii -o epi_outliers.txt -p epi_metrics.png -s epi_metrics.txt' >>> res = mo.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index cfdedd8870..0793b955bd 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -40,6 +40,7 @@ from .base import ( TraitedSpec, traits, Str, File, Directory, BaseInterface, InputMultiPath, isdefined, OutputMultiPath, DynamicTraitedSpec, Undefined, BaseInterfaceInputSpec) +from 
.bids_utils import BIDSDataGrabber try: import pyxnat @@ -71,7 +72,7 @@ def copytree(src, dst, use_hardlink=False): try: os.makedirs(dst) except OSError as why: - if 'File exists' in why: + if 'File exists' in why.strerror: pass else: raise why @@ -350,17 +351,17 @@ def _substitute(self, pathstr): oldpathstr = pathstr pathstr = pathstr.replace(key, val) if pathstr != oldpathstr: - iflogger.debug('sub.str: %s -> %s using %r -> %r' - % (oldpathstr, pathstr, key, val)) + iflogger.debug('sub.str: %s -> %s using %r -> %r', + oldpathstr, pathstr, key, val) if isdefined(self.inputs.regexp_substitutions): for key, val in self.inputs.regexp_substitutions: oldpathstr = pathstr pathstr, _ = re.subn(key, val, pathstr) if pathstr != oldpathstr: - iflogger.debug('sub.regexp: %s -> %s using %r -> %r' - % (oldpathstr, pathstr, key, val)) + iflogger.debug('sub.regexp: %s -> %s using %r -> %r', + oldpathstr, pathstr, key, val) if pathstr_ != pathstr: - iflogger.info('sub: %s -> %s' % (pathstr_, pathstr)) + iflogger.info('sub: %s -> %s', pathstr_, pathstr) return pathstr # Check for s3 in base directory @@ -513,8 +514,8 @@ def _fetch_bucket(self, bucket_name): # Try and get AWS credentials if a creds_path is specified if aws_access_key_id and aws_secret_access_key: # Init connection - iflogger.info('Connecting to S3 bucket: %s with credentials...'\ - % bucket_name) + iflogger.info('Connecting to S3 bucket: %s with credentials...', + bucket_name) # Use individual session for each instance of DataSink # Better when datasinks are being used in multi-threading, see: # http://boto3.readthedocs.org/en/latest/guide/resources.html#multithreading @@ -524,8 +525,7 @@ def _fetch_bucket(self, bucket_name): # Otherwise, connect anonymously else: - iflogger.info('Connecting to AWS: %s anonymously...'\ - % bucket_name) + iflogger.info('Connecting to AWS: %s anonymously...', bucket_name) session = boto3.session.Session() s3_resource = session.resource('s3', use_ssl=True) 
s3_resource.meta.client.meta.events.register('choose-signer.s3.*', @@ -610,7 +610,7 @@ def _upload_to_s3(self, bucket, src, dst): src_md5 = hashlib.md5(src_read).hexdigest() # Move to next loop iteration if dst_md5 == src_md5: - iflogger.info('File %s already exists on S3, skipping...' % dst_f) + iflogger.info('File %s already exists on S3, skipping...', dst_f) continue else: iflogger.info('Overwriting previous S3 file...') @@ -619,8 +619,8 @@ def _upload_to_s3(self, bucket, src, dst): iflogger.info('New file to S3') # Copy file up to S3 (either encrypted or not) - iflogger.info('Uploading %s to S3 bucket, %s, as %s...'\ - % (src_f, bucket.name, dst_f)) + iflogger.info('Uploading %s to S3 bucket, %s, as %s...', src_f, + bucket.name, dst_f) if self.inputs.encrypt_bucket_keys: extra_args = {'ServerSideEncryption' : 'AES256'} else: @@ -670,7 +670,7 @@ def _list_outputs(self): outdir = local_out_exception # Log local copying directory iflogger.info('Access to S3 failed! Storing outputs locally at: '\ - '%s\nError: %s' %(outdir, exc)) + '%s\nError: %s', outdir, exc) else: s3dir = '' @@ -687,7 +687,7 @@ def _list_outputs(self): try: os.makedirs(outdir) except OSError as inst: - if 'File exists' in inst: + if 'File exists' in inst.strerror: pass else: raise(inst) @@ -696,7 +696,7 @@ def _list_outputs(self): for key, files in list(self.inputs._outputs.items()): if not isdefined(files): continue - iflogger.debug("key: %s files: %s" % (key, str(files))) + iflogger.debug("key: %s files: %s", key, str(files)) files = filename_to_list(files) tempoutdir = outdir if s3_flag: @@ -738,22 +738,22 @@ def _list_outputs(self): try: os.makedirs(path) except OSError as inst: - if 'File exists' in inst: + if 'File exists' in inst.strerror: pass else: raise(inst) # If src is a file, copy it to dst if os.path.isfile(src): - iflogger.debug('copyfile: %s %s' % (src, dst)) + iflogger.debug('copyfile: %s %s', src, dst) copyfile(src, dst, copy=True, hashmethod='content', 
use_hardlink=use_hardlink) out_files.append(dst) # If src is a directory, copy entire contents to dst dir elif os.path.isdir(src): if os.path.exists(dst) and self.inputs.remove_dest_dir: - iflogger.debug('removing: %s' % dst) + iflogger.debug('removing: %s', dst) shutil.rmtree(dst) - iflogger.debug('copydir: %s %s' % (src, dst)) + iflogger.debug('copydir: %s %s', src, dst) copytree(src, dst) out_files.append(dst) @@ -869,7 +869,7 @@ def _list_outputs(self): # get list of all files in s3 bucket conn = boto.connect_s3(anon=self.inputs.anon) bkt = conn.get_bucket(self.inputs.bucket) - bkt_files = list(k.key for k in bkt.list()) + bkt_files = list(k.key for k in bkt.list(prefix=self.inputs.bucket_path)) # keys are outfields, args are template args for the outfield for key, args in list(self.inputs.template_args.items()): @@ -948,12 +948,11 @@ def _list_outputs(self): # We must convert to the local location specified # and download the files. for key,val in outputs.items(): - #This will basically be either list-like or string-like: - #if it has the __iter__ attribute, it's list-like (list, - #tuple, numpy array) and we iterate through each of its - #values. If it doesn't, it's string-like (string, - #unicode), and we convert that value directly. - if hasattr(val,'__iter__'): + # This will basically be either list-like or string-like: + # if it's an instance of a list, we'll iterate through it. + # If it isn't, it's string-like (string, unicode), we + # convert that value directly. 
+ if isinstance(val, (list, tuple, set)): for i,path in enumerate(val): outputs[key][i] = self.s3tolocal(path, bkt) else: @@ -1188,15 +1187,18 @@ class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): base_directory = Directory(exists=True, desc="Root path common to templates.") sort_filelist = traits.Bool(True, usedefault=True, - desc="When matching mutliple files, return them in sorted order.") + desc="When matching mutliple files, return them" + " in sorted order.") raise_on_empty = traits.Bool(True, usedefault=True, - desc="Raise an exception if a template pattern matches no files.") + desc="Raise an exception if a template pattern " + "matches no files.") force_lists = traits.Either(traits.Bool(), traits.List(Str()), default=False, usedefault=True, - desc=("Whether to return outputs as a list even when only one file " - "matches the template. Either a boolean that applies to all " - "output fields or a list of output field names to coerce to " - " a list")) + desc=("Whether to return outputs as a list even" + " when only one file matches the template. " + "Either a boolean that applies to all output " + "fields or a list of output field names to " + "coerce to a list")) class SelectFiles(IOBase): @@ -1219,7 +1221,7 @@ class SelectFiles(IOBase): ... 
"epi": "{subject_id}/func/f[0, 1].nii"} >>> dg = Node(SelectFiles(templates), "selectfiles") >>> dg.inputs.subject_id = "subj1" - >>> pprint.pprint(dg.outputs.get()) # doctest: +NORMALIZE_WHITESPACE +ALLOW_UNICODE + >>> pprint.pprint(dg.outputs.get()) # doctest: {'T1': , 'epi': } The same thing with dynamic grabbing of specific files: @@ -1256,8 +1258,10 @@ def __init__(self, templates, **kwargs): infields = [] for name, template in list(templates.items()): for _, field_name, _, _ in string.Formatter().parse(template): - if field_name is not None and field_name not in infields: - infields.append(field_name) + if field_name is not None: + field_name = re.match("\w+", field_name).group() + if field_name not in infields: + infields.append(field_name) self._infields = infields self._outfields = list(templates) @@ -1294,6 +1298,8 @@ def _list_outputs(self): for field, template in list(self._templates.items()): + find_dirs = template[-1] == os.sep + # Build the full template path if isdefined(self.inputs.base_directory): template = op.abspath(op.join( @@ -1301,6 +1307,10 @@ def _list_outputs(self): else: template = op.abspath(template) + # re-add separator if searching exclusively for directories + if find_dirs: + template += os.sep + # Fill in the template and glob for files filled_template = template.format(**info) filelist = glob.glob(filled_template) @@ -1613,13 +1623,13 @@ def _get_files(self, path, key, dirval, altkey=None): globprefix = self.inputs.hemi + '.' else: globprefix = '?h.' + if key in ('aseg_stats', 'wmparc_stats'): + globprefix = '' elif key == 'ribbon': if self.inputs.hemi != 'both': globprefix = self.inputs.hemi + '.' 
else: globprefix = '*' - elif key in ('aseg_stats', 'wmparc_stats'): - globprefix = '' keys = filename_to_list(altkey) if altkey else [key] globfmt = os.path.join(path, dirval, ''.join((globprefix, '{}', globsuffix))) @@ -2419,7 +2429,7 @@ def _list_outputs(self): try: sftp.get(os.path.join(filledtemplate_dir, f), f) except IOError: - iflogger.info('remote file %s not found' % f) + iflogger.info('remote file %s not found', f) if any([val is None for val in outputs[key]]): outputs[key] = [] if len(outputs[key]) == 0: @@ -2466,18 +2476,28 @@ class JSONFileGrabber(IOBase): Example ------- + .. testsetup:: + + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() # changing to a temporary directory + + .. doctest:: + >>> import pprint >>> from nipype.interfaces.io import JSONFileGrabber >>> jsonSource = JSONFileGrabber() >>> jsonSource.inputs.defaults = {'param1': 'overrideMe', 'param3': 1.0} >>> res = jsonSource.run() - >>> pprint.pprint(res.outputs.get()) # doctest: +ALLOW_UNICODE + >>> pprint.pprint(res.outputs.get()) {'param1': 'overrideMe', 'param3': 1.0} - >>> jsonSource.inputs.in_file = 'jsongrabber.txt' + >>> jsonSource.inputs.in_file = os.path.join(datadir, 'jsongrabber.txt') >>> res = jsonSource.run() - >>> pprint.pprint(res.outputs.get()) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS +ALLOW_UNICODE + >>> pprint.pprint(res.outputs.get()) # doctest:, +ELLIPSIS {'param1': 'exampleStr', 'param2': 4, 'param3': 1.0} + .. 
testsetup:: + + >>> os.chdir(old.strpath) """ input_spec = JSONFileGrabberInputSpec diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py index d3f6f26993..0d8aa29e16 100644 --- a/nipype/interfaces/matlab.py +++ b/nipype/interfaces/matlab.py @@ -22,6 +22,7 @@ def get_matlab_command(): try: res = CommandLine(command='which', args=matlab_cmd, + resource_monitor=False, terminal_output='allatonce').run() matlab_path = res.runtime.stdout.strip() except Exception as e: @@ -104,7 +105,7 @@ def __init__(self, matlab_cmd=None, **inputs): self.inputs.single_comp_thread = True # For matlab commands force all output to be returned since matlab # does not have a clean way of notifying an error - self.inputs.terminal_output = 'allatonce' + self.terminal_output = 'allatonce' @classmethod def set_default_matlab_cmd(cls, matlab_cmd): @@ -140,7 +141,7 @@ def set_default_paths(cls, paths): cls._default_paths = paths def _run_interface(self, runtime): - self.inputs.terminal_output = 'allatonce' + self.terminal_output = 'allatonce' runtime = super(MatlabCommand, self)._run_interface(runtime) try: # Matlab can leave the terminal in a barbbled state diff --git a/nipype/interfaces/meshfix.py b/nipype/interfaces/meshfix.py index 6ae1859459..466190468a 100644 --- a/nipype/interfaces/meshfix.py +++ b/nipype/interfaces/meshfix.py @@ -105,7 +105,7 @@ class MeshFix(CommandLine): >>> fix.inputs.in_file1 = 'lh-pial.stl' >>> fix.inputs.in_file2 = 'rh-pial.stl' >>> fix.run() # doctest: +SKIP - >>> fix.cmdline # doctest: +ALLOW_UNICODE + >>> fix.cmdline 'meshfix lh-pial.stl rh-pial.stl -o lh-pial_fixed.off' """ _cmd = 'meshfix' diff --git a/nipype/interfaces/minc/base.py b/nipype/interfaces/minc/base.py index 6348e4ee0f..e4b8592adf 100644 --- a/nipype/interfaces/minc/base.py +++ b/nipype/interfaces/minc/base.py @@ -109,11 +109,11 @@ def aggregate_filename(files, new_suffix): >>> from nipype.interfaces.minc.base import aggregate_filename >>> f = aggregate_filename(['/tmp/foo1.mnc', 
'/tmp/foo2.mnc', '/tmp/foo3.mnc'], 'averaged') - >>> os.path.split(f)[1] # This has a full path, so just check the filename. # doctest: +ALLOW_UNICODE + >>> os.path.split(f)[1] # This has a full path, so just check the filename. 'foo_averaged.mnc' >>> f = aggregate_filename(['/tmp/foo1.mnc', '/tmp/blah1.mnc'], 'averaged') - >>> os.path.split(f)[1] # This has a full path, so just check the filename. # doctest: +ALLOW_UNICODE + >>> os.path.split(f)[1] # This has a full path, so just check the filename. 'foo1_averaged.mnc' """ diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py index 5b9f60d0d3..b7adb07145 100644 --- a/nipype/interfaces/minc/tests/test_auto_Average.py +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -59,7 +59,8 @@ def test_Average_inputs(): format_unsigned=dict(argstr='-unsigned', xor=('format_filetype', 'format_byte', 'format_short', 'format_int', 'format_long', 'format_float', 'format_double', 'format_signed', 'format_unsigned'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_files=dict(argstr='%s', @@ -94,7 +95,8 @@ def test_Average_inputs(): ), sdfile=dict(argstr='-sdfile %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), two=dict(argstr='-2', ), diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py index 3ef4392a2c..b469e57184 100644 --- a/nipype/interfaces/minc/tests/test_auto_BBox.py +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -15,7 +15,8 @@ def test_BBox_inputs(): ), format_mincreshape=dict(argstr='-mincreshape', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_BBox_inputs(): name_template='%s_bbox.txt', position=-1, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='-threshold', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py index 10a804e98f..9b24d9cb0b 100644 --- a/nipype/interfaces/minc/tests/test_auto_Beast.py +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -23,7 +23,8 @@ def test_Beast_inputs(): ), flip_images=dict(argstr='-flip', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -58,7 +59,8 @@ def test_Beast_inputs(): ), smoothness_factor_beta=dict(argstr='-beta %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold_patch_selection=dict(argstr='-threshold %s', ), diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py index 0bec968390..8d9641db16 100644 --- a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -12,7 +12,8 @@ def test_BestLinReg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), output_mnc=dict(argstr='%s', @@ -39,7 +40,8 @@ def test_BestLinReg_inputs(): mandatory=True, position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py index 1fc965370c..f0f4bd20f8 100644 --- a/nipype/interfaces/minc/tests/test_auto_BigAverage.py +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -12,7 +12,8 @@ def test_BigAverage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, 
+ ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_files=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_BigAverage_inputs(): name_source=['input_files'], name_template='%s_bigaverage_stdev.mnc', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tmpdir=dict(argstr='-tmpdir %s', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py index 0f87d37bb2..a0f2d95d40 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blob.py +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -11,7 +11,8 @@ def test_Blob_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_Blob_inputs(): name_template='%s_blob.mnc', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trace=dict(argstr='-trace', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py index 77342e4f9d..e50f0f0b47 100644 --- a/nipype/interfaces/minc/tests/test_auto_Blur.py +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py @@ -27,7 +27,8 @@ def test_Blur_inputs(): ), gradient=dict(argstr='-gradient', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -48,7 +49,8 @@ def test_Blur_inputs(): mandatory=True, xor=('fwhm', 'fwhm3d', 'standard_dev'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Blur.input_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py index 6ac13c47b1..1e690a1468 100644 --- a/nipype/interfaces/minc/tests/test_auto_Calc.py +++ 
b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -62,7 +62,8 @@ def test_Calc_inputs(): format_unsigned=dict(argstr='-unsigned', xor=('format_filetype', 'format_byte', 'format_short', 'format_int', 'format_long', 'format_float', 'format_double', 'format_signed', 'format_unsigned'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_nan=dict(argstr='-ignore_nan', @@ -103,7 +104,8 @@ def test_Calc_inputs(): quiet=dict(argstr='-quiet', xor=('verbose', 'quiet'), ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), two=dict(argstr='-2', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py index 10bbdad6a6..6df129aee4 100644 --- a/nipype/interfaces/minc/tests/test_auto_Convert.py +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -17,7 +17,8 @@ def test_Convert_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -33,7 +34,8 @@ def test_Convert_inputs(): ), template=dict(argstr='-template', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), two=dict(argstr='-2', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py index 62fa8b7470..73662e9b88 100644 --- a/nipype/interfaces/minc/tests/test_auto_Copy.py +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -9,7 +9,8 @@ def test_Copy_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_Copy_inputs(): real_values=dict(argstr='-real_values', xor=('pixel_values', 'real_values'), ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Copy.input_spec() diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py index 07e183e009..2e9ab091b4 100644 --- a/nipype/interfaces/minc/tests/test_auto_Dump.py +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -21,7 +21,8 @@ def test_Dump_inputs(): header_data=dict(argstr='-h', xor=('coordinate_data', 'header_data'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -45,7 +46,8 @@ def test_Dump_inputs(): ), precision=dict(argstr='%s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), variables=dict(argstr='-v %s', sep=',', diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py index f4fb2c6e2b..0df7132519 100644 --- a/nipype/interfaces/minc/tests/test_auto_Extract.py +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -48,7 +48,8 @@ def test_Extract_inputs(): flip_z_positive=dict(argstr='+zdirection', xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_maximum=dict(argstr='-image_maximum %s', @@ -80,7 +81,8 @@ def test_Extract_inputs(): start=dict(argstr='-start %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), write_ascii=dict(argstr='-ascii', xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', 'write_int', 'write_long', 'write_float', 'write_double', 'write_signed', 'write_unsigned'), diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py index 77ffe4d114..e09dc43c73 100644 --- 
a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -14,7 +14,8 @@ def test_Gennlxfm_inputs(): ), ident=dict(argstr='-ident', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), like=dict(argstr='-like %s', @@ -28,7 +29,8 @@ def test_Gennlxfm_inputs(): ), step=dict(argstr='-step %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py index b12e3cd60a..33946b7e44 100644 --- a/nipype/interfaces/minc/tests/test_auto_Math.py +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -73,7 +73,8 @@ def test_Math_inputs(): format_unsigned=dict(argstr='-unsigned', xor=('format_filetype', 'format_byte', 'format_short', 'format_int', 'format_long', 'format_float', 'format_double', 'format_signed', 'format_unsigned'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_nan=dict(argstr='-ignore_nan', @@ -134,7 +135,8 @@ def test_Math_inputs(): ), square=dict(argstr='-square', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), test_eq=dict(argstr='-eq', ), diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py index a30e856276..905c4b9080 100644 --- a/nipype/interfaces/minc/tests/test_auto_NlpFit.py +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -15,7 +15,8 @@ def test_NlpFit_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), init_xfm=dict(argstr='-init_xfm %s', @@ -37,7 +38,8 @@ def test_NlpFit_inputs(): mandatory=True, position=-2, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py index 410309a364..ca19629e3a 100644 --- a/nipype/interfaces/minc/tests/test_auto_Norm.py +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -17,7 +17,8 @@ def test_Norm_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -44,7 +45,8 @@ def test_Norm_inputs(): name_source=['input_file'], name_template='%s_norm_threshold_mask.mnc', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='-threshold', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py index b3f5e6cf78..1e0b92fb35 100644 --- a/nipype/interfaces/minc/tests/test_auto_Pik.py +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -22,7 +22,8 @@ def test_Pik_inputs(): horizontal_triplanar_view=dict(argstr='--horizontal', xor=('vertical_triplanar_view', 'horizontal_triplanar_view'), ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_range=dict(argstr='--image_range %s %s', @@ -65,7 +66,8 @@ def test_Pik_inputs(): ), start=dict(argstr='--slice %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tile_size=dict(argstr='--tilesize %s', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py b/nipype/interfaces/minc/tests/test_auto_Resample.py index dd3e788557..f11c11daf0 100644 --- a/nipype/interfaces/minc/tests/test_auto_Resample.py +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -51,7 +51,8 @@ def test_Resample_inputs(): 
half_width_sinc_window=dict(argstr='-width %s', requires=['sinc_interpolation'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -118,7 +119,8 @@ def test_Resample_inputs(): ), talairach=dict(argstr='-talairach', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation=dict(argstr='-transformation %s', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py index 11ee473e78..64f28362c9 100644 --- a/nipype/interfaces/minc/tests/test_auto_Reshape.py +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -12,7 +12,8 @@ def test_Reshape_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_Reshape_inputs(): name_template='%s_reshape.mnc', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py index dea6132cdf..3b48a27654 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToEcat.py +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -17,7 +17,8 @@ def test_ToEcat_inputs(): ), ignore_ecat_subheader_variable=dict(argstr='-ignore_ecat_subheader_variable', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_patient_variable=dict(argstr='-ignore_patient_variable', @@ -38,7 +39,8 @@ def test_ToEcat_inputs(): name_template='%s_to_ecat.v', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxels_as_integers=dict(argstr='-label', ), diff 
--git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py index d92ee7322b..02af1da373 100644 --- a/nipype/interfaces/minc/tests/test_auto_ToRaw.py +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -9,7 +9,8 @@ def test_ToRaw_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_ToRaw_inputs(): name_template='%s.raw', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), write_byte=dict(argstr='-byte', xor=('write_byte', 'write_short', 'write_int', 'write_long', 'write_float', 'write_double'), diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py index cf0550b1b1..88145f639d 100644 --- a/nipype/interfaces/minc/tests/test_auto_VolSymm.py +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -18,7 +18,8 @@ def test_VolSymm_inputs(): ), fit_nonlinear=dict(argstr='-nonlinear', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -35,7 +36,8 @@ def test_VolSymm_inputs(): name_template='%s_vol_symm.mnc', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trans_file=dict(argstr='%s', genfile=True, diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py index 89bd7bda04..7bf95a9c3d 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volcentre.py +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -16,7 +16,8 @@ def test_Volcentre_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
input_file=dict(argstr='%s', @@ -30,7 +31,8 @@ def test_Volcentre_inputs(): name_template='%s_volcentre.mnc', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py index 74efb575c1..76ad3283c8 100644 --- a/nipype/interfaces/minc/tests/test_auto_Voliso.py +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -14,7 +14,8 @@ def test_Voliso_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_Voliso_inputs(): name_template='%s_voliso.mnc', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py index 063db70230..6ac867639e 100644 --- a/nipype/interfaces/minc/tests/test_auto_Volpad.py +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -18,7 +18,8 @@ def test_Volpad_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_Volpad_inputs(): ), smooth_distance=dict(argstr='-smooth_distance %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py index e90331196f..3d288aa1cd 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -16,7 +16,8 @@ def 
test_XfmAvg_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_linear=dict(argstr='-ignore_linear', @@ -33,7 +34,8 @@ def test_XfmAvg_inputs(): genfile=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py index 1e7702b92e..4d3cdadb24 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -12,7 +12,8 @@ def test_XfmConcat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_files=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_XfmConcat_inputs(): name_template='%s_xfmconcat.xfm', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py index 2ee570e7fe..10f5e9ec6a 100644 --- a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -12,7 +12,8 @@ def test_XfmInvert_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_file=dict(argstr='%s', @@ -23,7 +24,8 @@ def test_XfmInvert_inputs(): genfile=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-verbose', ), diff --git a/nipype/interfaces/mipav/developer.py b/nipype/interfaces/mipav/developer.py index 
ac42f7c5a9..141a7de1cf 100644 --- a/nipype/interfaces/mipav/developer.py +++ b/nipype/interfaces/mipav/developer.py @@ -722,10 +722,10 @@ class JistIntensityMp2rageMaskingInputSpec(CommandLineInputSpec): inSkip = traits.Enum("true", "false", desc="Skip zero values", argstr="--inSkip %s") inMasking = traits.Enum("binary", "proba", desc="Whether to use a binary threshold or a weighted average based on the probability.", argstr="--inMasking %s") xPrefExt = traits.Enum("nrrd", desc="Output File Type", argstr="--xPrefExt %s") - outSignal = traits.Either(traits.Bool, File(), hash_files=False, desc="Signal Proba Image", argstr="--outSignal %s") - outSignal2 = traits.Either(traits.Bool, File(), hash_files=False, desc="Signal Mask Image", argstr="--outSignal2 %s") - outMasked = traits.Either(traits.Bool, File(), hash_files=False, desc="Masked T1 Map Image", argstr="--outMasked %s") - outMasked2 = traits.Either(traits.Bool, File(), hash_files=False, desc="Masked Iso Image", argstr="--outMasked2 %s") + outSignal = traits.Either(traits.Bool, File(), hash_files=False, desc="Signal Proba Image", argstr="--outSignal_Proba %s") + outSignal2 = traits.Either(traits.Bool, File(), hash_files=False, desc="Signal Mask Image", argstr="--outSignal_Mask %s") + outMasked = traits.Either(traits.Bool, File(), hash_files=False, desc="Masked T1 Map Image", argstr="--outMasked_T1_Map %s") + outMasked2 = traits.Either(traits.Bool, File(), hash_files=False, desc="Masked Iso Image", argstr="--outMasked_T1weighted %s") null = traits.Str(desc="Execution Time", argstr="--null %s") xDefaultMem = traits.Int(desc="Set default maximum heap size", argstr="-xDefaultMem %d") xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True) diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py index 7aa5289887..64349fa299 100644 --- 
a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py @@ -9,7 +9,8 @@ def test_JistBrainMgdmSegmentation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inAdjust=dict(argstr='--inAdjust %s', @@ -58,7 +59,8 @@ def test_JistBrainMgdmSegmentation_inputs(): outSegmented=dict(argstr='--outSegmented %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py index dae7e339d7..113bc27c4d 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -9,7 +9,8 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inDistance=dict(argstr='--inDistance %f', @@ -25,7 +26,8 @@ def test_JistBrainMp2rageDuraEstimation_inputs(): outDura=dict(argstr='--outDura %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py index 077ec1f574..624326b534 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -9,7 +9,8 @@ def 
test_JistBrainMp2rageSkullStripping_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inFilter=dict(argstr='--inFilter %s', @@ -36,7 +37,8 @@ def test_JistBrainMp2rageSkullStripping_inputs(): outMasked3=dict(argstr='--outMasked3 %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py index 10e55ce20e..8fe4e3df73 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -9,7 +9,8 @@ def test_JistBrainPartialVolumeFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inInput=dict(argstr='--inInput %s', @@ -23,7 +24,8 @@ def test_JistBrainPartialVolumeFilter_inputs(): outPartial=dict(argstr='--outPartial %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py index 1fef3cc678..238e2fd02e 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -9,7 +9,8 @@ def test_JistCortexSurfaceMeshInflation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
inLevelset=dict(argstr='--inLevelset %s', @@ -34,7 +35,8 @@ def test_JistCortexSurfaceMeshInflation_inputs(): outOriginal=dict(argstr='--outOriginal %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py index 95700af1be..769202018d 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -9,7 +9,8 @@ def test_JistIntensityMp2rageMasking_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inBackground=dict(argstr='--inBackground %s', @@ -26,19 +27,20 @@ def test_JistIntensityMp2rageMasking_inputs(): ), null=dict(argstr='--null %s', ), - outMasked=dict(argstr='--outMasked %s', + outMasked=dict(argstr='--outMasked_T1_Map %s', hash_files=False, ), - outMasked2=dict(argstr='--outMasked2 %s', + outMasked2=dict(argstr='--outMasked_T1weighted %s', hash_files=False, ), - outSignal=dict(argstr='--outSignal %s', + outSignal=dict(argstr='--outSignal_Proba %s', hash_files=False, ), - outSignal2=dict(argstr='--outSignal2 %s', + outSignal2=dict(argstr='--outSignal_Mask %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py index 54c3909e85..0485ed2ad2 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -9,7 
+9,8 @@ def test_JistLaminarProfileCalculator_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inIntensity=dict(argstr='--inIntensity %s', @@ -23,7 +24,8 @@ def test_JistLaminarProfileCalculator_inputs(): outResult=dict(argstr='--outResult %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py index 34b8b80569..21f94d42e9 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -9,7 +9,8 @@ def test_JistLaminarProfileGeometry_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inProfile=dict(argstr='--inProfile %s', @@ -27,7 +28,8 @@ def test_JistLaminarProfileGeometry_inputs(): outResult=dict(argstr='--outResult %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py index cc2b743f3e..6d9ad3493a 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -9,7 +9,8 @@ def test_JistLaminarProfileSampling_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
inCortex=dict(argstr='--inCortex %s', @@ -26,7 +27,8 @@ def test_JistLaminarProfileSampling_inputs(): outProfilemapped=dict(argstr='--outProfilemapped %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py index e51df02dc1..012fa2872b 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -9,7 +9,8 @@ def test_JistLaminarROIAveraging_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inIntensity=dict(argstr='--inIntensity %s', @@ -25,7 +26,8 @@ def test_JistLaminarROIAveraging_inputs(): outROI3=dict(argstr='--outROI3 %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py index 562e5846d0..e80496ec47 100644 --- a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -9,7 +9,8 @@ def test_JistLaminarVolumetricLayering_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inInner=dict(argstr='--inInner %s', @@ -45,7 +46,8 @@ def test_JistLaminarVolumetricLayering_inputs(): outLayer=dict(argstr='--outLayer %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', 
+ nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py index 8254b959fd..c273cbf378 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -9,7 +9,8 @@ def test_MedicAlgorithmImageCalculator_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inOperation=dict(argstr='--inOperation %s', @@ -23,7 +24,8 @@ def test_MedicAlgorithmImageCalculator_inputs(): outResult=dict(argstr='--outResult %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py index 328072d54d..dd97b91a1f 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -9,7 +9,8 @@ def test_MedicAlgorithmLesionToads_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inAtlas=dict(argstr='--inAtlas %s', @@ -83,7 +84,8 @@ def test_MedicAlgorithmLesionToads_inputs(): outWM=dict(argstr='--outWM %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py index 
9422fda7ac..e089749b4c 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -9,7 +9,8 @@ def test_MedicAlgorithmMipavReorient_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inInterpolation=dict(argstr='--inInterpolation %s', @@ -36,7 +37,8 @@ def test_MedicAlgorithmMipavReorient_inputs(): outReoriented=dict(argstr='--outReoriented %s', sep=';', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py index cbe6f4e2d5..42a161c44e 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -9,7 +9,8 @@ def test_MedicAlgorithmN3_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inAutomatic=dict(argstr='--inAutomatic %s', @@ -38,7 +39,8 @@ def test_MedicAlgorithmN3_inputs(): outInhomogeneity2=dict(argstr='--outInhomogeneity2 %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py index c273c2f223..6d2c379fcf 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -9,7 +9,8 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inApply=dict(argstr='--inApply %s', @@ -109,7 +110,8 @@ def test_MedicAlgorithmSPECTRE2010_inputs(): outd0=dict(argstr='--outd0 %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py index 9b98541542..c895cd75d5 100644 --- a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -9,7 +9,8 @@ def test_MedicAlgorithmThresholdToBinaryMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inLabel=dict(argstr='--inLabel %s', @@ -26,7 +27,8 @@ def test_MedicAlgorithmThresholdToBinaryMask_inputs(): outBinary=dict(argstr='--outBinary %s', sep=';', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py index 19ea1c4c89..b7e3c098f7 100644 --- a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -9,7 +9,8 @@ def test_RandomVol_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inField=dict(argstr='--inField %s', @@ -35,7 +36,8 @@ def test_RandomVol_inputs(): outRand1=dict(argstr='--outRand1 %s', hash_files=False, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py index f2f3a70641..5196ddf5be 100644 --- a/nipype/interfaces/mne/base.py +++ b/nipype/interfaces/mne/base.py @@ -55,7 +55,7 @@ class WatershedBEM(FSCommand): >>> bem = WatershedBEM() >>> bem.inputs.subject_id = 'subj1' >>> bem.inputs.subjects_dir = '.' - >>> bem.cmdline # doctest: +ALLOW_UNICODE + >>> bem.cmdline 'mne_watershed_bem --overwrite --subject subj1 --volume T1' >>> bem.run() # doctest: +SKIP diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py index 8f5f876b73..4fb5dcee55 100644 --- a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -11,7 +11,8 @@ def test_WatershedBEM_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), overwrite=dict(argstr='--overwrite', @@ -23,7 +24,8 @@ def test_WatershedBEM_inputs(): subjects_dir=dict(mandatory=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), volume=dict(argstr='--volume %s', usedefault=True, diff --git a/nipype/interfaces/mrtrix/convert.py b/nipype/interfaces/mrtrix/convert.py index 00e87ec0dd..eb34de974e 100644 --- a/nipype/interfaces/mrtrix/convert.py +++ b/nipype/interfaces/mrtrix/convert.py @@ -68,7 +68,7 @@ def read_mrtrix_header(in_file): key = line.split(': ')[0] value = line.split(': ')[1] header[key] = value - iflogger.info('...adding "{v}" to header for key "{k}"'.format(v=value, k=key)) + iflogger.info('...adding "%s" to header for key "%s"', value, key) fileobj.close() header['count'] = int(header['count'].replace('\n', '')) header['offset'] = 
int(header['file'].replace('.', '')) @@ -118,8 +118,8 @@ def track_gen(track_points): raise HeaderError( 'Expecting %s points, found only %s' % ( stream_count, n_streams)) - iflogger.error('Expecting %s points, found only %s' % ( - stream_count, n_streams)) + iflogger.error('Expecting %s points, found only %s', + stream_count, n_streams) break pts = np.ndarray( shape=(n_pts, pt_cols), @@ -136,16 +136,15 @@ def track_gen(track_points): yield xyz n_streams += 1 if n_streams == stream_count: - iflogger.info('100% : {n} tracks read'.format(n=n_streams)) + iflogger.info('100%% : %i tracks read', n_streams) raise StopIteration try: if n_streams % int(stream_count / 100) == 0: percent = int(float(n_streams) / float(stream_count) * 100) - iflogger.info('{p}% : {n} tracks read'.format(p=percent, - n=n_streams)) + iflogger.info('%i%% : %i tracks read', percent, n_streams) except ZeroDivisionError: - iflogger.info('{} stream read out of {}'.format(n_streams, - stream_count)) + iflogger.info('%i stream read out of %i', n_streams, + stream_count) track_points, nonfinite_list = points_per_track(offset) fileobj.seek(offset) streamlines = track_gen(track_points) @@ -200,14 +199,16 @@ def _run_interface(self, runtime): trk_header['n_count'] = header['count'] if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file): - iflogger.info('Applying transformation from matrix file {m}'.format(m=self.inputs.matrix_file)) + iflogger.info('Applying transformation from matrix file %s', + self.inputs.matrix_file) xfm = np.genfromtxt(self.inputs.matrix_file) iflogger.info(xfm) registration_image_file = nb.load(self.inputs.registration_image_file) reg_affine = registration_image_file.affine r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file) r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file) - iflogger.info('Using affine from registration image file {r}'.format(r=self.inputs.registration_image_file)) + iflogger.info('Using affine 
from registration image file %s', + self.inputs.registration_image_file) iflogger.info(reg_affine) trk_header['vox_to_ras'] = reg_affine trk_header['dim'] = [r_dx, r_dy, r_dz] @@ -225,18 +226,19 @@ def _run_interface(self, runtime): final_streamlines = move_streamlines(transformed_streamlines, aff) trk_tracks = ((ii, None, None) for ii in final_streamlines) trk.write(out_filename, trk_tracks, trk_header) - iflogger.info('Saving transformed Trackvis file as {out}'.format(out=out_filename)) + iflogger.info('Saving transformed Trackvis file as %s', out_filename) iflogger.info('New TrackVis Header:') iflogger.info(trk_header) else: - iflogger.info('Applying transformation from scanner coordinates to {img}'.format(img=self.inputs.image_file)) + iflogger.info('Applying transformation from scanner coordinates to %s', + self.inputs.image_file) axcode = aff2axcodes(affine) trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2] trk_header['vox_to_ras'] = affine transformed_streamlines = transform_to_affine(streamlines, trk_header, affine) trk_tracks = ((ii, None, None) for ii in transformed_streamlines) trk.write(out_filename, trk_tracks, trk_header) - iflogger.info('Saving Trackvis file as {out}'.format(out=out_filename)) + iflogger.info('Saving Trackvis file as %s', out_filename) iflogger.info('TrackVis Header:') iflogger.info(trk_header) return runtime diff --git a/nipype/interfaces/mrtrix/preprocess.py b/nipype/interfaces/mrtrix/preprocess.py index 7ca6abd1fb..becee5088f 100644 --- a/nipype/interfaces/mrtrix/preprocess.py +++ b/nipype/interfaces/mrtrix/preprocess.py @@ -144,7 +144,7 @@ class DWI2Tensor(CommandLine): >>> dwi2tensor = mrt.DWI2Tensor() >>> dwi2tensor.inputs.in_file = 'dwi.mif' >>> dwi2tensor.inputs.encoding_file = 'encoding.txt' - >>> dwi2tensor.cmdline # doctest: +ALLOW_UNICODE + >>> dwi2tensor.cmdline 'dwi2tensor -grad encoding.txt dwi.mif dwi_tensor.mif' >>> dwi2tensor.run() # doctest: +SKIP """ diff --git 
a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py index 400f79676c..ef66cfc691 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -20,7 +20,8 @@ def test_ConstrainedSphericalDeconvolution_inputs(): filter_file=dict(argstr='-filter %s', position=-2, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -47,7 +48,8 @@ def test_ConstrainedSphericalDeconvolution_inputs(): mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold_value=dict(argstr='-threshold %s', ), diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py index 4593e247bb..54a028727e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -13,7 +13,8 @@ def test_DWI2SphericalHarmonicsImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_DWI2SphericalHarmonicsImage_inputs(): genfile=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWI2SphericalHarmonicsImage.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py index c7d5675bc1..051172702f 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -15,7 +15,8 @@ def test_DWI2Tensor_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_slice_by_volume=dict(argstr='-ignoreslices %s', @@ -39,7 +40,8 @@ def test_DWI2Tensor_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWI2Tensor.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py index 1a3dcc9edb..46f45243ee 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -29,7 +29,8 @@ def test_DiffusionTensorStreamlineTrack_inputs(): mandatory=True, position=-2, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -97,7 +98,8 @@ def test_DiffusionTensorStreamlineTrack_inputs(): ), stop=dict(argstr='-stop', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py index 4a88fd9cb3..dfe29bbb7c 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -13,7 +13,8 @@ def test_Directions2Amplitude_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -36,7 +37,8 @@ def 
test_Directions2Amplitude_inputs(): ), quiet_display=dict(argstr='-quiet', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Directions2Amplitude.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py index 7580cfd40c..874ad1ba0e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -15,7 +15,8 @@ def test_Erode_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_Erode_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Erode.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py index b0ee191fe1..5d81dbecd6 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -15,7 +15,8 @@ def test_EstimateResponseForSH_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_EstimateResponseForSH_inputs(): ), quiet=dict(argstr='-quiet', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = EstimateResponseForSH.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py index 7b9dd09517..5c53341e8f 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -21,7 +21,8 @@ def test_FilterTracks_inputs(): units='mm', xor=['exclude_file', 'exclude_spec'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -53,7 +54,8 @@ def test_FilterTracks_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FilterTracks.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py index 5f14e69f35..f0571ad997 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -17,7 +17,8 @@ def test_FindShPeaks_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -42,7 +43,8 @@ def test_FindShPeaks_inputs(): ), quiet_display=dict(argstr='-quiet', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FindShPeaks.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py index ab805c35cb..8e4167167f 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -13,7 +13,8 @@ def test_GenerateDirections_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), niter=dict(argstr='-niter %s', @@ -32,7 +33,8 @@ def test_GenerateDirections_inputs(): ), quiet_display=dict(argstr='-quiet', ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GenerateDirections.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py index 2aa1a3cffa..bda204c9ad 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -17,7 +17,8 @@ def test_GenerateWhiteMatterMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -30,7 +31,8 @@ def test_GenerateWhiteMatterMask_inputs(): genfile=True, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GenerateWhiteMatterMask.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py index 7f970f0dc4..ed9071256e 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -19,7 +19,8 @@ def test_MRConvert_inputs(): position=2, sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -50,7 +51,8 @@ def test_MRConvert_inputs(): position=3, units='mm', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_dims=dict(argstr='-vox %s', position=3, diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py index 9074271d16..efb8b92249 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -12,7 +12,8 @@ def test_MRMultiply_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_MRMultiply_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRMultiply.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py index 28985f43b2..a685293371 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -15,7 +15,8 @@ def test_MRTransform_inputs(): flip_x=dict(argstr='-flipx', position=1, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -41,7 +42,8 @@ def test_MRTransform_inputs(): template_image=dict(argstr='-template %s', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation_file=dict(argstr='-transform %s', position=1, diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py index 09ffc2a900..4fe4d2952b 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -9,14 +9,16 @@ def test_MRTrixInfo_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRTrixInfo.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py index 
a6fe757114..19a0e6f710 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -12,7 +12,8 @@ def test_MRTrixViewer_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(argstr='%s', @@ -22,7 +23,8 @@ def test_MRTrixViewer_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRTrixViewer.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py index 796c607791..0b13574b78 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -12,7 +12,8 @@ def test_MedianFilter3D_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_MedianFilter3D_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MedianFilter3D.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py index 64605ad510..f3fcda4884 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -25,7 +25,8 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): units='mm', xor=['exclude_file', 'exclude_spec'], ), - 
ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -95,7 +96,8 @@ def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): ), stop=dict(argstr='-stop', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py index fd23f4479d..a8ef768850 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -25,7 +25,8 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): units='mm', xor=['exclude_file', 'exclude_spec'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -93,7 +94,8 @@ def test_SphericallyDeconvolutedStreamlineTrack_inputs(): ), stop=dict(argstr='-stop', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py index 3e466057b0..5e028c40ac 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -25,7 +25,8 @@ def test_StreamlineTrack_inputs(): units='mm', xor=['exclude_file', 'exclude_spec'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -93,7 +94,8 @@ def test_StreamlineTrack_inputs(): ), stop=dict(argstr='-stop', ), - terminal_output=dict(nohash=True, 
+ terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py index 8ffdad429f..e4f32e21d2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -12,7 +12,8 @@ def test_Tensor2ApparentDiffusion_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_Tensor2ApparentDiffusion_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Tensor2ApparentDiffusion.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py index e234065864..07535aa125 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -12,7 +12,8 @@ def test_Tensor2FractionalAnisotropy_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_Tensor2FractionalAnisotropy_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Tensor2FractionalAnisotropy.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py index 08f0837540..724c9cb534 100644 --- 
a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -12,7 +12,8 @@ def test_Tensor2Vector_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_Tensor2Vector_inputs(): quiet=dict(argstr='-quiet', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Tensor2Vector.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py index 4ff6fa9759..124e87a0b2 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -14,7 +14,8 @@ def test_Threshold_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_Threshold_inputs(): replace_zeros_with_NaN=dict(argstr='-nan', position=1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Threshold.input_spec() diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py index 079273f9e2..f1a2a08355 100644 --- a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -15,7 +15,8 @@ def test_Tracks2Prob_inputs(): fraction=dict(argstr='-fraction', position=3, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_Tracks2Prob_inputs(): template_file=dict(argstr='-template %s', position=1, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel_dims=dict(argstr='-vox %s', position=2, diff --git a/nipype/interfaces/mrtrix/tracking.py b/nipype/interfaces/mrtrix/tracking.py index 5fa39d38d3..c013d7b04a 100644 --- a/nipype/interfaces/mrtrix/tracking.py +++ b/nipype/interfaces/mrtrix/tracking.py @@ -15,8 +15,10 @@ import os.path as op from ...utils.filemanip import split_filename -from ..base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File -from ..traits_extension import isdefined +from ..base import ( + CommandLineInputSpec, CommandLine, traits, TraitedSpec, File, + isdefined +) class FilterTracksInputSpec(CommandLineInputSpec): @@ -210,7 +212,7 @@ class StreamlineTrack(CommandLine): >>> strack.inputs.in_file = 'data.Bfloat' >>> strack.inputs.seed_file = 'seed_mask.nii' >>> strack.inputs.mask_file = 'mask.nii' - >>> strack.cmdline # doctest: +ALLOW_UNICODE + >>> strack.cmdline 'streamtrack -mask mask.nii -seed seed_mask.nii SD_PROB data.Bfloat data_tracked.tck' >>> strack.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py index 3ff5c8e2e7..53bc8f5f53 100644 --- a/nipype/interfaces/mrtrix3/__init__.py +++ b/nipype/interfaces/mrtrix3/__init__.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- from .utils import (Mesh2PVE, Generate5tt, BrainMask, TensorMetrics, - ComputeTDI, TCK2VTK) + ComputeTDI, TCK2VTK, MRMath, MRConvert, DWIExtract) from .preprocess import ResponseSD, ACTPrepareFSL, ReplaceFSwithFIRST from .tracking import Tractography from .reconst import FitTensor, EstimateFOD diff --git a/nipype/interfaces/mrtrix3/base.py b/nipype/interfaces/mrtrix3/base.py index ab982b816a..0d91c3d56d 100644 --- a/nipype/interfaces/mrtrix3/base.py +++ b/nipype/interfaces/mrtrix3/base.py @@ -14,9 +14,8 @@ from __future__ import print_function, division, unicode_literals, absolute_import from ... 
import logging -from ..traits_extension import isdefined -from ..base import (CommandLineInputSpec, CommandLine, traits, File) -logger = logging.getLogger('interface') +from ..base import (CommandLineInputSpec, CommandLine, traits, File, isdefined) +iflogger = logging.getLogger('interface') class MRTrix3BaseInputSpec(CommandLineInputSpec): @@ -52,7 +51,7 @@ def _format_arg(self, name, trait_spec, value): from multiprocessing import cpu_count value = cpu_count() except: - logger.warn('Number of threads could not be computed') + iflogger.warn('Number of threads could not be computed') pass return trait_spec.argstr % value diff --git a/nipype/interfaces/mrtrix3/connectivity.py b/nipype/interfaces/mrtrix3/connectivity.py index a2e7db355d..3b79b16442 100644 --- a/nipype/interfaces/mrtrix3/connectivity.py +++ b/nipype/interfaces/mrtrix3/connectivity.py @@ -16,8 +16,7 @@ import os import os.path as op -from ..traits_extension import isdefined -from ..base import (CommandLineInputSpec, traits, TraitedSpec, File) +from ..base import (CommandLineInputSpec, traits, TraitedSpec, File, isdefined) from .base import MRTrix3Base @@ -96,7 +95,7 @@ class BuildConnectome(MRTrix3Base): >>> mat = mrt.BuildConnectome() >>> mat.inputs.in_file = 'tracks.tck' >>> mat.inputs.in_parc = 'aparc+aseg.nii' - >>> mat.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> mat.cmdline # doctest: +ELLIPSIS 'tck2connectome tracks.tck aparc+aseg.nii connectome.csv' >>> mat.run() # doctest: +SKIP """ @@ -155,7 +154,7 @@ class LabelConfig(MRTrix3Base): >>> labels = mrt.LabelConfig() >>> labels.inputs.in_file = 'aparc+aseg.nii' >>> labels.inputs.in_config = 'mrtrix3_labelconfig.txt' - >>> labels.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> labels.cmdline # doctest: +ELLIPSIS 'labelconfig aparc+aseg.nii mrtrix3_labelconfig.txt parcellation.mif' >>> labels.run() # doctest: +SKIP """ diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py index 91ec44d1f0..0eedc3f449 
100644 --- a/nipype/interfaces/mrtrix3/preprocess.py +++ b/nipype/interfaces/mrtrix3/preprocess.py @@ -15,78 +15,37 @@ import os.path as op -from ..traits_extension import isdefined from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File) + File, isdefined, Undefined) from .base import MRTrix3BaseInputSpec, MRTrix3Base class ResponseSDInputSpec(MRTrix3BaseInputSpec): - in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, - desc='input diffusion weighted images') - - out_file = File( - 'response.txt', argstr='%s', mandatory=True, position=-1, - usedefault=True, desc='output file containing SH coefficients') - - # DW Shell selection options - shell = traits.List(traits.Float, sep=',', argstr='-shell %s', - desc='specify one or more dw gradient shells') + algorithm = traits.Enum('msmt_5tt','dhollander','tournier','tax', argstr='%s', position=-6, + mandatory=True, desc='response estimation algorithm (multi-tissue)') + in_file = File(exists=True, argstr='%s', position=-5, + mandatory=True, desc='input DWI image') + mtt_file = File(argstr='%s', position=-4, desc='input 5tt image') + wm_file = File('wm.txt', argstr='%s', position=-3, usedefault=True, + desc='output WM response text file') + gm_file = File(argstr='%s', position=-2, desc='output GM response text file') + csf_file = File(argstr='%s', position=-1, desc='output CSF response text file') in_mask = File(exists=True, argstr='-mask %s', - desc='provide initial mask image') + desc='provide initial mask image') max_sh = traits.Int(8, argstr='-lmax %d', - desc='maximum harmonic degree of response function') - out_sf = File('sf_mask.nii.gz', argstr='-sf %s', - desc='write a mask containing single-fibre voxels') - test_all = traits.Bool(False, argstr='-test_all', - desc='re-test all voxels at every iteration') - - # Optimization - iterations = traits.Int(0, argstr='-max_iters %d', - desc='maximum number of iterations per pass') - max_change = traits.Float( - 
argstr='-max_change %f', - desc=('maximum percentile change in any response function coefficient;' - ' if no individual coefficient changes by more than this ' - 'fraction, the algorithm is terminated.')) - - # Thresholds - vol_ratio = traits.Float( - .15, argstr='-volume_ratio %f', - desc=('maximal volume ratio between the sum of all other positive' - ' lobes in the voxel and the largest FOD lobe')) - disp_mult = traits.Float( - 1., argstr='-dispersion_multiplier %f', - desc=('dispersion of FOD lobe must not exceed some threshold as ' - 'determined by this multiplier and the FOD dispersion in other ' - 'single-fibre voxels. The threshold is: (mean + (multiplier * ' - '(mean - min))); default = 1.0. Criterion is only applied in ' - 'second pass of RF estimation.')) - int_mult = traits.Float( - 2., argstr='-integral_multiplier %f', - desc=('integral of FOD lobe must not be outside some range as ' - 'determined by this multiplier and FOD lobe integral in other' - ' single-fibre voxels. The range is: (mean +- (multiplier * ' - 'stdev)); default = 2.0. Criterion is only applied in second ' - 'pass of RF estimation.')) + desc='maximum harmonic degree of response function') class ResponseSDOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') - out_sf = File(desc=('mask containing single-fibre voxels')) + wm_file = File(argstr='%s', desc='output WM response text file') + gm_file = File(argstr='%s', desc='output GM response text file') + csf_file = File(argstr='%s', desc='output CSF response text file') class ResponseSD(MRTrix3Base): """ - Generate an appropriate response function from the image data for - spherical deconvolution. - - .. [1] Tax, C. M.; Jeurissen, B.; Vos, S. B.; Viergever, M. A. and - Leemans, A., Recursive calibration of the fiber response function - for spherical deconvolution of diffusion MRI data. 
NeuroImage, - 2014, 86, 67-80 - + Estimate response function(s) for spherical deconvolution using the specified algorithm. Example ------- @@ -94,10 +53,10 @@ class ResponseSD(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> resp = mrt.ResponseSD() >>> resp.inputs.in_file = 'dwi.mif' - >>> resp.inputs.in_mask = 'mask.nii.gz' + >>> resp.inputs.algorithm = 'tournier' >>> resp.inputs.grad_fsl = ('bvecs', 'bvals') - >>> resp.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE - 'dwi2response -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif response.txt' + >>> resp.cmdline # doctest: +ELLIPSIS + 'dwi2response -fslgrad bvecs bvals tournier dwi.mif wm.txt' >>> resp.run() # doctest: +SKIP """ @@ -107,10 +66,11 @@ class ResponseSD(MRTrix3Base): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) - - if isdefined(self.inputs.out_sf): - outputs['out_sf'] = op.abspath(self.inputs.out_sf) + outputs['wm_file'] = op.abspath(self.inputs.wm_file) + if self.inputs.gm_file != Undefined: + outputs['gm_file'] = op.abspath(self.inputs.gm_file) + if self.inputs.csf_file != Undefined: + outputs['csf_file'] = op.abspath(self.inputs.csf_file) return outputs @@ -139,7 +99,7 @@ class ACTPrepareFSL(CommandLine): >>> import nipype.interfaces.mrtrix3 as mrt >>> prep = mrt.ACTPrepareFSL() >>> prep.inputs.in_file = 'T1.nii.gz' - >>> prep.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> prep.cmdline # doctest: +ELLIPSIS 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif' >>> prep.run() # doctest: +SKIP """ @@ -185,7 +145,7 @@ class ReplaceFSwithFIRST(CommandLine): >>> prep.inputs.in_file = 'aparc+aseg.nii' >>> prep.inputs.in_t1w = 'T1.nii.gz' >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt' - >>> prep.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> prep.cmdline # doctest: +ELLIPSIS 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \ mrtrix3_labelconfig.txt aparc+first.mif' >>> prep.run() # doctest: +SKIP diff --git 
a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py index b1f71dd572..f7ea4f01e4 100644 --- a/nipype/interfaces/mrtrix3/reconst.py +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -15,7 +15,7 @@ import os.path as op -from ..base import traits, TraitedSpec, File +from ..base import traits, TraitedSpec, File, Undefined from .base import MRTrix3BaseInputSpec, MRTrix3Base @@ -58,7 +58,7 @@ class FitTensor(MRTrix3Base): >>> tsr.inputs.in_file = 'dwi.mif' >>> tsr.inputs.in_mask = 'mask.nii.gz' >>> tsr.inputs.grad_fsl = ('bvecs', 'bvals') - >>> tsr.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> tsr.cmdline # doctest: +ELLIPSIS 'dwi2tensor -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif dti.mif' >>> tsr.run() # doctest: +SKIP """ @@ -74,108 +74,55 @@ def _list_outputs(self): class EstimateFODInputSpec(MRTrix3BaseInputSpec): - in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, - desc='input diffusion weighted images') - response = File( - exists=True, argstr='%s', mandatory=True, position=-2, - desc=('a text file containing the diffusion-weighted signal response ' - 'function coefficients for a single fibre population')) - out_file = File( - 'fods.mif', argstr='%s', mandatory=True, position=-1, - usedefault=True, desc=('the output spherical harmonics coefficients' - ' image')) + algorithm = traits.Enum('csd','msmt_csd', argstr='%s', position=-8, + mandatory=True, desc='FOD algorithm') + in_file = File(exists=True, argstr='%s', position=-7, + mandatory=True, desc='input DWI image') + wm_txt = File(argstr='%s', position=-6, + mandatory=True, desc='WM response text file') + wm_odf = File('wm.mif', argstr='%s', position=-5, usedefault=True, + mandatory=True, desc='output WM ODF') + gm_txt = File(argstr='%s', position=-4, desc='GM response text file') + gm_odf = File('gm.mif', argstr='%s', position=-3, desc='output GM ODF') + csf_txt = File(argstr='%s', position=-2, desc='CSF response text file') + csf_odf = File('csf.mif', 
argstr='%s', position=-1, desc='output CSF ODF') + mask_file = File(exists=True, argstr='-mask %s', desc='mask image') # DW Shell selection options shell = traits.List(traits.Float, sep=',', argstr='-shell %s', desc='specify one or more dw gradient shells') - - # Spherical deconvolution options max_sh = traits.Int(8, argstr='-lmax %d', desc='maximum harmonic degree of response function') - in_mask = File(exists=True, argstr='-mask %s', - desc='provide initial mask image') in_dirs = File( exists=True, argstr='-directions %s', desc=('specify the directions over which to apply the non-negativity ' 'constraint (by default, the built-in 300 direction set is ' 'used). These should be supplied as a text file containing the ' '[ az el ] pairs for the directions.')) - sh_filter = File( - exists=True, argstr='-filter %s', - desc=('the linear frequency filtering parameters used for the initial ' - 'linear spherical deconvolution step (default = [ 1 1 1 0 0 ]). ' - 'These should be supplied as a text file containing the ' - 'filtering coefficients for each even harmonic order.')) - - neg_lambda = traits.Float( - 1.0, argstr='-neg_lambda %f', - desc=('the regularisation parameter lambda that controls the strength' - ' of the non-negativity constraint')) - thres = traits.Float( - 0.0, argstr='-threshold %f', - desc=('the threshold below which the amplitude of the FOD is assumed ' - 'to be zero, expressed as an absolute amplitude')) - - n_iter = traits.Int( - 50, argstr='-niter %d', desc=('the maximum number of iterations ' - 'to perform for each voxel')) class EstimateFODOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='the output response file') + wm_odf = File(argstr='%s', desc='output WM ODF') + gm_odf = File(argstr='%s', desc='output GM ODF') + csf_odf = File(argstr='%s', desc='output CSF ODF') class EstimateFOD(MRTrix3Base): """ - Convert diffusion-weighted images to tensor images - - Note that this program makes use of implied symmetries in the diffusion - 
profile. First, the fact the signal attenuation profile is real implies - that it has conjugate symmetry, i.e. Y(l,-m) = Y(l,m)* (where * denotes - the complex conjugate). Second, the diffusion profile should be - antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l - components should be zero. Therefore, this program only computes the even - elements. - - Note that the spherical harmonics equations used here differ slightly from - those conventionally used, in that the (-1)^m factor has been omitted. - This should be taken into account in all subsequent calculations. - The spherical harmonic coefficients are stored as follows. First, since - the signal attenuation profile is real, it has conjugate symmetry, i.e. - Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the - diffusion profile should be antipodally symmetric (i.e. S(x) = S(-x)), - implying that all odd l components should be zero. Therefore, only the - even elements are computed. - - Note that the spherical harmonics equations used here differ slightly from - those conventionally used, in that the (-1)^m factor has been omitted. - This should be taken into account in all subsequent calculations. - Each volume in the output image corresponds to a different spherical - harmonic component. Each volume will correspond to the following: - - volume 0: l = 0, m = 0 - volume 1: l = 2, m = -2 (imaginary part of m=2 SH) - volume 2: l = 2, m = -1 (imaginary part of m=1 SH) - volume 3: l = 2, m = 0 - volume 4: l = 2, m = 1 (real part of m=1 SH) - volume 5: l = 2, m = 2 (real part of m=2 SH) - etc... 
- - + Estimate fibre orientation distributions from diffusion data using spherical deconvolution Example ------- >>> import nipype.interfaces.mrtrix3 as mrt >>> fod = mrt.EstimateFOD() + >>> fod.inputs.algorithm = 'csd' >>> fod.inputs.in_file = 'dwi.mif' - >>> fod.inputs.response = 'response.txt' - >>> fod.inputs.in_mask = 'mask.nii.gz' + >>> fod.inputs.wm_txt = 'wm.txt' >>> fod.inputs.grad_fsl = ('bvecs', 'bvals') - >>> fod.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE - 'dwi2fod -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif response.txt\ - fods.mif' + >>> fod.cmdline # doctest: +ELLIPSIS + 'dwi2fod -fslgrad bvecs bvals csd dwi.mif wm.txt wm.mif' >>> fod.run() # doctest: +SKIP """ @@ -185,5 +132,12 @@ class EstimateFOD(MRTrix3Base): def _list_outputs(self): outputs = self.output_spec().get() - outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs['wm_odf'] = op.abspath(self.inputs.wm_odf) + if self.inputs.gm_odf != Undefined: + outputs['gm_odf'] = op.abspath(self.inputs.gm_odf) + if self.inputs.csf_odf != Undefined: + outputs['csf_odf'] = op.abspath(self.inputs.csf_odf) return outputs + + + diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py index 91af4ef87e..9918c9ae32 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -9,7 +9,8 @@ def test_ACTPrepareFSL_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -21,7 +22,8 @@ def test_ACTPrepareFSL_inputs(): position=-1, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ACTPrepareFSL.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py 
index 33de89ccbb..25b9716f80 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -15,7 +15,8 @@ def test_BrainMask_inputs(): ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(), @@ -33,7 +34,8 @@ def test_BrainMask_inputs(): position=-1, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BrainMask.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py index 9e44d4134a..fdac1f65fb 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -9,7 +9,8 @@ def test_BuildConnectome_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -41,7 +42,8 @@ def test_BuildConnectome_inputs(): ), search_reverse=dict(argstr='-assignment_reverse_search %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vox_lookup=dict(argstr='-assignment_voxel_lookup', ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py index 18c6868538..0183045c56 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -19,7 +19,8 @@ def test_ComputeTDI_inputs(): ), fwhm_tck=dict(argstr='-fwhm_tck %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -49,7 +50,8 @@ def test_ComputeTDI_inputs(): ), 
tck_weights=dict(argstr='-tck_weights_in %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upsample=dict(argstr='-upsample %d', ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py new file mode 100644 index 0000000000..805b5b86b0 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import DWIExtract + + +def test_DWIExtract_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + bval_scale=dict(argstr='-bvalue_scaling %s', + ), + bzero=dict(argstr='-bzero', + ), + environ=dict(nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', + ), + grad_fsl=dict(argstr='-fslgrad %s %s', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', + ), + in_file=dict(argstr='%s', + mandatory=True, + position=-2, + ), + nobzero=dict(argstr='-nobzero', + ), + nthreads=dict(argstr='-nthreads %d', + nohash=True, + ), + out_file=dict(argstr='%s', + mandatory=True, + position=-1, + ), + shell=dict(argstr='-shell %s', + sep=',', + ), + singleshell=dict(argstr='-singleshell', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = DWIExtract.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_DWIExtract_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = DWIExtract.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py 
b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py index daaddaceca..a62c21d989 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -4,18 +4,35 @@ def test_EstimateFOD_inputs(): - input_map = dict(args=dict(argstr='%s', + input_map = dict(algorithm=dict(argstr='%s', + mandatory=True, + position=-8, + ), + args=dict(argstr='%s', ), bval_scale=dict(argstr='-bvalue_scaling %s', ), + csf_odf=dict(argstr='%s', + position=-1, + ), + csf_txt=dict(argstr='%s', + position=-2, + ), environ=dict(nohash=True, usedefault=True, ), + gm_odf=dict(argstr='%s', + position=-3, + ), + gm_txt=dict(argstr='%s', + position=-4, + ), grad_file=dict(argstr='-grad %s', ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(), @@ -25,36 +42,29 @@ def test_EstimateFOD_inputs(): ), in_file=dict(argstr='%s', mandatory=True, - position=-3, + position=-7, ), - in_mask=dict(argstr='-mask %s', + mask_file=dict(argstr='-mask %s', ), max_sh=dict(argstr='-lmax %d', ), - n_iter=dict(argstr='-niter %d', + nthreads=dict(argstr='-nthreads %d', + nohash=True, ), - neg_lambda=dict(argstr='-neg_lambda %f', + shell=dict(argstr='-shell %s', + sep=',', ), - nthreads=dict(argstr='-nthreads %d', + terminal_output=dict(deprecated='1.0.0', nohash=True, ), - out_file=dict(argstr='%s', + wm_odf=dict(argstr='%s', mandatory=True, - position=-1, + position=-5, usedefault=True, ), - response=dict(argstr='%s', + wm_txt=dict(argstr='%s', mandatory=True, - position=-2, - ), - sh_filter=dict(argstr='-filter %s', - ), - shell=dict(argstr='-shell %s', - sep=',', - ), - terminal_output=dict(nohash=True, - ), - thres=dict(argstr='-threshold %f', + position=-6, ), ) inputs = EstimateFOD.input_spec() @@ -65,7 +75,12 @@ def test_EstimateFOD_inputs(): def test_EstimateFOD_outputs(): - output_map = dict(out_file=dict(), + 
output_map = dict(csf_odf=dict(argstr='%s', + ), + gm_odf=dict(argstr='%s', + ), + wm_odf=dict(argstr='%s', + ), ) outputs = EstimateFOD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py index fa7126432b..f61669d5c9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -15,7 +15,8 @@ def test_FitTensor_inputs(): ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(), @@ -39,7 +40,8 @@ def test_FitTensor_inputs(): ), reg_term=dict(argstr='-regularisation %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FitTensor.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py index 6ad81cc00c..cc98ff316e 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -4,27 +4,41 @@ def test_Generate5tt_inputs(): - input_map = dict(args=dict(argstr='%s', + input_map = dict(algorithm=dict(argstr='%s', + mandatory=True, + position=-3, + ), + args=dict(argstr='%s', + ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + grad_file=dict(argstr='-grad %s', + ), + grad_fsl=dict(argstr='-fslgrad %s %s', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - in_fast=dict(argstr='%s', - mandatory=True, - position=-3, + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), - in_first=dict(argstr='%s', + in_file=dict(argstr='%s', + mandatory=True, position=-2, ), + nthreads=dict(argstr='-nthreads %d', + nohash=True, + ), out_file=dict(argstr='%s', mandatory=True, 
position=-1, - usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Generate5tt.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py index 564a986116..4594894ef1 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -9,7 +9,8 @@ def test_LabelConfig_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_config=dict(argstr='%s', @@ -37,7 +38,8 @@ def test_LabelConfig_inputs(): ), spine=dict(argstr='-spine %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = LabelConfig.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py new file mode 100644 index 0000000000..c1778a9ef6 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRConvert + + +def test_MRConvert_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + axes=dict(argstr='-axes %s', + sep=',', + ), + bval_scale=dict(argstr='-bvalue_scaling %s', + ), + coord=dict(argstr='-coord %s', + sep=' ', + ), + environ=dict(nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', + ), + grad_fsl=dict(argstr='-fslgrad %s %s', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', + ), + in_file=dict(argstr='%s', + mandatory=True, + position=-2, + ), + nthreads=dict(argstr='-nthreads %d', + nohash=True, + ), + out_file=dict(argstr='%s', + mandatory=True, + 
position=-1, + usedefault=True, + ), + scaling=dict(argstr='-scaling %s', + sep=',', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + vox=dict(argstr='-vox %s', + sep=',', + ), + ) + inputs = MRConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_MRConvert_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = MRConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py new file mode 100644 index 0000000000..6b13903f0f --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRMath + + +def test_MRMath_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + axis=dict(argstr='-axis %d', + ), + bval_scale=dict(argstr='-bvalue_scaling %s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', + ), + grad_fsl=dict(argstr='-fslgrad %s %s', + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', + ), + in_file=dict(argstr='%s', + mandatory=True, + position=-3, + ), + nthreads=dict(argstr='-nthreads %d', + nohash=True, + ), + operation=dict(argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict(argstr='%s', + mandatory=True, + position=-1, + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = MRMath.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == 
value + + +def test_MRMath_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = MRMath.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py index 63de8538d0..1d306b6a86 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -9,10 +9,12 @@ def test_MRTrix3Base_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MRTrix3Base.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py index 1e3c6983ed..30dac94dda 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -9,7 +9,8 @@ def test_Mesh2PVE_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_Mesh2PVE_inputs(): mandatory=True, position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Mesh2PVE.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py index ddefa4361e..b881dc7d1b 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -9,7 +9,8 @@ def test_ReplaceFSwithFIRST_inputs(): 
environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_config=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_ReplaceFSwithFIRST_inputs(): position=-1, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ReplaceFSwithFIRST.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py index 216e905c11..ff93d1a8a9 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -4,20 +4,29 @@ def test_ResponseSD_inputs(): - input_map = dict(args=dict(argstr='%s', + input_map = dict(algorithm=dict(argstr='%s', + mandatory=True, + position=-6, + ), + args=dict(argstr='%s', ), bval_scale=dict(argstr='-bvalue_scaling %s', ), - disp_mult=dict(argstr='-dispersion_multiplier %f', + csf_file=dict(argstr='%s', + position=-1, ), environ=dict(nohash=True, usedefault=True, ), + gm_file=dict(argstr='%s', + position=-2, + ), grad_file=dict(argstr='-grad %s', ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(), @@ -25,36 +34,24 @@ def test_ResponseSD_inputs(): ), in_file=dict(argstr='%s', mandatory=True, - position=-2, + position=-5, ), in_mask=dict(argstr='-mask %s', ), - int_mult=dict(argstr='-integral_multiplier %f', - ), - iterations=dict(argstr='-max_iters %d', - ), - max_change=dict(argstr='-max_change %f', - ), max_sh=dict(argstr='-lmax %d', ), + mtt_file=dict(argstr='%s', + position=-4, + ), nthreads=dict(argstr='-nthreads %d', nohash=True, ), - out_file=dict(argstr='%s', - mandatory=True, - position=-1, - usedefault=True, - ), - out_sf=dict(argstr='-sf %s', - ), - shell=dict(argstr='-shell %s', - sep=',', - ), - 
terminal_output=dict(nohash=True, - ), - test_all=dict(argstr='-test_all', + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), - vol_ratio=dict(argstr='-volume_ratio %f', + wm_file=dict(argstr='%s', + position=-3, + usedefault=True, ), ) inputs = ResponseSD.input_spec() @@ -65,8 +62,12 @@ def test_ResponseSD_inputs(): def test_ResponseSD_outputs(): - output_map = dict(out_file=dict(), - out_sf=dict(), + output_map = dict(csf_file=dict(argstr='%s', + ), + gm_file=dict(argstr='%s', + ), + wm_file=dict(argstr='%s', + ), ) outputs = ResponseSD.output_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py index 558a90df40..d5f88bc470 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -9,7 +9,8 @@ def test_TCK2VTK_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_TCK2VTK_inputs(): ), reference=dict(argstr='-image %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), voxel=dict(argstr='-image %s', ), diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py index 2719e25ea6..d51f5fe53e 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -12,7 +12,8 @@ def test_TensorMetrics_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_TensorMetrics_inputs(): ), out_fa=dict(argstr='-fa %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + 
nohash=True, ), ) inputs = TensorMetrics.input_spec() diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py index dcbc5a0489..6c715202b5 100644 --- a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -32,7 +32,8 @@ def test_Tractography_inputs(): ), grad_fsl=dict(argstr='-fslgrad %s %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_bval=dict(), @@ -101,7 +102,8 @@ def test_Tractography_inputs(): ), stop=dict(argstr='-stop', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py index 82c7294cfc..f2cc9c7c75 100644 --- a/nipype/interfaces/mrtrix3/tracking.py +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -227,7 +227,7 @@ class Tractography(MRTrix3Base): >>> tk.inputs.in_file = 'fods.mif' >>> tk.inputs.roi_mask = 'mask.nii.gz' >>> tk.inputs.seed_sphere = (80, 100, 70, 10) - >>> tk.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> tk.cmdline # doctest: +ELLIPSIS 'tckgen -algorithm iFOD2 -mask mask.nii.gz -seed_sphere \ 80.000000,100.000000,70.000000,10.000000 fods.mif tracked.tck' >>> tk.run() # doctest: +SKIP diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py index 99f308bd18..9f319456d6 100644 --- a/nipype/interfaces/mrtrix3/utils.py +++ b/nipype/interfaces/mrtrix3/utils.py @@ -15,9 +15,8 @@ import os.path as op -from ..traits_extension import isdefined from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, - File, InputMultiPath) + File, InputMultiPath, isdefined) from .base import MRTrix3BaseInputSpec, MRTrix3Base @@ -46,7 +45,7 @@ class BrainMask(CommandLine): >>> import nipype.interfaces.mrtrix3 as mrt 
>>> bmsk = mrt.BrainMask() >>> bmsk.inputs.in_file = 'dwi.mif' - >>> bmsk.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> bmsk.cmdline # doctest: +ELLIPSIS 'dwi2mask dwi.mif brainmask.mif' >>> bmsk.run() # doctest: +SKIP """ @@ -93,7 +92,7 @@ class Mesh2PVE(CommandLine): >>> m2p.inputs.in_file = 'surf1.vtk' >>> m2p.inputs.reference = 'dwi.mif' >>> m2p.inputs.in_first = 'T1.nii.gz' - >>> m2p.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> m2p.cmdline # doctest: +ELLIPSIS 'mesh2pve -first T1.nii.gz surf1.vtk dwi.mif mesh2volume.nii.gz' >>> m2p.run() # doctest: +SKIP """ @@ -108,41 +107,36 @@ def _list_outputs(self): return outputs -class Generate5ttInputSpec(CommandLineInputSpec): - in_fast = InputMultiPath( - File(exists=True), argstr='%s', mandatory=True, position=-3, - desc='list of PVE images from FAST') - in_first = File( - exists=True, argstr='%s', position=-2, - desc='combined segmentation file from FIRST') - out_file = File( - 'act-5tt.mif', argstr='%s', mandatory=True, position=-1, - usedefault=True, desc='name of output file') +class Generate5ttInputSpec(MRTrix3BaseInputSpec): + algorithm = traits.Enum('fsl','gif','freesurfer', argstr='%s', position=-3, + mandatory=True, desc='tissue segmentation algorithm') + in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, + desc='input image') + out_file = File(argstr='%s', mandatory=True, position=-1, + desc='output image') class Generate5ttOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='segmentation for ACT in 5tt format') + out_file = File(exists=True, desc='output image') -class Generate5tt(CommandLine): +class Generate5tt(MRTrix3Base): """ - Concatenate segmentation results from FSL FAST and FIRST into the 5TT - format required for ACT + Generate a 5TT image suitable for ACT using the selected algorithm Example ------- >>> import nipype.interfaces.mrtrix3 as mrt - >>> seg = mrt.Generate5tt() - >>> seg.inputs.in_fast = ['tpm_00.nii.gz', - ... 
'tpm_01.nii.gz', 'tpm_02.nii.gz'] - >>> seg.inputs.in_first = 'first_merged.nii.gz' - >>> seg.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE - '5ttgen tpm_00.nii.gz tpm_01.nii.gz tpm_02.nii.gz first_merged.nii.gz\ - act-5tt.mif' - >>> seg.run() # doctest: +SKIP + >>> gen5tt = mrt.Generate5tt() + >>> gen5tt.inputs.in_file = 'T1.nii.gz' + >>> gen5tt.inputs.algorithm = 'fsl' + >>> gen5tt.inputs.out_file = '5tt.mif' + >>> gen5tt.cmdline # doctest: +ELLIPSIS + '5ttgen fsl T1.nii.gz 5tt.mif' + >>> gen5tt.run() # doctest: +SKIP """ _cmd = '5ttgen' @@ -197,7 +191,7 @@ class TensorMetrics(CommandLine): >>> comp = mrt.TensorMetrics() >>> comp.inputs.in_file = 'dti.mif' >>> comp.inputs.out_fa = 'fa.mif' - >>> comp.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> comp.cmdline # doctest: +ELLIPSIS 'tensor2metric -fa fa.mif dti.mif' >>> comp.run() # doctest: +SKIP """ @@ -337,7 +331,7 @@ class ComputeTDI(MRTrix3Base): >>> import nipype.interfaces.mrtrix3 as mrt >>> tdi = mrt.ComputeTDI() >>> tdi.inputs.in_file = 'dti.mif' - >>> tdi.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> tdi.cmdline # doctest: +ELLIPSIS 'tckmap dti.mif tdi.mif' >>> tdi.run() # doctest: +SKIP """ @@ -388,7 +382,7 @@ class TCK2VTK(MRTrix3Base): >>> vtk = mrt.TCK2VTK() >>> vtk.inputs.in_file = 'tracks.tck' >>> vtk.inputs.reference = 'b0.nii' - >>> vtk.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> vtk.cmdline # doctest: +ELLIPSIS 'tck2vtk -image b0.nii tracks.tck tracks.vtk' >>> vtk.run() # doctest: +SKIP """ @@ -401,3 +395,144 @@ def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = op.abspath(self.inputs.out_file) return outputs + + +class DWIExtractInputSpec(MRTrix3BaseInputSpec): + in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, + desc='input image') + out_file = File(argstr='%s', mandatory=True, position=-1, + desc='output image') + bzero = traits.Bool(argstr='-bzero', desc='extract b=0 volumes') + nobzero = traits.Bool(argstr='-nobzero', 
desc='extract non b=0 volumes') + singleshell = traits.Bool(argstr='-singleshell', desc='extract volumes with a specific shell') + shell = traits.List(traits.Float, sep=',', argstr='-shell %s', + desc='specify one or more gradient shells') + + +class DWIExtractOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class DWIExtract(MRTrix3Base): + + """ + Extract diffusion-weighted volumes, b=0 volumes, or certain shells from a + DWI dataset + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> dwiextract = mrt.DWIExtract() + >>> dwiextract.inputs.in_file = 'dwi.mif' + >>> dwiextract.inputs.bzero = True + >>> dwiextract.inputs.out_file = 'b0vols.mif' + >>> dwiextract.inputs.grad_fsl = ('bvecs', 'bvals') + >>> dwiextract.cmdline # doctest: +ELLIPSIS + 'dwiextract -bzero -fslgrad bvecs bvals dwi.mif b0vols.mif' + >>> dwiextract.run() # doctest: +SKIP + """ + + _cmd = 'dwiextract' + input_spec = DWIExtractInputSpec + output_spec = DWIExtractOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class MRConvertInputSpec(MRTrix3BaseInputSpec): + in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, + desc='input image') + out_file = File('dwi.mif', argstr='%s', mandatory=True, position=-1, + usedefault=True, desc='output image') + coord = traits.List(traits.Float, sep=' ', argstr='-coord %s', + desc='extract data at the specified coordinates') + vox = traits.List(traits.Float, sep=',', argstr='-vox %s', + desc='change the voxel dimensions') + axes = traits.List(traits.Int, sep=',', argstr='-axes %s', + desc='specify the axes that will be used') + scaling = traits.List(traits.Float, sep=',', argstr='-scaling %s', + desc='specify the data scaling parameter') + + +class MRConvertOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class MRConvert(MRTrix3Base): + + """ + Perform 
conversion between different file types and optionally extract a + subset of the input image + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> mrconvert = mrt.MRConvert() + >>> mrconvert.inputs.in_file = 'dwi.nii.gz' + >>> mrconvert.inputs.grad_fsl = ('bvecs', 'bvals') + >>> mrconvert.cmdline # doctest: +ELLIPSIS + 'mrconvert -fslgrad bvecs bvals dwi.nii.gz dwi.mif' + >>> mrconvert.run() # doctest: +SKIP + """ + + _cmd = 'mrconvert' + input_spec = MRConvertInputSpec + output_spec = MRConvertOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class MRMathInputSpec(MRTrix3BaseInputSpec): + in_file = File(exists=True, argstr='%s', mandatory=True, position=-3, + desc='input image') + out_file = File(argstr='%s', mandatory=True, position=-1, + desc='output image') + operation = traits.Enum('mean','median','sum','product','rms','norm', + 'var','std','min','max','absmax','magmax', argstr='%s', position=-2, + mandatory=True, desc='operation to computer along a specified axis') + axis = traits.Int(0, argstr='-axis %d', + desc='specfied axis to perform the operation along') + + +class MRMathOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class MRMath(MRTrix3Base): + + """ + Compute summary statistic on image intensities + along a specified axis of a single image + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> mrmath = mrt.MRMath() + >>> mrmath.inputs.in_file = 'dwi.mif' + >>> mrmath.inputs.operation = 'mean' + >>> mrmath.inputs.axis = 3 + >>> mrmath.inputs.out_file = 'dwi_mean.mif' + >>> mrmath.inputs.grad_fsl = ('bvecs', 'bvals') + >>> mrmath.cmdline # doctest: +ELLIPSIS + 'mrmath -axis 3 -fslgrad bvecs bvals dwi.mif mean dwi_mean.mif' + >>> mrmath.run() # doctest: +SKIP + """ + + _cmd = 'mrmath' + input_spec = MRMathInputSpec + output_spec = MRMathOutputSpec + + def 
_list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + diff --git a/nipype/interfaces/niftyfit/asl.py b/nipype/interfaces/niftyfit/asl.py index f0cc8bc19b..8f95a48192 100644 --- a/nipype/interfaces/niftyfit/asl.py +++ b/nipype/interfaces/niftyfit/asl.py @@ -147,7 +147,7 @@ class FitAsl(NiftyFitCommand): >>> from nipype.interfaces import niftyfit >>> node = niftyfit.FitAsl() >>> node.inputs.source_file = 'asl.nii.gz' - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'fit_asl -source asl.nii.gz -cbf asl_cbf.nii.gz -error asl_error.nii.gz \ -syn asl_syn.nii.gz' diff --git a/nipype/interfaces/niftyfit/base.py b/nipype/interfaces/niftyfit/base.py index a2f64fcd6b..58cbcaed45 100644 --- a/nipype/interfaces/niftyfit/base.py +++ b/nipype/interfaces/niftyfit/base.py @@ -19,16 +19,11 @@ """ import os -import warnings from ..base import CommandLine from ...utils.filemanip import split_filename -warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) - - class NiftyFitCommand(CommandLine): """ Base support interface for NiftyFit commands. 
diff --git a/nipype/interfaces/niftyfit/dwi.py b/nipype/interfaces/niftyfit/dwi.py index e368726656..67c5444bbe 100644 --- a/nipype/interfaces/niftyfit/dwi.py +++ b/nipype/interfaces/niftyfit/dwi.py @@ -248,7 +248,7 @@ class FitDwi(NiftyFitCommand): >>> fit_dwi.inputs.bvec_file = 'bvecs' >>> fit_dwi.inputs.bval_file = 'bvals' >>> fit_dwi.inputs.rgbmap_file = 'rgb.nii.gz' - >>> fit_dwi.cmdline # doctest: +ALLOW_UNICODE + >>> fit_dwi.cmdline 'fit_dwi -source dwi.nii.gz -bval bvals -bvec bvecs -dti \ -error dwi_error.nii.gz -famap dwi_famap.nii.gz -mcmap dwi_mcmap.nii.gz \ -mcout dwi_mcout.txt -mdmap dwi_mdmap.nii.gz -nodiff dwi_no_diff.nii.gz \ @@ -427,7 +427,7 @@ class DwiTool(NiftyFitCommand): >>> dwi_tool.inputs.mask_file = 'mask.nii.gz' >>> dwi_tool.inputs.b0_file = 'b0.nii.gz' >>> dwi_tool.inputs.rgbmap_file = 'rgb_map.nii.gz' - >>> dwi_tool.cmdline # doctest: +ALLOW_UNICODE + >>> dwi_tool.cmdline 'dwi_tool -source dwi.nii.gz -bval bvals -bvec bvecs -b0 b0.nii.gz \ -mask mask.nii.gz -dti -famap dwi_famap.nii.gz -logdti2 dwi_logdti2.nii.gz \ -mcmap dwi_mcmap.nii.gz -mdmap dwi_mdmap.nii.gz -rgbmap rgb_map.nii.gz \ diff --git a/nipype/interfaces/niftyfit/qt1.py b/nipype/interfaces/niftyfit/qt1.py index 6cb0cf7da1..b5ccfed88b 100644 --- a/nipype/interfaces/niftyfit/qt1.py +++ b/nipype/interfaces/niftyfit/qt1.py @@ -165,7 +165,7 @@ class FitQt1(NiftyFitCommand): >>> from nipype.interfaces.niftyfit import FitQt1 >>> fit_qt1 = FitQt1() >>> fit_qt1.inputs.source_file = 'TI4D.nii.gz' - >>> fit_qt1.cmdline # doctest: +ALLOW_UNICODE + >>> fit_qt1.cmdline 'fit_qt1 -source TI4D.nii.gz -comp TI4D_comp.nii.gz \ -error TI4D_error.nii.gz -m0map TI4D_m0map.nii.gz -mcmap TI4D_mcmap.nii.gz \ -res TI4D_res.nii.gz -syn TI4D_syn.nii.gz -t1map TI4D_t1map.nii.gz' diff --git a/nipype/interfaces/niftyfit/tests/test_asl.py b/nipype/interfaces/niftyfit/tests/test_asl.py index 7d0df3376a..f949e26b76 100644 --- a/nipype/interfaces/niftyfit/tests/test_asl.py +++ 
b/nipype/interfaces/niftyfit/tests/test_asl.py @@ -4,12 +4,14 @@ import pytest -from nipype.interfaces.niftyfit import FitAsl -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ..asl import FitAsl +from ...niftyreg.tests.test_regutils import no_nifty_tool -@pytest.mark.skipif(no_nifty_package(cmd='fit_asl'), + +@pytest.mark.skipif(no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed") def test_fit_asl(): """ Testing FitAsl interface.""" diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py index 8d5d0e2b14..6c0a773fa6 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -43,7 +43,8 @@ def test_DwiTool_inputs(): name_source=['source_file'], name_template='%s_famap.nii.gz', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ivim_flag=dict(argstr='-ivim', @@ -90,7 +91,8 @@ def test_DwiTool_inputs(): name_template='%s_syn.nii.gz', requires=['bvec_file', 'b0_file'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), v1map_file=dict(argstr='-v1map %s', name_source=['source_file'], diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py index aa67c92149..db865b495c 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -29,7 +29,8 @@ def test_FitAsl_inputs(): ), gm_ttt=dict(argstr='-gmTTT %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ir_output=dict(argstr='-IRoutput %s', @@ -89,7 +90,8 @@ def test_FitAsl_inputs(): ), t_inv2=dict(argstr='-Tinv2 
%f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), wm_plasma=dict(argstr='-wmL %f', ), diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py index e03d999463..96e8fd736f 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -50,7 +50,8 @@ def test_FitDwi_inputs(): gn_flag=dict(argstr='-gn', xor=['wls_flag'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ivim_flag=dict(argstr='-ivim', @@ -143,7 +144,8 @@ def test_FitDwi_inputs(): name_template='%s_tenmap.nii.gz', requires=['dti_flag'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), v1map_file=dict(argstr='-v1map %s', name_source=['source_file'], diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py index 86c15efaa5..32d1cb7b03 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -29,7 +29,8 @@ def test_FitQt1_inputs(): gn_flag=dict(argstr='-gn', position=8, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ir_flag=dict(argstr='-IR', @@ -97,7 +98,8 @@ def test_FitQt1_inputs(): te_value=dict(argstr='-TE %f', position=4, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tis=dict(argstr='-TIs %s', position=14, diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py index a89cdf40ce..1365775b56 100644 --- a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -9,10 +9,12 @@ def 
test_NiftyFitCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = NiftyFitCommand.input_spec() diff --git a/nipype/interfaces/niftyfit/tests/test_dwi.py b/nipype/interfaces/niftyfit/tests/test_dwi.py index aee809e9c5..5b7d0f7348 100644 --- a/nipype/interfaces/niftyfit/tests/test_dwi.py +++ b/nipype/interfaces/niftyfit/tests/test_dwi.py @@ -3,12 +3,14 @@ import pytest -from nipype.interfaces.niftyfit import FitDwi, DwiTool -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ..dwi import FitDwi, DwiTool +from ...niftyreg.tests.test_regutils import no_nifty_tool -@pytest.mark.skipif(no_nifty_package(cmd='fit_dwi'), + +@pytest.mark.skipif(no_nifty_tool(cmd='fit_dwi'), reason="niftyfit is not installed") def test_fit_dwi(): """ Testing FitDwi interface.""" @@ -56,7 +58,7 @@ def test_fit_dwi(): assert fit_dwi.cmdline == expected_cmd -@pytest.mark.skipif(no_nifty_package(cmd='dwi_tool'), +@pytest.mark.skipif(no_nifty_tool(cmd='dwi_tool'), reason="niftyfit is not installed") def test_dwi_tool(): """ Testing DwiTool interface.""" diff --git a/nipype/interfaces/niftyfit/tests/test_qt1.py b/nipype/interfaces/niftyfit/tests/test_qt1.py index 2d64a6cec5..e2e78ed37d 100644 --- a/nipype/interfaces/niftyfit/tests/test_qt1.py +++ b/nipype/interfaces/niftyfit/tests/test_qt1.py @@ -4,12 +4,13 @@ import pytest -from nipype.interfaces.niftyfit import FitQt1 -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool 
+from ..qt1 import FitQt1 -@pytest.mark.skipif(no_nifty_package(cmd='fit_qt1'), +@pytest.mark.skipif(no_nifty_tool(cmd='fit_qt1'), reason="niftyfit is not installed") def test_fit_qt1(): """ Testing FitQt1 interface.""" diff --git a/nipype/interfaces/niftyreg/__init__.py b/nipype/interfaces/niftyreg/__init__.py index 64cc60a0ab..04c066dcae 100644 --- a/nipype/interfaces/niftyreg/__init__.py +++ b/nipype/interfaces/niftyreg/__init__.py @@ -9,7 +9,7 @@ Top-level namespace for niftyreg. """ -from .base import no_nifty_package, get_custom_path +from .base import get_custom_path from .reg import RegAladin, RegF3D from .regutils import (RegResample, RegJacobian, RegAverage, RegTools, RegTransform, RegMeasure) diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py index bb09d96923..47859c9ec9 100644 --- a/nipype/interfaces/niftyreg/base.py +++ b/nipype/interfaces/niftyreg/base.py @@ -22,28 +22,18 @@ from builtins import property, super from distutils.version import StrictVersion import os -import shutil -import subprocess -from warnings import warn +from ... 
import logging from ..base import CommandLine, CommandLineInputSpec, traits, Undefined from ...utils.filemanip import split_filename +iflogger = logging.getLogger('interface') + def get_custom_path(command, env_dir='NIFTYREGDIR'): return os.path.join(os.getenv(env_dir, ''), command) -def no_nifty_package(cmd='reg_f3d'): - try: - return shutil.which(cmd) is None - except AttributeError: # Python < 3.3 - return not any( - [os.path.isfile(os.path.join(path, cmd)) and - os.access(os.path.join(path, cmd), os.X_OK) - for path in os.environ["PATH"].split(os.pathsep)]) - - class NiftyRegCommandInputSpec(CommandLineInputSpec): """Input Spec for niftyreg interfaces.""" # Set the number of omp thread to use @@ -65,18 +55,18 @@ def __init__(self, required_version=None, **inputs): self.num_threads = 1 super(NiftyRegCommand, self).__init__(**inputs) self.required_version = required_version - _version = self.get_version() + _version = self.version_from_command() if _version: _version = _version.decode("utf-8") if self._min_version is not None and \ StrictVersion(_version) < StrictVersion(self._min_version): msg = 'A later version of Niftyreg is required (%s < %s)' - warn(msg % (_version, self._min_version)) + iflogger.warning(msg, _version, self._min_version) if required_version is not None: if StrictVersion(_version) != StrictVersion(required_version): msg = 'The version of NiftyReg differs from the required' msg += '(%s != %s)' - warn(msg % (_version, self.required_version)) + iflogger.warning(msg, _version, self.required_version) self.inputs.on_trait_change(self._omp_update, 'omp_core_val') self.inputs.on_trait_change(self._environ_update, 'environ') self._omp_update() @@ -102,7 +92,7 @@ def _environ_update(self): self.inputs.omp_core_val = Undefined def check_version(self): - _version = self.get_version() + _version = self.version_from_command() if not _version: raise Exception('Niftyreg not found') # Decoding to string: @@ -116,18 +106,12 @@ def check_version(self): err += 
'(%s != %s)' raise ValueError(err % (_version, self.required_version)) - def get_version(self): - if no_nifty_package(cmd=self.cmd): - return None - exec_cmd = ''.join((self.cmd, ' -v')) - return subprocess.check_output(exec_cmd, shell=True).strip() - @property def version(self): - return self.get_version() + return self.version_from_command() def exists(self): - return self.get_version() is not None + return self.version_from_command() is not None def _format_arg(self, name, spec, value): if name == 'omp_core_val': diff --git a/nipype/interfaces/niftyreg/reg.py b/nipype/interfaces/niftyreg/reg.py index e8ad87e3ee..bbc49ee2f2 100644 --- a/nipype/interfaces/niftyreg/reg.py +++ b/nipype/interfaces/niftyreg/reg.py @@ -20,17 +20,12 @@ absolute_import) from builtins import staticmethod import os -import warnings from ..base import TraitedSpec, File, traits, isdefined from .base import get_custom_path, NiftyRegCommand, NiftyRegCommandInputSpec from ...utils.filemanip import split_filename -warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) - - class RegAladinInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegAladin. 
""" # Input reference file @@ -156,7 +151,7 @@ class RegAladin(NiftyRegCommand): >>> node.inputs.flo_file = 'im2.nii' >>> node.inputs.rmask_file = 'mask.nii' >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'reg_aladin -aff im2_aff.txt -flo im2.nii -omp 4 -ref im1.nii \ -res im2_res.nii.gz -rmask mask.nii' @@ -367,7 +362,7 @@ class RegF3D(NiftyRegCommand): >>> node.inputs.flo_file = 'im2.nii' >>> node.inputs.rmask_file = 'mask.nii' >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'reg_f3d -cpp im2_cpp.nii.gz -flo im2.nii -omp 4 -ref im1.nii \ -res im2_res.nii.gz -rmask mask.nii' diff --git a/nipype/interfaces/niftyreg/regutils.py b/nipype/interfaces/niftyreg/regutils.py index 9c2ddc055d..4bbb73c687 100644 --- a/nipype/interfaces/niftyreg/regutils.py +++ b/nipype/interfaces/niftyreg/regutils.py @@ -18,7 +18,6 @@ from __future__ import (print_function, division, unicode_literals, absolute_import) from builtins import len, open, property, super -import warnings import os from ..base import TraitedSpec, File, traits, isdefined @@ -26,10 +25,6 @@ from ...utils.filemanip import split_filename -warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) - - class RegResampleInputSpec(NiftyRegCommandInputSpec): """ Input Spec for RegResample. 
""" # Input reference file @@ -106,7 +101,7 @@ class RegResample(NiftyRegCommand): >>> node.inputs.trans_file = 'warpfield.nii' >>> node.inputs.inter_val = 'LIN' >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'reg_resample -flo im2.nii -inter 1 -omp 4 -ref im1.nii -trans \ warpfield.nii -res im2_res.nii.gz' @@ -173,7 +168,7 @@ class RegJacobian(NiftyRegCommand): >>> node.inputs.ref_file = 'im1.nii' >>> node.inputs.trans_file = 'warpfield.nii' >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'reg_jacobian -omp 4 -ref im1.nii -trans warpfield.nii -jac \ warpfield_jac.nii.gz' @@ -289,7 +284,7 @@ class RegTools(NiftyRegCommand): >>> node.inputs.in_file = 'im1.nii' >>> node.inputs.mul_val = 4 >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'reg_tools -in im1.nii -mul 4.0 -omp 4 -out im1_tools.nii.gz' """ @@ -391,15 +386,27 @@ class RegAverage(NiftyRegCommand): Examples -------- + + .. testsetup:: + + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() # changing to temporary file + + .. doctest:: + >>> from nipype.interfaces import niftyreg >>> node = niftyreg.RegAverage() >>> one_file = 'im1.nii' >>> two_file = 'im2.nii' >>> three_file = 'im3.nii' >>> node.inputs.avg_files = [one_file, two_file, three_file] - >>> node.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> node.cmdline # doctest: +ELLIPSIS 'reg_average --cmd_file .../reg_average_cmd' + .. 
testsetup:: + + >>> os.chdir(old.strpath) + """ _cmd = get_custom_path('reg_average') input_spec = RegAverageInputSpec @@ -602,7 +609,7 @@ class RegTransform(NiftyRegCommand): >>> node = niftyreg.RegTransform() >>> node.inputs.def_input = 'warpfield.nii' >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ELLIPSIS +ALLOW_UNICODE + >>> node.cmdline # doctest: +ELLIPSIS 'reg_transform -omp 4 -def warpfield.nii .../warpfield_trans.nii.gz' """ @@ -714,7 +721,7 @@ class RegMeasure(NiftyRegCommand): >>> node.inputs.flo_file = 'im2.nii' >>> node.inputs.measure_type = 'lncc' >>> node.inputs.omp_core_val = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'reg_measure -flo im2.nii -lncc -omp 4 -out im2_lncc.txt -ref im1.nii' """ diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py index e18211fee7..ee4b020bd0 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -9,13 +9,15 @@ def test_NiftyRegCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), omp_core_val=dict(argstr='-omp %i', usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = NiftyRegCommand.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py index b4910d1a1e..d177916034 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -30,7 +30,8 @@ def test_RegAladin_inputs(): ), i_val=dict(argstr='-pi %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_aff_file=dict(argstr='-inaff %s', 
@@ -69,7 +70,8 @@ def test_RegAladin_inputs(): ), smoo_r_val=dict(argstr='-smooR %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), v_val=dict(argstr='-pv %d', ), diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py index 119b6c5e82..7898d68cd3 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -39,7 +39,8 @@ def test_RegAverage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), omp_core_val=dict(argstr='-omp %i', @@ -49,7 +50,8 @@ def test_RegAverage_inputs(): genfile=True, position=0, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warp_files=dict(argstr='%s', position=-1, diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py index 660532fb15..ba4301e756 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -38,7 +38,8 @@ def test_RegF3D_inputs(): ), fupth_thr_val=dict(argstr='--fUpTh %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), incpp_file=dict(argstr='-incpp %s', @@ -117,7 +118,8 @@ def test_RegF3D_inputs(): ), sz_val=dict(argstr='-sz %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vel_flag=dict(argstr='-vel', ), diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py index b5eb132c39..fd4d81b6a1 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -9,7 
+9,8 @@ def test_RegJacobian_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), omp_core_val=dict(argstr='-omp %i', @@ -22,7 +23,8 @@ def test_RegJacobian_inputs(): ), ref_file=dict(argstr='-ref %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trans_file=dict(argstr='-trans %s', mandatory=True, diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py index f0504cb3dc..11b1248e6d 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -12,7 +12,8 @@ def test_RegMeasure_inputs(): flo_file=dict(argstr='-flo %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), measure_type=dict(argstr='-%s', @@ -28,7 +29,8 @@ def test_RegMeasure_inputs(): ref_file=dict(argstr='-ref %s', mandatory=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = RegMeasure.input_spec() diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py index e1ca405567..7efa3c5068 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -12,7 +12,8 @@ def test_RegResample_inputs(): flo_file=dict(argstr='-flo %s', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inter_val=dict(argstr='-inter %d', @@ -36,7 +37,8 @@ def test_RegResample_inputs(): ), tensor_flag=dict(argstr='-tensor ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), 
trans_file=dict(argstr='-trans %s', ), diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py index 495e0854d7..8b8c371638 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -19,7 +19,8 @@ def test_RegTools_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -48,7 +49,8 @@ def test_RegTools_inputs(): ), sub_val=dict(argstr='-sub %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thr_val=dict(argstr='-thr %f', ), diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py index 00c017d1d3..3730a8b42d 100644 --- a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -41,7 +41,8 @@ def test_RegTransform_inputs(): position=-2, xor=['def_input', 'disp_input', 'flow_input', 'comp_input', 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inv_aff_input=dict(argstr='-invAff %s', @@ -70,7 +71,8 @@ def test_RegTransform_inputs(): position=1, requires=['ref1_file'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upd_s_form_input=dict(argstr='-updSform %s', position=-3, diff --git a/nipype/interfaces/niftyreg/tests/test_reg.py b/nipype/interfaces/niftyreg/tests/test_reg.py index 9a3705fba7..eb5566f46f 100644 --- a/nipype/interfaces/niftyreg/tests/test_reg.py +++ b/nipype/interfaces/niftyreg/tests/test_reg.py @@ -4,13 +4,13 @@ import pytest -from 
nipype.interfaces.niftyreg import (no_nifty_package, get_custom_path, - RegAladin, RegF3D) -from nipype.testing import example_data +from ....testing import example_data +from .. import (get_custom_path, RegAladin, RegF3D) +from .test_regutils import no_nifty_tool @pytest.mark.skipif( - no_nifty_package(cmd='reg_aladin'), + no_nifty_tool(cmd='reg_aladin'), reason="niftyreg is not installed. reg_aladin not found.") def test_reg_aladin(): """ tests for reg_aladin interface""" @@ -48,7 +48,7 @@ def test_reg_aladin(): @pytest.mark.skipif( - no_nifty_package(cmd='reg_f3d'), + no_nifty_tool(cmd='reg_f3d'), reason="niftyreg is not installed. reg_f3d not found.") def test_reg_f3d(): """ tests for reg_f3d interface""" diff --git a/nipype/interfaces/niftyreg/tests/test_regutils.py b/nipype/interfaces/niftyreg/tests/test_regutils.py index 4a8cf18fbf..763cb2f443 100644 --- a/nipype/interfaces/niftyreg/tests/test_regutils.py +++ b/nipype/interfaces/niftyreg/tests/test_regutils.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: - -from nipype.interfaces.niftyreg import (no_nifty_package, get_custom_path, - RegAverage, RegResample, RegJacobian, - RegTools, RegMeasure, RegTransform) -from nipype.testing import example_data import os import pytest +from ....utils.filemanip import which +from ....testing import example_data +from .. import ( + get_custom_path, RegAverage, RegResample, RegJacobian, + RegTools, RegMeasure, RegTransform +) + + +def no_nifty_tool(cmd=None): + return which(cmd) is None + @pytest.mark.skipif( - no_nifty_package(cmd='reg_resample'), + no_nifty_tool(cmd='reg_resample'), reason="niftyreg is not installed. 
reg_resample not found.") def test_reg_resample_res(): """ tests for reg_resample interface """ @@ -68,7 +74,7 @@ def test_reg_resample_res(): @pytest.mark.skipif( - no_nifty_package(cmd='reg_jacobian'), + no_nifty_tool(cmd='reg_jacobian'), reason="niftyreg is not installed. reg_jacobian not found.") def test_reg_jacobian_jac(): """ Test interface for RegJacobian """ @@ -132,7 +138,7 @@ def test_reg_jacobian_jac(): @pytest.mark.skipif( - no_nifty_package(cmd='reg_tools'), + no_nifty_tool(cmd='reg_tools'), reason="niftyreg is not installed. reg_tools not found.") def test_reg_tools_mul(): """ tests for reg_tools interface """ @@ -175,7 +181,7 @@ def test_reg_tools_mul(): @pytest.mark.skipif( - no_nifty_package(cmd='reg_average'), + no_nifty_tool(cmd='reg_average'), reason="niftyreg is not installed. reg_average not found.") def test_reg_average(): """ tests for reg_average interface """ @@ -323,7 +329,7 @@ def test_reg_average(): @pytest.mark.skipif( - no_nifty_package(cmd='reg_transform'), + no_nifty_tool(cmd='reg_transform'), reason="niftyreg is not installed. reg_transform not found.") def test_reg_transform_def(): """ tests for reg_transform interface """ @@ -432,7 +438,7 @@ def test_reg_transform_def(): @pytest.mark.skipif( - no_nifty_package(cmd='reg_measure'), + no_nifty_tool(cmd='reg_measure'), reason="niftyreg is not installed. 
reg_measure not found.") def test_reg_measure(): """ tests for reg_measure interface """ diff --git a/nipype/interfaces/niftyseg/base.py b/nipype/interfaces/niftyseg/base.py index 8025349714..a84fb9eb62 100644 --- a/nipype/interfaces/niftyseg/base.py +++ b/nipype/interfaces/niftyseg/base.py @@ -18,14 +18,7 @@ """ from __future__ import print_function, division, unicode_literals, absolute_import -from ..niftyreg.base import no_nifty_package from ..niftyfit.base import NiftyFitCommand -import subprocess -import warnings - - -warn = warnings.warn -warnings.filterwarnings('always', category=UserWarning) class NiftySegCommand(NiftyFitCommand): @@ -39,9 +32,5 @@ def __init__(self, **inputs): super(NiftySegCommand, self).__init__(**inputs) def get_version(self): - if no_nifty_package(cmd=self.cmd): - return None - # exec_cmd = ''.join((self.cmd, ' --version')) - exec_cmd = 'seg_EM --version' - # Using seg_EM for version (E.G: seg_stats --version doesn't work) - return subprocess.check_output(exec_cmd, shell=True).strip('\n') + return super(NiftySegCommand, self).version_from_command( + cmd='seg_EM', flag='--version') diff --git a/nipype/interfaces/niftyseg/em.py b/nipype/interfaces/niftyseg/em.py index be39f7775b..f2e7359677 100644 --- a/nipype/interfaces/niftyseg/em.py +++ b/nipype/interfaces/niftyseg/em.py @@ -127,7 +127,7 @@ class EM(NiftySegCommand): >>> node = niftyseg.EM() >>> node.inputs.in_file = 'im1.nii' >>> node.inputs.no_prior = 4 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'seg_EM -in im1.nii -nopriors 4 -bc_out im1_bc_em.nii.gz -out im1_em.nii.gz -out_outlier im1_outlier_em.nii.gz' """ diff --git a/nipype/interfaces/niftyseg/label_fusion.py b/nipype/interfaces/niftyseg/label_fusion.py index 82b19d1f3b..c1637cb258 100644 --- a/nipype/interfaces/niftyseg/label_fusion.py +++ b/nipype/interfaces/niftyseg/label_fusion.py @@ -147,7 +147,7 @@ class LabelFusion(NiftySegCommand): >>> node.inputs.template_file = 'im3.nii' >>> 
node.inputs.template_num = 2 >>> node.inputs.classifier_type = 'STEPS' - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'seg_LabFusion -in im1.nii -STEPS 2.000000 2 im2.nii im3.nii -out im1_steps.nii' """ @@ -298,7 +298,7 @@ class CalcTopNCC(NiftySegCommand): >>> node.inputs.num_templates = 2 >>> node.inputs.in_templates = ['im2.nii', 'im3.nii'] >>> node.inputs.top_templates = 1 - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'seg_CalcTopNCC -target im1.nii -templates 2 im2.nii im3.nii -n 1' """ diff --git a/nipype/interfaces/niftyseg/lesions.py b/nipype/interfaces/niftyseg/lesions.py index d531b5c464..489837e6dd 100644 --- a/nipype/interfaces/niftyseg/lesions.py +++ b/nipype/interfaces/niftyseg/lesions.py @@ -109,7 +109,7 @@ class FillLesions(NiftySegCommand): >>> node = niftyseg.FillLesions() >>> node.inputs.in_file = 'im1.nii' >>> node.inputs.lesion_mask = 'im2.nii' - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'seg_FillLesions -i im1.nii -l im2.nii -o im1_lesions_filled.nii.gz' """ diff --git a/nipype/interfaces/niftyseg/maths.py b/nipype/interfaces/niftyseg/maths.py index bd05ab5df3..b8a5c0a1b8 100644 --- a/nipype/interfaces/niftyseg/maths.py +++ b/nipype/interfaces/niftyseg/maths.py @@ -169,31 +169,31 @@ class UnaryMaths(MathsCommand): >>> # Test sqrt operation >>> unary_sqrt = copy.deepcopy(unary) >>> unary_sqrt.inputs.operation = 'sqrt' - >>> unary_sqrt.cmdline # doctest: +ALLOW_UNICODE + >>> unary_sqrt.cmdline 'seg_maths im1.nii -sqrt -odt float im1_sqrt.nii' >>> unary_sqrt.run() # doctest: +SKIP >>> # Test sqrt operation >>> unary_abs = copy.deepcopy(unary) >>> unary_abs.inputs.operation = 'abs' - >>> unary_abs.cmdline # doctest: +ALLOW_UNICODE + >>> unary_abs.cmdline 'seg_maths im1.nii -abs -odt float im1_abs.nii' >>> unary_abs.run() # doctest: +SKIP >>> # Test bin operation >>> unary_bin = copy.deepcopy(unary) >>> unary_bin.inputs.operation = 'bin' - >>> unary_bin.cmdline # doctest: +ALLOW_UNICODE + >>> 
unary_bin.cmdline 'seg_maths im1.nii -bin -odt float im1_bin.nii' >>> unary_bin.run() # doctest: +SKIP >>> # Test otsu operation >>> unary_otsu = copy.deepcopy(unary) >>> unary_otsu.inputs.operation = 'otsu' - >>> unary_otsu.cmdline # doctest: +ALLOW_UNICODE + >>> unary_otsu.cmdline 'seg_maths im1.nii -otsu -odt float im1_otsu.nii' >>> unary_otsu.run() # doctest: +SKIP >>> # Test isnan operation >>> unary_isnan = copy.deepcopy(unary) >>> unary_isnan.inputs.operation = 'isnan' - >>> unary_isnan.cmdline # doctest: +ALLOW_UNICODE + >>> unary_isnan.cmdline 'seg_maths im1.nii -isnan -odt float im1_isnan.nii' >>> unary_isnan.run() # doctest: +SKIP @@ -302,28 +302,28 @@ class BinaryMaths(MathsCommand): >>> binary_sub = copy.deepcopy(binary) >>> binary_sub.inputs.operation = 'sub' >>> binary_sub.inputs.operand_file = 'im2.nii' - >>> binary_sub.cmdline # doctest: +ALLOW_UNICODE + >>> binary_sub.cmdline 'seg_maths im1.nii -sub im2.nii -odt float im1_sub.nii' >>> binary_sub.run() # doctest: +SKIP >>> # Test mul operation >>> binary_mul = copy.deepcopy(binary) >>> binary_mul.inputs.operation = 'mul' >>> binary_mul.inputs.operand_value = 2.0 - >>> binary_mul.cmdline # doctest: +ALLOW_UNICODE + >>> binary_mul.cmdline 'seg_maths im1.nii -mul 2.00000000 -odt float im1_mul.nii' >>> binary_mul.run() # doctest: +SKIP >>> # Test llsnorm operation >>> binary_llsnorm = copy.deepcopy(binary) >>> binary_llsnorm.inputs.operation = 'llsnorm' >>> binary_llsnorm.inputs.operand_file = 'im2.nii' - >>> binary_llsnorm.cmdline # doctest: +ALLOW_UNICODE + >>> binary_llsnorm.cmdline 'seg_maths im1.nii -llsnorm im2.nii -odt float im1_llsnorm.nii' >>> binary_llsnorm.run() # doctest: +SKIP >>> # Test splitinter operation >>> binary_splitinter = copy.deepcopy(binary) >>> binary_splitinter.inputs.operation = 'splitinter' >>> binary_splitinter.inputs.operand_str = 'z' - >>> binary_splitinter.cmdline # doctest: +ALLOW_UNICODE + >>> binary_splitinter.cmdline 'seg_maths im1.nii -splitinter z -odt float 
im1_splitinter.nii' >>> binary_splitinter.run() # doctest: +SKIP @@ -419,21 +419,21 @@ class BinaryMathsInteger(MathsCommand): >>> binaryi_dil = copy.deepcopy(binaryi) >>> binaryi_dil.inputs.operation = 'dil' >>> binaryi_dil.inputs.operand_value = 2 - >>> binaryi_dil.cmdline # doctest: +ALLOW_UNICODE + >>> binaryi_dil.cmdline 'seg_maths im1.nii -dil 2 -odt float im1_dil.nii' >>> binaryi_dil.run() # doctest: +SKIP >>> # Test dil operation >>> binaryi_ero = copy.deepcopy(binaryi) >>> binaryi_ero.inputs.operation = 'ero' >>> binaryi_ero.inputs.operand_value = 1 - >>> binaryi_ero.cmdline # doctest: +ALLOW_UNICODE + >>> binaryi_ero.cmdline 'seg_maths im1.nii -ero 1 -odt float im1_ero.nii' >>> binaryi_ero.run() # doctest: +SKIP >>> # Test pad operation >>> binaryi_pad = copy.deepcopy(binaryi) >>> binaryi_pad.inputs.operation = 'pad' >>> binaryi_pad.inputs.operand_value = 4 - >>> binaryi_pad.cmdline # doctest: +ALLOW_UNICODE + >>> binaryi_pad.cmdline 'seg_maths im1.nii -pad 4 -odt float im1_pad.nii' >>> binaryi_pad.run() # doctest: +SKIP @@ -512,7 +512,7 @@ class TupleMaths(MathsCommand): >>> tuple_lncc.inputs.operation = 'lncc' >>> tuple_lncc.inputs.operand_file1 = 'im2.nii' >>> tuple_lncc.inputs.operand_value2 = 2.0 - >>> tuple_lncc.cmdline # doctest: +ALLOW_UNICODE + >>> tuple_lncc.cmdline 'seg_maths im1.nii -lncc im2.nii 2.00000000 -odt float im1_lncc.nii' >>> tuple_lncc.run() # doctest: +SKIP @@ -521,7 +521,7 @@ class TupleMaths(MathsCommand): >>> tuple_lssd.inputs.operation = 'lssd' >>> tuple_lssd.inputs.operand_file1 = 'im2.nii' >>> tuple_lssd.inputs.operand_value2 = 1.0 - >>> tuple_lssd.cmdline # doctest: +ALLOW_UNICODE + >>> tuple_lssd.cmdline 'seg_maths im1.nii -lssd im2.nii 1.00000000 -odt float im1_lssd.nii' >>> tuple_lssd.run() # doctest: +SKIP @@ -530,7 +530,7 @@ class TupleMaths(MathsCommand): >>> tuple_lltsnorm.inputs.operation = 'lltsnorm' >>> tuple_lltsnorm.inputs.operand_file1 = 'im2.nii' >>> tuple_lltsnorm.inputs.operand_value2 = 0.01 - >>> 
tuple_lltsnorm.cmdline # doctest: +ALLOW_UNICODE + >>> tuple_lltsnorm.cmdline 'seg_maths im1.nii -lltsnorm im2.nii 0.01000000 -odt float \ im1_lltsnorm.nii' >>> tuple_lltsnorm.run() # doctest: +SKIP @@ -575,7 +575,7 @@ class Merge(MathsCommand): >>> node.inputs.merge_files = files >>> node.inputs.dimension = 2 >>> node.inputs.output_datatype = 'float' - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'seg_maths im1.nii -merge 2 2 im2.nii im3.nii -odt float im1_merged.nii' """ diff --git a/nipype/interfaces/niftyseg/patchmatch.py b/nipype/interfaces/niftyseg/patchmatch.py index d598a08928..207764f086 100644 --- a/nipype/interfaces/niftyseg/patchmatch.py +++ b/nipype/interfaces/niftyseg/patchmatch.py @@ -102,7 +102,7 @@ class PatchMatch(NiftySegCommand): >>> node.inputs.in_file = 'im1.nii' >>> node.inputs.mask_file = 'im2.nii' >>> node.inputs.database_file = 'db.xml' - >>> node.cmdline # doctest: +ALLOW_UNICODE + >>> node.cmdline 'seg_PatchMatch -i im1.nii -m im2.nii -db db.xml -o im1_pm.nii.gz' """ diff --git a/nipype/interfaces/niftyseg/stats.py b/nipype/interfaces/niftyseg/stats.py index 5591a5888e..cef03b6177 100644 --- a/nipype/interfaces/niftyseg/stats.py +++ b/nipype/interfaces/niftyseg/stats.py @@ -66,7 +66,6 @@ class StatsCommand(NiftySegCommand): def _parse_stdout(self, stdout): out = [] for string_line in stdout.split("\n"): - print('parsing line {0}'.format(string_line)) if string_line.startswith('#'): continue if len(string_line) <= 1: @@ -76,8 +75,7 @@ def _parse_stdout(self, stdout): return np.array(out).squeeze() def _run_interface(self, runtime): - print('parsing output in run_interface') - new_runtime = super(UnaryStats, self)._run_interface(runtime) + new_runtime = super(StatsCommand, self)._run_interface(runtime) self.output = self._parse_stdout(new_runtime.stdout) return new_runtime @@ -156,19 +154,19 @@ class UnaryStats(StatsCommand): >>> # Test v operation >>> unary_v = copy.deepcopy(unary) >>> unary_v.inputs.operation = 'v' - >>> 
unary_v.cmdline # doctest: +ALLOW_UNICODE + >>> unary_v.cmdline 'seg_stats im1.nii -v' >>> unary_v.run() # doctest: +SKIP >>> # Test vl operation >>> unary_vl = copy.deepcopy(unary) >>> unary_vl.inputs.operation = 'vl' - >>> unary_vl.cmdline # doctest: +ALLOW_UNICODE + >>> unary_vl.cmdline 'seg_stats im1.nii -vl' >>> unary_vl.run() # doctest: +SKIP >>> # Test x operation >>> unary_x = copy.deepcopy(unary) >>> unary_x.inputs.operation = 'x' - >>> unary_x.cmdline # doctest: +ALLOW_UNICODE + >>> unary_x.cmdline 'seg_stats im1.nii -x' >>> unary_x.run() # doctest: +SKIP @@ -245,21 +243,21 @@ class BinaryStats(StatsCommand): >>> binary_sa = copy.deepcopy(binary) >>> binary_sa.inputs.operation = 'sa' >>> binary_sa.inputs.operand_value = 2.0 - >>> binary_sa.cmdline # doctest: +ALLOW_UNICODE + >>> binary_sa.cmdline 'seg_stats im1.nii -sa 2.00000000' >>> binary_sa.run() # doctest: +SKIP >>> # Test ncc operation >>> binary_ncc = copy.deepcopy(binary) >>> binary_ncc.inputs.operation = 'ncc' >>> binary_ncc.inputs.operand_file = 'im2.nii' - >>> binary_ncc.cmdline # doctest: +ALLOW_UNICODE + >>> binary_ncc.cmdline 'seg_stats im1.nii -ncc im2.nii' >>> binary_ncc.run() # doctest: +SKIP >>> # Test Nl operation >>> binary_nl = copy.deepcopy(binary) >>> binary_nl.inputs.operation = 'Nl' >>> binary_nl.inputs.operand_file = 'output.csv' - >>> binary_nl.cmdline # doctest: +ALLOW_UNICODE + >>> binary_nl.cmdline 'seg_stats im1.nii -Nl output.csv' >>> binary_nl.run() # doctest: +SKIP diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py index 714e201fc3..5a4a5cd3f3 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -9,7 +9,8 @@ def test_BinaryMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
in_file=dict(argstr='%s', @@ -43,7 +44,8 @@ def test_BinaryMaths_inputs(): output_datatype=dict(argstr='-odt %s', position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BinaryMaths.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py index 484f2ac3b4..b8ea1f87f7 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -9,7 +9,8 @@ def test_BinaryMathsInteger_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_BinaryMathsInteger_inputs(): output_datatype=dict(argstr='-odt %s', position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BinaryMathsInteger.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py index 14ea5463b0..dce54af8dc 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -9,7 +9,8 @@ def test_BinaryStats_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -36,7 +37,8 @@ def test_BinaryStats_inputs(): mandatory=True, position=4, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BinaryStats.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py index a54501c730..201f15153a 100644 --- 
a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -9,7 +9,8 @@ def test_CalcTopNCC_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-target %s', @@ -26,7 +27,8 @@ def test_CalcTopNCC_inputs(): mandatory=True, position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), top_templates=dict(argstr='-n %s', mandatory=True, diff --git a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py index c42acf6a70..9790ad9757 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_EM.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -13,7 +13,8 @@ def test_EM_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -58,7 +59,8 @@ def test_EM_inputs(): ), relax_priors=dict(argstr='-rf %s %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = EM.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py index aae126636a..a95651bf61 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -15,7 +15,8 @@ def test_FillLesions_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_dilation=dict(argstr='-dil %d', @@ -45,7 +46,8 @@ def test_FillLesions_inputs(): ), smooth=dict(argstr='-smo %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + 
nohash=True, ), use_2d=dict(argstr='-2D', ), diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py index b572ac940c..3530644d7d 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -18,7 +18,8 @@ def test_LabelFusion_inputs(): ), file_to_seg=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -50,7 +51,8 @@ def test_LabelFusion_inputs(): ), template_file=dict(), template_num=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), unc=dict(argstr='-unc', ), diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py index 640c7088bf..67435c1def 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -9,7 +9,8 @@ def test_MathsCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -24,7 +25,8 @@ def test_MathsCommand_inputs(): output_datatype=dict(argstr='-odt %s', position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MathsCommand.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py index 3980bc9ac3..4d13929b23 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -11,7 +11,8 @@ def test_Merge_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -30,7 +31,8 @@ def test_Merge_inputs(): output_datatype=dict(argstr='-odt %s', position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Merge.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py index 55dc5d9d1d..d7483fc228 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -9,10 +9,12 @@ def test_NiftySegCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = NiftySegCommand.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py new file mode 100644 index 0000000000..ba6ea5a40c --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..patchmatch import PatchMatch + + +def test_PatchMatch_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + cs_size=dict(argstr='-cs %i', + ), + database_file=dict(argstr='-db %s', + mandatory=True, + position=3, + ), + environ=dict(nohash=True, + usedefault=True, + ), + ignore_exception=dict(deprecated='1.0.0', + nohash=True, + usedefault=True, + ), + in_file=dict(argstr='-i %s', + mandatory=True, + position=1, + ), + it_num=dict(argstr='-it %i', + ), + mask_file=dict(argstr='-m %s', + mandatory=True, + position=2, + ), + match_num=dict(argstr='-match %i', + ), + out_file=dict(argstr='-o %s', + 
name_source=['in_file'], + name_template='%s_pm.nii.gz', + position=4, + ), + patch_size=dict(argstr='-size %i', + ), + pm_num=dict(argstr='-pm %i', + ), + terminal_output=dict(deprecated='1.0.0', + nohash=True, + ), + ) + inputs = PatchMatch.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_PatchMatch_outputs(): + output_map = dict(out_file=dict(), + ) + outputs = PatchMatch.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py index f535133dee..6ca821c52f 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -9,7 +9,8 @@ def test_StatsCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -22,7 +23,8 @@ def test_StatsCommand_inputs(): mask_file=dict(argstr='-m %s', position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = StatsCommand.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py index 9bd5ca771d..702b3961d9 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -9,7 +9,8 @@ def test_TupleMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -48,7 +49,8 @@ def 
test_TupleMaths_inputs(): output_datatype=dict(argstr='-odt %s', position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TupleMaths.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py index ed98de196f..c5144bbc65 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -9,7 +9,8 @@ def test_UnaryMaths_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_UnaryMaths_inputs(): output_datatype=dict(argstr='-odt %s', position=-3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = UnaryMaths.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py index 17252084c6..81bea6f18b 100644 --- a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -9,7 +9,8 @@ def test_UnaryStats_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_UnaryStats_inputs(): mandatory=True, position=4, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = UnaryStats.input_spec() diff --git a/nipype/interfaces/niftyseg/tests/test_em_interfaces.py b/nipype/interfaces/niftyseg/tests/test_em_interfaces.py index 810a782b63..b95d574357 100644 --- a/nipype/interfaces/niftyseg/tests/test_em_interfaces.py +++ b/nipype/interfaces/niftyseg/tests/test_em_interfaces.py @@ 
-3,12 +3,13 @@ import pytest -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.interfaces.niftyseg import EM -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. import EM -@pytest.mark.skipif(no_nifty_package(cmd='seg_EM'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_EM'), reason="niftyseg is not installed") def test_seg_em(): diff --git a/nipype/interfaces/niftyseg/tests/test_label_fusion.py b/nipype/interfaces/niftyseg/tests/test_label_fusion.py index f34fc9149f..6f41086531 100644 --- a/nipype/interfaces/niftyseg/tests/test_label_fusion.py +++ b/nipype/interfaces/niftyseg/tests/test_label_fusion.py @@ -3,12 +3,13 @@ import pytest -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.interfaces.niftyseg import LabelFusion, CalcTopNCC -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. 
import LabelFusion, CalcTopNCC -@pytest.mark.skipif(no_nifty_package(cmd='seg_LabFusion'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_LabFusion'), reason="niftyseg is not installed") def test_seg_lab_fusion(): """ Test interfaces for seg_labfusion""" @@ -90,7 +91,7 @@ def test_seg_lab_fusion(): assert mv_node.cmdline == expected_cmd -@pytest.mark.skipif(no_nifty_package(cmd='seg_CalcTopNCC'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_CalcTopNCC'), reason="niftyseg is not installed") def test_seg_calctopncc(): """ Test interfaces for seg_CalctoNCC""" diff --git a/nipype/interfaces/niftyseg/tests/test_lesions.py b/nipype/interfaces/niftyseg/tests/test_lesions.py index 55250bde92..aaca2df0ba 100644 --- a/nipype/interfaces/niftyseg/tests/test_lesions.py +++ b/nipype/interfaces/niftyseg/tests/test_lesions.py @@ -3,12 +3,13 @@ import pytest -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.interfaces.niftyseg import FillLesions -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. 
import FillLesions -@pytest.mark.skipif(no_nifty_package(cmd='seg_FillLesions'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_FillLesions'), reason="niftyseg is not installed") def test_seg_filllesions(): diff --git a/nipype/interfaces/niftyseg/tests/test_maths.py b/nipype/interfaces/niftyseg/tests/test_maths.py index 307adb503d..38dc765f10 100644 --- a/nipype/interfaces/niftyseg/tests/test_maths.py +++ b/nipype/interfaces/niftyseg/tests/test_maths.py @@ -3,14 +3,15 @@ import pytest -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.interfaces.niftyseg import (UnaryMaths, BinaryMaths, - BinaryMathsInteger, TupleMaths, - Merge) -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. import (UnaryMaths, BinaryMaths, + BinaryMathsInteger, TupleMaths, + Merge) -@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") def test_unary_maths(): @@ -39,7 +40,7 @@ def test_unary_maths(): assert unarym.cmdline == expected_cmd -@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") def test_binary_maths(): @@ -70,7 +71,7 @@ def test_binary_maths(): assert binarym.cmdline == expected_cmd -@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") def test_int_binary_maths(): @@ -100,7 +101,7 @@ def test_int_binary_maths(): assert ibinarym.cmdline == expected_cmd -@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") def test_tuple_maths(): @@ -134,7 +135,7 @@ def test_tuple_maths(): assert tuplem.cmdline == expected_cmd 
-@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") def test_merge(): diff --git a/nipype/interfaces/niftyseg/tests/test_patchmatch.py b/nipype/interfaces/niftyseg/tests/test_patchmatch.py index b88552fb0d..ae2500a7d2 100644 --- a/nipype/interfaces/niftyseg/tests/test_patchmatch.py +++ b/nipype/interfaces/niftyseg/tests/test_patchmatch.py @@ -3,12 +3,13 @@ import pytest -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.interfaces.niftyseg import PatchMatch -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. import PatchMatch -@pytest.mark.skipif(no_nifty_package(cmd='seg_PatchMatch'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_PatchMatch'), reason="niftyseg is not installed") def test_seg_patchmatch(): diff --git a/nipype/interfaces/niftyseg/tests/test_stats.py b/nipype/interfaces/niftyseg/tests/test_stats.py index ae3cfbfc6e..cd0948d1ae 100644 --- a/nipype/interfaces/niftyseg/tests/test_stats.py +++ b/nipype/interfaces/niftyseg/tests/test_stats.py @@ -3,12 +3,13 @@ import pytest -from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path -from nipype.interfaces.niftyseg import UnaryStats, BinaryStats -from nipype.testing import example_data +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. 
import UnaryStats, BinaryStats -@pytest.mark.skipif(no_nifty_package(cmd='seg_stats'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_stats'), reason="niftyseg is not installed") def test_unary_stats(): """ Test for the seg_stats interfaces """ @@ -35,7 +36,7 @@ def test_unary_stats(): assert unarys.cmdline == expected_cmd -@pytest.mark.skipif(no_nifty_package(cmd='seg_stats'), +@pytest.mark.skipif(no_nifty_tool(cmd='seg_stats'), reason="niftyseg is not installed") def test_binary_stats(): """ Test for the seg_stats interfaces """ diff --git a/nipype/interfaces/nilearn.py b/nipype/interfaces/nilearn.py index e7984c654a..db47b57e8b 100644 --- a/nipype/interfaces/nilearn.py +++ b/nipype/interfaces/nilearn.py @@ -21,7 +21,7 @@ from .. import logging from ..interfaces.base import (traits, TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, InputMultiPath) -IFLOG = logging.getLogger('interface') +IFLOGGER = logging.getLogger('interface') class SignalExtractionInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='4-D fMRI nii file') diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py index 915f2c85d2..401f2b0f62 100644 --- a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -6,7 +6,8 @@ def test_ComputeMask_inputs(): input_map = dict(M=dict(), cc=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), m=dict(), diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py index 7d44248cbc..78f6d02c03 100644 --- a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -14,7 +14,8 @@ def test_EstimateContrast_inputs(): ), dof=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask=dict(), diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py index 5c3f881179..5aca7d345c 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -10,7 +10,8 @@ def test_FitGLM_inputs(): ), hrf_model=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask=dict(), diff --git a/nipype/interfaces/nipy/tests/test_auto_FmriRealign4d.py b/nipype/interfaces/nipy/tests/test_auto_FmriRealign4d.py index 80902a7d0c..0360e53df0 100644 --- a/nipype/interfaces/nipy/tests/test_auto_FmriRealign4d.py +++ b/nipype/interfaces/nipy/tests/test_auto_FmriRealign4d.py @@ -6,7 +6,8 @@ def test_FmriRealign4d_inputs(): input_map = dict(between_loops=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py index f9c815fedb..d65cf36b02 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Similarity.py +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -4,7 +4,8 @@ def test_Similarity_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask1=dict(), diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py index b4e495a434..358bd6efa8 100644 --- a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -4,7 +4,8 @@ def test_SpaceTimeRealigner_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + 
input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py index c1bc16e103..0d2ad9063b 100644 --- a/nipype/interfaces/nipy/tests/test_auto_Trim.py +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -8,7 +8,8 @@ def test_Trim_inputs(): ), end_index=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py index 4d970500d2..159907b978 100644 --- a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -11,7 +11,8 @@ def test_CoherenceAnalyzer_inputs(): ), frequency_range=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_TS=dict(), diff --git a/nipype/interfaces/nitime/tests/test_nitime.py b/nipype/interfaces/nitime/tests/test_nitime.py index fa6ace4014..d37fea4f4f 100644 --- a/nipype/interfaces/nitime/tests/test_nitime.py +++ b/nipype/interfaces/nitime/tests/test_nitime.py @@ -30,11 +30,12 @@ def test_read_csv(): @pytest.mark.skipif(no_nitime, reason="nitime is not installed") -def test_coherence_analysis(): +def test_coherence_analysis(tmpdir): """Test that the coherence analyzer works """ import nitime.analysis as nta import nitime.timeseries as ts + tmpdir.chdir() # This is the nipype interface analysis: CA = nitime.CoherenceAnalyzer() CA.inputs.TR = 1.89 diff --git a/nipype/interfaces/quickshear.py b/nipype/interfaces/quickshear.py index d1782d5755..a0e9c79a1c 100644 --- a/nipype/interfaces/quickshear.py +++ b/nipype/interfaces/quickshear.py @@ -40,7 +40,7 @@ class 
Quickshear(CommandLine): >>> from nipype.interfaces.quickshear import Quickshear >>> qs = Quickshear(in_file='T1.nii', mask_file='brain_mask.nii') - >>> qs.cmdline # doctest: +ALLOW_UNICODE + >>> qs.cmdline 'quickshear T1.nii brain_mask.nii T1_defaced.nii' In the absence of a precomputed mask, a simple pipeline can be generated diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py index 9c3d3928e5..1c2f7ce2cb 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -9,7 +9,8 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBasalGmVolume=dict(argstr='--inputBasalGmVolume %s', @@ -29,7 +30,8 @@ def test_BRAINSPosteriorToContinuousClass_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSPosteriorToContinuousClass.input_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py index 273d140224..1168288a8b 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -29,7 +29,8 @@ def test_BRAINSTalairach_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -40,7 +41,8 @@ def test_BRAINSTalairach_inputs(): 
outputGrid=dict(argstr='--outputGrid %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSTalairach.input_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py index daee0ded09..6638bd2e5e 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -13,7 +13,8 @@ def test_BRAINSTalairachMask_inputs(): ), hemisphereMode=dict(argstr='--hemisphereMode %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -25,7 +26,8 @@ def test_BRAINSTalairachMask_inputs(): ), talairachParameters=dict(argstr='--talairachParameters %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSTalairachMask.input_spec() diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py index f5275319a6..cfee35cd08 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -9,7 +9,8 @@ def test_GenerateEdgeMapImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMRVolumes=dict(argstr='--inputMRVolumes %s...', @@ -30,7 +31,8 @@ def test_GenerateEdgeMapImage_inputs(): outputMaximumGradientImage=dict(argstr='--outputMaximumGradientImage %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), 
upperPercentileMatching=dict(argstr='--upperPercentileMatching %f', ), diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py index 262ef2c485..48c4aca838 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -9,7 +9,8 @@ def test_GeneratePurePlugMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputImageModalities=dict(argstr='--inputImageModalities %s...', @@ -20,7 +21,8 @@ def test_GeneratePurePlugMask_inputs(): outputMaskFile=dict(argstr='--outputMaskFile %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--threshold %f', ), diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py index c2d76581be..ac921cb7f6 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -11,7 +11,8 @@ def test_HistogramMatchingFilter_inputs(): ), histogramAlgorithm=dict(argstr='--histogramAlgorithm %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', @@ -29,7 +30,8 @@ def test_HistogramMatchingFilter_inputs(): ), referenceVolume=dict(argstr='--referenceVolume %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py 
b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py index c7bac4f4f6..e9ddc61051 100644 --- a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -11,14 +11,16 @@ def test_SimilarityIndex_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputManualVolume=dict(argstr='--inputManualVolume %s', ), outputCSVFilename=dict(argstr='--outputCSVFilename %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresholdInterval=dict(argstr='--thresholdInterval %f', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py index b355239d30..0a7e27fee1 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -20,7 +20,8 @@ def test_DWIConvert_inputs(): gradientVectorFile=dict(argstr='--gradientVectorFile %s', hash_files=False, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBValues=dict(argstr='--inputBValues %s', @@ -45,7 +46,8 @@ def test_DWIConvert_inputs(): ), smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transposeInputBVectors=dict(argstr='--transposeInputBVectors ', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py index 209c267fdc..5b88f17a54 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py +++ 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -11,7 +11,8 @@ def test_compareTractInclusion_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), numberOfPoints=dict(argstr='--numberOfPoints %d', @@ -20,7 +21,8 @@ def test_compareTractInclusion_inputs(): ), standardFiber=dict(argstr='--standardFiber %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), testFiber=dict(argstr='--testFiber %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py index c3bff362a2..4cbbf0e4ea 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -11,7 +11,8 @@ def test_dtiaverage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputs=dict(argstr='--inputs %s...', @@ -19,7 +20,8 @@ def test_dtiaverage_inputs(): tensor_output=dict(argstr='--tensor_output %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py index 74c63221dc..80a0f78fe0 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -31,7 +31,8 @@ def test_dtiestim_inputs(): idwi=dict(argstr='--idwi %s', hash_files=False, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), method=dict(argstr='--method 
%s', @@ -47,7 +48,8 @@ def test_dtiestim_inputs(): tensor_output=dict(argstr='--tensor_output %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--threshold %d', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py index f6822c5558..6090c81526 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -44,7 +44,8 @@ def test_dtiprocess_inputs(): ), hField=dict(argstr='--hField ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interpolation=dict(argstr='--interpolation %s', @@ -83,7 +84,8 @@ def test_dtiprocess_inputs(): ), sigma=dict(argstr='--sigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py index b6a904d649..ca4ceaa0fa 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -9,7 +9,8 @@ def test_extractNrrdVectorIndex_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -21,7 +22,8 @@ def test_extractNrrdVectorIndex_inputs(): ), setImageOrientation=dict(argstr='--setImageOrientation %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), vectorIndex=dict(argstr='--vectorIndex %d', ), diff --git 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py index c9a1a591cc..1caaeaac78 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -11,7 +11,8 @@ def test_gtractAnisotropyMap_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTensorVolume=dict(argstr='--inputTensorVolume %s', @@ -21,7 +22,8 @@ def test_gtractAnisotropyMap_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractAnisotropyMap.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py index 318ad5fea4..574119212f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -13,7 +13,8 @@ def test_gtractAverageBvalues_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -23,7 +24,8 @@ def test_gtractAverageBvalues_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractAverageBvalues.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py index 0b3f2ec979..e3c4f337b7 
100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -13,7 +13,8 @@ def test_gtractClipAnisotropy_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -23,7 +24,8 @@ def test_gtractClipAnisotropy_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractClipAnisotropy.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py index 9453af96ea..a8aab5aedf 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -18,7 +18,8 @@ def test_gtractCoRegAnatomy_inputs(): gridSize=dict(argstr='--gridSize %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', @@ -50,7 +51,8 @@ def test_gtractCoRegAnatomy_inputs(): ), spatialScale=dict(argstr='--spatialScale %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py index 68d85d66b6..276952f0f9 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -11,7 +11,8 @@ def 
test_gtractConcatDwi_inputs(): ), ignoreOrigins=dict(argstr='--ignoreOrigins ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s...', @@ -21,7 +22,8 @@ def test_gtractConcatDwi_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractConcatDwi.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py index 13fa034804..dfb9a73889 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -9,7 +9,8 @@ def test_gtractCopyImageOrientation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', @@ -21,7 +22,8 @@ def test_gtractCopyImageOrientation_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractCopyImageOrientation.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py index fa7444c2a2..c367284ef3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -17,7 +17,8 @@ def test_gtractCoregBvalues_inputs(): ), fixedVolumeIndex=dict(argstr='--fixedVolumeIndex %d', ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), maximumStepSize=dict(argstr='--maximumStepSize %f', @@ -46,7 +47,8 @@ def test_gtractCoregBvalues_inputs(): ), spatialScale=dict(argstr='--spatialScale %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractCoregBvalues.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py index 6cdb0ca6b8..f50f8c47aa 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -11,7 +11,8 @@ def test_gtractCostFastMarching_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', @@ -34,7 +35,8 @@ def test_gtractCostFastMarching_inputs(): ), stoppingValue=dict(argstr='--stoppingValue %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractCostFastMarching.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py index 1d2da52d72..bd461a549b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -9,7 +9,8 @@ def test_gtractCreateGuideFiber_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputFiber=dict(argstr='--inputFiber %s', @@ -21,7 +22,8 @@ def 
test_gtractCreateGuideFiber_inputs(): outputFiber=dict(argstr='--outputFiber %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py index 9b30f161c6..0e6676c0fc 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -11,7 +11,8 @@ def test_gtractFastMarchingTracking_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', @@ -37,7 +38,8 @@ def test_gtractFastMarchingTracking_inputs(): ), startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trackingThreshold=dict(argstr='--trackingThreshold %f', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py index e3db9ee6d2..15b4876080 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -19,7 +19,8 @@ def test_gtractFiberTracking_inputs(): ), guidedCurvatureThreshold=dict(argstr='--guidedCurvatureThreshold %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', @@ -57,7 +58,8 @@ def test_gtractFiberTracking_inputs(): ), tendG=dict(argstr='--tendG 
%f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trackingMethod=dict(argstr='--trackingMethod %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py index a78b5bb9f9..0ae9c227a8 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -9,7 +9,8 @@ def test_gtractImageConformity_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', @@ -21,7 +22,8 @@ def test_gtractImageConformity_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractImageConformity.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py index de662d068b..36363523d0 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -9,7 +9,8 @@ def test_gtractInvertBSplineTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', @@ -24,7 +25,8 @@ def test_gtractInvertBSplineTransform_inputs(): outputTransform=dict(argstr='--outputTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + 
nohash=True, ), ) inputs = gtractInvertBSplineTransform.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py index 10b14f9def..4e7cdebdd3 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -13,7 +13,8 @@ def test_gtractInvertDisplacementField_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), numberOfThreads=dict(argstr='--numberOfThreads %d', @@ -23,7 +24,8 @@ def test_gtractInvertDisplacementField_inputs(): ), subsamplingFactor=dict(argstr='--subsamplingFactor %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractInvertDisplacementField.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py index a995a4e4cd..8cc8cc1e4b 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -9,7 +9,8 @@ def test_gtractInvertRigidTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTransform=dict(argstr='--inputTransform %s', @@ -19,7 +20,8 @@ def test_gtractInvertRigidTransform_inputs(): outputTransform=dict(argstr='--outputTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractInvertRigidTransform.input_spec() diff 
--git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py index e9b668a716..4cc895fb06 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -9,7 +9,8 @@ def test_gtractResampleAnisotropy_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', @@ -23,7 +24,8 @@ def test_gtractResampleAnisotropy_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py index edc706cf4e..c77c067c51 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -9,7 +9,8 @@ def test_gtractResampleB0_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', @@ -23,7 +24,8 @@ def test_gtractResampleB0_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py index 860e96fd09..5fd928f854 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -9,7 +9,8 @@ def test_gtractResampleCodeImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputCodeVolume=dict(argstr='--inputCodeVolume %s', @@ -23,7 +24,8 @@ def test_gtractResampleCodeImage_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py index 3ecd5742e5..569320ba6f 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -11,7 +11,8 @@ def test_gtractResampleDWIInPlace_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), imageOutputSize=dict(argstr='--imageOutputSize %s', @@ -31,7 +32,8 @@ def test_gtractResampleDWIInPlace_inputs(): ), referenceVolume=dict(argstr='--referenceVolume %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warpDWITransform=dict(argstr='--warpDWITransform %s', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py index 34997e8799..b3c4abc11a 100644 --- 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -9,7 +9,8 @@ def test_gtractResampleFibers_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputForwardDeformationFieldVolume=dict(argstr='--inputForwardDeformationFieldVolume %s', @@ -23,7 +24,8 @@ def test_gtractResampleFibers_inputs(): outputTract=dict(argstr='--outputTract %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py index 0cd3f101db..af662c076c 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -18,7 +18,8 @@ def test_gtractTensor_inputs(): ignoreIndex=dict(argstr='--ignoreIndex %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -39,7 +40,8 @@ def test_gtractTensor_inputs(): ), size=dict(argstr='--size %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractTensor.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py index 7feeabae6f..8f074267fe 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py +++ 
b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -9,7 +9,8 @@ def test_gtractTransformToDisplacementField_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', @@ -21,7 +22,8 @@ def test_gtractTransformToDisplacementField_inputs(): outputDeformationFieldVolume=dict(argstr='--outputDeformationFieldVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = gtractTransformToDisplacementField.input_spec() diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py index 7ded2e168c..8efcf5d131 100644 --- a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -9,7 +9,8 @@ def test_maxcurvature_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image=dict(argstr='--image %s', @@ -19,7 +20,8 @@ def test_maxcurvature_inputs(): ), sigma=dict(argstr='--sigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py index 0927be112c..0342389ba4 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -23,7 +23,8 @@ def test_UKFTractography_inputs(): ), 
fullTensorModel=dict(argstr='--fullTensorModel ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), labels=dict(argstr='--labels %s', @@ -71,7 +72,8 @@ def test_UKFTractography_inputs(): ), storeGlyphs=dict(argstr='--storeGlyphs ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tracts=dict(argstr='--tracts %s', hash_files=False, diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py index 11c67161dc..8da03c52fc 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -20,7 +20,8 @@ def test_fiberprocess_inputs(): ), h_field=dict(argstr='--h_field %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), index_space=dict(argstr='--index_space ', @@ -33,7 +34,8 @@ def test_fiberprocess_inputs(): ), tensor_volume=dict(argstr='--tensor_volume %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py index 613664bb15..4d241dc205 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -11,10 +11,12 @@ def test_fiberstats_inputs(): ), fiber_file=dict(argstr='--fiber_file %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py index 3dda03843f..b1a503a711 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -13,7 +13,8 @@ def test_fibertrack_inputs(): ), force=dict(argstr='--force ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_roi_file=dict(argstr='--input_roi_file %s', @@ -35,7 +36,8 @@ def test_fibertrack_inputs(): ), target_label=dict(argstr='--target_label %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py index 446f520077..ca3669e020 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -9,7 +9,8 @@ def test_CannyEdge_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -19,7 +20,8 @@ def test_CannyEdge_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upperThreshold=dict(argstr='--upperThreshold %f', ), diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py 
b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py index 6014c01238..366fd626ab 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -15,7 +15,8 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialModel=dict(argstr='--initialModel %s', @@ -32,7 +33,8 @@ def test_CannySegmentationLevelSetImageFilter_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = CannySegmentationLevelSetImageFilter.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py index 6690a83005..ce2c59dff9 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -9,7 +9,8 @@ def test_DilateImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolume=dict(argstr='--inputMaskVolume %s', @@ -21,7 +22,8 @@ def test_DilateImage_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DilateImage.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py index 80c7fe1636..1e95808c9f 100644 --- 
a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -9,7 +9,8 @@ def test_DilateMask_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', @@ -23,7 +24,8 @@ def test_DilateMask_inputs(): ), sizeStructuralElement=dict(argstr='--sizeStructuralElement %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DilateMask.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py index ad886bd5c5..a9136fcbd5 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -9,7 +9,8 @@ def test_DistanceMaps_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputLabelVolume=dict(argstr='--inputLabelVolume %s', @@ -21,7 +22,8 @@ def test_DistanceMaps_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DistanceMaps.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py index 017f27c3af..7504cc311e 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -9,14 +9,16 @@ def test_DumpBinaryTrainingVectors_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputHeaderFilename=dict(argstr='--inputHeaderFilename %s', ), inputVectorFilename=dict(argstr='--inputVectorFilename %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DumpBinaryTrainingVectors.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py index c5cbd6fc35..659c88471c 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -9,7 +9,8 @@ def test_ErodeImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolume=dict(argstr='--inputMaskVolume %s', @@ -21,7 +22,8 @@ def test_ErodeImage_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ErodeImage.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py index 6e73eb584a..e81e2cf9d9 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -9,7 +9,8 @@ def test_FlippedDifference_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolume=dict(argstr='--inputMaskVolume %s', @@ -19,7 +20,8 @@ def test_FlippedDifference_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FlippedDifference.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py index 5a3bcbd888..234494c3f4 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -9,7 +9,8 @@ def test_GenerateBrainClippedImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputImg=dict(argstr='--inputImg %s', @@ -21,7 +22,8 @@ def test_GenerateBrainClippedImage_inputs(): outputFileName=dict(argstr='--outputFileName %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GenerateBrainClippedImage.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py index ddca6453e8..d073e15ab0 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -11,7 +11,8 @@ def test_GenerateSummedGradientImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='--inputVolume1 %s', @@ -23,7 +24,8 @@ def test_GenerateSummedGradientImage_inputs(): outputFileName=dict(argstr='--outputFileName %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = 
GenerateSummedGradientImage.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py index 09915d813c..f9fb21b9da 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -9,7 +9,8 @@ def test_GenerateTestImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -21,7 +22,8 @@ def test_GenerateTestImage_inputs(): ), outputVolumeSize=dict(argstr='--outputVolumeSize %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upperBoundOfOutputVolume=dict(argstr='--upperBoundOfOutputVolume %f', ), diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py index 625a0fe338..63215e0b8a 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -11,7 +11,8 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -21,7 +22,8 @@ def test_GradientAnisotropicDiffusionImageFilter_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timeStep=dict(argstr='--timeStep %f', ), diff --git 
a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py index 128d3d62d1..6920812aae 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -13,7 +13,8 @@ def test_HammerAttributeCreator_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputCSFVolume=dict(argstr='--inputCSFVolume %s', @@ -24,7 +25,8 @@ def test_HammerAttributeCreator_inputs(): ), outputVolumeBase=dict(argstr='--outputVolumeBase %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = HammerAttributeCreator.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py index c029f33409..c23362562c 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -9,7 +9,8 @@ def test_NeighborhoodMean_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolume=dict(argstr='--inputMaskVolume %s', @@ -21,7 +22,8 @@ def test_NeighborhoodMean_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = NeighborhoodMean.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py index 4a80af0377..ff2cdd38af 100644 --- 
a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -9,7 +9,8 @@ def test_NeighborhoodMedian_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolume=dict(argstr='--inputMaskVolume %s', @@ -21,7 +22,8 @@ def test_NeighborhoodMedian_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = NeighborhoodMedian.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py index 03ffe65d04..45c55236d0 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -9,7 +9,8 @@ def test_STAPLEAnalysis_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputDimension=dict(argstr='--inputDimension %d', @@ -19,7 +20,8 @@ def test_STAPLEAnalysis_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = STAPLEAnalysis.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py index de0816897c..d351d8d56f 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -9,7 +9,8 @@ def 
test_TextureFromNoiseImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputRadius=dict(argstr='--inputRadius %d', @@ -19,7 +20,8 @@ def test_TextureFromNoiseImageFilter_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TextureFromNoiseImageFilter.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py index de8a74c45e..771fa52f03 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -11,7 +11,8 @@ def test_TextureMeasureFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolume=dict(argstr='--inputMaskVolume %s', @@ -23,7 +24,8 @@ def test_TextureMeasureFilter_inputs(): outputFilename=dict(argstr='--outputFilename %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = TextureMeasureFilter.input_spec() diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py index f20b6b5ca7..e237ca41db 100644 --- a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -11,7 +11,8 @@ def test_UnbiasedNonLocalMeans_inputs(): ), hp=dict(argstr='--hp %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + 
nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_UnbiasedNonLocalMeans_inputs(): ), sigma=dict(argstr='--sigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = UnbiasedNonLocalMeans.input_spec() diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py index 83aaec5ea3..ed643de764 100644 --- a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -13,7 +13,8 @@ def test_scalartransform_inputs(): ), h_field=dict(argstr='--h_field ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_image=dict(argstr='--input_image %s', @@ -25,7 +26,8 @@ def test_scalartransform_inputs(): output_image=dict(argstr='--output_image %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformation=dict(argstr='--transformation %s', hash_files=False, diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py index 9aee3d80d1..3df134093f 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -27,7 +27,8 @@ def test_BRAINSDemonWarp_inputs(): ), histogramMatch=dict(argstr='--histogramMatch ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', @@ -96,7 +97,8 @@ def test_BRAINSDemonWarp_inputs(): ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), - 
terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py index 7447f574af..00248d9093 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -38,7 +38,8 @@ def test_BRAINSFit_inputs(): ), histogramMatch=dict(argstr='--histogramMatch ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialTransform=dict(argstr='--initialTransform %s', @@ -130,7 +131,8 @@ def test_BRAINSFit_inputs(): strippedOutputTransform=dict(argstr='--strippedOutputTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', sep=',', diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py index 6e10f86ca0..a2444018e2 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -16,7 +16,8 @@ def test_BRAINSResample_inputs(): gridSpacing=dict(argstr='--gridSpacing %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -34,7 +35,8 @@ def test_BRAINSResample_inputs(): ), referenceVolume=dict(argstr='--referenceVolume %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warpTransform=dict(argstr='--warpTransform %s', ), diff --git 
a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py index 4c90eaf915..c394a6cee1 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -9,7 +9,8 @@ def test_BRAINSResize_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -21,7 +22,8 @@ def test_BRAINSResize_inputs(): ), scaleFactor=dict(argstr='--scaleFactor %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSResize.input_spec() diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py index bc0ead4e53..928fa49e61 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -13,7 +13,8 @@ def test_BRAINSTransformFromFiducials_inputs(): ), fixedLandmarksFile=dict(argstr='--fixedLandmarksFile %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), movingLandmarks=dict(argstr='--movingLandmarks %s...', @@ -25,7 +26,8 @@ def test_BRAINSTransformFromFiducials_inputs(): saveTransform=dict(argstr='--saveTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', ), diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py index 
96e28abafa..1fd9c45b34 100644 --- a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -27,7 +27,8 @@ def test_VBRAINSDemonWarp_inputs(): ), histogramMatch=dict(argstr='--histogramMatch ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', @@ -96,7 +97,8 @@ def test_VBRAINSDemonWarp_inputs(): ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py index 110cfcef77..d07166b086 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -34,7 +34,8 @@ def test_BRAINSABC_inputs(): gridSize=dict(argstr='--gridSize %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), implicitOutputs=dict(argstr='--implicitOutputs %s...', @@ -84,7 +85,8 @@ def test_BRAINSABC_inputs(): ), subjectIntermodeTransformType=dict(argstr='--subjectIntermodeTransformType %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useKNN=dict(argstr='--useKNN ', ), diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py index 30ffbaa945..865ce1fe93 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py +++ 
b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -41,7 +41,8 @@ def test_BRAINSConstellationDetector_inputs(): ), houghEyeDetectorMode=dict(argstr='--houghEyeDetectorMode %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputLandmarksEMSP=dict(argstr='--inputLandmarksEMSP %s', @@ -98,7 +99,8 @@ def test_BRAINSConstellationDetector_inputs(): ), rpc=dict(argstr='--rpc %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py index 5a8f506310..b3962fd835 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -18,7 +18,8 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): foregroundPriors=dict(argstr='--foregroundPriors %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inclusionThreshold=dict(argstr='--inclusionThreshold %f', @@ -30,7 +31,8 @@ def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): priorLabelCodes=dict(argstr='--priorLabelCodes %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSCreateLabelMapFromProbabilityMaps.input_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py index 6e7652979e..5e8a6f99bd 100644 --- 
a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -21,7 +21,8 @@ def test_BRAINSCut_inputs(): ), histogramEqualization=dict(argstr='--histogramEqualization ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), method=dict(argstr='--method %s', @@ -38,7 +39,8 @@ def test_BRAINSCut_inputs(): ), randomTreeDepth=dict(argstr='--randomTreeDepth %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trainModel=dict(argstr='--trainModel ', ), diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py index 1cd57a8267..38035b7903 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -9,7 +9,8 @@ def test_BRAINSMultiSTAPLE_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputCompositeT1Volume=dict(argstr='--inputCompositeT1Volume %s', @@ -30,7 +31,8 @@ def test_BRAINSMultiSTAPLE_inputs(): ), skipResampling=dict(argstr='--skipResampling ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSMultiSTAPLE.input_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py index fe1ce50a3d..577f01aba6 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -15,7 +15,8 @@ def test_BRAINSROIAuto_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -34,7 +35,8 @@ def test_BRAINSROIAuto_inputs(): ), outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresholdCorrectionFactor=dict(argstr='--thresholdCorrectionFactor %f', ), diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py index 2e754dd1b1..f9c7d4a191 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -9,7 +9,8 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', @@ -31,7 +32,8 @@ def test_BinaryMaskEditorBasedOnLandmarks_inputs(): setCutDirectionForObliquePlane=dict(argstr='--setCutDirectionForObliquePlane %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BinaryMaskEditorBasedOnLandmarks.input_spec() diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py index 4ebd23e30f..cd29a7fd82 100644 --- a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -13,7 +13,8 @@ def test_ESLR_inputs(): ), high=dict(argstr='--high %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + 
nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -31,7 +32,8 @@ def test_ESLR_inputs(): ), safetySize=dict(argstr='--safetySize %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ESLR.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py index 2d50880990..a9c60b1229 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -9,14 +9,16 @@ def test_DWICompare_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='--inputVolume1 %s', ), inputVolume2=dict(argstr='--inputVolume2 %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWICompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py index 437d2d9087..271085be8c 100644 --- a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -11,14 +11,16 @@ def test_DWISimpleCompare_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='--inputVolume1 %s', ), inputVolume2=dict(argstr='--inputVolume2 %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWISimpleCompare.input_spec() diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py index 
0ae702b805..83d0194964 100644 --- a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -9,7 +9,8 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputCassifiedVolume=dict(argstr='--inputCassifiedVolume %s', @@ -17,7 +18,8 @@ def test_GenerateCsfClippedFromClassifiedImage_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GenerateCsfClippedFromClassifiedImage.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py index 9636e284f7..5c5db3a206 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -14,7 +14,8 @@ def test_BRAINSAlignMSP_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -33,7 +34,8 @@ def test_BRAINSAlignMSP_inputs(): resultsDir=dict(argstr='--resultsDir %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py index d08f270b5e..a6868c4e61 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py +++ 
b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -13,7 +13,8 @@ def test_BRAINSClipInferior_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -23,7 +24,8 @@ def test_BRAINSClipInferior_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSClipInferior.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py index ff5447109c..80d8158e5e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -11,7 +11,8 @@ def test_BRAINSConstellationModeler_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTrainingList=dict(argstr='--inputTrainingList %s', @@ -35,7 +36,8 @@ def test_BRAINSConstellationModeler_inputs(): ), saveOptimizedLandmarks=dict(argstr='--saveOptimizedLandmarks ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py index b9286e8835..59f4b66f60 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -11,7 +11,8 @@ def test_BRAINSEyeDetector_inputs(): 
environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -21,7 +22,8 @@ def test_BRAINSEyeDetector_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSEyeDetector.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py index d8288ff86f..b1f25c5be1 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -9,7 +9,8 @@ def test_BRAINSInitializedControlPoints_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -27,7 +28,8 @@ def test_BRAINSInitializedControlPoints_inputs(): splineGridSize=dict(argstr='--splineGridSize %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSInitializedControlPoints.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py index 332534edf8..edb4119b52 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -9,7 +9,8 @@ def test_BRAINSLandmarkInitializer_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputFixedLandmarkFilename=dict(argstr='--inputFixedLandmarkFilename %s', @@ -21,7 +22,8 @@ def test_BRAINSLandmarkInitializer_inputs(): outputTransformFilename=dict(argstr='--outputTransformFilename %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSLandmarkInitializer.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py index 9bcf5409c7..8d8bfdab55 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -9,14 +9,16 @@ def test_BRAINSLinearModelerEPCA_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTrainingList=dict(argstr='--inputTrainingList %s', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSLinearModelerEPCA.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py index 048b66b32b..0210db6299 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -9,7 +9,8 @@ def test_BRAINSLmkTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputFixedLandmarks=dict(argstr='--inputFixedLandmarks %s', @@ -28,7 +29,8 @@ def 
test_BRAINSLmkTransform_inputs(): outputResampledVolume=dict(argstr='--outputResampledVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSLmkTransform.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py index 31548abd1c..20b0a4467e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -19,7 +19,8 @@ def test_BRAINSMush_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputFirstVolume=dict(argstr='--inputFirstVolume %s', @@ -46,7 +47,8 @@ def test_BRAINSMush_inputs(): seed=dict(argstr='--seed %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upperThresholdFactor=dict(argstr='--upperThresholdFactor %f', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py index a4fd3abf5d..81d0f89d08 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -9,7 +9,8 @@ def test_BRAINSSnapShotWriter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBinaryVolumes=dict(argstr='--inputBinaryVolumes %s...', @@ -31,7 +32,8 @@ def test_BRAINSSnapShotWriter_inputs(): outputFilename=dict(argstr='--outputFilename %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = 
BRAINSSnapShotWriter.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py index 5c168fbb6a..789b1f4a42 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -12,7 +12,8 @@ def test_BRAINSTransformConvert_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTransform=dict(argstr='--inputTransform %s', @@ -26,7 +27,8 @@ def test_BRAINSTransformConvert_inputs(): ), referenceVolume=dict(argstr='--referenceVolume %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSTransformConvert.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py index 364747314a..212b60c3d5 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -17,7 +17,8 @@ def test_BRAINSTrimForegroundInDirection_inputs(): ), headSizeLimit=dict(argstr='--headSizeLimit %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -29,7 +30,8 @@ def test_BRAINSTrimForegroundInDirection_inputs(): outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BRAINSTrimForegroundInDirection.input_spec() diff --git 
a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py index bf91238a1d..42064ce399 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -9,7 +9,8 @@ def test_CleanUpOverlapLabels_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBinaryVolumes=dict(argstr='--inputBinaryVolumes %s...', @@ -17,7 +18,8 @@ def test_CleanUpOverlapLabels_inputs(): outputBinaryVolumes=dict(argstr='--outputBinaryVolumes %s...', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = CleanUpOverlapLabels.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py index d15f647808..9c0d44ea96 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -39,7 +39,8 @@ def test_FindCenterOfBrain_inputs(): ), headSizeLimit=dict(argstr='--headSizeLimit %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), imageMask=dict(argstr='--imageMask %s', @@ -50,7 +51,8 @@ def test_FindCenterOfBrain_inputs(): ), otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = FindCenterOfBrain.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py 
b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py index cda3720812..2b0a376b28 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -9,7 +9,8 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolumes=dict(argstr='--inputVolumes %s...', @@ -19,7 +20,8 @@ def test_GenerateLabelMapFromProbabilityMap_inputs(): outputLabelVolume=dict(argstr='--outputLabelVolume %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GenerateLabelMapFromProbabilityMap.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py index 6823172b50..7e0f0c7b7c 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -9,7 +9,8 @@ def test_ImageRegionPlotter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputBinaryROIVolume=dict(argstr='--inputBinaryROIVolume %s', @@ -24,7 +25,8 @@ def test_ImageRegionPlotter_inputs(): ), outputJointHistogramData=dict(argstr='--outputJointHistogramData %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useIntensityForHistogram=dict(argstr='--useIntensityForHistogram ', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py 
b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py index 9b8df83880..ee2f544417 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -9,7 +9,8 @@ def test_JointHistogram_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMaskVolumeInXAxis=dict(argstr='--inputMaskVolumeInXAxis %s', @@ -22,7 +23,8 @@ def test_JointHistogram_inputs(): ), outputJointHistogramImage=dict(argstr='--outputJointHistogramImage %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py index 1cf264afcc..228dfe4234 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -9,7 +9,8 @@ def test_ShuffleVectorsModule_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVectorFileBaseName=dict(argstr='--inputVectorFileBaseName %s', @@ -19,7 +20,8 @@ def test_ShuffleVectorsModule_inputs(): ), resampleProportion=dict(argstr='--resampleProportion %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ShuffleVectorsModule.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py index 927a33fd04..3885c5ac5e 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py +++ 
b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -9,7 +9,8 @@ def test_fcsv_to_hdf5_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), landmarkGlobPattern=dict(argstr='--landmarkGlobPattern %s', @@ -24,7 +25,8 @@ def test_fcsv_to_hdf5_inputs(): ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), versionID=dict(argstr='--versionID %s', ), diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py index abb847e478..aedcbb3e63 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -9,7 +9,8 @@ def test_insertMidACPCpoint_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputLandmarkFile=dict(argstr='--inputLandmarkFile %s', @@ -17,7 +18,8 @@ def test_insertMidACPCpoint_inputs(): outputLandmarkFile=dict(argstr='--outputLandmarkFile %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = insertMidACPCpoint.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py index 3122d627ed..ef1668861f 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -9,7 +9,8 @@ def test_landmarksConstellationAligner_inputs(): environ=dict(nohash=True, 
usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputLandmarksPaired=dict(argstr='--inputLandmarksPaired %s', @@ -17,7 +18,8 @@ def test_landmarksConstellationAligner_inputs(): outputLandmarksPaired=dict(argstr='--outputLandmarksPaired %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = landmarksConstellationAligner.input_spec() diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py index 49772ca873..4bf7c61ab7 100644 --- a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -11,7 +11,8 @@ def test_landmarksConstellationWeights_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTemplateModel=dict(argstr='--inputTemplateModel %s', @@ -21,7 +22,8 @@ def test_landmarksConstellationWeights_inputs(): outputWeightsList=dict(argstr='--outputWeightsList %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = landmarksConstellationWeights.input_spec() diff --git a/nipype/interfaces/setup.py b/nipype/interfaces/setup.py deleted file mode 100644 index 4a15082b2b..0000000000 --- a/nipype/interfaces/setup.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, division, unicode_literals, absolute_import - - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import 
Configuration - - config = Configuration('interfaces', parent_package, top_path) - - config.add_subpackage('afni') - config.add_subpackage('ants') - config.add_subpackage('camino') - config.add_subpackage('camino2trackvis') - config.add_subpackage('cmtk') - config.add_subpackage('diffusion_toolkit') - config.add_subpackage('dipy') - config.add_subpackage('elastix') - config.add_subpackage('freesurfer') - config.add_subpackage('fsl') - config.add_subpackage('minc') - config.add_subpackage('mipav') - config.add_subpackage('mne') - config.add_subpackage('mrtrix') - config.add_subpackage('mrtrix3') - config.add_subpackage('niftyfit') - config.add_subpackage('niftyreg') - config.add_subpackage('niftyseg') - config.add_subpackage('nipy') - config.add_subpackage('spm') - config.add_subpackage('slicer') - - config.add_data_dir('script_templates') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py index 649b1db802..d3c499df21 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -9,7 +9,8 @@ def test_DTIexport_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputTensor=dict(argstr='%s', @@ -19,7 +20,8 @@ def test_DTIexport_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DTIexport.input_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py index 05f18b318e..cd4ae462f4 100644 --- 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -9,7 +9,8 @@ def test_DTIimport_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputFile=dict(argstr='%s', @@ -19,7 +20,8 @@ def test_DTIimport_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), testingmode=dict(argstr='--testingmode ', ), diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py index 9cf7cc1008..2a97030210 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -11,7 +11,8 @@ def test_DWIJointRicianLMMSEFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_DWIJointRicianLMMSEFilter_inputs(): rf=dict(argstr='--rf %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWIJointRicianLMMSEFilter.input_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py index 97c015d7f4..e71ae3106a 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -13,7 +13,8 @@ def test_DWIRicianLMMSEFilter_inputs(): ), hrf=dict(argstr='--hrf %f', ), - ignore_exception=dict(nohash=True, + 
ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -39,7 +40,8 @@ def test_DWIRicianLMMSEFilter_inputs(): rf=dict(argstr='--rf %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uav=dict(argstr='--uav ', ), diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py index ba807a5052..938855c9cf 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -11,7 +11,8 @@ def test_DWIToDTIEstimation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_DWIToDTIEstimation_inputs(): ), shiftNeg=dict(argstr='--shiftNeg ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWIToDTIEstimation.input_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py index 0b997a9c40..80b9d9b745 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -11,7 +11,8 @@ def test_DiffusionTensorScalarMeasurements_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -21,7 +22,8 @@ def test_DiffusionTensorScalarMeasurements_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DiffusionTensorScalarMeasurements.input_spec() diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py index 0deaf6543e..247cd2612a 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -9,7 +9,8 @@ def test_DiffusionWeightedVolumeMasking_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -23,7 +24,8 @@ def test_DiffusionWeightedVolumeMasking_inputs(): ), removeislands=dict(argstr='--removeislands ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresholdMask=dict(argstr='%s', hash_files=False, diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py index d54f99f55d..423ef009be 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -26,7 +26,8 @@ def test_ResampleDTIVolume_inputs(): ), hfieldtype=dict(argstr='--hfieldtype %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_center=dict(argstr='--image_center %s', @@ -58,7 +59,8 @@ def test_ResampleDTIVolume_inputs(): ), spline_order=dict(argstr='--spline_order %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='--transform %s', ), diff --git 
a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py index 55f127a6c9..2dc06051ca 100644 --- a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -18,7 +18,8 @@ def test_TractographyLabelMapSeeding_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputroi=dict(argstr='--inputroi %s', @@ -46,7 +47,8 @@ def test_TractographyLabelMapSeeding_inputs(): ), stoppingvalue=dict(argstr='--stoppingvalue %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useindexspace=dict(argstr='--useindexspace ', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py index 7914b71736..6d4ab7458b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -9,7 +9,8 @@ def test_AddScalarVolumes_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', @@ -24,7 +25,8 @@ def test_AddScalarVolumes_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = AddScalarVolumes.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py index eed01c2996..5a0c99453a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py 
+++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -16,10 +16,12 @@ def test_CastScalarVolume_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), type=dict(argstr='--type %s', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py index be6ae4ba84..0fe407f1de 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -12,7 +12,8 @@ def test_CheckerBoardFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_CheckerBoardFilter_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = CheckerBoardFilter.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py index 01c28d842f..7139b3f13a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -11,7 +11,8 @@ def test_CurvatureAnisotropicDiffusion_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -23,7 +24,8 @@ def test_CurvatureAnisotropicDiffusion_inputs(): 
hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timeStep=dict(argstr='--timeStep %f', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py index 8ec1aa362c..f41d25c28d 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -18,14 +18,16 @@ def test_ExtractSkeleton_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), numPoints=dict(argstr='--numPoints %d', ), pointsFile=dict(argstr='--pointsFile %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), type=dict(argstr='--type %s', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py index c5aa979bc6..0ef6a909ca 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -9,7 +9,8 @@ def test_GaussianBlurImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -21,7 +22,8 @@ def test_GaussianBlurImageFilter_inputs(): ), sigma=dict(argstr='--sigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GaussianBlurImageFilter.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py index ce307bde81..a041642006 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -11,7 +11,8 @@ def test_GradientAnisotropicDiffusion_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -23,7 +24,8 @@ def test_GradientAnisotropicDiffusion_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timeStep=dict(argstr='--timeStep %f', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py index 115c25ceab..e42fe05e13 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -9,7 +9,8 @@ def test_GrayscaleFillHoleImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -19,7 +20,8 @@ def test_GrayscaleFillHoleImageFilter_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GrayscaleFillHoleImageFilter.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py index 12c4c5402f..6f1257ee41 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -9,7 +9,8 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -19,7 +20,8 @@ def test_GrayscaleGrindPeakImageFilter_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = GrayscaleGrindPeakImageFilter.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py index 00ef1b26dc..36223bd829 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -9,7 +9,8 @@ def test_HistogramMatching_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -26,7 +27,8 @@ def test_HistogramMatching_inputs(): referenceVolume=dict(argstr='%s', position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--threshold ', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py index 9640cf5457..2ba011470b 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -21,10 +21,12 @@ def test_ImageLabelCombine_inputs(): ), first_overwrites=dict(argstr='--first_overwrites ', ), - 
ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ImageLabelCombine.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py index c9f6c1bd8a..77a20ffe64 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -19,14 +19,16 @@ def test_MaskScalarVolume_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), label=dict(argstr='--label %d', ), replace=dict(argstr='--replace %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MaskScalarVolume.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py index 07f11bddae..d0409009ec 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -9,7 +9,8 @@ def test_MedianImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -22,7 +23,8 @@ def test_MedianImageFilter_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MedianImageFilter.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py 
b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py index f53fe36ef0..ed171521a8 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -9,7 +9,8 @@ def test_MultiplyScalarVolumes_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', @@ -24,7 +25,8 @@ def test_MultiplyScalarVolumes_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MultiplyScalarVolumes.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py index a9cf9f449d..8397f5f1c5 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -16,7 +16,8 @@ def test_N4ITKBiasFieldCorrection_inputs(): histogramsharpening=dict(argstr='--histogramsharpening %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputimage=dict(argstr='--inputimage %s', @@ -39,7 +40,8 @@ def test_N4ITKBiasFieldCorrection_inputs(): ), splinedistance=dict(argstr='--splinedistance %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), weightimage=dict(argstr='--weightimage %s', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py index d317e139f8..23be2e6372 100644 --- 
a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -24,7 +24,8 @@ def test_ResampleScalarVectorDWIVolume_inputs(): ), hfieldtype=dict(argstr='--hfieldtype %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_center=dict(argstr='--image_center %s', @@ -56,7 +57,8 @@ def test_ResampleScalarVectorDWIVolume_inputs(): ), spline_order=dict(argstr='--spline_order %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform=dict(argstr='--transform %s', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py index 78fd010e43..b46b24dc6a 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -9,7 +9,8 @@ def test_SubtractScalarVolumes_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', @@ -24,7 +25,8 @@ def test_SubtractScalarVolumes_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SubtractScalarVolumes.input_spec() diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py index 840f527211..49c3a67455 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -16,14 +16,16 @@ def test_ThresholdScalarVolume_inputs(): 
environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), lower=dict(argstr='--lower %d', ), outsidevalue=dict(argstr='--outsidevalue %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--threshold %d', ), diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py index 9d8a717dac..fd98549305 100644 --- a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -13,7 +13,8 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): ), foreground=dict(argstr='--foreground %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -28,7 +29,8 @@ def test_VotingBinaryHoleFillingImageFilter_inputs(): radius=dict(argstr='--radius %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = VotingBinaryHoleFillingImageFilter.input_spec() diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index 77a633f5f8..f0bc8274bb 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -18,9 +18,9 @@ def force_to_valid_python_variable_name(old_name): """ Valid c++ names are not always valid in python, so provide alternate naming - >>> force_to_valid_python_variable_name('lambda') # doctest: +ALLOW_UNICODE + >>> force_to_valid_python_variable_name('lambda') 'opt_lambda' - >>> force_to_valid_python_variable_name('inputVolume') # doctest: +ALLOW_UNICODE + >>> 
force_to_valid_python_variable_name('inputVolume') 'inputVolume' """ new_name = old_name diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py index de89d21763..7f25ed92cc 100644 --- a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -11,7 +11,8 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): ), hp=dict(argstr='--hp %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_DWIUnbiasedNonLocalMeansFilter_inputs(): rs=dict(argstr='--rs %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = DWIUnbiasedNonLocalMeansFilter.input_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py index 1095a2169b..91f4b37591 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -19,7 +19,8 @@ def test_AffineRegistration_inputs(): ), histogrambins=dict(argstr='--histogrambins %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', @@ -36,7 +37,8 @@ def test_AffineRegistration_inputs(): ), spatialsamples=dict(argstr='--spatialsamples %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), translationscale=dict(argstr='--translationscale %f', ), diff --git 
a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py index 2965724b45..98be6fbe12 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -23,7 +23,8 @@ def test_BSplineDeformableRegistration_inputs(): ), histogrambins=dict(argstr='--histogrambins %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', @@ -43,7 +44,8 @@ def test_BSplineDeformableRegistration_inputs(): ), spatialsamples=dict(argstr='--spatialsamples %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = BSplineDeformableRegistration.input_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py index 9b0c0cb41e..e984395aa4 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -12,12 +12,14 @@ def test_BSplineToDeformationField_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), refImage=dict(argstr='--refImage %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), tfm=dict(argstr='--tfm %s', ), diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py index 5c6ca38748..9b90f3dc9b 100644 --- 
a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -34,7 +34,8 @@ def test_ExpertAutomatedRegistration_inputs(): ), fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialization=dict(argstr='--initialization %s', @@ -70,7 +71,8 @@ def test_ExpertAutomatedRegistration_inputs(): saveTransform=dict(argstr='--saveTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbosityLevel=dict(argstr='--verbosityLevel %s', ), diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py index e62a728d7d..c80b8b66fb 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -19,7 +19,8 @@ def test_LinearRegistration_inputs(): ), histogrambins=dict(argstr='--histogrambins %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', @@ -40,7 +41,8 @@ def test_LinearRegistration_inputs(): ), spatialsamples=dict(argstr='--spatialsamples %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), translationscale=dict(argstr='--translationscale %f', ), diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py index cea84022e4..d1262047eb 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py +++ 
b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -16,7 +16,8 @@ def test_MultiResolutionAffineRegistration_inputs(): ), fixedImageROI=dict(argstr='--fixedImageROI %s', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), metricTolerance=dict(argstr='--metricTolerance %f', @@ -38,7 +39,8 @@ def test_MultiResolutionAffineRegistration_inputs(): ), stepTolerance=dict(argstr='--stepTolerance %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MultiResolutionAffineRegistration.input_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py index a598be2eee..9c5fcd5c1f 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -9,7 +9,8 @@ def test_OtsuThresholdImageFilter_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_OtsuThresholdImageFilter_inputs(): ), outsideValue=dict(argstr='--outsideValue %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = OtsuThresholdImageFilter.input_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py index ee088157b2..72a68ca5b9 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -13,7 +13,8 @@ def test_OtsuThresholdSegmentation_inputs(): ), 
faceConnected=dict(argstr='--faceConnected ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_OtsuThresholdSegmentation_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = OtsuThresholdSegmentation.input_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py index 6084ba5d83..34db34c00d 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -16,7 +16,8 @@ def test_ResampleScalarVolume_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interpolation=dict(argstr='--interpolation %s', @@ -24,7 +25,8 @@ def test_ResampleScalarVolume_inputs(): spacing=dict(argstr='--spacing %s', sep=',', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ResampleScalarVolume.input_spec() diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py index ef5b7f2168..a56ad3b98e 100644 --- a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -19,7 +19,8 @@ def test_RigidRegistration_inputs(): ), histogrambins=dict(argstr='--histogrambins %d', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialtransform=dict(argstr='--initialtransform %s', @@ -40,7 +41,8 @@ def test_RigidRegistration_inputs(): ), 
spatialsamples=dict(argstr='--spatialsamples %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), testingmode=dict(argstr='--testingmode ', ), diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py index fc174efcfa..f4809280f9 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -20,7 +20,8 @@ def test_IntensityDifferenceMetric_inputs(): followupVolume=dict(argstr='%s', position=-2, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), outputVolume=dict(argstr='%s', @@ -32,7 +33,8 @@ def test_IntensityDifferenceMetric_inputs(): ), sensitivityThreshold=dict(argstr='--sensitivityThreshold %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = IntensityDifferenceMetric.input_spec() diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py index 0b66af94f3..4dfbdc66dd 100644 --- a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -24,7 +24,8 @@ def test_PETStandardUptakeValueComputation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), labelMap=dict(argstr='--labelMap %s', @@ -33,7 +34,8 @@ def test_PETStandardUptakeValueComputation_inputs(): ), petVolume=dict(argstr='--petVolume %s', ), - terminal_output=dict(nohash=True, + 
terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PETStandardUptakeValueComputation.input_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py index 35e08a6db1..fe5c7895f4 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -13,7 +13,8 @@ def test_ACPCTransform_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), midline=dict(argstr='--midline %s...', @@ -21,7 +22,8 @@ def test_ACPCTransform_inputs(): outputTransform=dict(argstr='--outputTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ACPCTransform.input_spec() diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py index 9aee3d80d1..3df134093f 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -27,7 +27,8 @@ def test_BRAINSDemonWarp_inputs(): ), histogramMatch=dict(argstr='--histogramMatch ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', @@ -96,7 +97,8 @@ def test_BRAINSDemonWarp_inputs(): ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), diff --git 
a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py index f7521f7551..943629d5cc 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -44,7 +44,8 @@ def test_BRAINSFit_inputs(): ), histogramMatch=dict(argstr='--histogramMatch ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initialTransform=dict(argstr='--initialTransform %s', @@ -124,7 +125,8 @@ def test_BRAINSFit_inputs(): strippedOutputTransform=dict(argstr='--strippedOutputTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', sep=',', diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py index 6e10f86ca0..a2444018e2 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -16,7 +16,8 @@ def test_BRAINSResample_inputs(): gridSpacing=dict(argstr='--gridSpacing %s', sep=',', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -34,7 +35,8 @@ def test_BRAINSResample_inputs(): ), referenceVolume=dict(argstr='--referenceVolume %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), warpTransform=dict(argstr='--warpTransform %s', ), diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py index ee3db65e07..81d4422012 100644 --- 
a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -11,7 +11,8 @@ def test_FiducialRegistration_inputs(): ), fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), movingLandmarks=dict(argstr='--movingLandmarks %s...', @@ -23,7 +24,8 @@ def test_FiducialRegistration_inputs(): saveTransform=dict(argstr='--saveTransform %s', hash_files=False, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transformType=dict(argstr='--transformType %s', ), diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py index 96e28abafa..1fd9c45b34 100644 --- a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -27,7 +27,8 @@ def test_VBRAINSDemonWarp_inputs(): ), histogramMatch=dict(argstr='--histogramMatch ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', @@ -96,7 +97,8 @@ def test_VBRAINSDemonWarp_inputs(): ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py index 8792856f51..5b8a66ba36 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py +++ 
b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -13,7 +13,8 @@ def test_BRAINSROIAuto_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', @@ -30,7 +31,8 @@ def test_BRAINSROIAuto_inputs(): ), outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), thresholdCorrectionFactor=dict(argstr='--thresholdCorrectionFactor %f', ), diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py index 3e51e217f2..e09922f0b1 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -22,7 +22,8 @@ def test_EMSegmentCommandLine_inputs(): generateEmptyMRMLSceneAndQuit=dict(argstr='--generateEmptyMRMLSceneAndQuit %s', hash_files=False, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), intermediateResultsDirectory=dict(argstr='--intermediateResultsDirectory %s', @@ -55,7 +56,8 @@ def test_EMSegmentCommandLine_inputs(): ), taskPreProcessingSetting=dict(argstr='--taskPreProcessingSetting %s', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='--verbose ', ), diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py index 844bf8a0e0..4bd8a64f94 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py +++ 
b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -13,7 +13,8 @@ def test_RobustStatisticsSegmenter_inputs(): ), expectedVolume=dict(argstr='--expectedVolume %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), intensityHomogeneity=dict(argstr='--intensityHomogeneity %f', @@ -32,7 +33,8 @@ def test_RobustStatisticsSegmenter_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = RobustStatisticsSegmenter.input_spec() diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py index 9600134d40..03eba8a580 100644 --- a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -9,7 +9,8 @@ def test_SimpleRegionGrowingSegmentation_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -31,7 +32,8 @@ def test_SimpleRegionGrowingSegmentation_inputs(): ), smoothingIterations=dict(argstr='--smoothingIterations %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), timestep=dict(argstr='--timestep %f', ), diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py index b533e70237..425541245c 100644 --- a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -9,7 +9,8 @@ def test_DicomToNrrdConverter_inputs(): environ=dict(nohash=True, usedefault=True, 
), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', @@ -21,7 +22,8 @@ def test_DicomToNrrdConverter_inputs(): ), smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), useBMatrixGradientDirections=dict(argstr='--useBMatrixGradientDirections ', ), diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py index fd8a401276..5e83f5f935 100644 --- a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -9,7 +9,8 @@ def test_EMSegmentTransformToNewFormat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputMRMLFileName=dict(argstr='--inputMRMLFileName %s', @@ -19,7 +20,8 @@ def test_EMSegmentTransformToNewFormat_inputs(): ), templateFlag=dict(argstr='--templateFlag ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = EMSegmentTransformToNewFormat.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py index 7c9d4b027d..2b2f1ad0ee 100644 --- a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -18,7 +18,8 @@ def test_GrayscaleModelMaker_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), name=dict(argstr='--name %s', @@ -29,7 +30,8 @@ def 
test_GrayscaleModelMaker_inputs(): ), splitnormals=dict(argstr='--splitnormals ', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), threshold=dict(argstr='--threshold %f', ), diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py index b066a5081f..ded9cbe0be 100644 --- a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -11,7 +11,8 @@ def test_LabelMapSmoothing_inputs(): ), gaussianSigma=dict(argstr='--gaussianSigma %f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume=dict(argstr='%s', @@ -27,7 +28,8 @@ def test_LabelMapSmoothing_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = LabelMapSmoothing.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py index 2102c77cdf..4ba0f98458 100644 --- a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -19,10 +19,12 @@ def test_MergeModels_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = MergeModels.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py index 4e84c252a9..c779eb3238 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -24,7 +24,8 @@ def test_ModelMaker_inputs(): ), 
generateAll=dict(argstr='--generateAll ', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), jointsmooth=dict(argstr='--jointsmooth ', @@ -51,7 +52,8 @@ def test_ModelMaker_inputs(): ), start=dict(argstr='--start %d', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ModelMaker.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py index 1b7dcd8076..a54ee1fea6 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -18,13 +18,15 @@ def test_ModelToLabelMap_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), surface=dict(argstr='%s', position=-2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ModelToLabelMap.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py index a75c12d463..dd0a987239 100644 --- a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -9,7 +9,8 @@ def test_OrientScalarVolume_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inputVolume1=dict(argstr='%s', @@ -21,7 +22,8 @@ def test_OrientScalarVolume_inputs(): hash_files=False, position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = OrientScalarVolume.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py 
b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py index d5e50cf6c9..48b75608c8 100644 --- a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -19,10 +19,12 @@ def test_ProbeVolumeWithModel_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = ProbeVolumeWithModel.input_spec() diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py index 1a24d5901e..369a25b77d 100644 --- a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -9,10 +9,12 @@ def test_SlicerCommandLine_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py index 6c3fbab32e..7882fa1280 100644 --- a/nipype/interfaces/spm/base.py +++ b/nipype/interfaces/spm/base.py @@ -29,8 +29,11 @@ # Local imports from ... 
import logging from ...utils import spm_docs as sd, NUMPY_MMAP -from ..base import (BaseInterface, traits, isdefined, InputMultiPath, - BaseInterfaceInputSpec, Directory, Undefined, ImageFile) +from ..base import ( + BaseInterface, traits, isdefined, InputMultiPath, + BaseInterfaceInputSpec, Directory, Undefined, + ImageFile, PackageInfo +) from ..matlab import MatlabCommand from ...external.due import due, Doi, BibTeX @@ -123,12 +126,37 @@ def scans_for_fnames(fnames, keep4d=False, separate_sessions=False): return flist -class Info(object): +class Info(PackageInfo): """Handles SPM version information """ - @staticmethod - def version(matlab_cmd=None, paths=None, use_mcr=None): - """Returns the path to the SPM directory in the Matlab path + _path = None + _name = None + + @classmethod + def path(klass, matlab_cmd=None, paths=None, use_mcr=None): + if klass._path: + return klass._path + klass.getinfo(matlab_cmd, paths, use_mcr) + return klass._path + + @classmethod + def version(klass, matlab_cmd=None, paths=None, use_mcr=None): + if klass._version: + return klass._version + klass.getinfo(matlab_cmd, paths, use_mcr) + return klass._version + + @classmethod + def name(klass, matlab_cmd=None, paths=None, use_mcr=None): + if klass._name: + return klass._name + klass.getinfo(matlab_cmd, paths, use_mcr) + return klass._name + + @classmethod + def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None): + """ + Returns the path to the SPM directory in the Matlab path If path not found, returns None. 
Parameters @@ -151,19 +179,21 @@ def version(matlab_cmd=None, paths=None, use_mcr=None): returns None of path not found """ - if use_mcr or 'FORCE_SPMMCR' in os.environ: - use_mcr = True - if matlab_cmd is None: - try: - matlab_cmd = os.environ['SPMMCRCMD'] - except KeyError: - pass - if matlab_cmd is None: - try: - matlab_cmd = os.environ['MATLABCMD'] - except KeyError: - matlab_cmd = 'matlab -nodesktop -nosplash' - mlab = MatlabCommand(matlab_cmd=matlab_cmd) + + if klass._name and klass._path and klass._version: + return { + 'name': klass._name, + 'path': klass._path, + 'release': klass._version + } + + use_mcr = use_mcr or 'FORCE_SPMMCR' in os.environ + matlab_cmd = ( + (use_mcr and os.getenv('SPMMCRCMD')) or + os.getenv('MATLABCMD', 'matlab -nodesktop -nosplash')) + + mlab = MatlabCommand(matlab_cmd=matlab_cmd, + resource_monitor=False) mlab.inputs.mfile = False if paths: mlab.inputs.paths = paths @@ -187,15 +217,19 @@ def version(matlab_cmd=None, paths=None, use_mcr=None): except (IOError, RuntimeError) as e: # if no Matlab at all -- exception could be raised # No Matlab -- no spm - logger.debug(str(e)) + logger.debug('%s', e) return None - else: - out = sd._strip_header(out.runtime.stdout) - out_dict = {} - for part in out.split('|'): - key, val = part.split(':') - out_dict[key] = val - return out_dict + + out = sd._strip_header(out.runtime.stdout) + out_dict = {} + for part in out.split('|'): + key, val = part.split(':') + out_dict[key] = val + + klass._version = out_dict['release'] + klass._path = out_dict['path'] + klass._name = out_dict['name'] + return out_dict def no_spm(): @@ -276,11 +310,12 @@ def _find_mlab_cmd_defaults(self): def _matlab_cmd_update(self): # MatlabCommand has to be created here, - # because matlab_cmb is not a proper input + # because matlab_cmd is not a proper input # and can be set only during init self.mlab = MatlabCommand(matlab_cmd=self.inputs.matlab_cmd, mfile=self.inputs.mfile, - paths=self.inputs.paths) + 
paths=self.inputs.paths, + resource_monitor=False) self.mlab.inputs.script_file = 'pyscript_%s.m' % \ self.__class__.__name__.split('.')[-1].lower() if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr: @@ -292,13 +327,15 @@ def _matlab_cmd_update(self): @property def version(self): - version_dict = Info.version(matlab_cmd=self.inputs.matlab_cmd, - paths=self.inputs.paths, - use_mcr=self.inputs.use_mcr) - if version_dict: - return '.'.join((version_dict['name'].split('SPM')[-1], - version_dict['release'])) - return version_dict + info_dict = Info.getinfo( + matlab_cmd=self.inputs.matlab_cmd, + paths=self.inputs.paths, + use_mcr=self.inputs.use_mcr + ) + if info_dict: + return '%s.%s' % ( + info_dict['name'].split('SPM')[-1], + info_dict['release']) @property def jobtype(self): diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py index ddf35ef449..8ddc06a9b0 100644 --- a/nipype/interfaces/spm/model.py +++ b/nipype/interfaces/spm/model.py @@ -32,7 +32,7 @@ scans_for_fnames, ImageFileSPM) __docformat__ = 'restructuredtext' -logger = logging.getLogger('interface') +iflogger = logging.getLogger('interface') class Level1DesignInputSpec(SPMCommandInputSpec): diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py index c5064f2f59..dfcfe7744c 100644 --- a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -6,7 +6,8 @@ def test_Analyze2nii_inputs(): input_map = dict(analyze_file=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), @@ -26,7 +27,8 @@ def test_Analyze2nii_inputs(): def test_Analyze2nii_outputs(): - output_map = dict(ignore_exception=dict(nohash=True, + output_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git 
a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py index 5847ad98fe..36f32cc0c2 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -7,7 +7,8 @@ def test_ApplyDeformations_inputs(): input_map = dict(deformation_field=dict(field='comp{1}.def', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(field='fnames', diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py index 849c5580db..ffe254c824 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -12,7 +12,8 @@ def test_ApplyInverseDeformation_inputs(): deformation_field=dict(field='comp{1}.inv.comp{1}.def', xor=['deformation'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(field='fnames', diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py index 8100981604..b1fc483046 100644 --- a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -4,7 +4,8 @@ def test_ApplyTransform_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(copyfile=True, diff --git a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py index 04bae31f0d..46caa23009 100644 --- a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -4,7 +4,8 @@ 
def test_CalcCoregAffine_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), invmat=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py index 468ad7e3e3..3c1ab4b50c 100644 --- a/nipype/interfaces/spm/tests/test_auto_Coregister.py +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -11,7 +11,8 @@ def test_Coregister_inputs(): ), fwhm=dict(field='eoptions.fwhm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), jobtype=dict(usedefault=True, diff --git a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py index c1a8d34725..f188d42e9e 100644 --- a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -8,7 +8,8 @@ def test_CreateWarped_inputs(): field='crt_warped.flowfields', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_files=dict(copyfile=False, diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py index c7197a586f..345c2b0b8c 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTEL.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -4,7 +4,8 @@ def test_DARTEL_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_files=dict(copyfile=False, diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py index d3e7815756..1743a5d791 100644 --- a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py +++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ 
-15,7 +15,8 @@ def test_DARTELNorm2MNI_inputs(): ), fwhm=dict(field='mni_norm.fwhm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py index dff4b04d06..48abb3c646 100644 --- a/nipype/interfaces/spm/tests/test_auto_DicomImport.py +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -10,7 +10,8 @@ def test_DicomImport_inputs(): icedims=dict(field='convopts.icedims', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(field='data', diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py index 76d4a25bf5..de1ac9ca63 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -11,7 +11,8 @@ def test_EstimateContrast_inputs(): ), group_contrast=dict(xor=['use_derivs'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py index 703c97c6fc..b9636e44ed 100644 --- a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -8,7 +8,8 @@ def test_EstimateModel_inputs(): mandatory=True, ), flags=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py index eaa4272d8d..34de3b2efe 100644 --- 
a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -19,7 +19,8 @@ def test_FactorialDesign_inputs(): ), global_normalization=dict(field='globalm.glonorm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py index 908672beb7..5d0a14f5c5 100644 --- a/nipype/interfaces/spm/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -11,7 +11,8 @@ def test_Level1Design_inputs(): ), global_intensity_normalization=dict(field='global', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), interscan_interval=dict(field='timing.RT', diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py index 54ec275450..6c8a465865 100644 --- a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -19,7 +19,8 @@ def test_MultipleRegressionDesign_inputs(): ), global_normalization=dict(field='globalm.glonorm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(field='des.mreg.scans', diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py index 4c77c5d203..6a2b9e1334 100644 --- a/nipype/interfaces/spm/tests/test_auto_NewSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -12,7 +12,8 @@ def test_NewSegment_inputs(): ), channel_info=dict(field='channel', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), 
matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py index f6cb425d6a..7aa7949a11 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -11,7 +11,8 @@ def test_Normalize_inputs(): apply_to_files=dict(copyfile=True, field='subj.resample', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), jobtype=dict(usedefault=True, diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py index 9d537e34b1..74bf60132a 100644 --- a/nipype/interfaces/spm/tests/test_auto_Normalize12.py +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -18,7 +18,8 @@ def test_Normalize12_inputs(): mandatory=True, xor=['image_to_align', 'tpm'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), image_to_align=dict(copyfile=True, diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py index 1148cbf9fa..323660a95d 100644 --- a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -19,7 +19,8 @@ def test_OneSampleTTestDesign_inputs(): ), global_normalization=dict(field='globalm.glonorm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(field='des.t1.scans', diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py index f9cce92a37..d2ab89aed8 100644 --- a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -23,7 +23,8 @@ def test_PairedTTestDesign_inputs(): ), 
grand_mean_scaling=dict(field='des.pt.gmsca', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py index 6c54c4a945..ef1989bc19 100644 --- a/nipype/interfaces/spm/tests/test_auto_Realign.py +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -6,7 +6,8 @@ def test_Realign_inputs(): input_map = dict(fwhm=dict(field='eoptions.fwhm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(copyfile=True, diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py index 4a433e5b3d..a2f10d727c 100644 --- a/nipype/interfaces/spm/tests/test_auto_Reslice.py +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -4,7 +4,8 @@ def test_Reslice_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py index 06e8f2e607..4bca83c6cf 100644 --- a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -6,7 +6,8 @@ def test_ResliceToReference_inputs(): input_map = dict(bounding_box=dict(field='comp{2}.idbbvox.bb', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(field='fnames', diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py index ed841142dd..0f36f719d7 100644 --- a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py +++ 
b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -4,7 +4,8 @@ def test_SPMCommand_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py index 739a4e1ca9..b18f405de1 100644 --- a/nipype/interfaces/spm/tests/test_auto_Segment.py +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -22,7 +22,8 @@ def test_Segment_inputs(): ), gm_output_type=dict(field='output.GM', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), mask_image=dict(field='opts.msk', diff --git a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py index 739d0157a1..357096a402 100644 --- a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -4,7 +4,8 @@ def test_SliceTiming_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(copyfile=False, diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py index 378f504328..3f0426abb0 100644 --- a/nipype/interfaces/spm/tests/test_auto_Smooth.py +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -8,7 +8,8 @@ def test_Smooth_inputs(): ), fwhm=dict(field='fwhm', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), implicit_masking=dict(field='im', diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py index e30b163857..017b1c5325 100644 --- a/nipype/interfaces/spm/tests/test_auto_Threshold.py +++ 
b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -16,7 +16,8 @@ def test_Threshold_inputs(): ), height_threshold_type=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py index d73cd4f98f..7cd496ce94 100644 --- a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -10,7 +10,8 @@ def test_ThresholdStatistics_inputs(): ), height_threshold=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py index cb19a35f62..f38f8023be 100644 --- a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -27,7 +27,8 @@ def test_TwoSampleTTestDesign_inputs(): group2_files=dict(field='des.t2.scans2', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), matlab_cmd=dict(), diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py index f02579b66c..d61f7c623f 100644 --- a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -56,7 +56,8 @@ def test_VBMSegment_inputs(): gm_normalized=dict(field='estwrite.output.GM.warped', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_files=dict(copyfile=False, diff --git a/nipype/interfaces/spm/tests/test_base.py 
b/nipype/interfaces/spm/tests/test_base.py index d1c517a0d3..57d0d88c21 100644 --- a/nipype/interfaces/spm/tests/test_base.py +++ b/nipype/interfaces/spm/tests/test_base.py @@ -16,12 +16,8 @@ from nipype.interfaces.spm.base import SPMCommandInputSpec from nipype.interfaces.base import traits -try: - matlab_cmd = os.environ['MATLABCMD'] -except: - matlab_cmd = 'matlab' - -mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) +mlab.MatlabCommand.set_default_matlab_cmd( + os.getenv('MATLABCMD', 'matlab')) def test_scan_for_fnames(create_files_in_directory): @@ -35,10 +31,10 @@ def test_scan_for_fnames(create_files_in_directory): if not save_time: @pytest.mark.skipif(no_spm(), reason="spm is not installed") def test_spm_path(): - spm_path = spm.Info.version()['path'] + spm_path = spm.Info.path() if spm_path is not None: assert isinstance(spm_path, (str, bytes)) - assert 'spm' in spm_path + assert 'spm' in spm_path.lower() def test_use_mfile(): diff --git a/nipype/interfaces/spm/tests/test_model.py b/nipype/interfaces/spm/tests/test_model.py index e9e8a48849..307c4f1786 100644 --- a/nipype/interfaces/spm/tests/test_model.py +++ b/nipype/interfaces/spm/tests/test_model.py @@ -6,12 +6,8 @@ import nipype.interfaces.spm.model as spm import nipype.interfaces.matlab as mlab -try: - matlab_cmd = os.environ['MATLABCMD'] -except: - matlab_cmd = 'matlab' - -mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) +mlab.MatlabCommand.set_default_matlab_cmd( + os.getenv('MATLABCMD', 'matlab')) def test_level1design(): diff --git a/nipype/interfaces/spm/tests/test_preprocess.py b/nipype/interfaces/spm/tests/test_preprocess.py index 4bf86285ad..f167ad521a 100644 --- a/nipype/interfaces/spm/tests/test_preprocess.py +++ b/nipype/interfaces/spm/tests/test_preprocess.py @@ -10,12 +10,8 @@ from nipype.interfaces.spm import no_spm import nipype.interfaces.matlab as mlab -try: - matlab_cmd = os.environ['MATLABCMD'] -except: - matlab_cmd = 'matlab' - 
-mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) +mlab.MatlabCommand.set_default_matlab_cmd( + os.getenv('MATLABCMD', 'matlab')) def test_slicetiming(): @@ -88,7 +84,7 @@ def test_normalize12_list_outputs(create_files_in_directory): @pytest.mark.skipif(no_spm(), reason="spm is not installed") def test_segment(): - if spm.Info.version()['name'] == "SPM12": + if spm.Info.name() == "SPM12": assert spm.Segment()._jobtype == 'tools' assert spm.Segment()._jobname == 'oldseg' else: @@ -98,7 +94,7 @@ def test_segment(): @pytest.mark.skipif(no_spm(), reason="spm is not installed") def test_newsegment(): - if spm.Info.version()['name'] == "SPM12": + if spm.Info.name() == "SPM12": assert spm.NewSegment()._jobtype == 'spatial' assert spm.NewSegment()._jobname == 'preproc' else: diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py new file mode 100644 index 0000000000..36d02d5fe9 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..bids_utils import BIDSDataGrabber + + +def test_BIDSDataGrabber_inputs(): + input_map = dict(base_dir=dict(mandatory=True, + ), + output_query=dict(), + raise_on_empty=dict(usedefault=True, + ), + return_type=dict(usedefault=True, + ), + ) + inputs = BIDSDataGrabber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +def test_BIDSDataGrabber_outputs(): + output_map = dict() + outputs = BIDSDataGrabber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py index b67b83bf5f..ec4151cc8f 100644 --- 
a/nipype/interfaces/tests/test_auto_Bru2.py +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -15,7 +15,8 @@ def test_Bru2_inputs(): ), force_conversion=dict(argstr='-f', ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), input_dir=dict(argstr='%s', @@ -25,7 +26,8 @@ def test_Bru2_inputs(): output_filename=dict(argstr='-o %s', genfile=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Bru2.input_spec() diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py index 2acfbfbaab..3abbf26110 100644 --- a/nipype/interfaces/tests/test_auto_C3dAffineTool.py +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -12,7 +12,8 @@ def test_C3dAffineTool_inputs(): fsl2ras=dict(argstr='-fsl2ras', position=4, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), itk_transform=dict(argstr='-oitk %s', @@ -25,7 +26,8 @@ def test_C3dAffineTool_inputs(): source_file=dict(argstr='-src %s', position=2, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), transform_file=dict(argstr='%s', position=3, diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py index f402bdc53d..82b74b6017 100644 --- a/nipype/interfaces/tests/test_auto_DataFinder.py +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -4,7 +4,8 @@ def test_DataFinder_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ignore_regexes=dict(), diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py index 5795ce969d..8d95bf9637 100644 --- a/nipype/interfaces/tests/test_auto_DataGrabber.py +++ 
b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -5,7 +5,8 @@ def test_DataGrabber_inputs(): input_map = dict(base_directory=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), raise_on_empty=dict(usedefault=True, diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py index 0ea2b71a6d..7c739969a7 100644 --- a/nipype/interfaces/tests/test_auto_DataSink.py +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -11,7 +11,8 @@ def test_DataSink_inputs(): container=dict(), creds_path=dict(), encrypt_bucket_keys=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), local_copy=dict(), diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py index eb155ff975..20a29004cb 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2nii.py +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -33,7 +33,8 @@ def test_Dcm2nii_inputs(): id_in_filename=dict(argstr='-i', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), nii_output=dict(argstr='-n', @@ -67,7 +68,8 @@ def test_Dcm2nii_inputs(): spm_analyze=dict(argstr='-s', xor=['nii_output'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Dcm2nii.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py index a396853b70..3d496f3aba 100644 --- a/nipype/interfaces/tests/test_auto_Dcm2niix.py +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -21,7 +21,8 @@ def test_Dcm2niix_inputs(): has_private=dict(argstr='-t', usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), merge_imgs=dict(argstr='-m', @@ -47,7 +48,8 
@@ def test_Dcm2niix_inputs(): position=-1, xor=['source_dir'], ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), verbose=dict(argstr='-v', usedefault=True, diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py index 1af0874410..a99ddb9d4f 100644 --- a/nipype/interfaces/tests/test_auto_FreeSurferSource.py +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -6,7 +6,8 @@ def test_FreeSurferSource_inputs(): input_map = dict(hemi=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), subject_id=dict(mandatory=True, diff --git a/nipype/interfaces/tests/test_auto_IOBase.py b/nipype/interfaces/tests/test_auto_IOBase.py index 02e45692a9..d8db29919a 100644 --- a/nipype/interfaces/tests/test_auto_IOBase.py +++ b/nipype/interfaces/tests/test_auto_IOBase.py @@ -4,7 +4,8 @@ def test_IOBase_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ) diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py index 3a93359459..d6458e1e8f 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -5,7 +5,8 @@ def test_JSONFileGrabber_inputs(): input_map = dict(defaults=dict(), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(), diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py index 32b51c9dc5..1d569e76d4 100644 --- a/nipype/interfaces/tests/test_auto_JSONFileSink.py +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -6,7 +6,8 @@ def test_JSONFileSink_inputs(): input_map = 
dict(_outputs=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_dict=dict(usedefault=True, diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py index 6801f40353..71a5587767 100644 --- a/nipype/interfaces/tests/test_auto_MatlabCommand.py +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -9,7 +9,8 @@ def test_MatlabCommand_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), logfile=dict(argstr='-logfile %s', @@ -38,7 +39,8 @@ def test_MatlabCommand_inputs(): single_comp_thread=dict(argstr='-singleCompThread', nohash=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uses_mcr=dict(nohash=True, xor=['nodesktop', 'nosplash', 'single_comp_thread'], diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py index 7abd5878a0..04c314e2e2 100644 --- a/nipype/interfaces/tests/test_auto_MeshFix.py +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -38,7 +38,8 @@ def test_MeshFix_inputs(): finetuning_substeps=dict(argstr='%d', requires=['finetuning_distance'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file1=dict(argstr='%s', @@ -78,7 +79,8 @@ def test_MeshFix_inputs(): ), set_intersections_to_one=dict(argstr='--intersect', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), uniform_remeshing_steps=dict(argstr='-u %d', requires=['uniform_remeshing_vertices'], diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py index 1218d8fac0..80bf344e63 100644 --- a/nipype/interfaces/tests/test_auto_MySQLSink.py +++ 
b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -14,7 +14,8 @@ def test_MySQLSink_inputs(): usedefault=True, xor=['config'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), password=dict(), diff --git a/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py b/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py index 773e7e24a3..0846313121 100644 --- a/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py +++ b/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py @@ -4,7 +4,8 @@ def test_NiftiGeneratorBase_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ) diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py index 9c62a83a23..6dbac76fa2 100644 --- a/nipype/interfaces/tests/test_auto_PETPVC.py +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -23,7 +23,8 @@ def test_PETPVC_inputs(): fwhm_z=dict(argstr='-z %.4f', mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-i %s', @@ -45,7 +46,8 @@ def test_PETPVC_inputs(): ), stop_crit=dict(argstr='-a %.4f', ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = PETPVC.input_spec() diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py index 7debd4dd84..e2edaf37c6 100644 --- a/nipype/interfaces/tests/test_auto_Quickshear.py +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -12,7 +12,8 @@ def test_Quickshear_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='%s', @@ -29,7 +30,8 @@ def test_Quickshear_inputs(): 
name_template='%s_defaced', position=3, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Quickshear.input_spec() diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py index a3c918c465..d5a2536eb8 100644 --- a/nipype/interfaces/tests/test_auto_S3DataGrabber.py +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -10,7 +10,8 @@ def test_S3DataGrabber_inputs(): ), bucket_path=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), local_directory=dict(), diff --git a/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py index c7aee569d5..2012b9d9e1 100644 --- a/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py @@ -12,7 +12,8 @@ def test_SEMLikeCommandLine_inputs(): ignore_exception=dict(nohash=True, usedefault=True, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SEMLikeCommandLine.input_spec() diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py index 74c9caaa46..e7319e4d29 100644 --- a/nipype/interfaces/tests/test_auto_SQLiteSink.py +++ b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -6,7 +6,8 @@ def test_SQLiteSink_inputs(): input_map = dict(database_file=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), table_name=dict(mandatory=True, diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py index 99e71d1ffe..1c350203e6 100644 --- a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -10,7 
+10,8 @@ def test_SSHDataGrabber_inputs(): ), hostname=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), password=dict(), diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py index da119bfcf6..12ca6ac859 100644 --- a/nipype/interfaces/tests/test_auto_SelectFiles.py +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -7,7 +7,8 @@ def test_SelectFiles_inputs(): input_map = dict(base_directory=dict(), force_lists=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), raise_on_empty=dict(usedefault=True, diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py index 4f101450b0..6f314f2f11 100644 --- a/nipype/interfaces/tests/test_auto_SignalExtraction.py +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -8,7 +8,8 @@ def test_SignalExtraction_inputs(): ), detrend=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(mandatory=True, diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py index 891eff2394..26a4700a0e 100644 --- a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -9,11 +9,13 @@ def test_SlicerCommandLine_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), module=dict(), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = SlicerCommandLine.input_spec() diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py 
b/nipype/interfaces/tests/test_auto_XNATSink.py index 286c8b2ca9..e4ce926c6e 100644 --- a/nipype/interfaces/tests/test_auto_XNATSink.py +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -14,7 +14,8 @@ def test_XNATSink_inputs(): ), experiment_id=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), project_id=dict(mandatory=True, diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py index b399d143aa..afd02e6c9d 100644 --- a/nipype/interfaces/tests/test_auto_XNATSource.py +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -8,7 +8,8 @@ def test_XNATSource_inputs(): config=dict(mandatory=True, xor=['server'], ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), pwd=dict(), diff --git a/nipype/interfaces/tests/test_base.py b/nipype/interfaces/tests/test_base.py deleted file mode 100644 index 34d1134e42..0000000000 --- a/nipype/interfaces/tests/test_base.py +++ /dev/null @@ -1,736 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -from __future__ import print_function, unicode_literals -from future import standard_library -standard_library.install_aliases() - -from builtins import open, str, bytes -import os -import warnings -import simplejson as json - -import pytest -from nipype.testing import example_data - -import nipype.interfaces.base as nib -from nipype.utils.filemanip import split_filename -from nipype.interfaces.base import Undefined, config -import traits.api as traits - -@pytest.mark.parametrize("args", [ - {}, - {'a' : 1, 'b' : [2, 3]} -]) -def test_bunch(args): - b = nib.Bunch(**args) - assert b.__dict__ == args - - -def test_bunch_attribute(): - b = nib.Bunch(a=1, b=[2, 3], c=None) - assert b.a == 1 - assert b.b == [2, 3] - assert b.c == None 
- - -def test_bunch_repr(): - b = nib.Bunch(b=2, c=3, a=dict(n=1, m=2)) - assert repr(b) == "Bunch(a={'m': 2, 'n': 1}, b=2, c=3)" - - -def test_bunch_methods(): - b = nib.Bunch(a=2) - b.update(a=3) - newb = b.dictcopy() - assert b.a == 3 - assert b.get('a') == 3 - assert b.get('badkey', 'otherthing') == 'otherthing' - assert b != newb - assert type(dict()) == type(newb) - assert newb['a'] == 3 - - -def test_bunch_hash(): - # NOTE: Since the path to the json file is included in the Bunch, - # the hash will be unique to each machine. - pth = os.path.split(os.path.abspath(__file__))[0] - json_pth = os.path.join(pth, 'realign_json.json') - b = nib.Bunch(infile=json_pth, - otherthing='blue', - yat=True) - newbdict, bhash = b._get_bunch_hash() - assert bhash == 'ddcc7b4ec5675df8cf317a48bd1857fa' - # Make sure the hash stored in the json file for `infile` is correct. - jshash = nib.md5() - with open(json_pth, 'r') as fp: - jshash.update(fp.read().encode('utf-8')) - assert newbdict['infile'][0][1] == jshash.hexdigest() - assert newbdict['yat'] == True - - -@pytest.fixture(scope="module") -def setup_file(request, tmpdir_factory): - tmp_dir = str(tmpdir_factory.mktemp('files')) - tmp_infile = os.path.join(tmp_dir, 'foo.txt') - with open(tmp_infile, 'w') as fp: - fp.writelines([u'123456789']) - - os.chdir(tmp_dir) - - return tmp_infile - - -def test_TraitedSpec(): - assert nib.TraitedSpec().get_hashval() - assert nib.TraitedSpec().__repr__() == '\n\n' - - class spec(nib.TraitedSpec): - foo = nib.traits.Int - goo = nib.traits.Float(usedefault=True) - - assert spec().foo == Undefined - assert spec().goo == 0.0 - specfunc = lambda x: spec(hoo=x) - with pytest.raises(nib.traits.TraitError): specfunc(1) - infields = spec(foo=1) - hashval = ([('foo', 1), ('goo', '0.0000000000')], 'e89433b8c9141aa0fda2f8f4d662c047') - assert infields.get_hashval() == hashval - assert infields.__repr__() == '\nfoo = 1\ngoo = 0.0\n' - - -@pytest.mark.skip -def test_TraitedSpec_dynamic(): - from pickle 
import dumps, loads - a = nib.BaseTraitedSpec() - a.add_trait('foo', nib.traits.Int) - a.foo = 1 - assign_a = lambda: setattr(a, 'foo', 'a') - with pytest.raises(Exception): assign_a - pkld_a = dumps(a) - unpkld_a = loads(pkld_a) - assign_a_again = lambda: setattr(unpkld_a, 'foo', 'a') - with pytest.raises(Exception): assign_a_again - - -def test_TraitedSpec_logic(): - class spec3(nib.TraitedSpec): - _xor_inputs = ('foo', 'bar') - - foo = nib.traits.Int(xor=_xor_inputs, - desc='foo or bar, not both') - bar = nib.traits.Int(xor=_xor_inputs, - desc='bar or foo, not both') - kung = nib.traits.Float(requires=('foo',), - position=0, - desc='kung foo') - - class out3(nib.TraitedSpec): - output = nib.traits.Int - - class MyInterface(nib.BaseInterface): - input_spec = spec3 - output_spec = out3 - - myif = MyInterface() - # NOTE_dj, FAIL: I don't get a TypeError, only a UserWarning - #with pytest.raises(TypeError): - # setattr(myif.inputs, 'kung', 10.0) - myif.inputs.foo = 1 - assert myif.inputs.foo == 1 - set_bar = lambda: setattr(myif.inputs, 'bar', 1) - with pytest.raises(IOError): set_bar() - assert myif.inputs.foo == 1 - myif.inputs.kung = 2 - assert myif.inputs.kung == 2.0 - - -def test_deprecation(): - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) - - class DeprecationSpec1(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='0.1') - spec_instance = DeprecationSpec1() - set_foo = lambda: setattr(spec_instance, 'foo', 1) - with pytest.raises(nib.TraitError): set_foo() - assert len(w) == 0, 'no warnings, just errors' - - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) - - class DeprecationSpec2(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='100', new_name='bar') - spec_instance = DeprecationSpec2() - set_foo = lambda: setattr(spec_instance, 'foo', 1) - with pytest.raises(nib.TraitError): set_foo() - assert len(w) == 0, 'no warnings, just errors' - - with 
warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) - - class DeprecationSpec3(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='1000', new_name='bar') - bar = nib.traits.Int() - spec_instance = DeprecationSpec3() - not_raised = True - try: - spec_instance.foo = 1 - except nib.TraitError: - not_raised = False - assert not_raised - assert len(w) == 1, 'deprecated warning 1 %s' % [w1.message for w1 in w] - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) - - class DeprecationSpec3(nib.TraitedSpec): - foo = nib.traits.Int(deprecated='1000', new_name='bar') - bar = nib.traits.Int() - spec_instance = DeprecationSpec3() - not_raised = True - try: - spec_instance.foo = 1 - except nib.TraitError: - not_raised = False - assert not_raised - assert spec_instance.foo == Undefined - assert spec_instance.bar == 1 - assert len(w) == 1, 'deprecated warning 2 %s' % [w1.message for w1 in w] - - -def test_namesource(setup_file): - tmp_infile = setup_file - tmpd, nme, ext = split_filename(tmp_infile) - - class spec2(nib.CommandLineInputSpec): - moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", - position=2) - doo = nib.File(exists=True, argstr="%s", position=1) - goo = traits.Int(argstr="%d", position=4) - poo = nib.File(name_source=['goo'], hash_files=False, argstr="%s", - position=3) - - class TestName(nib.CommandLine): - _cmd = "mycommand" - input_spec = spec2 - testobj = TestName() - testobj.inputs.doo = tmp_infile - testobj.inputs.goo = 99 - assert '%s_generated' % nme in testobj.cmdline - assert '%d_generated' % testobj.inputs.goo in testobj.cmdline - testobj.inputs.moo = "my_%s_template" - assert 'my_%s_template' % nme in testobj.cmdline - - -def test_chained_namesource(setup_file): - tmp_infile = setup_file - tmpd, nme, ext = split_filename(tmp_infile) - - class spec2(nib.CommandLineInputSpec): - doo = nib.File(exists=True, argstr="%s", position=1) - moo = 
nib.File(name_source=['doo'], hash_files=False, argstr="%s", - position=2, name_template='%s_mootpl') - poo = nib.File(name_source=['moo'], hash_files=False, - argstr="%s", position=3) - - class TestName(nib.CommandLine): - _cmd = "mycommand" - input_spec = spec2 - - testobj = TestName() - testobj.inputs.doo = tmp_infile - res = testobj.cmdline - assert '%s' % tmp_infile in res - assert '%s_mootpl ' % nme in res - assert '%s_mootpl_generated' % nme in res - - -def test_cycle_namesource1(setup_file): - tmp_infile = setup_file - tmpd, nme, ext = split_filename(tmp_infile) - - class spec3(nib.CommandLineInputSpec): - moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", - position=1, name_template='%s_mootpl') - poo = nib.File(name_source=['moo'], hash_files=False, - argstr="%s", position=2) - doo = nib.File(name_source=['poo'], hash_files=False, - argstr="%s", position=3) - - class TestCycle(nib.CommandLine): - _cmd = "mycommand" - input_spec = spec3 - - # Check that an exception is raised - to0 = TestCycle() - not_raised = True - try: - to0.cmdline - except nib.NipypeInterfaceError: - not_raised = False - assert not not_raised - - -def test_cycle_namesource2(setup_file): - tmp_infile = setup_file - tmpd, nme, ext = split_filename(tmp_infile) - - class spec3(nib.CommandLineInputSpec): - moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s", - position=1, name_template='%s_mootpl') - poo = nib.File(name_source=['moo'], hash_files=False, - argstr="%s", position=2) - doo = nib.File(name_source=['poo'], hash_files=False, - argstr="%s", position=3) - - class TestCycle(nib.CommandLine): - _cmd = "mycommand" - input_spec = spec3 - - # Check that loop can be broken by setting one of the inputs - to1 = TestCycle() - to1.inputs.poo = tmp_infile - - not_raised = True - try: - res = to1.cmdline - except nib.NipypeInterfaceError: - not_raised = False - print(res) - - assert not_raised - assert '%s' % tmp_infile in res - assert '%s_generated' % nme in res 
- assert '%s_generated_mootpl' % nme in res - - -def test_TraitedSpec_withFile(setup_file): - tmp_infile = setup_file - tmpd, nme = os.path.split(tmp_infile) - assert os.path.exists(tmp_infile) - - class spec2(nib.TraitedSpec): - moo = nib.File(exists=True) - doo = nib.traits.List(nib.File(exists=True)) - infields = spec2(moo=tmp_infile, doo=[tmp_infile]) - hashval = infields.get_hashval(hash_method='content') - assert hashval[1] == 'a00e9ee24f5bfa9545a515b7a759886b' - - -def test_TraitedSpec_withNoFileHashing(setup_file): - tmp_infile = setup_file - tmpd, nme = os.path.split(tmp_infile) - assert os.path.exists(tmp_infile) - - class spec2(nib.TraitedSpec): - moo = nib.File(exists=True, hash_files=False) - doo = nib.traits.List(nib.File(exists=True)) - infields = spec2(moo=nme, doo=[tmp_infile]) - hashval = infields.get_hashval(hash_method='content') - assert hashval[1] == '8da4669ff5d72f670a46ea3e7a203215' - - class spec3(nib.TraitedSpec): - moo = nib.File(exists=True, name_source="doo") - doo = nib.traits.List(nib.File(exists=True)) - infields = spec3(moo=nme, doo=[tmp_infile]) - hashval1 = infields.get_hashval(hash_method='content') - - class spec4(nib.TraitedSpec): - moo = nib.File(exists=True) - doo = nib.traits.List(nib.File(exists=True)) - infields = spec4(moo=nme, doo=[tmp_infile]) - hashval2 = infields.get_hashval(hash_method='content') - assert hashval1[1] != hashval2[1] - - -def test_Interface(): - assert nib.Interface.input_spec == None - assert nib.Interface.output_spec == None - with pytest.raises(NotImplementedError): nib.Interface() - with pytest.raises(NotImplementedError): nib.Interface.help() - with pytest.raises(NotImplementedError): nib.Interface._inputs_help() - with pytest.raises(NotImplementedError): nib.Interface._outputs_help() - with pytest.raises(NotImplementedError): nib.Interface._outputs() - - class DerivedInterface(nib.Interface): - def __init__(self): - pass - - nif = DerivedInterface() - with pytest.raises(NotImplementedError): 
nif.run() - with pytest.raises(NotImplementedError): nif.aggregate_outputs() - with pytest.raises(NotImplementedError): nif._list_outputs() - with pytest.raises(NotImplementedError): nif._get_filecopy_info() - - -def test_BaseInterface(): - assert nib.BaseInterface.help() == None - assert nib.BaseInterface._get_filecopy_info() == [] - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - goo = nib.traits.Int(desc='a random int', mandatory=True) - moo = nib.traits.Int(desc='a random int', mandatory=False) - hoo = nib.traits.Int(desc='a random int', usedefault=True) - zoo = nib.File(desc='a file', copyfile=False) - woo = nib.File(desc='a file', copyfile=True) - - class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - - class DerivedInterface(nib.BaseInterface): - input_spec = InputSpec - - assert DerivedInterface.help() == None - assert 'moo' in ''.join(DerivedInterface._inputs_help()) - assert DerivedInterface()._outputs() == None - assert DerivedInterface._get_filecopy_info()[0]['key'] == 'woo' - assert DerivedInterface._get_filecopy_info()[0]['copy'] - assert DerivedInterface._get_filecopy_info()[1]['key'] == 'zoo' - assert not DerivedInterface._get_filecopy_info()[1]['copy'] - assert DerivedInterface().inputs.foo == Undefined - with pytest.raises(ValueError): DerivedInterface()._check_mandatory_inputs() - assert DerivedInterface(goo=1)._check_mandatory_inputs() == None - with pytest.raises(ValueError): DerivedInterface().run() - with pytest.raises(NotImplementedError): DerivedInterface(goo=1).run() - - class DerivedInterface2(DerivedInterface): - output_spec = OutputSpec - - def _run_interface(self, runtime): - return runtime - - assert DerivedInterface2.help() == None - assert DerivedInterface2()._outputs().foo == Undefined - with pytest.raises(NotImplementedError): DerivedInterface2(goo=1).run() - - default_inpu_spec = nib.BaseInterface.input_spec - nib.BaseInterface.input_spec = None - with 
pytest.raises(Exception): nib.BaseInterface() - nib.BaseInterface.input_spec = default_inpu_spec - - -def test_BaseInterface_load_save_inputs(tmpdir): - tmp_json = os.path.join(str(tmpdir), 'settings.json') - - class InputSpec(nib.TraitedSpec): - input1 = nib.traits.Int() - input2 = nib.traits.Float() - input3 = nib.traits.Bool() - input4 = nib.traits.Str() - - class DerivedInterface(nib.BaseInterface): - input_spec = InputSpec - - def __init__(self, **inputs): - super(DerivedInterface, self).__init__(**inputs) - - inputs_dict = {'input1': 12, 'input3': True, - 'input4': 'some string'} - bif = DerivedInterface(**inputs_dict) - bif.save_inputs_to_json(tmp_json) - bif2 = DerivedInterface() - bif2.load_inputs_from_json(tmp_json) - assert bif2.inputs.get_traitsfree() == inputs_dict - - bif3 = DerivedInterface(from_file=tmp_json) - assert bif3.inputs.get_traitsfree() == inputs_dict - - inputs_dict2 = inputs_dict.copy() - inputs_dict2.update({'input4': 'some other string'}) - bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4']) - assert bif4.inputs.get_traitsfree() == inputs_dict2 - - bif5 = DerivedInterface(input4=inputs_dict2['input4']) - bif5.load_inputs_from_json(tmp_json, overwrite=False) - assert bif5.inputs.get_traitsfree() == inputs_dict2 - - bif6 = DerivedInterface(input4=inputs_dict2['input4']) - bif6.load_inputs_from_json(tmp_json) - assert bif6.inputs.get_traitsfree() == inputs_dict - - # test get hashval in a complex interface - from nipype.interfaces.ants import Registration - settings = example_data(example_data('smri_ants_registration_settings.json')) - with open(settings) as setf: - data_dict = json.load(setf) - - tsthash = Registration() - tsthash.load_inputs_from_json(settings) - assert {} == check_dict(data_dict, tsthash.inputs.get_traitsfree()) - - tsthash2 = Registration(from_file=settings) - assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree()) - - _, hashvalue = 
tsthash.inputs.get_hashval(hash_method='timestamp') - assert 'ec5755e07287e04a4b409e03b77a517c' == hashvalue - - -def test_input_version(): - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - obj = DerivedInterface1() - obj._check_version_requirements(obj.inputs) - - config.set('execution', 'stop_on_unknown_version', True) - - with pytest.raises(Exception): obj._check_version_requirements(obj.inputs) - - config.set_default_config() - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - _version = '0.8' - obj = DerivedInterface1() - obj.inputs.foo = 1 - with pytest.raises(Exception): obj._check_version_requirements() - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - _version = '0.10' - obj = DerivedInterface1() - obj._check_version_requirements(obj.inputs) - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - _version = '0.9' - obj = DerivedInterface1() - obj.inputs.foo = 1 - not_raised = True - obj._check_version_requirements(obj.inputs) - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', max_ver='0.7') - - class DerivedInterface2(nib.BaseInterface): - input_spec = InputSpec - _version = '0.8' - obj = DerivedInterface2() - obj.inputs.foo = 1 - with pytest.raises(Exception): obj._check_version_requirements() - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', max_ver='0.9') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - _version = '0.9' - obj = DerivedInterface1() - obj.inputs.foo = 1 - not_raised = True - 
obj._check_version_requirements(obj.inputs) - - -def test_output_version(): - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - - class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.9') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - output_spec = OutputSpec - _version = '0.10' - obj = DerivedInterface1() - assert obj._check_version_requirements(obj._outputs()) == [] - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - - class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.11') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - output_spec = OutputSpec - _version = '0.10' - obj = DerivedInterface1() - assert obj._check_version_requirements(obj._outputs()) == ['foo'] - - class InputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int') - - class OutputSpec(nib.TraitedSpec): - foo = nib.traits.Int(desc='a random int', min_ver='0.11') - - class DerivedInterface1(nib.BaseInterface): - input_spec = InputSpec - output_spec = OutputSpec - _version = '0.10' - - def _run_interface(self, runtime): - return runtime - - def _list_outputs(self): - return {'foo': 1} - obj = DerivedInterface1() - with pytest.raises(KeyError): obj.run() - - -def test_Commandline(): - with pytest.raises(Exception): nib.CommandLine() - ci = nib.CommandLine(command='which') - assert ci.cmd == 'which' - assert ci.inputs.args == Undefined - ci2 = nib.CommandLine(command='which', args='ls') - assert ci2.cmdline == 'which ls' - ci3 = nib.CommandLine(command='echo') - ci3.inputs.environ = {'MYENV': 'foo'} - res = ci3.run() - assert res.runtime.environ['MYENV'] == 'foo' - assert res.outputs == None - - class CommandLineInputSpec1(nib.CommandLineInputSpec): - foo = nib.Str(argstr='%s', desc='a str') - goo = nib.traits.Bool(argstr='-g', desc='a bool', position=0) - hoo = nib.traits.List(argstr='-l %s', desc='a 
list') - moo = nib.traits.List(argstr='-i %d...', desc='a repeated list', - position=-1) - noo = nib.traits.Int(argstr='-x %d', desc='an int') - roo = nib.traits.Str(desc='not on command line') - soo = nib.traits.Bool(argstr="-soo") - nib.CommandLine.input_spec = CommandLineInputSpec1 - ci4 = nib.CommandLine(command='cmd') - ci4.inputs.foo = 'foo' - ci4.inputs.goo = True - ci4.inputs.hoo = ['a', 'b'] - ci4.inputs.moo = [1, 2, 3] - ci4.inputs.noo = 0 - ci4.inputs.roo = 'hello' - ci4.inputs.soo = False - cmd = ci4._parse_inputs() - assert cmd[0] == '-g' - assert cmd[-1] == '-i 1 -i 2 -i 3' - assert 'hello' not in ' '.join(cmd) - assert '-soo' not in ' '.join(cmd) - ci4.inputs.soo = True - cmd = ci4._parse_inputs() - assert '-soo' in ' '.join(cmd) - - class CommandLineInputSpec2(nib.CommandLineInputSpec): - foo = nib.File(argstr='%s', desc='a str', genfile=True) - nib.CommandLine.input_spec = CommandLineInputSpec2 - ci5 = nib.CommandLine(command='cmd') - with pytest.raises(NotImplementedError): ci5._parse_inputs() - - class DerivedClass(nib.CommandLine): - input_spec = CommandLineInputSpec2 - - def _gen_filename(self, name): - return 'filename' - - ci6 = DerivedClass(command='cmd') - assert ci6._parse_inputs()[0] == 'filename' - nib.CommandLine.input_spec = nib.CommandLineInputSpec - - -def test_Commandline_environ(): - from nipype import config - config.set_default_config() - ci3 = nib.CommandLine(command='echo') - res = ci3.run() - assert res.runtime.environ['DISPLAY'] == ':1' - config.set('execution', 'display_variable', ':3') - res = ci3.run() - assert not 'DISPLAY' in ci3.inputs.environ - assert res.runtime.environ['DISPLAY'] == ':3' - ci3.inputs.environ = {'DISPLAY': ':2'} - res = ci3.run() - assert res.runtime.environ['DISPLAY'] == ':2' - - -def test_CommandLine_output(setup_file): - tmp_infile = setup_file - tmpd, name = os.path.split(tmp_infile) - assert os.path.exists(tmp_infile) - ci = nib.CommandLine(command='ls -l') - ci.inputs.terminal_output = 
'allatonce' - res = ci.run() - assert res.runtime.merged == '' - assert name in res.runtime.stdout - ci = nib.CommandLine(command='ls -l') - ci.inputs.terminal_output = 'file' - res = ci.run() - assert 'stdout.nipype' in res.runtime.stdout - assert isinstance(res.runtime.stdout, (str, bytes)) - ci = nib.CommandLine(command='ls -l') - ci.inputs.terminal_output = 'none' - res = ci.run() - assert res.runtime.stdout == '' - ci = nib.CommandLine(command='ls -l') - res = ci.run() - assert 'stdout.nipype' in res.runtime.stdout - - -def test_global_CommandLine_output(setup_file): - tmp_infile = setup_file - tmpd, name = os.path.split(tmp_infile) - ci = nib.CommandLine(command='ls -l') - res = ci.run() - assert name in res.runtime.stdout - assert os.path.exists(tmp_infile) - nib.CommandLine.set_default_terminal_output('allatonce') - ci = nib.CommandLine(command='ls -l') - res = ci.run() - assert res.runtime.merged == '' - assert name in res.runtime.stdout - nib.CommandLine.set_default_terminal_output('file') - ci = nib.CommandLine(command='ls -l') - res = ci.run() - assert 'stdout.nipype' in res.runtime.stdout - nib.CommandLine.set_default_terminal_output('none') - ci = nib.CommandLine(command='ls -l') - res = ci.run() - assert res.runtime.stdout == '' - - -def check_dict(ref_dict, tst_dict): - """Compare dictionaries of inputs and and those loaded from json files""" - def to_list(x): - if isinstance(x, tuple): - x = list(x) - - if isinstance(x, list): - for i, xel in enumerate(x): - x[i] = to_list(xel) - - return x - - failed_dict = {} - for key, value in list(ref_dict.items()): - newval = to_list(tst_dict[key]) - if newval != value: - failed_dict[key] = (value, newval) - return failed_dict - -def test_ImageFile(): - x = nib.BaseInterface().inputs - - # setup traits - x.add_trait('nifti', nib.ImageFile(types=['nifti1', 'dicom'])) - x.add_trait('anytype', nib.ImageFile()) - x.add_trait('newtype', nib.ImageFile(types=['nifti10'])) - x.add_trait('nocompress', 
nib.ImageFile(types=['mgh'], - allow_compressed=False)) - - with pytest.raises(nib.TraitError): x.nifti = 'test.mgz' - x.nifti = 'test.nii' - x.anytype = 'test.xml' - with pytest.raises(AttributeError): x.newtype = 'test.nii' - with pytest.raises(nib.TraitError): x.nocompress = 'test.nii.gz' - x.nocompress = 'test.mgh' diff --git a/nipype/interfaces/tests/test_bids.py b/nipype/interfaces/tests/test_bids.py new file mode 100644 index 0000000000..aa5bc6c359 --- /dev/null +++ b/nipype/interfaces/tests/test_bids.py @@ -0,0 +1,50 @@ +import os +import json +import sys + +import pytest +from nipype.interfaces.bids_utils import BIDSDataGrabber +from nipype.utils.filemanip import dist_is_editable + +have_pybids = True +try: + import bids + from bids import grabbids as gb + filepath = os.path.realpath(os.path.dirname(bids.__file__)) + datadir = os.path.realpath(os.path.join(filepath, 'grabbids/tests/data/')) +except ImportError: + have_pybids = False + + +# There are three reasons these tests will be skipped: +@pytest.mark.skipif(not have_pybids, + reason="Pybids is not installed") +@pytest.mark.skipif(sys.version_info < (3, 0), + reason="Pybids no longer supports Python 2") +@pytest.mark.skipif(not dist_is_editable('pybids'), + reason="Pybids is not installed in editable mode") +def test_bids_grabber(tmpdir): + tmpdir.chdir() + bg = BIDSDataGrabber() + bg.inputs.base_dir = os.path.join(datadir, 'ds005') + bg.inputs.subject = '01' + results = bg.run() + assert os.path.basename(results.outputs.anat[0]) == 'sub-01_T1w.nii.gz' + assert os.path.basename(results.outputs.func[0]) == ( + 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz') + + +@pytest.mark.skipif(not have_pybids, + reason="Pybids is not installed") +@pytest.mark.skipif(sys.version_info < (3, 0), + reason="Pybids no longer supports Python 2") +@pytest.mark.skipif(not dist_is_editable('pybids'), + reason="Pybids is not installed in editable mode") +def test_bids_fields(tmpdir): + tmpdir.chdir() + bg = 
BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) + bg.inputs.base_dir = os.path.join(datadir, 'ds005') + bg.inputs.subject = '01' + bg.inputs.output_query['dwi'] = dict(modality='dwi') + results = bg.run() + assert os.path.basename(results.outputs.dwi[0]) == 'sub-01_dwi.nii.gz' diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py index 75eb323c4b..ff56c9ec9d 100644 --- a/nipype/interfaces/tests/test_io.py +++ b/nipype/interfaces/tests/test_io.py @@ -11,6 +11,7 @@ import os.path as op from subprocess import Popen import hashlib +from collections import namedtuple import pytest import nipype @@ -42,7 +43,6 @@ except CalledProcessError: fakes3 = False -from tempfile import mkstemp, mkdtemp def test_datagrabber(): dg = nio.DataGrabber() @@ -62,6 +62,7 @@ def test_s3datagrabber(): templates1 = {"model": "interfaces/{package}/model.py", "preprocess": "interfaces/{package}/pre*.py"} templates2 = {"converter": "interfaces/dcm{to!s}nii.py"} +templates3 = {"model": "interfaces/{package.name}/model.py"} @pytest.mark.parametrize("SF_args, inputs_att, expected", [ ({"templates":templates1}, {"package":"fsl"}, @@ -75,6 +76,11 @@ def test_s3datagrabber(): ({"templates":templates2}, {"to":2}, {"infields":["to"], "outfields":["converter"], "run_output":{"converter":op.join(op.dirname(nipype.__file__), "interfaces/dcm2nii.py")}, "node_output":["converter"]}), + + ({"templates": templates3}, {"package": namedtuple("package", ["name"])("fsl")}, + {"infields": ["package"], "outfields": ["model"], + "run_output": {"model": op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py")}, + "node_output": ["model"]}), ]) def test_selectfiles(SF_args, inputs_att, expected): base_dir = op.dirname(nipype.__file__) @@ -110,7 +116,7 @@ def test_s3datagrabber_communication(tmpdir): dg.inputs.anon = True dg.inputs.bucket = 'openfmri' dg.inputs.bucket_path = 'ds001/' - dg.inputs.local_directory = str(tmpdir) + dg.inputs.local_directory = tmpdir.strpath 
dg.inputs.sort_filelist = True dg.inputs.template = '*' dg.inputs.field_template = dict(func='%s/BOLD/task001_%s/bold.nii.gz', @@ -140,7 +146,7 @@ def test_datagrabber_order(tmpdir): tmpdir.join(file_name).open('a').close() dg = nio.DataGrabber(infields=['sid']) - dg.inputs.base_directory = str(tmpdir) + dg.inputs.base_directory = tmpdir.strpath dg.inputs.template = '%s_L%d_R*.q*' dg.inputs.template_args = {'outfiles': [['sid', 1], ['sid', 2], ['sid', 3]]} @@ -178,6 +184,7 @@ def dummy_input(request, tmpdir_factory): Function to create a dummy file ''' # Init variables + input_path = tmpdir_factory.mktemp('input_data').join('datasink_test_s3.txt') # Create input file @@ -201,7 +208,7 @@ def test_datasink_to_s3(dummy_input, tmpdir): attr_folder = 'text_file' output_dir = 's3://' + bucket_name # Local temporary filepaths for testing - fakes3_dir = str(tmpdir) + fakes3_dir = tmpdir.strpath input_path = dummy_input # Start up fake-S3 server @@ -273,7 +280,7 @@ def test_datasink_localcopy(dummy_input, tmpdir): ''' # Init variables - local_dir = str(tmpdir) + local_dir = tmpdir.strpath container = 'outputs' attr_folder = 'text_file' @@ -328,42 +335,41 @@ def test_datasink_substitutions(tmpdir): x in glob.glob(os.path.join(str(outdir), '*'))]) \ == ['!-yz-b.n', 'ABABAB.n'] # so we got re used 2nd and both patterns - -def _temp_analyze_files(): +@pytest.fixture() +def _temp_analyze_files(tmpdir): """Generate temporary analyze file pair.""" - fd, orig_img = mkstemp(suffix='.img', dir=mkdtemp()) - orig_hdr = orig_img[:-4] + '.hdr' - fp = open(orig_hdr, 'w+') - fp.close() - return orig_img, orig_hdr + img_dir = tmpdir.mkdir("img") + orig_img = img_dir.join("orig.img") + orig_hdr = img_dir.join("orig.hdr") + orig_img.open('w') + orig_hdr.open('w') + return orig_img.strpath, orig_hdr.strpath -def test_datasink_copydir(): - orig_img, orig_hdr = _temp_analyze_files() - outdir = mkdtemp() +def test_datasink_copydir_1(_temp_analyze_files, tmpdir): + orig_img, orig_hdr = 
_temp_analyze_files + outdir = tmpdir pth, fname = os.path.split(orig_img) - ds = nio.DataSink(base_directory=outdir, parameterization=False) + ds = nio.DataSink(base_directory=outdir.mkdir("basedir").strpath, parameterization=False) setattr(ds.inputs, '@outdir', pth) ds.run() sep = os.path.sep - file_exists = lambda: os.path.exists(os.path.join(outdir, - pth.split(sep)[-1], - fname)) - assert file_exists() - shutil.rmtree(pth) + assert tmpdir.join('basedir', pth.split(sep)[-1], fname).check() - orig_img, orig_hdr = _temp_analyze_files() +def test_datasink_copydir_2(_temp_analyze_files, tmpdir): + orig_img, orig_hdr = _temp_analyze_files pth, fname = os.path.split(orig_img) + ds = nio.DataSink(base_directory=tmpdir.mkdir("basedir").strpath, parameterization=False) ds.inputs.remove_dest_dir = True setattr(ds.inputs, 'outdir', pth) ds.run() - assert not file_exists() - shutil.rmtree(outdir) - shutil.rmtree(pth) + sep = os.path.sep + assert not tmpdir.join('basedir', pth.split(sep)[-1], fname).check() + assert tmpdir.join('basedir', 'outdir', pth.split(sep)[-1], fname).check() def test_datafinder_depth(tmpdir): - outdir = str(tmpdir) + outdir = tmpdir.strpath os.makedirs(os.path.join(outdir, '0', '1', '2', '3')) df = nio.DataFinder() @@ -380,7 +386,7 @@ def test_datafinder_depth(tmpdir): def test_datafinder_unpack(tmpdir): - outdir = str(tmpdir) + outdir = tmpdir.strpath single_res = os.path.join(outdir, "findme.txt") open(single_res, 'a').close() open(os.path.join(outdir, "dontfindme"), 'a').close() @@ -401,7 +407,7 @@ def test_freesurfersource(): assert fss.inputs.subjects_dir == Undefined -def test_jsonsink_input(tmpdir): +def test_jsonsink_input(): ds = nio.JSONFileSink() assert ds.inputs._outputs == {} @@ -418,7 +424,7 @@ def test_jsonsink_input(tmpdir): {'new_entry' : 'someValue', 'test' : 'testInfields'} ]) def test_jsonsink(tmpdir, inputs_attributes): - os.chdir(str(tmpdir)) + tmpdir.chdir() js = nio.JSONFileSink(infields=['test'], in_dict={'foo': 'var'}) 
setattr(js.inputs, 'contrasts.alt', 'someNestedValue') expected_data = {"contrasts": {"alt": "someNestedValue"}, "foo": "var"} diff --git a/nipype/interfaces/tests/test_matlab.py b/nipype/interfaces/tests/test_matlab.py index 33f80c0fa1..25b5ac964f 100644 --- a/nipype/interfaces/tests/test_matlab.py +++ b/nipype/interfaces/tests/test_matlab.py @@ -2,8 +2,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os -from tempfile import mkdtemp -from shutil import rmtree import pytest import nipype.interfaces.matlab as mlab @@ -70,7 +68,7 @@ def test_mlab_init(): @pytest.mark.skipif(no_matlab, reason="matlab is not available") -def test_run_interface(): +def test_run_interface(tmpdir): default_script_file = clean_workspace_and_get_default_script_file() mc = mlab.MatlabCommand(matlab_cmd='foo_m') @@ -89,12 +87,10 @@ def test_run_interface(): if os.path.exists(default_script_file): # cleanup os.remove(default_script_file) - cwd = os.getcwd() - basedir = mkdtemp() - os.chdir(basedir) + cwd = tmpdir.chdir() # bypasses ubuntu dash issue - mc = mlab.MatlabCommand(script='foo;', paths=[basedir], mfile=True) + mc = mlab.MatlabCommand(script='foo;', paths=[tmpdir.strpath], mfile=True) assert not os.path.exists(default_script_file), 'scriptfile should not exist 4.' with pytest.raises(RuntimeError): mc.run() @@ -103,11 +99,10 @@ def test_run_interface(): os.remove(default_script_file) # bypasses ubuntu dash issue - res = mlab.MatlabCommand(script='a=1;', paths=[basedir], mfile=True).run() + res = mlab.MatlabCommand(script='a=1;', paths=[tmpdir.strpath], mfile=True).run() assert res.runtime.returncode == 0 assert os.path.exists(default_script_file), 'scriptfile should exist 5.' 
- os.chdir(cwd) - rmtree(basedir) + cwd.chdir() @pytest.mark.skipif(no_matlab, reason="matlab is not available") diff --git a/nipype/interfaces/tests/test_nilearn.py b/nipype/interfaces/tests/test_nilearn.py index 4e8299aa74..ce3846a6d3 100644 --- a/nipype/interfaces/tests/test_nilearn.py +++ b/nipype/interfaces/tests/test_nilearn.py @@ -1,8 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os -import tempfile -import shutil import numpy as np @@ -33,13 +31,14 @@ class TestSignalExtraction(): labels = ['CSF', 'GrayMatter', 'WhiteMatter'] global_labels = ['GlobalSignal'] + labels - def setup_class(self): - self.orig_dir = os.getcwd() - self.temp_dir = tempfile.mkdtemp() - os.chdir(self.temp_dir) + @pytest.fixture(autouse=True, scope='class') + def setup_class(self, tmpdir_factory): + tempdir = tmpdir_factory.mktemp("test") + self.orig_dir = tempdir.chdir() utils.save_toy_nii(self.fake_fmri_data, self.filenames['in_file']) utils.save_toy_nii(self.fake_label_data, self.filenames['label_files']) + def test_signal_extract_no_shared(self): # run iface.SignalExtraction(in_file=self.filenames['in_file'], @@ -151,10 +150,9 @@ def assert_expected_output(self, labels, wanted): for j, segment in enumerate(time): npt.assert_almost_equal(segment, wanted[i][j], decimal=1) - - def teardown_class(self): - os.chdir(self.orig_dir) - shutil.rmtree(self.temp_dir) +#dj: self doesnt have orig_dir at this point, not sure how to change it. 
should work without it +# def teardown_class(self): +# self.orig_dir.chdir() fake_fmri_data = np.array([[[[2, -1, 4, -2, 3], diff --git a/nipype/interfaces/tests/test_runtime_profiler.py b/nipype/interfaces/tests/test_runtime_profiler.py deleted file mode 100644 index 400b2728ae..0000000000 --- a/nipype/interfaces/tests/test_runtime_profiler.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- -# test_runtime_profiler.py -# -# Author: Daniel Clark, 2016 - -""" -Module to unit test the runtime_profiler in nipype -""" - -from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import open, str - -# Import packages -from nipype.interfaces.base import (traits, CommandLine, CommandLineInputSpec, - runtime_profile) -import pytest -import sys - -run_profile = runtime_profile - -if run_profile: - try: - import psutil - skip_profile_msg = 'Run profiler tests' - except ImportError as exc: - skip_profile_msg = 'Missing python packages for runtime profiling, skipping...\n'\ - 'Error: %s' % exc - run_profile = False -else: - skip_profile_msg = 'Not running profiler' - -# UseResources inputspec -class UseResourcesInputSpec(CommandLineInputSpec): - ''' - use_resources cmd interface inputspec - ''' - - # Init attributes - num_gb = traits.Float(desc='Number of GB of RAM to use', - argstr='-g %f') - num_threads = traits.Int(desc='Number of threads to use', - argstr='-p %d') - - -# UseResources interface -class UseResources(CommandLine): - ''' - use_resources cmd interface - ''' - - # Import packages - import os - - # Init attributes - input_spec = UseResourcesInputSpec - - # Get path of executable - exec_dir = os.path.dirname(os.path.realpath(__file__)) - exec_path = os.path.join(exec_dir, 'use_resources') - - # Init cmd - _cmd = exec_path - - -# Spin multiple threads -def use_resources(num_threads, num_gb): - ''' - Function to execute multiple use_gb_ram functions in parallel - ''' - - # Function to occupy GB of memory - def 
_use_gb_ram(num_gb): - ''' - Function to consume GB of memory - ''' - import sys - - # Getsize of one character string - bsize = sys.getsizeof(' ') - sys.getsizeof(' ') - boffset = sys.getsizeof('') - - num_bytes = int(num_gb * (1024**3)) - # Eat num_gb GB of memory for 1 second - gb_str = ' ' * ((num_bytes - boffset) // bsize) - - assert sys.getsizeof(gb_str) == num_bytes - - # Spin CPU - ctr = 0 - while ctr < 30e6: - ctr += 1 - - # Clear memory - del ctr - del gb_str - - # Import packages - from multiprocessing import Process - from threading import Thread - - # Init variables - num_gb = float(num_gb) - - # Build thread list - thread_list = [] - for idx in range(num_threads): - thread = Thread(target=_use_gb_ram, args=(num_gb/num_threads,), - name=str(idx)) - thread_list.append(thread) - - # Run multi-threaded - print('Using %.3f GB of memory over %d sub-threads...' % \ - (num_gb, num_threads)) - for idx, thread in enumerate(thread_list): - thread.start() - - for thread in thread_list: - thread.join() - - -# Test case for the run function -class TestRuntimeProfiler(): - ''' - This class is a test case for the runtime profiler - ''' - - # setup method for the necessary arguments to run cpac_pipeline.run - def setup_class(self): - ''' - Method to instantiate TestRuntimeProfiler - - Parameters - ---------- - self : TestRuntimeProfile - ''' - - # Init parameters - # Input RAM GB to occupy - self.num_gb = 1.0 - # Input number of sub-threads (not including parent threads) - self.num_threads = 2 - # Acceptable percent error for memory profiled against input - self.mem_err_gb = 0.3 # Increased to 30% for py2.7 - - # ! Only used for benchmarking the profiler over a range of - # ! RAM usage and number of threads - # ! 
Requires a LOT of RAM to be tested - def _collect_range_runtime_stats(self, num_threads): - ''' - Function to collect a range of runtime stats - ''' - - # Import packages - import json - import numpy as np - import pandas as pd - - # Init variables - ram_gb_range = 10.0 - ram_gb_step = 0.25 - dict_list = [] - - # Iterate through all combos - for num_gb in np.arange(0.25, ram_gb_range+ram_gb_step, ram_gb_step): - # Cmd-level - cmd_start_str, cmd_fin_str = self._run_cmdline_workflow(num_gb, num_threads) - cmd_start_ts = json.loads(cmd_start_str)['start'] - cmd_node_stats = json.loads(cmd_fin_str) - cmd_runtime_threads = int(cmd_node_stats['runtime_threads']) - cmd_runtime_gb = float(cmd_node_stats['runtime_memory_gb']) - cmd_finish_ts = cmd_node_stats['finish'] - - # Func-level - func_start_str, func_fin_str = self._run_function_workflow(num_gb, num_threads) - func_start_ts = json.loads(func_start_str)['start'] - func_node_stats = json.loads(func_fin_str) - func_runtime_threads = int(func_node_stats['runtime_threads']) - func_runtime_gb = float(func_node_stats['runtime_memory_gb']) - func_finish_ts = func_node_stats['finish'] - - # Calc errors - cmd_threads_err = cmd_runtime_threads - num_threads - cmd_gb_err = cmd_runtime_gb - num_gb - func_threads_err = func_runtime_threads - num_threads - func_gb_err = func_runtime_gb - num_gb - - # Node dictionary - results_dict = {'input_threads' : num_threads, - 'input_gb' : num_gb, - 'cmd_runtime_threads' : cmd_runtime_threads, - 'cmd_runtime_gb' : cmd_runtime_gb, - 'func_runtime_threads' : func_runtime_threads, - 'func_runtime_gb' : func_runtime_gb, - 'cmd_threads_err' : cmd_threads_err, - 'cmd_gb_err' : cmd_gb_err, - 'func_threads_err' : func_threads_err, - 'func_gb_err' : func_gb_err, - 'cmd_start_ts' : cmd_start_ts, - 'cmd_finish_ts' : cmd_finish_ts, - 'func_start_ts' : func_start_ts, - 'func_finish_ts' : func_finish_ts} - # Append to list - dict_list.append(results_dict) - - # Create dataframe - runtime_results_df = 
pd.DataFrame(dict_list) - - # Return dataframe - return runtime_results_df - - # Test node - def _run_cmdline_workflow(self, num_gb, num_threads): - ''' - Function to run the use_resources cmdline script in a nipype workflow - and return the runtime stats recorded by the profiler - - Parameters - ---------- - self : TestRuntimeProfile - - Returns - ------- - finish_str : string - a json-compatible dictionary string containing the runtime - statistics of the nipype node that used system resources - ''' - - # Import packages - import logging - import os - import shutil - import tempfile - - import nipype.pipeline.engine as pe - import nipype.interfaces.utility as util - from nipype.pipeline.plugins.callback_log import log_nodes_cb - - # Init variables - base_dir = tempfile.mkdtemp() - log_file = os.path.join(base_dir, 'callback.log') - - # Init logger - logger = logging.getLogger('callback') - logger.setLevel(logging.DEBUG) - handler = logging.FileHandler(log_file) - logger.addHandler(handler) - - # Declare workflow - wf = pe.Workflow(name='test_runtime_prof_cmd') - wf.base_dir = base_dir - - # Input node - input_node = pe.Node(util.IdentityInterface(fields=['num_gb', - 'num_threads']), - name='input_node') - input_node.inputs.num_gb = num_gb - input_node.inputs.num_threads = num_threads - - # Resources used node - resource_node = pe.Node(UseResources(), name='resource_node') - resource_node.interface.estimated_memory_gb = num_gb - resource_node.interface.num_threads = num_threads - - # Connect workflow - wf.connect(input_node, 'num_gb', resource_node, 'num_gb') - wf.connect(input_node, 'num_threads', resource_node, 'num_threads') - - # Run workflow - plugin_args = {'n_procs' : num_threads, - 'memory_gb' : num_gb, - 'status_callback' : log_nodes_cb} - wf.run(plugin='MultiProc', plugin_args=plugin_args) - - # Get runtime stats from log file - with open(log_file, 'r') as log_handle: - lines = log_handle.readlines() - start_str = lines[0].rstrip('\n') - finish_str = 
lines[1].rstrip('\n') - - # Delete wf base dir - shutil.rmtree(base_dir) - - # Return runtime stats - return start_str, finish_str - - # Test node - def _run_function_workflow(self, num_gb, num_threads): - ''' - Function to run the use_resources() function in a nipype workflow - and return the runtime stats recorded by the profiler - - Parameters - ---------- - self : TestRuntimeProfile - - Returns - ------- - finish_str : string - a json-compatible dictionary string containing the runtime - statistics of the nipype node that used system resources - ''' - - # Import packages - import logging - import os - import shutil - import tempfile - - import nipype.pipeline.engine as pe - import nipype.interfaces.utility as util - from nipype.pipeline.plugins.callback_log import log_nodes_cb - - # Init variables - base_dir = tempfile.mkdtemp() - log_file = os.path.join(base_dir, 'callback.log') - - # Init logger - logger = logging.getLogger('callback') - logger.setLevel(logging.DEBUG) - handler = logging.FileHandler(log_file) - logger.addHandler(handler) - - # Declare workflow - wf = pe.Workflow(name='test_runtime_prof_func') - wf.base_dir = base_dir - - # Input node - input_node = pe.Node(util.IdentityInterface(fields=['num_gb', - 'num_threads']), - name='input_node') - input_node.inputs.num_gb = num_gb - input_node.inputs.num_threads = num_threads - - # Resources used node - resource_node = pe.Node(util.Function(input_names=['num_threads', - 'num_gb'], - output_names=[], - function=use_resources), - name='resource_node') - resource_node.interface.estimated_memory_gb = num_gb - resource_node.interface.num_threads = num_threads - - # Connect workflow - wf.connect(input_node, 'num_gb', resource_node, 'num_gb') - wf.connect(input_node, 'num_threads', resource_node, 'num_threads') - - # Run workflow - plugin_args = {'n_procs' : num_threads, - 'memory_gb' : num_gb, - 'status_callback' : log_nodes_cb} - wf.run(plugin='MultiProc', plugin_args=plugin_args) - - # Get runtime stats 
from log file - with open(log_file, 'r') as log_handle: - lines = log_handle.readlines() - start_str = lines[0].rstrip('\n') - finish_str = lines[1].rstrip('\n') - - # Delete wf base dir - shutil.rmtree(base_dir) - - # Return runtime stats - return start_str, finish_str - - # Test resources were used as expected in cmdline interface - @pytest.mark.skipif(run_profile == False, reason=skip_profile_msg) - def test_cmdline_profiling(self): - ''' - Test runtime profiler correctly records workflow RAM/CPUs consumption - from a cmdline function - ''' - - # Import packages - import json - import numpy as np - - # Init variables - num_gb = self.num_gb - num_threads = self.num_threads - - # Run workflow and get stats - start_str, finish_str = self._run_cmdline_workflow(num_gb, num_threads) - # Get runtime stats as dictionary - node_stats = json.loads(finish_str) - - # Read out runtime stats - runtime_gb = float(node_stats['runtime_memory_gb']) - runtime_threads = int(node_stats['runtime_threads']) - - # Get margin of error for RAM GB - allowed_gb_err = self.mem_err_gb - runtime_gb_err = np.abs(runtime_gb-num_gb) - # - expected_runtime_threads = num_threads - - # Error message formatting - mem_err = 'Input memory: %f is not within %.3f GB of runtime '\ - 'memory: %f' % (num_gb, self.mem_err_gb, runtime_gb) - threads_err = 'Input threads: %d is not equal to runtime threads: %d' \ - % (expected_runtime_threads, runtime_threads) - - # Assert runtime stats are what was input - assert runtime_gb_err <= allowed_gb_err, mem_err - assert abs(expected_runtime_threads - runtime_threads) <= 1, threads_err - - # Test resources were used as expected - @pytest.mark.skipif(True, reason="https://github.com/nipy/nipype/issues/1663") - @pytest.mark.skipif(run_profile == False, reason=skip_profile_msg) - def test_function_profiling(self): - ''' - Test runtime profiler correctly records workflow RAM/CPUs consumption - from a python function - ''' - - # Import packages - import json - import 
numpy as np - - # Init variables - num_gb = self.num_gb - num_threads = self.num_threads - - # Run workflow and get stats - start_str, finish_str = self._run_function_workflow(num_gb, num_threads) - # Get runtime stats as dictionary - node_stats = json.loads(finish_str) - - # Read out runtime stats - runtime_gb = float(node_stats['runtime_memory_gb']) - runtime_threads = int(node_stats['runtime_threads']) - - # Get margin of error for RAM GB - allowed_gb_err = self.mem_err_gb - runtime_gb_err = np.abs(runtime_gb-num_gb) - # - expected_runtime_threads = num_threads - - # Error message formatting - mem_err = 'Input memory: %f is not within %.3f GB of runtime '\ - 'memory: %f' % (num_gb, self.mem_err_gb, runtime_gb) - threads_err = 'Input threads: %d is not equal to runtime threads: %d' \ - % (expected_runtime_threads, runtime_threads) - - # Assert runtime stats are what was input - assert runtime_gb_err <= allowed_gb_err, mem_err - assert abs(expected_runtime_threads - runtime_threads) <= 1, threads_err - - diff --git a/nipype/interfaces/tests/use_resources b/nipype/interfaces/tests/use_resources deleted file mode 100755 index 06e2d3e906..0000000000 --- a/nipype/interfaces/tests/use_resources +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python -# -# use_resources - -''' -Python script to use a certain amount of RAM on disk and number of -threads - -Usage: - use_resources -g -p -''' - -# Function to occupy GB of memory -def use_gb_ram(num_gb): - ''' - Function to consume GB of memory - ''' - - # Eat 1 GB of memory for 1 second - gb_str = ' ' * int(num_gb*1024.0**3) - - # Spin CPU - ctr = 0 - while ctr < 30e6: - ctr+= 1 - - # Clear memory - del ctr - del gb_str - - -# Make main executable -if __name__ == '__main__': - - # Import packages - import argparse - from threading import Thread - from multiprocessing import Process - - # Init argparser - parser = argparse.ArgumentParser(description=__doc__) - - # Add arguments - parser.add_argument('-g', '--num_gb', nargs=1, 
required=True, - help='Number of GB RAM to use, can be float or int') - parser.add_argument('-p', '--num_threads', nargs=1, required=True, - help='Number of threads to run in parallel') - - # Parse args - args = parser.parse_args() - - # Init variables - num_gb = float(args.num_gb[0]) - num_threads = int(args.num_threads[0]) - - # Build thread list - thread_list = [] - for idx in range(num_threads): - thread_list.append(Thread(target=use_gb_ram, args=(num_gb/num_threads,))) - - # Run multi-threaded - print('Using %.3f GB of memory over %d sub-threads...' % \ - (num_gb, num_threads)) - for thread in thread_list: - thread.start() - - for thread in thread_list: - thread.join() diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py index 60e4c4aa3f..ec744d9fce 100644 --- a/nipype/interfaces/utility/base.py +++ b/nipype/interfaces/utility/base.py @@ -2,15 +2,9 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ -Various utilities - - Change directory to provide relative paths for doctests - >>> import os - >>> filepath = os.path.dirname(os.path.realpath(__file__)) - >>> datadir = os.path.realpath(os.path.join(filepath, - ... 
'../../testing/data')) - >>> os.chdir(datadir) - + # changing to temporary directories + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() """ from __future__ import print_function, division, unicode_literals, absolute_import from builtins import range @@ -47,7 +41,7 @@ class IdentityInterface(IOBase): >>> out = ii.run() - >>> out.outputs.a # doctest: +ALLOW_UNICODE + >>> out.outputs.a 'foo' >>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True) @@ -231,14 +225,14 @@ class Rename(IOBase): >>> from nipype.interfaces.utility import Rename >>> rename1 = Rename() - >>> rename1.inputs.in_file = "zstat1.nii.gz" + >>> rename1.inputs.in_file = os.path.join(datadir, "zstat1.nii.gz") # datadir is a directory with exemplary files, defined in conftest.py >>> rename1.inputs.format_string = "Faces-Scenes.nii.gz" >>> res = rename1.run() # doctest: +SKIP >>> res.outputs.out_file # doctest: +SKIP 'Faces-Scenes.nii.gz" # doctest: +SKIP >>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d") - >>> rename2.inputs.in_file = "functional.nii" + >>> rename2.inputs.in_file = os.path.join(datadir, "functional.nii") >>> rename2.inputs.keep_ext = True >>> rename2.inputs.subject_id = "subj_201" >>> rename2.inputs.run = 2 @@ -247,7 +241,7 @@ class Rename(IOBase): 'subj_201_func_run02.nii' # doctest: +SKIP >>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii") - >>> rename3.inputs.in_file = "func_epi_1_1.nii" + >>> rename3.inputs.in_file = os.path.join(datadir, "func_epi_1_1.nii") >>> rename3.inputs.parse_string = "func_(?P\w*)_.*" >>> rename3.inputs.subject_id = "subj_201" >>> rename3.inputs.run = 2 diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py index 739725a417..0b561d9702 100644 --- a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -4,7 +4,8 @@ def test_AssertEqual_inputs(): - 
input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), volume1=dict(mandatory=True, diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py index 649d626a5f..1831728c04 100644 --- a/nipype/interfaces/utility/tests/test_auto_Function.py +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -6,7 +6,8 @@ def test_Function_inputs(): input_map = dict(function_str=dict(mandatory=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), ) diff --git a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py index f98e70892b..07f5b60962 100644 --- a/nipype/interfaces/utility/tests/test_auto_Merge.py +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -6,7 +6,8 @@ def test_Merge_inputs(): input_map = dict(axis=dict(usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), no_flatten=dict(usedefault=True, diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py index 3c67785702..7889366b76 100644 --- a/nipype/interfaces/utility/tests/test_auto_Select.py +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -4,7 +4,8 @@ def test_Select_inputs(): - input_map = dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), index=dict(mandatory=True, diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py index 663ff65b13..a0e02af267 100644 --- a/nipype/interfaces/utility/tests/test_auto_Split.py +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -4,7 +4,8 @@ def test_Split_inputs(): - input_map = 
dict(ignore_exception=dict(nohash=True, + input_map = dict(ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), inlist=dict(mandatory=True, diff --git a/nipype/interfaces/utility/tests/test_base.py b/nipype/interfaces/utility/tests/test_base.py index 3d2fbd2b5f..3e66f827d2 100644 --- a/nipype/interfaces/utility/tests/test_base.py +++ b/nipype/interfaces/utility/tests/test_base.py @@ -11,13 +11,13 @@ def test_rename(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() # Test very simple rename _ = open("file.txt", "w").close() rn = utility.Rename(in_file="file.txt", format_string="test_file1.txt") res = rn.run() - outfile = str(tmpdir.join("test_file1.txt")) + outfile = tmpdir.join("test_file1.txt").strpath assert res.outputs.out_file == outfile assert os.path.exists(outfile) @@ -31,7 +31,7 @@ def test_rename(tmpdir): rn.inputs.field1 = "test" rn.inputs.field2 = 2 res = rn.run() - outfile = str(tmpdir.join("test_file2.txt")) + outfile = tmpdir.join("test_file2.txt").strpath assert res.outputs.out_file == outfile assert os.path.exists(outfile) @@ -41,7 +41,7 @@ def test_rename(tmpdir): ({"squeeze" : True}, (0 , [1,2,3])) ]) def test_split(tmpdir, args, expected): - os.chdir(str(tmpdir)) + tmpdir.chdir() node = pe.Node(utility.Split(inlist=list(range(4)), splits=[1, 3], @@ -64,7 +64,7 @@ def test_split(tmpdir, args, expected): [[0, 2, 4], [1, 3, 5]]), ]) def test_merge(tmpdir, args, kwargs, in_lists, expected): - os.chdir(str(tmpdir)) + tmpdir.chdir() node = pe.Node(utility.Merge(*args, **kwargs), name='merge') diff --git a/nipype/interfaces/utility/tests/test_csv.py b/nipype/interfaces/utility/tests/test_csv.py index 86ac95a371..f0101b4da7 100644 --- a/nipype/interfaces/utility/tests/test_csv.py +++ b/nipype/interfaces/utility/tests/test_csv.py @@ -12,7 +12,7 @@ def test_csvReader(tmpdir): "bar,world,5\n", "baz,goodbye,0.3\n"] for x in range(2): - name = str(tmpdir.join("testfile.csv")) + name = tmpdir.join("testfile.csv").strpath with open(name, 
'w') as fid: reader = utility.CSVReader() if x % 2 == 0: diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index 3384a5865c..b995dc27ad 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -16,7 +16,7 @@ def concat_sort(in_arrays): """ def test_function(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() def gen_random_array(size): import numpy as np @@ -46,8 +46,8 @@ def make_random_array(size): return np.random.randn(size, size) -def should_fail(tmpdir): - os.chdir(tmpdir) +def should_fail(tmp): + tmp.chdir() node = pe.Node(utility.Function(input_names=["size"], output_names=["random_array"], @@ -59,11 +59,11 @@ def should_fail(tmpdir): def test_should_fail(tmpdir): with pytest.raises(NameError): - should_fail(str(tmpdir)) + should_fail(tmpdir) def test_function_with_imports(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() node = pe.Node(utility.Function(input_names=["size"], output_names=["random_array"], @@ -79,7 +79,7 @@ def test_aux_connect_function(tmpdir): """ This tests excution nodes with multiple inputs and auxiliary function inside the Workflow connect function. """ - os.chdir(str(tmpdir)) + tmpdir.chdir() wf = pe.Workflow(name="test_workflow") diff --git a/nipype/interfaces/utility/wrappers.py b/nipype/interfaces/utility/wrappers.py index 4de11d7ea8..9999c4af6a 100644 --- a/nipype/interfaces/utility/wrappers.py +++ b/nipype/interfaces/utility/wrappers.py @@ -1,17 +1,12 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""Various utilities - - Change directory to provide relative paths for doctests - >>> import os - >>> filepath = os.path.dirname(os.path.realpath(__file__)) - >>> datadir = os.path.realpath(os.path.join(filepath, - ... 
'../../testing/data')) - >>> os.chdir(datadir) - - """ +# changing to temporary directories + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() +""" + from __future__ import print_function, division, unicode_literals, absolute_import from future import standard_library @@ -20,21 +15,13 @@ from builtins import str, bytes from ... import logging -from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined, runtime_profile, - BaseInterfaceInputSpec, get_max_resources_used) +from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined, + BaseInterfaceInputSpec) from ..io import IOBase, add_traits from ...utils.filemanip import filename_to_list -from ...utils.misc import getsource, create_function_from_source - -logger = logging.getLogger('interface') -if runtime_profile: - try: - import psutil - except ImportError as exc: - logger.info('Unable to import packages needed for runtime profiling. '\ - 'Turning off runtime profiler. Reason: %s' % exc) - runtime_profile = False +from ...utils.functions import getsource, create_function_from_source +iflogger = logging.getLogger('interface') class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): function_str = traits.Str(mandatory=True, desc='code for function') @@ -137,22 +124,9 @@ def _add_output_traits(self, base): return base def _run_interface(self, runtime): - # Get workflow logger for runtime profile error reporting - logger = logging.getLogger('workflow') - # Create function handle function_handle = create_function_from_source(self.inputs.function_str, self.imports) - - # Wrapper for running function handle in multiprocessing.Process - # Can catch exceptions and report output via multiprocessing.Queue - def _function_handle_wrapper(queue, **kwargs): - try: - out = function_handle(**kwargs) - queue.put(out) - except Exception as exc: - queue.put(exc) - # Get function args args = {} for name in self._input_names: @@ -160,37 +134,7 @@ def _function_handle_wrapper(queue, **kwargs): if 
isdefined(value): args[name] = value - # Profile resources if set - if runtime_profile: - import multiprocessing - # Init communication queue and proc objs - queue = multiprocessing.Queue() - proc = multiprocessing.Process(target=_function_handle_wrapper, - args=(queue,), kwargs=args) - - # Init memory and threads before profiling - mem_mb = 0 - num_threads = 0 - - # Start process and profile while it's alive - proc.start() - while proc.is_alive(): - mem_mb, num_threads = \ - get_max_resources_used(proc.pid, mem_mb, num_threads, - pyfunc=True) - - # Get result from process queue - out = queue.get() - # If it is an exception, raise it - if isinstance(out, Exception): - raise out - - # Function ran successfully, populate runtime stats - setattr(runtime, 'runtime_memory_gb', mem_mb / 1024.0) - setattr(runtime, 'runtime_threads', num_threads) - else: - out = function_handle(**args) - + out = function_handle(**args) if len(self._output_names) == 1: self._out[self._output_names[0]] = out else: diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py index 460c1eac79..83bd21b7bf 100644 --- a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -12,7 +12,8 @@ def test_Vnifti2Image_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -26,7 +27,8 @@ def test_Vnifti2Image_inputs(): name_template='%s.v', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = Vnifti2Image.input_spec() diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py b/nipype/interfaces/vista/tests/test_auto_VtoMat.py index 055e665abc..e9e198e90b 100644 --- a/nipype/interfaces/vista/tests/test_auto_VtoMat.py +++ 
b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -9,7 +9,8 @@ def test_VtoMat_inputs(): environ=dict(nohash=True, usedefault=True, ), - ignore_exception=dict(nohash=True, + ignore_exception=dict(deprecated='1.0.0', + nohash=True, usedefault=True, ), in_file=dict(argstr='-in %s', @@ -23,7 +24,8 @@ def test_VtoMat_inputs(): name_template='%s.mat', position=-1, ), - terminal_output=dict(nohash=True, + terminal_output=dict(deprecated='1.0.0', + nohash=True, ), ) inputs = VtoMat.input_spec() diff --git a/nipype/interfaces/vista/vista.py b/nipype/interfaces/vista/vista.py index e898956d65..e8928ae24a 100644 --- a/nipype/interfaces/vista/vista.py +++ b/nipype/interfaces/vista/vista.py @@ -34,7 +34,7 @@ class Vnifti2Image(CommandLine): >>> vimage = Vnifti2Image() >>> vimage.inputs.in_file = 'image.nii' - >>> vimage.cmdline # doctest: +ALLOW_UNICODE + >>> vimage.cmdline 'vnifti2image -in image.nii -out image.v' >>> vimage.run() # doctest: +SKIP """ @@ -63,7 +63,7 @@ class VtoMat(CommandLine): >>> vimage = VtoMat() >>> vimage.inputs.in_file = 'image.v' - >>> vimage.cmdline # doctest: +ALLOW_UNICODE + >>> vimage.cmdline 'vtomat -in image.v -out image.mat' >>> vimage.run() # doctest: +SKIP """ diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 622003f8a2..ed1fde9d28 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: @@ -15,49 +14,53 @@ """ from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import range, object, str, bytes, open +from builtins import range, str, bytes, open -from future import standard_library -standard_library.install_aliases() from collections import OrderedDict -from copy import deepcopy -import pickle -from glob import glob -import gzip import os import os.path as op 
import shutil -import errno import socket -from shutil import rmtree -import sys +from copy import deepcopy +from glob import glob + from tempfile import mkdtemp -from hashlib import sha1 +from future import standard_library from ... import config, logging -from ...utils.misc import (flatten, unflatten, str2bool) -from ...utils.filemanip import (save_json, FileNotFoundError, - filename_to_list, list_to_filename, - copyfiles, fnames_presuffix, loadpkl, - split_filename, load_json, savepkl, - write_rst_header, write_rst_dict, - write_rst_list, to_str) -from ...interfaces.base import (traits, InputMultiPath, CommandLine, - Undefined, TraitedSpec, DynamicTraitedSpec, - Bunch, InterfaceResult, md5, Interface, - TraitDictObject, TraitListObject, isdefined, - runtime_profile) -from .utils import (generate_expanded_graph, modify_paths, - export_graph, make_output_dir, write_workflow_prov, - clean_working_directory, format_dot, topological_sort, - get_print_name, merge_dict, evaluate_connect_function) +from ...utils.misc import flatten, unflatten, str2bool, dict_diff +from ...utils.filemanip import ( + md5, FileNotFoundError, filename_to_list, list_to_filename, + copyfiles, fnames_presuffix, loadpkl, split_filename, load_json, makedirs, + emptydirs, savepkl, to_str +) + +from ...interfaces.base import ( + traits, InputMultiPath, CommandLine, Undefined, DynamicTraitedSpec, + Bunch, InterfaceResult, Interface, isdefined +) +from .utils import ( + _parameterization_dir, + save_hashfile as _save_hashfile, + load_resultfile as _load_resultfile, + save_resultfile as _save_resultfile, + nodelist_runner as _node_runner, + strip_temp as _strip_temp, + write_report, + clean_working_directory, + merge_dict, evaluate_connect_function +) from .base import EngineBase +standard_library.install_aliases() + logger = logging.getLogger('workflow') + class Node(EngineBase): - """Wraps interface objects for use in pipeline + """ + Wraps interface objects for use in pipeline A Node creates a 
sandbox-like directory for executing the underlying interface. It will copy or link inputs into this directory to ensure that @@ -79,7 +82,7 @@ class Node(EngineBase): def __init__(self, interface, name, iterables=None, itersource=None, synchronize=False, overwrite=None, needed_outputs=None, - run_without_submitting=False, n_procs=1, mem_gb=None, + run_without_submitting=False, n_procs=None, mem_gb=0.20, **kwargs): """ Parameters @@ -149,34 +152,40 @@ def __init__(self, interface, name, iterables=None, itersource=None, multiprocessing pool """ - base_dir = None - if 'base_dir' in kwargs: - base_dir = kwargs['base_dir'] - super(Node, self).__init__(name, base_dir) + # Make sure an interface is set, and that it is an Interface if interface is None: raise IOError('Interface must be provided') if not isinstance(interface, Interface): raise IOError('interface must be an instance of an Interface') - self._interface = interface + + super(Node, self).__init__(name, kwargs.get('base_dir')) + self.name = name - self._result = None + self._interface = interface + self._hierarchy = None + self._got_inputs = False + self._originputs = None + self._output_dir = None self.iterables = iterables self.synchronize = synchronize self.itersource = itersource self.overwrite = overwrite self.parameterization = None - self.run_without_submitting = run_without_submitting self.input_source = {} - self.needed_outputs = [] self.plugin_args = {} - self._interface.num_threads = n_procs - if mem_gb is not None: - self._interface.estimated_memory_gb = mem_gb + self.run_without_submitting = run_without_submitting + self._mem_gb = mem_gb + self._n_procs = n_procs + + # Downstream n_procs + if hasattr(self._interface.inputs, 'num_threads') and self._n_procs is not None: + self._interface.inputs.num_threads = self._n_procs + # Initialize needed_outputs + self.needed_outputs = [] if needed_outputs: self.needed_outputs = sorted(needed_outputs) - self._got_inputs = False @property def interface(self): 
@@ -185,12 +194,8 @@ def interface(self): @property def result(self): - if self._result: - return self._result - else: - cwd = self.output_dir() - result, _, _ = self._load_resultfile(cwd) - return result + """Get result from result file (do not hold it in memory)""" + return _load_resultfile(self.output_dir(), self.name)[0] @property def inputs(self): @@ -202,8 +207,43 @@ def outputs(self): """Return the output fields of the underlying interface""" return self._interface._outputs() + @property + def mem_gb(self): + """Get estimated memory (GB)""" + if hasattr(self._interface, 'estimated_memory_gb'): + self._mem_gb = self._interface.estimated_memory_gb + logger.warning('Setting "estimated_memory_gb" on Interfaces has been ' + 'deprecated as of nipype 1.0, please use Node.mem_gb.') + + return self._mem_gb + + @property + def n_procs(self): + """Get the estimated number of processes/threads""" + if self._n_procs is not None: + return self._n_procs + if hasattr(self._interface.inputs, + 'num_threads') and isdefined(self._interface.inputs.num_threads): + return self._interface.inputs.num_threads + + return 1 + + @n_procs.setter + def n_procs(self, value): + """Set an estimated number of processes/threads""" + self._n_procs = value + + # Overwrite interface's dynamic input of num_threads + if hasattr(self._interface.inputs, 'num_threads'): + self._interface.inputs.num_threads = self._n_procs + def output_dir(self): """Return the location of the output directory for the node""" + # Output dir is cached + if self._output_dir: + return self._output_dir + + # Calculate & cache otherwise if self.base_dir is None: self.base_dir = mkdtemp() outputdir = self.base_dir @@ -212,54 +252,96 @@ def output_dir(self): if self.parameterization: params_str = ['{}'.format(p) for p in self.parameterization] if not str2bool(self.config['execution']['parameterize_dirs']): - params_str = [self._parameterization_dir(p) for p in params_str] + params_str = [_parameterization_dir(p) for p in 
params_str] outputdir = op.join(outputdir, *params_str) - return op.abspath(op.join(outputdir, self.name)) + + self._output_dir = op.abspath(op.join(outputdir, self.name)) + return self._output_dir def set_input(self, parameter, val): - """ Set interface input value""" - logger.debug('setting nodelevel(%s) input %s = %s', + """Set interface input value""" + logger.debug('[Node] %s - setting input %s = %s', self.name, parameter, to_str(val)) setattr(self.inputs, parameter, deepcopy(val)) def get_output(self, parameter): """Retrieve a particular output of the node""" - val = None - if self._result: - val = getattr(self._result.outputs, parameter) - else: - cwd = self.output_dir() - result, _, _ = self._load_resultfile(cwd) - if result and result.outputs: - val = getattr(result.outputs, parameter) - return val + return getattr(self.result.outputs, parameter, None) def help(self): - """ Print interface help""" + """Print interface help""" self._interface.help() def hash_exists(self, updatehash=False): + """ + Check if the interface has been run previously, and whether + cached results are viable for reuse + """ + # Get a dictionary with hashed filenames and a hashvalue # of the dictionary itself. 
hashed_inputs, hashvalue = self._get_hashval() outdir = self.output_dir() - if op.exists(outdir): - logger.debug('Output dir: %s', to_str(os.listdir(outdir))) - hashfiles = glob(op.join(outdir, '_0x*.json')) - logger.debug('Found hashfiles: %s', to_str(hashfiles)) - if len(hashfiles) > 1: - logger.info(hashfiles) - logger.info('Removing multiple hashfiles and forcing node to rerun') - for hashfile in hashfiles: - os.unlink(hashfile) hashfile = op.join(outdir, '_0x%s.json' % hashvalue) - logger.debug('Final hashfile: %s', hashfile) - if updatehash and op.exists(outdir): - logger.debug("Updating hash: %s", hashvalue) - for file in glob(op.join(outdir, '_0x*.json')): - os.remove(file) - self._save_hashfile(hashfile, hashed_inputs) - return op.exists(hashfile), hashvalue, hashfile, hashed_inputs + hash_exists = op.exists(hashfile) + + logger.debug('[Node] hash value=%s, exists=%s', hashvalue, hash_exists) + + if op.exists(outdir): + # Find previous hashfiles + globhashes = glob(op.join(outdir, '_0x*.json')) + unfinished = [path for path in globhashes if path.endswith('_unfinished.json')] + hashfiles = list(set(globhashes) - set(unfinished)) + if len(hashfiles) > 1: + for rmfile in hashfiles: + os.remove(rmfile) + + raise RuntimeError( + '[Node] Cache ERROR - Found %d previous hashfiles indicating ' + 'that the ``base_dir`` for this node went stale. Please re-run the ' + 'workflow.' % len(hashfiles)) + + # This should not happen, but clean up and break if so. + if unfinished and updatehash: + for rmfile in unfinished: + os.remove(rmfile) + + raise RuntimeError( + '[Node] Cache ERROR - Found unfinished hashfiles (%d) indicating ' + 'that the ``base_dir`` for this node went stale. Please re-run the ' + 'workflow.' 
% len(unfinished)) + + # Remove outdated hashfile + if hashfiles and hashfiles[0] != hashfile: + logger.info('[Node] Outdated hashfile found for "%s", removing and forcing node ' + 'to rerun.', self.fullname) + + # If logging is more verbose than INFO (20), print diff between hashes + loglevel = logger.getEffectiveLevel() + if loglevel < 20: # Lazy logging: only < INFO + split_out = split_filename(hashfiles[0]) + exp_hash_file_base = split_out[1] + exp_hash = exp_hash_file_base[len('_0x'):] + logger.log(loglevel, "[Node] Old/new hashes = %s/%s", exp_hash, hashvalue) + try: + prev_inputs = load_json(hashfiles[0]) + except Exception: + pass + else: + logger.log(loglevel, dict_diff(prev_inputs, hashed_inputs, 10)) + + os.remove(hashfiles[0]) + + # Update only possible if it exists + if hash_exists and updatehash: + logger.debug("[Node] Updating hash: %s", hashvalue) + _save_hashfile(hashfile, hashed_inputs) + + logger.debug( + 'updatehash=%s, overwrite=%s, always_run=%s, hash_exists=%s, ' + 'hash_method=%s', updatehash, self.overwrite, self._interface.always_run, + hash_exists, self.config['execution']['hash_method'].lower()) + return hash_exists, hashvalue, hashfile, hashed_inputs def run(self, updatehash=False): """Execute the node in its directory. @@ -268,144 +350,115 @@ def run(self, updatehash=False): ---------- updatehash: boolean - Update the hash stored in the output directory + When the hash stored in the output directory as a result of a previous run + does not match that calculated for this execution, updatehash=True only + updates the hash without re-running. 
""" - # check to see if output directory and hash exist + if self.config is None: - self.config = deepcopy(config._sections) - else: - self.config = merge_dict(deepcopy(config._sections), self.config) - if not self._got_inputs: - self._get_inputs() - self._got_inputs = True + self.config = {} + self.config = merge_dict(deepcopy(config._sections), self.config) + self._get_inputs() + + # Check if output directory exists outdir = self.output_dir() - logger.info("Executing node %s in dir: %s", self._id, outdir) if op.exists(outdir): - logger.debug('Output dir: %s', to_str(os.listdir(outdir))) + logger.debug('Output directory (%s) exists and is %sempty,', + outdir, 'not ' * bool(os.listdir(outdir))) + + # Check hash, check whether run should be enforced + logger.info('[Node] Setting-up "%s" in "%s".', self.fullname, outdir) hash_info = self.hash_exists(updatehash=updatehash) hash_exists, hashvalue, hashfile, hashed_inputs = hash_info - logger.debug( - 'updatehash=%s, overwrite=%s, always_run=%s, hash_exists=%s', - updatehash, self.overwrite, self._interface.always_run, hash_exists) - if (not updatehash and (((self.overwrite is None and - self._interface.always_run) or - self.overwrite) or not - hash_exists)): - logger.debug("Node hash: %s", hashvalue) - - # by rerunning we mean only nodes that did finish to run previously - json_pat = op.join(outdir, '_0x*.json') - json_unfinished_pat = op.join(outdir, '_0x*_unfinished.json') - need_rerun = (op.exists(outdir) and not - isinstance(self, MapNode) and - len(glob(json_pat)) != 0 and - len(glob(json_unfinished_pat)) == 0) - if need_rerun: - logger.debug( - "Rerunning node:\n" - "updatehash = %s, self.overwrite = %s, self._interface.always_run = %s, " - "os.path.exists(%s) = %s, hash_method = %s", updatehash, self.overwrite, - self._interface.always_run, hashfile, op.exists(hashfile), - self.config['execution']['hash_method'].lower()) - log_debug = config.get('logging', 'workflow_level') == 'DEBUG' - if log_debug and not 
op.exists(hashfile): - exp_hash_paths = glob(json_pat) - if len(exp_hash_paths) == 1: - split_out = split_filename(exp_hash_paths[0]) - exp_hash_file_base = split_out[1] - exp_hash = exp_hash_file_base[len('_0x'):] - logger.debug("Previous node hash = %s", exp_hash) - try: - prev_inputs = load_json(exp_hash_paths[0]) - except: - pass - else: - logging.logdebug_dict_differences(prev_inputs, - hashed_inputs) - cannot_rerun = (str2bool( - self.config['execution']['stop_on_first_rerun']) and not - (self.overwrite is None and self._interface.always_run)) - if cannot_rerun: - raise Exception(("Cannot rerun when 'stop_on_first_rerun' " - "is set to True")) - hashfile_unfinished = op.join(outdir, - '_0x%s_unfinished.json' % - hashvalue) - if op.exists(hashfile): - os.remove(hashfile) - rm_outdir = (op.exists(outdir) and not - (op.exists(hashfile_unfinished) and - self._interface.can_resume) and not - isinstance(self, MapNode)) - if rm_outdir: - logger.debug("Removing old %s and its contents", outdir) - try: - rmtree(outdir) - except OSError as ex: - outdircont = os.listdir(outdir) - if ((ex.errno == errno.ENOTEMPTY) and (len(outdircont) == 0)): - logger.warn( - 'An exception was raised trying to remove old %s, but the path ' - 'seems empty. Is it an NFS mount?. 
Passing the exception.', outdir) - elif ((ex.errno == errno.ENOTEMPTY) and (len(outdircont) != 0)): - logger.debug( - 'Folder contents (%d items): %s', len(outdircont), outdircont) - raise ex - else: - raise ex + force_run = self.overwrite or (self.overwrite is None and self._interface.always_run) + + # If the node is cached, check on pklz files and finish + if hash_exists and (updatehash or not force_run): + logger.debug("Only updating node hashes or skipping execution") + inputs_file = op.join(outdir, '_inputs.pklz') + if not op.exists(inputs_file): + logger.debug('Creating inputs file %s', inputs_file) + savepkl(inputs_file, self.inputs.get_traitsfree()) + + node_file = op.join(outdir, '_node.pklz') + if not op.exists(node_file): + logger.debug('Creating node file %s', node_file) + savepkl(node_file, self) + + result = self._run_interface(execute=False, updatehash=updatehash) + logger.info('[Node] "%s" found cached%s.', self.fullname, + ' (and hash updated)' * updatehash) + return result - else: - logger.debug( - "%s found and can_resume is True or Node is a MapNode - resuming execution", - hashfile_unfinished) - if isinstance(self, MapNode): - # remove old json files - for filename in glob(op.join(outdir, '_0x*.json')): - os.unlink(filename) - outdir = make_output_dir(outdir) - self._save_hashfile(hashfile_unfinished, hashed_inputs) - self.write_report(report_type='preexec', cwd=outdir) - savepkl(op.join(outdir, '_node.pklz'), self) - savepkl(op.join(outdir, '_inputs.pklz'), - self.inputs.get_traitsfree()) - try: - self._run_interface() - except: - os.remove(hashfile_unfinished) - raise - shutil.move(hashfile_unfinished, hashfile) - self.write_report(report_type='postexec', cwd=outdir) + # by rerunning we mean only nodes that did finish to run previously + if hash_exists and not isinstance(self, MapNode): + logger.debug('[Node] Rerunning "%s"', self.fullname) + if not force_run and str2bool(self.config['execution']['stop_on_first_rerun']): + raise 
Exception('Cannot rerun when "stop_on_first_rerun" is set to True') + + # Remove hashfile if it exists at this point (re-running) + if op.exists(hashfile): + os.remove(hashfile) + + # Hashfile while running + hashfile_unfinished = op.join( + outdir, '_0x%s_unfinished.json' % hashvalue) + + # Delete directory contents if this is not a MapNode or can't resume + rm_outdir = not isinstance(self, MapNode) and not ( + self._interface.can_resume and op.isfile(hashfile_unfinished)) + if rm_outdir: + emptydirs(outdir, noexist_ok=True) else: - if not op.exists(op.join(outdir, '_inputs.pklz')): - logger.debug('%s: creating inputs file', self.name) - savepkl(op.join(outdir, '_inputs.pklz'), - self.inputs.get_traitsfree()) - if not op.exists(op.join(outdir, '_node.pklz')): - logger.debug('%s: creating node file', self.name) - savepkl(op.join(outdir, '_node.pklz'), self) - logger.debug("Hashfile exists. Skipping execution") - self._run_interface(execute=False, updatehash=updatehash) - logger.debug('Finished running %s in dir: %s\n', self._id, outdir) - return self._result - - # Private functions - def _parameterization_dir(self, param): - """ - Returns the directory name for the given parameterization string as follows: - - If the parameterization is longer than 32 characters, then - return the SHA-1 hex digest. - - Otherwise, return the parameterization unchanged. - """ - if len(param) > 32: - return sha1(param.encode()).hexdigest() - else: - return param + logger.debug('[%sNode] Resume - hashfile=%s', + 'Map' * int(isinstance(self, MapNode)), + hashfile_unfinished) + if isinstance(self, MapNode): + # remove old json files + for filename in glob(op.join(outdir, '_0x*.json')): + os.remove(filename) + + # Make sure outdir is created + makedirs(outdir, exist_ok=True) + + # Store runtime-hashfile, pre-execution report, the node and the inputs set. 
+ _save_hashfile(hashfile_unfinished, hashed_inputs) + write_report(self, report_type='preexec', + is_mapnode=isinstance(self, MapNode)) + savepkl(op.join(outdir, '_node.pklz'), self) + savepkl(op.join(outdir, '_inputs.pklz'), + self.inputs.get_traitsfree()) + + try: + cwd = os.getcwd() + except OSError: + # Changing back to cwd is probably not necessary + # but this makes sure there's somewhere to change to. + cwd = op.split(outdir)[0] + logger.warning('Current folder "%s" does not exist, changing to "%s" instead.', + os.getenv('PWD', 'unknown'), cwd) + + os.chdir(outdir) + try: + result = self._run_interface(execute=True) + except Exception: + logger.warning('[Node] Error on "%s" (%s)', self.fullname, outdir) + # Tear-up after error + os.remove(hashfile_unfinished) + raise + finally: # Ensure we come back to the original CWD + os.chdir(cwd) + + # Tear-up after success + shutil.move(hashfile_unfinished, hashfile) + write_report(self, report_type='postexec', + is_mapnode=isinstance(self, MapNode)) + logger.info('[Node] Finished "%s".', self.fullname) + return result def _get_hashval(self): """Return a hash of the input state""" - if not self._got_inputs: - self._get_inputs() - self._got_inputs = True + self._get_inputs() hashed_inputs, hashvalue = self.inputs.get_hashval( hash_method=self.config['execution']['hash_method']) rm_extra = self.config['execution']['remove_unnecessary_outputs'] @@ -418,30 +471,15 @@ def _get_hashval(self): hashed_inputs.append(('needed_outputs', sorted_outputs)) return hashed_inputs, hashvalue - def _save_hashfile(self, hashfile, hashed_inputs): - try: - save_json(hashfile, hashed_inputs) - except (IOError, TypeError): - err_type = sys.exc_info()[0] - if err_type is TypeError: - # XXX - SG current workaround is to just - # create the hashed file and not put anything - # in it - with open(hashfile, 'wt') as fd: - fd.writelines(str(hashed_inputs)) - - logger.debug( - 'Unable to write a particular type to the json file') - else: - 
logger.critical('Unable to open the file in write mode: %s', - hashfile) - def _get_inputs(self): """Retrieve inputs from pointers to results file This mechanism can be easily extended/replaced to retrieve data from other data sources (e.g., XNAT, HTTP, etc.,.) """ + if self._got_inputs: + return + logger.debug('Setting node inputs') for key, info in list(self.input_source.items()): logger.debug('input: %s', key) @@ -453,9 +491,8 @@ def _get_inputs(self): output_name = info[1][0] value = getattr(results.outputs, output_name) if isdefined(value): - output_value = evaluate_connect_function(info[1][1], - info[1][2], - value) + output_value = evaluate_connect_function( + info[1][1], info[1][2], value) else: output_name = info[1] try: @@ -474,88 +511,17 @@ def _get_inputs(self): e.args = (e.args[0] + "\n" + '\n'.join(msg),) raise + # Successfully set inputs + self._got_inputs = True + def _run_interface(self, execute=True, updatehash=False): if updatehash: - return - old_cwd = os.getcwd() - os.chdir(self.output_dir()) - self._result = self._run_command(execute) - os.chdir(old_cwd) - - def _save_results(self, result, cwd): - resultsfile = op.join(cwd, 'result_%s.pklz' % self.name) - if result.outputs: - try: - outputs = result.outputs.get() - except TypeError: - outputs = result.outputs.dictcopy() # outputs was a bunch - result.outputs.set(**modify_paths(outputs, relative=True, - basedir=cwd)) + return self._load_results() + return self._run_command(execute) - savepkl(resultsfile, result) - logger.debug('saved results in %s', resultsfile) - - if result.outputs: - result.outputs.set(**outputs) - - def _load_resultfile(self, cwd): - """Load results if it exists in cwd - - Parameter - --------- - - cwd : working directory of node - - Returns - ------- - - result : InterfaceResult structure - aggregate : boolean indicating whether node should aggregate_outputs - attribute error : boolean indicating whether there was some mismatch in - versions of traits used to store result 
and hence node needs to - rerun - """ - aggregate = True - resultsoutputfile = op.join(cwd, 'result_%s.pklz' % self.name) - result = None - attribute_error = False - if op.exists(resultsoutputfile): - pkl_file = gzip.open(resultsoutputfile, 'rb') - try: - result = pickle.load(pkl_file) - except UnicodeDecodeError: - # Was this pickle created with Python 2.x? - pickle.load(pkl_file, fix_imports=True, encoding='utf-8') - logger.warn('Successfully loaded pickle in compatibility mode') - except (traits.TraitError, AttributeError, ImportError, - EOFError) as err: - if isinstance(err, (AttributeError, ImportError)): - attribute_error = True - logger.debug('attribute error: %s probably using ' - 'different trait pickled file', str(err)) - else: - logger.debug( - 'some file does not exist. hence trait cannot be set') - else: - if result.outputs: - try: - outputs = result.outputs.get() - except TypeError: - outputs = result.outputs.dictcopy() # outputs == Bunch - try: - result.outputs.set(**modify_paths(outputs, - relative=False, - basedir=cwd)) - except FileNotFoundError: - logger.debug('conversion to full path results in ' - 'non existent file') - aggregate = False - pkl_file.close() - logger.debug('Aggregate: %s', aggregate) - return result, aggregate, attribute_error - - def _load_results(self, cwd): - result, aggregate, attribute_error = self._load_resultfile(cwd) + def _load_results(self): + cwd = self.output_dir() + result, aggregate, attribute_error = _load_resultfile(cwd, self.name) # try aggregating first if aggregate: logger.debug('aggregating results') @@ -563,7 +529,7 @@ def _load_results(self, cwd): old_inputs = loadpkl(op.join(cwd, '_inputs.pklz')) self.inputs.trait_set(**old_inputs) if not isinstance(self, MapNode): - self._copyfiles_to_wd(cwd, True, linksonly=True) + self._copyfiles_to_wd(linksonly=True) aggouts = self._interface.aggregate_outputs( needed_outputs=self.needed_outputs) runtime = Bunch(cwd=cwd, @@ -575,177 +541,131 @@ def _load_results(self, 
cwd): runtime=runtime, inputs=self._interface.inputs.get_traitsfree(), outputs=aggouts) - self._save_results(result, cwd) + _save_resultfile(result, cwd, self.name) else: logger.debug('aggregating mapnode results') - self._run_interface() - result = self._result + result = self._run_interface() return result def _run_command(self, execute, copyfiles=True): - cwd = os.getcwd() - if execute and copyfiles: + + if not execute: + try: + result = self._load_results() + except (FileNotFoundError, AttributeError): + # if aggregation does not work, rerun the node + logger.info("[Node] Some of the outputs were not found: " + "rerunning node.") + copyfiles = False # OE: this was like this before, + execute = True # I'll keep them for safety + else: + logger.info( + "[Node] Cached - collecting precomputed outputs") + return result + + # Run command: either execute is true or load_results failed. + runtime = Bunch(returncode=1, + environ=dict(os.environ), + hostname=socket.gethostname()) + result = InterfaceResult( + interface=self._interface.__class__, + runtime=runtime, + inputs=self._interface.inputs.get_traitsfree()) + + outdir = self.output_dir() + if copyfiles: self._originputs = deepcopy(self._interface.inputs) - if execute: - runtime = Bunch(returncode=1, - environ=dict(os.environ), - hostname=socket.gethostname()) - result = InterfaceResult( - interface=self._interface.__class__, - runtime=runtime, - inputs=self._interface.inputs.get_traitsfree()) - self._result = result - logger.debug('Executing node') - if copyfiles: - self._copyfiles_to_wd(cwd, execute) - if issubclass(self._interface.__class__, CommandLine): - try: - cmd = self._interface.cmdline - except Exception as msg: - self._result.runtime.stderr = msg - raise - cmdfile = op.join(cwd, 'command.txt') - fd = open(cmdfile, 'wt') - fd.writelines(cmd + "\n") - fd.close() - logger.info('Running: %s' % cmd) + self._copyfiles_to_wd(execute=execute) + + message = '[Node] Running "%s" ("%s.%s")' + if 
issubclass(self._interface.__class__, CommandLine): try: - result = self._interface.run() + cmd = self._interface.cmdline except Exception as msg: - self._save_results(result, cwd) - self._result.runtime.stderr = msg + result.runtime.stderr = '%s\n\n%s' % ( + getattr(result.runtime, 'stderr', ''), msg) + _save_resultfile(result, outdir, self.name) raise + cmdfile = op.join(outdir, 'command.txt') + with open(cmdfile, 'wt') as fd: + print(cmd + "\n", file=fd) + message += ', a CommandLine Interface with command:\n%s' % cmd + logger.info(message, self.name, self._interface.__module__, + self._interface.__class__.__name__) + try: + result = self._interface.run() + except Exception as msg: + result.runtime.stderr = '%s\n\n%s' % ( + getattr(result.runtime, 'stderr', ''), msg) + _save_resultfile(result, outdir, self.name) + raise + + dirs2keep = None + if isinstance(self, MapNode): + dirs2keep = [op.join(outdir, 'mapflow')] + + result.outputs = clean_working_directory( + result.outputs, outdir, + self._interface.inputs, + self.needed_outputs, + self.config, + dirs2keep=dirs2keep + ) + _save_resultfile(result, outdir, self.name) - dirs2keep = None - if isinstance(self, MapNode): - dirs2keep = [op.join(cwd, 'mapflow')] - result.outputs = clean_working_directory(result.outputs, cwd, - self._interface.inputs, - self.needed_outputs, - self.config, - dirs2keep=dirs2keep) - self._save_results(result, cwd) - else: - logger.info("Collecting precomputed outputs") - try: - result = self._load_results(cwd) - except (FileNotFoundError, AttributeError): - # if aggregation does not work, rerun the node - logger.info(("Some of the outputs were not found: " - "rerunning node.")) - result = self._run_command(execute=True, copyfiles=False) return result - def _strip_temp(self, files, wd): - out = [] - for f in files: - if isinstance(f, list): - out.append(self._strip_temp(f, wd)) - else: - out.append(f.replace(op.join(wd, '_tempinput'), wd)) - return out - - def _copyfiles_to_wd(self, 
outdir, execute, linksonly=False): - """ copy files over and change the inputs""" - if hasattr(self._interface, '_get_filecopy_info'): - logger.debug('copying files to wd [execute=%s, linksonly=%s]', - str(execute), str(linksonly)) - if execute and linksonly: - olddir = outdir - outdir = op.join(outdir, '_tempinput') - os.makedirs(outdir) - for info in self._interface._get_filecopy_info(): - files = self.inputs.get().get(info['key']) - if not isdefined(files): - continue - if files: - infiles = filename_to_list(files) - if execute: - if linksonly: - if not info['copy']: - newfiles = copyfiles(infiles, - [outdir], - copy=info['copy'], - create_new=True) - else: - newfiles = fnames_presuffix(infiles, - newpath=outdir) - newfiles = self._strip_temp( - newfiles, - op.abspath(olddir).split(op.sep)[-1]) - else: - newfiles = copyfiles(infiles, - [outdir], - copy=info['copy'], - create_new=True) + def _copyfiles_to_wd(self, execute=True, linksonly=False): + """copy files over and change the inputs""" + if not hasattr(self._interface, '_get_filecopy_info'): + # Nothing to be done + return + + logger.debug('copying files to wd [execute=%s, linksonly=%s]', + execute, linksonly) + + outdir = self.output_dir() + if execute and linksonly: + olddir = outdir + outdir = op.join(outdir, '_tempinput') + makedirs(outdir, exist_ok=True) + + for info in self._interface._get_filecopy_info(): + files = self.inputs.get().get(info['key']) + if not isdefined(files) or not files: + continue + + infiles = filename_to_list(files) + if execute: + if linksonly: + if not info['copy']: + newfiles = copyfiles(infiles, + [outdir], + copy=info['copy'], + create_new=True) else: - newfiles = fnames_presuffix(infiles, newpath=outdir) - if not isinstance(files, list): - newfiles = list_to_filename(newfiles) - setattr(self.inputs, info['key'], newfiles) - if execute and linksonly: - rmtree(outdir) + newfiles = fnames_presuffix(infiles, + newpath=outdir) + newfiles = _strip_temp( + newfiles, + 
op.abspath(olddir).split(op.sep)[-1]) + else: + newfiles = copyfiles(infiles, + [outdir], + copy=info['copy'], + create_new=True) + else: + newfiles = fnames_presuffix(infiles, newpath=outdir) + if not isinstance(files, list): + newfiles = list_to_filename(newfiles) + setattr(self.inputs, info['key'], newfiles) + if execute and linksonly: + emptydirs(outdir, noexist_ok=True) def update(self, **opts): + """Update inputs""" self.inputs.update(**opts) - def write_report(self, report_type=None, cwd=None): - if not str2bool(self.config['execution']['create_report']): - return - report_dir = op.join(cwd, '_report') - report_file = op.join(report_dir, 'report.rst') - if not op.exists(report_dir): - os.makedirs(report_dir) - if report_type == 'preexec': - logger.debug('writing pre-exec report to %s', report_file) - fp = open(report_file, 'wt') - fp.writelines(write_rst_header('Node: %s' % get_print_name(self), - level=0)) - fp.writelines(write_rst_list(['Hierarchy : %s' % self.fullname, - 'Exec ID : %s' % self._id])) - fp.writelines(write_rst_header('Original Inputs', level=1)) - fp.writelines(write_rst_dict(self.inputs.get())) - if report_type == 'postexec': - logger.debug('writing post-exec report to %s', report_file) - fp = open(report_file, 'at') - fp.writelines(write_rst_header('Execution Inputs', level=1)) - fp.writelines(write_rst_dict(self.inputs.get())) - exit_now = (not hasattr(self.result, 'outputs') or - self.result.outputs is None) - if exit_now: - return - fp.writelines(write_rst_header('Execution Outputs', level=1)) - if isinstance(self.result.outputs, Bunch): - fp.writelines(write_rst_dict(self.result.outputs.dictcopy())) - elif self.result.outputs: - fp.writelines(write_rst_dict(self.result.outputs.get())) - if isinstance(self, MapNode): - fp.close() - return - fp.writelines(write_rst_header('Runtime info', level=1)) - # Init rst dictionary of runtime stats - rst_dict = {'hostname' : self.result.runtime.hostname, - 'duration' : 
self.result.runtime.duration} - # Try and insert memory/threads usage if available - if runtime_profile: - try: - rst_dict['runtime_memory_gb'] = self.result.runtime.runtime_memory_gb - rst_dict['runtime_threads'] = self.result.runtime.runtime_threads - except AttributeError: - logger.info('Runtime memory and threads stats unavailable') - if hasattr(self.result.runtime, 'cmdline'): - rst_dict['command'] = self.result.runtime.cmdline - fp.writelines(write_rst_dict(rst_dict)) - else: - fp.writelines(write_rst_dict(rst_dict)) - if hasattr(self.result.runtime, 'merged'): - fp.writelines(write_rst_header('Terminal output', level=2)) - fp.writelines(write_rst_list(self.result.runtime.merged)) - if hasattr(self.result.runtime, 'environ'): - fp.writelines(write_rst_header('Environment', level=2)) - fp.writelines(write_rst_dict(self.result.runtime.environ)) - fp.close() - class JoinNode(Node): """Wraps interface objects that join inputs into a list. @@ -797,7 +717,8 @@ def __init__(self, interface, name, joinsource, joinfield=None, """ super(JoinNode, self).__init__(interface, name, **kwargs) - self.joinsource = joinsource + self._joinsource = None # The member should be defined + self.joinsource = joinsource # Let the setter do the job """the join predecessor iterable node""" if not joinfield: @@ -852,7 +773,7 @@ def _add_join_item_fields(self): ... name='inputspec'), >>> join = JoinNode(IdentityInterface(fields=['images', 'mask']), ... 
joinsource='inputspec', joinfield='images', name='join') - >>> join._add_join_item_fields() # doctest: +ALLOW_UNICODE + >>> join._add_join_item_fields() {'images': 'imagesJ1'} Return the {base field: slot field} dictionary @@ -872,7 +793,7 @@ def _add_join_item_field(self, field, index): Return the new field name """ # the new field name - name = self._join_item_field_name(field, index) + name = "%sJ%d" % (field, index + 1) # make a copy of the join trait trait = self._inputs.trait(field, False, True) # add the join item trait to the override traits @@ -880,10 +801,6 @@ def _add_join_item_field(self, field, index): return name - def _join_item_field_name(self, field, index): - """Return the field suffixed by the index + 1""" - return "%sJ%d" % (field, index + 1) - def _override_join_traits(self, basetraits, fields): """Convert the given join fields to accept an input that is a list item rather than a list. Non-join fields @@ -932,7 +849,8 @@ def _collate_join_field_inputs(self): try: setattr(self._interface.inputs, field, val) except Exception as e: - raise ValueError(">>JN %s %s %s %s %s: %s" % (self, field, val, self.inputs.copyable_trait_names(), self.joinfield, e)) + raise ValueError(">>JN %s %s %s %s %s: %s" % ( + self, field, val, self.inputs.copyable_trait_names(), self.joinfield, e)) elif hasattr(self._interface.inputs, field): # copy the non-join field val = getattr(self._inputs, field) @@ -958,13 +876,14 @@ def _collate_input_value(self, field): basetrait = self._interface.inputs.trait(field) if isinstance(basetrait.trait_type, traits.Set): return set(val) - elif self._unique: + + if self._unique: return list(OrderedDict.fromkeys(val)) - else: - return val + + return val def _slot_value(self, field, index): - slot_field = self._join_item_field_name(field, index) + slot_field = "%sJ%d" % (field, index + 1) try: return getattr(self._inputs, slot_field) except AttributeError as e: @@ -1004,10 +923,13 @@ def __init__(self, interface, iterfield, name, 
serial=False, nested=False, **kwa name : alphanumeric string node specific name serial : boolean - flag to enforce executing the jobs of the mapnode in a serial manner rather than parallel - nested : boolea - support for nested lists, if set the input list will be flattened before running, and the - nested list structure of the outputs will be resored + flag to enforce executing the jobs of the mapnode in a serial + manner rather than parallel + nested : boolean + support for nested lists. If set, the input list will be flattened + before running and the nested list structure of the outputs will + be resored. + See Node docstring for additional keyword arguments. """ @@ -1045,15 +967,15 @@ def _create_dynamic_traits(self, basetraits, fields=None, nitems=None): return output def set_input(self, parameter, val): - """ Set interface input value or nodewrapper attribute - + """ + Set interface input value or nodewrapper attribute Priority goes to interface. """ logger.debug('setting nodelevel(%s) input %s = %s', to_str(self), parameter, to_str(val)) - self._set_mapnode_input(self.inputs, parameter, deepcopy(val)) + self._set_mapnode_input(parameter, deepcopy(val)) - def _set_mapnode_input(self, object, name, newvalue): + def _set_mapnode_input(self, name, newvalue): logger.debug('setting mapnode(%s) input: %s -> %s', to_str(self), name, to_str(newvalue)) if name in self.iterfield: @@ -1062,10 +984,8 @@ def _set_mapnode_input(self, object, name, newvalue): setattr(self._interface.inputs, name, newvalue) def _get_hashval(self): - """ Compute hash including iterfield lists.""" - if not self._got_inputs: - self._get_inputs() - self._got_inputs = True + """Compute hash including iterfield lists.""" + self._get_inputs() self._check_iterfield() hashinputs = deepcopy(self._interface.inputs) for name in self.iterfield: @@ -1100,8 +1020,6 @@ def inputs(self): def outputs(self): if self._interface._outputs(): return Bunch(self._interface._outputs().get()) - else: - return None 
def _make_nodes(self, cwd=None): if cwd is None: @@ -1111,18 +1029,19 @@ def _make_nodes(self, cwd=None): else: nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) for i in range(nitems): - nodename = '_' + self.name + str(i) + nodename = '_%s%d' % (self.name, i) node = Node(deepcopy(self._interface), - n_procs=self._interface.num_threads, - mem_gb=self._interface.estimated_memory_gb, + n_procs=self._n_procs, + mem_gb=self._mem_gb, overwrite=self.overwrite, needed_outputs=self.needed_outputs, run_without_submitting=self.run_without_submitting, base_dir=op.join(cwd, 'mapflow'), name=nodename) node.plugin_args = self.plugin_args - node._interface.inputs.trait_set( + node.interface.inputs.trait_set( **deepcopy(self._interface.inputs.get())) + node.interface.resource_monitor = self._interface.resource_monitor for field in self.iterfield: if self.nested: fieldvals = flatten(filename_to_list(getattr(self.inputs, field))) @@ -1133,35 +1052,23 @@ def _make_nodes(self, cwd=None): node.config = self.config yield i, node - def _node_runner(self, nodes, updatehash=False): - old_cwd = os.getcwd() - for i, node in nodes: - err = None - try: - node.run(updatehash=updatehash) - except Exception as this_err: - err = this_err - if str2bool(self.config['execution']['stop_on_first_crash']): - raise - finally: - os.chdir(old_cwd) - yield i, node, err - def _collate_results(self, nodes): - self._result = InterfaceResult(interface=[], runtime=[], - provenance=[], inputs=[], - outputs=self.outputs) + finalresult = InterfaceResult( + interface=[], runtime=[], provenance=[], inputs=[], + outputs=self.outputs) returncode = [] - for i, node, err in nodes: - self._result.runtime.insert(i, None) - if node.result: - if hasattr(node.result, 'runtime'): - self._result.interface.insert(i, node.result.interface) - self._result.inputs.insert(i, node.result.inputs) - self._result.runtime[i] = node.result.runtime - if hasattr(node.result, 'provenance'): - 
self._result.provenance.insert(i, node.result.provenance) + for i, nresult, err in nodes: + finalresult.runtime.insert(i, None) returncode.insert(i, err) + + if nresult: + if hasattr(nresult, 'runtime'): + finalresult.interface.insert(i, nresult.interface) + finalresult.inputs.insert(i, nresult.inputs) + finalresult.runtime[i] = nresult.runtime + if hasattr(nresult, 'provenance'): + finalresult.provenance.insert(i, nresult.provenance) + if self.outputs: for key, _ in list(self.outputs.items()): rm_extra = (self.config['execution'] @@ -1169,78 +1076,52 @@ def _collate_results(self, nodes): if str2bool(rm_extra) and self.needed_outputs: if key not in self.needed_outputs: continue - values = getattr(self._result.outputs, key) + values = getattr(finalresult.outputs, key) if not isdefined(values): values = [] - if node.result.outputs: - values.insert(i, node.result.outputs.get()[key]) + if nresult and nresult.outputs: + values.insert(i, nresult.outputs.get()[key]) else: values.insert(i, None) defined_vals = [isdefined(val) for val in values] - if any(defined_vals) and self._result.outputs: - setattr(self._result.outputs, key, values) + if any(defined_vals) and finalresult.outputs: + setattr(finalresult.outputs, key, values) if self.nested: for key, _ in list(self.outputs.items()): - values = getattr(self._result.outputs, key) + values = getattr(finalresult.outputs, key) if isdefined(values): - values = unflatten(values, filename_to_list(getattr(self.inputs, self.iterfield[0]))) - setattr(self._result.outputs, key, values) + values = unflatten(values, filename_to_list( + getattr(self.inputs, self.iterfield[0]))) + setattr(finalresult.outputs, key, values) if returncode and any([code is not None for code in returncode]): msg = [] for i, code in enumerate(returncode): if code is not None: msg += ['Subnode %d failed' % i] - msg += ['Error:', str(code)] + msg += ['Error: %s' % str(code)] raise Exception('Subnodes of node: %s failed:\n%s' % (self.name, '\n'.join(msg))) - def 
write_report(self, report_type=None, cwd=None): - if not str2bool(self.config['execution']['create_report']): - return - if report_type == 'preexec': - super(MapNode, self).write_report(report_type=report_type, cwd=cwd) - if report_type == 'postexec': - super(MapNode, self).write_report(report_type=report_type, cwd=cwd) - report_dir = op.join(cwd, '_report') - report_file = op.join(report_dir, 'report.rst') - fp = open(report_file, 'at') - fp.writelines(write_rst_header('Subnode reports', level=1)) - nitems = len(filename_to_list( - getattr(self.inputs, self.iterfield[0]))) - subnode_report_files = [] - for i in range(nitems): - nodename = '_' + self.name + str(i) - subnode_report_files.insert(i, 'subnode %d' % i + ' : ' + - op.join(cwd, - 'mapflow', - nodename, - '_report', - 'report.rst')) - fp.writelines(write_rst_list(subnode_report_files)) - fp.close() + return finalresult def get_subnodes(self): - if not self._got_inputs: - self._get_inputs() - self._got_inputs = True + """Generate subnodes of a mapnode and write pre-execution report""" + self._get_inputs() self._check_iterfield() - self.write_report(report_type='preexec', cwd=self.output_dir()) + write_report(self, report_type='preexec', is_mapnode=True) return [node for _, node in self._make_nodes()] def num_subnodes(self): - if not self._got_inputs: - self._get_inputs() - self._got_inputs = True + """Get the number of subnodes to iterate in this MapNode""" + self._get_inputs() self._check_iterfield() if self._serial: return 1 - else: - if self.nested: - return len(filename_to_list(flatten(getattr(self.inputs, self.iterfield[0])))) - else: - return len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) + if self.nested: + return len(filename_to_list(flatten(getattr(self.inputs, self.iterfield[0])))) + return len(filename_to_list(getattr(self.inputs, self.iterfield[0]))) def _get_inputs(self): old_inputs = self._inputs.get() @@ -1275,29 +1156,37 @@ def _run_interface(self, execute=True, 
updatehash=False): This is primarily intended for serial execution of mapnode. A parallel execution requires creation of new nodes that can be spawned """ - old_cwd = os.getcwd() - cwd = self.output_dir() - os.chdir(cwd) self._check_iterfield() - if execute: - if self.nested: - nitems = len(filename_to_list(flatten(getattr(self.inputs, - self.iterfield[0])))) - else: - nitems = len(filename_to_list(getattr(self.inputs, - self.iterfield[0]))) - nodenames = ['_' + self.name + str(i) for i in range(nitems)] - self._collate_results(self._node_runner(self._make_nodes(cwd), - updatehash=updatehash)) - self._save_results(self._result, cwd) - # remove any node directories no longer required - dirs2remove = [] - for path in glob(op.join(cwd, 'mapflow', '*')): - if op.isdir(path): - if path.split(op.sep)[-1] not in nodenames: - dirs2remove.append(path) - for path in dirs2remove: - shutil.rmtree(path) + cwd = self.output_dir() + if not execute: + return self._load_results() + + # Set up mapnode folder names + if self.nested: + nitems = len(filename_to_list(flatten(getattr(self.inputs, + self.iterfield[0])))) else: - self._result = self._load_results(cwd) - os.chdir(old_cwd) + nitems = len(filename_to_list(getattr(self.inputs, + self.iterfield[0]))) + nnametpl = '_%s{}' % self.name + nodenames = [nnametpl.format(i) for i in range(nitems)] + + # Run mapnode + result = self._collate_results(_node_runner( + self._make_nodes(cwd), + updatehash=updatehash, + stop_first=str2bool(self.config['execution']['stop_on_first_crash']) + )) + # And store results + _save_resultfile(result, cwd, self.name) + # remove any node directories no longer required + dirs2remove = [] + for path in glob(op.join(cwd, 'mapflow', '*')): + if op.isdir(path): + if path.split(op.sep)[-1] not in nodenames: + dirs2remove.append(path) + for path in dirs2remove: + logger.debug('[MapNode] Removing folder "%s".' 
, path) + shutil.rmtree(path) + + return result diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index e2624d03c8..034174758a 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -316,7 +316,7 @@ def test_disconnect(): flow1 = pe.Workflow(name='test') flow1.connect(a, 'a', b, 'a') flow1.disconnect(a, 'a', b, 'a') - assert flow1._graph.edges() == [] + assert list(flow1._graph.edges()) == [] def test_doubleconnect(): @@ -456,8 +456,26 @@ def test_mapnode_iterfield_check(): with pytest.raises(ValueError): mod1._check_iterfield() +@pytest.mark.parametrize("x_inp, f_exp", [ + (3, [6]), ([2, 3], [4, 6]), ((2, 3), [4, 6]), + (range(3), [0, 2, 4]), + ("Str", ["StrStr"]), (["Str1", "Str2"], ["Str1Str1", "Str2Str2"]) + ]) +def test_mapnode_iterfield_type(x_inp, f_exp): + from nipype import MapNode, Function + def double_func(x): + return 2 * x + double = Function(["x"], ["f_x"], double_func) + + double_node = MapNode(double, name="double", iterfield=["x"]) + double_node.inputs.x = x_inp + + res = double_node.run() + assert res.outputs.f_x == f_exp + + def test_mapnode_nested(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() from nipype import MapNode, Function def func1(in1): @@ -470,7 +488,6 @@ def func1(in1): name='n1') n1.inputs.in1 = [[1, [2]], 3, [4, 5]] n1.run() - print(n1.get_output('out')) assert n1.get_output('out') == [[2, [3]], 4, [5, 6]] n2 = MapNode(Function(input_names=['in1'], @@ -487,7 +504,7 @@ def func1(in1): def test_mapnode_expansion(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() from nipype import MapNode, Function def func1(in1): @@ -495,23 +512,22 @@ def func1(in1): mapnode = MapNode(Function(function=func1), iterfield='in1', - name='mapnode') + name='mapnode', + n_procs=2, + mem_gb=2) mapnode.inputs.in1 = [1, 2] - mapnode.interface.num_threads = 2 - mapnode.interface.estimated_memory_gb = 2 for idx, node in mapnode._make_nodes(): for attr 
in ('overwrite', 'run_without_submitting', 'plugin_args'): assert getattr(node, attr) == getattr(mapnode, attr) - for attr in ('num_threads', 'estimated_memory_gb'): - assert (getattr(node._interface, attr) == - getattr(mapnode._interface, attr)) + for attr in ('_n_procs', '_mem_gb'): + assert (getattr(node, attr) == + getattr(mapnode, attr)) def test_node_hash(tmpdir): - wd = str(tmpdir) - os.chdir(wd) from nipype.interfaces.utility import Function + tmpdir.chdir() def func1(): return 1 @@ -530,13 +546,13 @@ def func2(a): modify = lambda x: x + 1 n1.inputs.a = 1 w1.connect(n1, ('a', modify), n2, 'a') - w1.base_dir = wd + w1.base_dir = os.getcwd() # generate outputs w1.run(plugin='Linear') # ensure plugin is being called w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'false', - 'crashdump_dir': wd} + 'crashdump_dir': os.getcwd()} # create dummy distributed plugin class from nipype.pipeline.plugins.base import DistributedPluginBase @@ -558,14 +574,14 @@ def _submit_job(self, node, updatehash=False): # set local check w1.config['execution'] = {'stop_on_first_crash': 'true', 'local_hash_check': 'true', - 'crashdump_dir': wd} + 'crashdump_dir': os.getcwd()} w1.run(plugin=RaiseError()) def test_old_config(tmpdir): - wd = str(tmpdir) - os.chdir(wd) + tmpdir.chdir() + wd = os.getcwd() from nipype.interfaces.utility import Function def func1(): @@ -596,8 +612,8 @@ def func2(a): def test_mapnode_json(tmpdir): """Tests that mapnodes don't generate excess jsons """ - wd = str(tmpdir) - os.chdir(wd) + tmpdir.chdir() + wd = os.getcwd() from nipype import MapNode, Function, Workflow def func1(in1): @@ -619,7 +635,7 @@ def func1(in1): n1.inputs.in1 = [1] eg = w1.run() - node = eg.nodes()[0] + node = list(eg.nodes())[0] outjson = glob(os.path.join(node.output_dir(), '_0x*.json')) assert len(outjson) == 1 @@ -644,7 +660,7 @@ def test_parameterize_dirs_false(tmpdir): n2 = pe.Node(IdentityInterface(fields='in1'), name='Node2') wf = 
pe.Workflow(name='Test') - wf.base_dir = str(tmpdir) + wf.base_dir = tmpdir.strpath wf.config['execution']['parameterize_dirs'] = False wf.connect([(n1, n2, [('output1', 'in1')])]) @@ -653,8 +669,8 @@ def test_parameterize_dirs_false(tmpdir): def test_serial_input(tmpdir): - wd = str(tmpdir) - os.chdir(wd) + tmpdir.chdir() + wd = os.getcwd() from nipype import MapNode, Function, Workflow def func1(in1): @@ -690,7 +706,7 @@ def func1(in1): def test_write_graph_runs(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() for graph in ('orig', 'flat', 'exec', 'hierarchical', 'colored'): for simple in (True, False): @@ -718,7 +734,7 @@ def test_write_graph_runs(tmpdir): def test_deep_nested_write_graph_runs(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() for graph in ('orig', 'flat', 'exec', 'hierarchical', 'colored'): for simple in (True, False): diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index 87dafeee0f..a77745eb03 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -149,7 +149,7 @@ def _list_outputs(self): def test_join_expansion(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -196,7 +196,7 @@ def test_join_expansion(tmpdir): def test_node_joinsource(tmpdir): """Test setting the joinsource to a Node.""" - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -214,7 +214,7 @@ def test_node_joinsource(tmpdir): def test_set_join_node(tmpdir): """Test collecting join inputs to a set.""" - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -240,7 +240,7 @@ def test_unique_join_node(tmpdir): """Test join with the ``unique`` flag set to True.""" global _sum_operands _sum_operands = [] - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. 
wf = pe.Workflow(name='test') @@ -265,7 +265,7 @@ def test_multiple_join_nodes(tmpdir): """Test two join nodes, one downstream of the other.""" global _products _products = [] - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -318,7 +318,7 @@ def test_identity_join_node(tmpdir): """Test an IdentityInterface join.""" global _sum_operands _sum_operands = [] - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -353,7 +353,7 @@ def test_multifield_join_node(tmpdir): """Test join on several fields.""" global _products _products = [] - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -393,7 +393,7 @@ def test_synchronize_join_node(tmpdir): """Test join on an input node which has the ``synchronize`` flag set to True.""" global _products _products = [] - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -430,7 +430,7 @@ def test_synchronize_join_node(tmpdir): def test_itersource_join_source_node(tmpdir): """Test join on an input node which has an ``itersource``.""" - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. wf = pe.Workflow(name='test') @@ -484,7 +484,7 @@ def test_itersource_join_source_node(tmpdir): def test_itersource_two_join_nodes(tmpdir): """Test join with a midstream ``itersource`` and an upstream iterable.""" - os.chdir(str(tmpdir)) + tmpdir.chdir() # Make the workflow. 
wf = pe.Workflow(name='test') @@ -524,8 +524,7 @@ def test_itersource_two_join_nodes(tmpdir): def test_set_join_node_file_input(tmpdir): """Test collecting join inputs to a set.""" - wd = str(tmpdir) - os.chdir(wd) + tmpdir.chdir() open('test.nii', 'w+').close() open('test2.nii', 'w+').close() @@ -533,7 +532,7 @@ def test_set_join_node_file_input(tmpdir): wf = pe.Workflow(name='test') # the iterated input node inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') - inputspec.iterables = [('n', [os.path.join(wd, 'test.nii'), os.path.join(wd, 'test2.nii')])] + inputspec.iterables = [('n', [tmpdir.join('test.nii').strpath, tmpdir.join('test2.nii').strpath])] # a pre-join node in the iterated path pre_join1 = pe.Node(IdentityInterface(fields=['n']), name='pre_join1') wf.connect(inputspec, 'n', pre_join1, 'n') @@ -547,8 +546,7 @@ def test_set_join_node_file_input(tmpdir): def test_nested_workflow_join(tmpdir): """Test collecting join inputs within a nested workflow""" - wd = str(tmpdir) - os.chdir(wd) + tmpdir.chdir() # Make the nested workflow def nested_wf(i, name='smallwf'): diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py index 7aa20b9302..23c7a16fc6 100644 --- a/nipype/pipeline/engine/tests/test_utils.py +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -18,27 +18,28 @@ from ..utils import merge_dict, clean_working_directory, write_workflow_prov -def test_identitynode_removal(): +def test_identitynode_removal(tmpdir): def test_function(arg1, arg2, arg3): import numpy as np return (np.array(arg1) + arg2 + arg3).tolist() - wf = pe.Workflow(name="testidentity") - n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='src') + wf = pe.Workflow(name="testidentity", base_dir=tmpdir.strpath) + + n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='src', base_dir=tmpdir.strpath) n1.iterables = ('b', [0, 1, 2, 3]) n1.inputs.a = [0, 1, 2, 3] - n2 = pe.Node(niu.Select(), name='selector') + n2 
= pe.Node(niu.Select(), name='selector', base_dir=tmpdir.strpath) wf.connect(n1, ('a', test_function, 1, -1), n2, 'inlist') wf.connect(n1, 'b', n2, 'index') - n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='passer') + n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='passer', base_dir=tmpdir.strpath) n3.inputs.c = [1, 2, 3, 4] wf.connect(n2, 'out', n3, 'd') - n4 = pe.Node(niu.Select(), name='selector2') + n4 = pe.Node(niu.Select(), name='selector2', base_dir=tmpdir.strpath) wf.connect(n3, ('c', test_function, 1, -1), n4, 'inlist') wf.connect(n3, 'd', n4, 'index') @@ -58,15 +59,13 @@ class InputSpec(nib.TraitedSpec): outputs = OutputSpec() inputs = InputSpec() - wd = str(tmpdir) filenames = ['file.hdr', 'file.img', 'file.BRIK', 'file.HEAD', '_0x1234.json', 'foo.txt'] outfiles = [] for filename in filenames: - outfile = os.path.join(wd, filename) - with open(outfile, 'wt') as fp: - fp.writelines('dummy') - outfiles.append(outfile) + outfile = tmpdir.join(filename) + outfile.write('dummy') + outfiles.append(outfile.strpath) outputs.files = outfiles[:4:2] outputs.others = outfiles[5] inputs.infile = outfiles[-1] @@ -75,12 +74,12 @@ class InputSpec(nib.TraitedSpec): assert os.path.exists(outfiles[5]) config.set_default_config() config.set('execution', 'remove_unnecessary_outputs', False) - out = clean_working_directory(outputs, wd, inputs, needed_outputs, + out = clean_working_directory(outputs, tmpdir.strpath, inputs, needed_outputs, deepcopy(config._sections)) assert os.path.exists(outfiles[5]) assert out.others == outfiles[5] config.set('execution', 'remove_unnecessary_outputs', True) - out = clean_working_directory(outputs, wd, inputs, needed_outputs, + out = clean_working_directory(outputs, tmpdir.strpath, inputs, needed_outputs, deepcopy(config._sections)) assert os.path.exists(outfiles[1]) assert os.path.exists(outfiles[3]) @@ -105,30 +104,21 @@ def test_function(arg1): fp.close() return file1, file2 - out_dir = str(tmpdir) n1 = 
pe.Node(niu.Function(input_names=['arg1'], output_names=['file1', 'file2'], function=test_function), - base_dir=out_dir, + base_dir=tmpdir.strpath, name='testoutputs') n1.inputs.arg1 = 1 n1.config = {'execution': {'remove_unnecessary_outputs': True}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.run() - assert os.path.exists(os.path.join(out_dir, - n1.name, - 'file1.txt')) - assert os.path.exists(os.path.join(out_dir, - n1.name, - 'file2.txt')) + assert tmpdir.join(n1.name,'file1.txt').check() + assert tmpdir.join(n1.name,'file1.txt').check() n1.needed_outputs = ['file2'] n1.run() - assert not os.path.exists(os.path.join(out_dir, - n1.name, - 'file1.txt')) - assert os.path.exists(os.path.join(out_dir, - n1.name, - 'file2.txt')) + assert not tmpdir.join(n1.name,'file1.txt').check() + assert tmpdir.join(n1.name,'file2.txt').check() class InputSpec(nib.TraitedSpec): @@ -154,29 +144,22 @@ def _list_outputs(self): def test_inputs_removal(tmpdir): - out_dir = str(tmpdir) - file1 = os.path.join(out_dir, 'file1.txt') - fp = open(file1, 'wt') - fp.write('dummy_file') - fp.close() + file1 = tmpdir.join('file1.txt') + file1.write('dummy_file') n1 = pe.Node(UtilsTestInterface(), - base_dir=out_dir, + base_dir=tmpdir.strpath, name='testinputs') - n1.inputs.in_file = file1 + n1.inputs.in_file = file1.strpath n1.config = {'execution': {'keep_inputs': True}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.run() - assert os.path.exists(os.path.join(out_dir, - n1.name, - 'file1.txt')) - n1.inputs.in_file = file1 + assert tmpdir.join(n1.name,'file1.txt').check() + n1.inputs.in_file = file1.strpath n1.config = {'execution': {'keep_inputs': False}} n1.config = merge_dict(deepcopy(config._sections), n1.config) n1.overwrite = True n1.run() - assert not os.path.exists(os.path.join(out_dir, - n1.name, - 'file1.txt')) + assert not tmpdir.join(n1.name,'file1.txt').check() def test_outputs_removal_wf(tmpdir): @@ -210,27 +193,26 @@ def test_function3(arg): 
import os return arg - out_dir = str(tmpdir) for plugin in ('Linear',): # , 'MultiProc'): n1 = pe.Node(niu.Function(input_names=['arg1'], output_names=['out_file1', 'out_file2', 'dir'], function=test_function), - name='n1') + name='n1', base_dir=tmpdir.strpath) n1.inputs.arg1 = 1 n2 = pe.Node(niu.Function(input_names=['in_file', 'arg'], output_names=['out_file1', 'out_file2', 'n'], function=test_function2), - name='n2') + name='n2', base_dir=tmpdir.strpath) n2.inputs.arg = 2 n3 = pe.Node(niu.Function(input_names=['arg'], output_names=['n'], function=test_function3), - name='n3') + name='n3', base_dir=tmpdir.strpath) - wf = pe.Workflow(name="node_rem_test" + plugin, base_dir=out_dir) + wf = pe.Workflow(name="node_rem_test" + plugin, base_dir=tmpdir.strpath) wf.connect(n1, "out_file1", n2, "in_file") wf.run(plugin='Linear') @@ -271,7 +253,7 @@ def test_function3(arg): n2.name, 'file3.txt')) != remove_unnecessary_outputs - n4 = pe.Node(UtilsTestInterface(), name='n4') + n4 = pe.Node(UtilsTestInterface(), name='n4', base_dir=tmpdir.strpath) wf.connect(n2, "out_file1", n4, "in_file") def pick_first(l): @@ -320,20 +302,18 @@ def create_wf(name): def test_multi_disconnected_iterable(tmpdir): metawf = pe.Workflow(name='meta') - metawf.base_dir = str(tmpdir) + metawf.base_dir = tmpdir.strpath metawf.add_nodes([create_wf('wf%d' % i) for i in range(30)]) eg = metawf.run(plugin='Linear') assert len(eg.nodes()) == 60 def test_provenance(tmpdir): - out_dir = str(tmpdir) metawf = pe.Workflow(name='meta') - metawf.base_dir = out_dir + metawf.base_dir = tmpdir.strpath metawf.add_nodes([create_wf('wf%d' % i) for i in range(1)]) eg = metawf.run(plugin='Linear') - prov_base = os.path.join(out_dir, - 'workflow_provenance_test') + prov_base = tmpdir.join('workflow_provenance_test').strpath psg = write_workflow_prov(eg, prov_base, format='all') assert len(psg.bundles) == 2 assert len(psg.get_records()) == 7 @@ -356,7 +336,7 @@ def test_mapnode_crash(tmpdir): node.inputs.WRONG = 
['string{}'.format(i) for i in range(3)] node.config = deepcopy(config._sections) node.config['execution']['stop_on_first_crash'] = True - node.base_dir = str(tmpdir) + node.base_dir = tmpdir.strpath with pytest.raises(TypeError): node.run() os.chdir(cwd) @@ -373,7 +353,7 @@ def test_mapnode_crash2(tmpdir): iterfield=['WRONG'], name='myfunc') node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] - node.base_dir = str(tmpdir) + node.base_dir = tmpdir.strpath with pytest.raises(Exception): node.run() @@ -384,6 +364,7 @@ def test_mapnode_crash2(tmpdir): reason="the famous segfault #1788") def test_mapnode_crash3(tmpdir): """Test mapnode crash when mapnode is embedded in a workflow""" + tmpdir.chdir() node = pe.MapNode(niu.Function(input_names=['WRONG'], output_names=['newstring'], function=dummy_func), @@ -392,6 +373,8 @@ def test_mapnode_crash3(tmpdir): node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] wf = pe.Workflow('testmapnodecrash') wf.add_nodes([node]) - wf.base_dir = str(tmpdir) + wf.base_dir = tmpdir.strpath + #changing crashdump dir to cwl (to avoid problems with read-only systems) + wf.config["execution"]["crashdump_dir"] = os.getcwd() with pytest.raises(RuntimeError): wf.run(plugin='Linear') diff --git a/nipype/pipeline/engine/tests/test_workflows.py b/nipype/pipeline/engine/tests/test_workflows.py new file mode 100644 index 0000000000..32b2fa3505 --- /dev/null +++ b/nipype/pipeline/engine/tests/test_workflows.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for the engine workflows module +""" +import pytest + +from ... 
import engine as pe +from ....interfaces import utility as niu + + +def test_duplicate_node_check(): + + wf = pe.Workflow(name="testidentity") + + original_list = [0,1,2,3,4,5,6,7,8,9] + + selector1 = pe.Node(niu.Select(), name="selector1") + selector1.inputs.index = original_list[:-1] + selector1.inputs.inlist = original_list + selector2 = pe.Node(niu.Select(), name="selector2") + selector2.inputs.index = original_list[:-2] + selector3 = pe.Node(niu.Select(), name="selector3") + selector3.inputs.index = original_list[:-3] + selector4 = pe.Node(niu.Select(), name="selector3") + selector4.inputs.index = original_list[:-4] + + wf_connections = [ + (selector1, selector2, [("out","inlist")]), + (selector2, selector3, [("out","inlist")]), + (selector3, selector4, [("out","inlist")]), + ] + + with pytest.raises(IOError) as excinfo: + wf.connect(wf_connections) + assert 'Duplicate node name "selector3" found.' == str(excinfo.value) diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 25b12ab607..61937faac3 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -1,44 +1,52 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""Utility routines for workflow graphs -""" +"""Utility routines for workflow graphs""" from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import str, open, map, next, zip, range +from builtins import str, open, next, zip, range +import os import sys -from future import standard_library -standard_library.install_aliases() +import pickle from collections import defaultdict - +import re from copy import deepcopy from glob import glob -try: - from inspect import signature -except ImportError: - from funcsigs import signature -import os -import re -import pickle -from functools import reduce -import numpy as np -from ...utils.misc import package_check +from 
traceback import format_exception +from hashlib import sha1 +import gzip -package_check('networkx', '1.3') +from functools import reduce +import numpy as np import networkx as nx +from future import standard_library -from ...utils.filemanip import (fname_presuffix, FileNotFoundError, to_str, - filename_to_list, get_related_files) -from ...utils.misc import create_function_from_source, str2bool -from ...interfaces.base import (CommandLine, isdefined, Undefined, - InterfaceResult) +from ... import logging, config, LooseVersion +from ...utils.filemanip import ( + relpath, makedirs, fname_presuffix, to_str, + filename_to_list, get_related_files, FileNotFoundError, + save_json, savepkl, + write_rst_header, write_rst_dict, write_rst_list, +) +from ...utils.misc import str2bool +from ...utils.functions import create_function_from_source +from ...interfaces.base import ( + Bunch, CommandLine, isdefined, Undefined, + InterfaceResult, traits) from ...interfaces.utility import IdentityInterface from ...utils.provenance import ProvStore, pm, nipype_ns, get_id -from ... 
import logging, config + +try: + from inspect import signature +except ImportError: + from funcsigs import signature + +standard_library.install_aliases() logger = logging.getLogger('workflow') +PY3 = sys.version_info[0] > 2 try: dfs_preorder = nx.dfs_preorder @@ -46,39 +54,265 @@ dfs_preorder = nx.dfs_preorder_nodes logger.debug('networkx 1.4 dev or higher detected') -try: - from os.path import relpath -except ImportError: - import os.path as op - - def relpath(path, start=None): - """Return a relative version of a path""" - if start is None: - start = os.curdir - if not path: - raise ValueError("no path specified") - start_list = op.abspath(start).split(op.sep) - path_list = op.abspath(path).split(op.sep) - if start_list[0].lower() != path_list[0].lower(): - unc_path, rest = op.splitunc(path) - unc_start, rest = op.splitunc(start) - if bool(unc_path) ^ bool(unc_start): - raise ValueError(("Cannot mix UNC and non-UNC paths " - "(%s and %s)") % (path, start)) - else: - raise ValueError("path is on drive %s, start on drive %s" - % (path_list[0], start_list[0])) - # Work out how much of the filepath is shared by start and path. - for i in range(min(len(start_list), len(path_list))): - if start_list[i].lower() != path_list[i].lower(): - break + +def _parameterization_dir(param): + """ + Returns the directory name for the given parameterization string as follows: + - If the parameterization is longer than 32 characters, then + return the SHA-1 hex digest. + - Otherwise, return the parameterization unchanged. 
+ """ + if len(param) > 32: + return sha1(param.encode()).hexdigest() + return param + + +def save_hashfile(hashfile, hashed_inputs): + """Store a hashfile""" + try: + save_json(hashfile, hashed_inputs) + except (IOError, TypeError): + err_type = sys.exc_info()[0] + if err_type is TypeError: + # XXX - SG current workaround is to just + # create the hashed file and not put anything + # in it + with open(hashfile, 'wt') as fd: + fd.writelines(str(hashed_inputs)) + + logger.debug( + 'Unable to write a particular type to the json file') else: - i += 1 + logger.critical('Unable to open the file in write mode: %s', + hashfile) + + +def nodelist_runner(nodes, updatehash=False, stop_first=False): + """ + A generator that iterates and over a list of ``nodes`` and + executes them. + + """ + for i, node in nodes: + err = None + result = None + try: + result = node.run(updatehash=updatehash) + except Exception: + if stop_first: + raise + + result = node.result + err = [] + if result.runtime and hasattr(result.runtime, 'traceback'): + err = [result.runtime.traceback] + + err += format_exception(*sys.exc_info()) + err = '\n'.join(err) + finally: + yield i, result, err + + +def write_report(node, report_type=None, is_mapnode=False): + """Write a report file for a node""" + if not str2bool(node.config['execution']['create_report']): + return + + if report_type not in ['preexec', 'postexec']: + logger.warning('[Node] Unknown report type "%s".', report_type) + return + + cwd = node.output_dir() + report_dir = os.path.join(cwd, '_report') + report_file = os.path.join(report_dir, 'report.rst') + makedirs(report_dir, exist_ok=True) + + logger.debug('[Node] Writing %s-exec report to "%s"', + report_type[:-4], report_file) + if report_type.startswith('pre'): + lines = [ + write_rst_header('Node: %s' % get_print_name(node), level=0), + write_rst_list(['Hierarchy : %s' % node.fullname, + 'Exec ID : %s' % node._id]), + write_rst_header('Original Inputs', level=1), + 
write_rst_dict(node.inputs.get()), + ] + with open(report_file, 'wt') as fp: + fp.write('\n'.join(lines)) + return + + lines = [ + write_rst_header('Execution Inputs', level=1), + write_rst_dict(node.inputs.get()), + ] + + result = node.result # Locally cache result + outputs = result.outputs + + if outputs is None: + with open(report_file, 'at') as fp: + fp.write('\n'.join(lines)) + return + + lines.append(write_rst_header('Execution Outputs', level=1)) + + if isinstance(outputs, Bunch): + lines.append(write_rst_dict(outputs.dictcopy())) + elif outputs: + lines.append(write_rst_dict(outputs.get())) + + if is_mapnode: + lines.append(write_rst_header('Subnode reports', level=1)) + nitems = len(filename_to_list( + getattr(node.inputs, node.iterfield[0]))) + subnode_report_files = [] + for i in range(nitems): + nodecwd = os.path.join( + cwd, 'mapflow', '_%s%d' % (node.name, i), + '_report', 'report.rst') + subnode_report_files.append( + 'subnode %d : %s' % (i, nodecwd)) + + lines.append(write_rst_list(subnode_report_files)) + + with open(report_file, 'at') as fp: + fp.write('\n'.join(lines)) + return + + lines.append(write_rst_header('Runtime info', level=1)) + # Init rst dictionary of runtime stats + rst_dict = { + 'hostname': result.runtime.hostname, + 'duration': result.runtime.duration, + } + + if hasattr(result.runtime, 'cmdline'): + rst_dict['command'] = result.runtime.cmdline + + # Try and insert memory/threads usage if available + if hasattr(result.runtime, 'mem_peak_gb'): + rst_dict['mem_peak_gb'] = result.runtime.mem_peak_gb + + if hasattr(result.runtime, 'cpu_percent'): + rst_dict['cpu_percent'] = result.runtime.cpu_percent + + lines.append(write_rst_dict(rst_dict)) + + # Collect terminal output + if hasattr(result.runtime, 'merged'): + lines += [ + write_rst_header('Terminal output', level=2), + write_rst_list(result.runtime.merged), + ] + if hasattr(result.runtime, 'stdout'): + lines += [ + write_rst_header('Terminal - standard output', level=2), + 
write_rst_list(result.runtime.stdout), + ] + if hasattr(result.runtime, 'stderr'): + lines += [ + write_rst_header('Terminal - standard error', level=2), + write_rst_list(result.runtime.stderr), + ] + + # Store environment + if hasattr(result.runtime, 'environ'): + lines += [ + write_rst_header('Environment', level=2), + write_rst_dict(result.runtime.environ), + ] + + with open(report_file, 'at') as fp: + fp.write('\n'.join(lines)) + return + + +def save_resultfile(result, cwd, name): + """Save a result pklz file to ``cwd``""" + resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) + if result.outputs: + try: + outputs = result.outputs.get() + except TypeError: + outputs = result.outputs.dictcopy() # outputs was a bunch + result.outputs.set(**modify_paths( + outputs, relative=True, basedir=cwd)) + + savepkl(resultsfile, result) + logger.debug('saved results in %s', resultsfile) + + if result.outputs: + result.outputs.set(**outputs) - rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:] - if not rel_list: - return os.curdir - return op.join(*rel_list) + +def load_resultfile(path, name): + """ + Load InterfaceResult file from path + + Parameter + --------- + + path : base_dir of node + name : name of node + + Returns + ------- + + result : InterfaceResult structure + aggregate : boolean indicating whether node should aggregate_outputs + attribute error : boolean indicating whether there was some mismatch in + versions of traits used to store result and hence node needs to + rerun + """ + aggregate = True + resultsoutputfile = os.path.join(path, 'result_%s.pklz' % name) + result = None + attribute_error = False + if os.path.exists(resultsoutputfile): + pkl_file = gzip.open(resultsoutputfile, 'rb') + try: + result = pickle.load(pkl_file) + except UnicodeDecodeError: + # Was this pickle created with Python 2.x? 
+ pickle.load(pkl_file, fix_imports=True, encoding='utf-8') + logger.warning('Successfully loaded pickle in compatibility mode') + except (traits.TraitError, AttributeError, ImportError, + EOFError) as err: + if isinstance(err, (AttributeError, ImportError)): + attribute_error = True + logger.debug('attribute error: %s probably using ' + 'different trait pickled file', str(err)) + else: + logger.debug( + 'some file does not exist. hence trait cannot be set') + else: + if result.outputs: + try: + outputs = result.outputs.get() + except TypeError: + outputs = result.outputs.dictcopy() # outputs == Bunch + try: + result.outputs.set(**modify_paths(outputs, + relative=False, + basedir=path)) + except FileNotFoundError: + logger.debug('conversion to full path results in ' + 'non existent file') + aggregate = False + pkl_file.close() + logger.debug('Aggregate: %s', aggregate) + return result, aggregate, attribute_error + + +def strip_temp(files, wd): + """Remove temp from a list of file paths""" + out = [] + for f in files: + if isinstance(f, list): + out.append(strip_temp(f, wd)) + else: + out.append(f.replace(os.path.join(wd, '_tempinput'), wd)) + return out def _write_inputs(node): @@ -87,10 +321,10 @@ def _write_inputs(node): for key, _ in list(node.inputs.items()): val = getattr(node.inputs, key) if isdefined(val): - if type(val) == str: + if isinstance(val, (str, bytes)): try: func = create_function_from_source(val) - except RuntimeError as e: + except RuntimeError: lines.append("%s.inputs.%s = '%s'" % (nodename, key, val)) else: funcname = [name for name in func.__globals__ @@ -100,7 +334,7 @@ def _write_inputs(node): lines[-1] = lines[-1].replace(' %s(' % funcname, ' %s_1(' % funcname) funcname = '%s_1' % funcname - lines.append('from nipype.utils.misc import getsource') + lines.append('from nipype.utils.functions import getsource') lines.append("%s.inputs.%s = getsource(%s)" % (nodename, key, funcname)) @@ -115,18 +349,18 @@ def format_node(node, format='python', 
include_config=False): lines = [] name = node.fullname.replace('.', '_') if format == 'python': - klass = node._interface + klass = node.interface importline = 'from %s import %s' % (klass.__module__, klass.__class__.__name__) comment = '# Node: %s' % node.fullname - spec = signature(node._interface.__init__) + spec = signature(node.interface.__init__) args = [p.name for p in list(spec.parameters.values())] args = args[1:] if args: filled_args = [] for arg in args: - if hasattr(node._interface, '_%s' % arg): - filled_args.append('%s=%s' % (arg, getattr(node._interface, + if hasattr(node.interface, '_%s' % arg): + filled_args.append('%s=%s' % (arg, getattr(node.interface, '_%s' % arg))) args = ', '.join(filled_args) else: @@ -195,7 +429,7 @@ def modify_paths(object, relative=True, basedir=None): else: out = os.path.abspath(os.path.join(basedir, object)) if not os.path.exists(out): - raise FileNotFoundError('File %s not found' % out) + raise IOError('File %s not found' % out) else: out = object return out @@ -210,8 +444,8 @@ def get_print_name(node, simple_form=True): """ name = node.fullname if hasattr(node, '_interface'): - pkglist = node._interface.__class__.__module__.split('.') - interface = node._interface.__class__.__name__ + pkglist = node.interface.__class__.__module__.split('.') + interface = node.interface.__class__.__name__ destclass = '' if len(pkglist) > 2: destclass = '.%s' % pkglist[2] @@ -247,47 +481,44 @@ def _create_dot_graph(graph, show_connectinfo=False, simple_form=True): def _write_detailed_dot(graph, dotfilename): - """Create a dot file with connection info - - digraph structs { - node [shape=record]; - struct1 [label=" left| mid\ dle| right"]; - struct2 [label=" one| two"]; - struct3 [label="hello\nworld |{ b |{c| d|e}| f}| g | h"]; - struct1:f1 -> struct2:f0; - struct1:f0 -> struct2:f1; - struct1:f2 -> struct3:here; - } + r""" + Create a dot file with connection info :: + + digraph structs { + node [shape=record]; + struct1 [label=" left| 
middle| right"]; + struct2 [label=" one| two"]; + struct3 [label="hello\nworld |{ b |{c| d|e}| f}| g | h"]; + struct1:f1 -> struct2:f0; + struct1:f0 -> struct2:f1; + struct1:f2 -> struct3:here; + } """ text = ['digraph structs {', 'node [shape=record];'] # write nodes edges = [] - replacefunk = lambda x: x.replace('_', '').replace('.', ''). \ - replace('@', '').replace('-', '') for n in nx.topological_sort(graph): nodename = str(n) inports = [] - for u, v, d in graph.in_edges_iter(nbunch=n, data=True): + for u, v, d in graph.in_edges(nbunch=n, data=True): for cd in d['connect']: if isinstance(cd[0], (str, bytes)): outport = cd[0] else: outport = cd[0][0] inport = cd[1] - ipstrip = 'in' + replacefunk(inport) - opstrip = 'out' + replacefunk(outport) + ipstrip = 'in%s' % _replacefunk(inport) + opstrip = 'out%s' % _replacefunk(outport) edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''), opstrip, str(v).replace('.', ''), ipstrip)) if inport not in inports: inports.append(inport) - inputstr = '{IN' - for ip in sorted(inports): - inputstr += '| %s' % (replacefunk(ip), ip) - inputstr += '}' + inputstr = ['{IN'] + ['| %s' % (_replacefunk(ip), ip) + for ip in sorted(inports)] + ['}'] outports = [] - for u, v, d in graph.out_edges_iter(nbunch=n, data=True): + for u, v, d in graph.out_edges(nbunch=n, data=True): for cd in d['connect']: if isinstance(cd[0], (str, bytes)): outport = cd[0] @@ -295,13 +526,11 @@ def _write_detailed_dot(graph, dotfilename): outport = cd[0][0] if outport not in outports: outports.append(outport) - outputstr = '{OUT' - for op in sorted(outports): - outputstr += '| %s' % (replacefunk(op), op) - outputstr += '}' + outputstr = ['{OUT'] + ['| %s' % (_replacefunk(oport), oport) + for oport in sorted(outports)] + ['}'] srcpackage = '' if hasattr(n, '_interface'): - pkglist = n._interface.__class__.__module__.split('.') + pkglist = n.interface.__class__.__module__.split('.') if len(pkglist) > 2: srcpackage = pkglist[2] srchierarchy = 
'.'.join(nodename.split('.')[1:-1]) @@ -309,19 +538,23 @@ def _write_detailed_dot(graph, dotfilename): srcpackage, srchierarchy) text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''), - inputstr, + ''.join(inputstr), nodenamestr, - outputstr)] + ''.join(outputstr))] # write edges for edge in sorted(edges): text.append(edge) text.append('}') - filep = open(dotfilename, 'wt') - filep.write('\n'.join(text)) - filep.close() + with open(dotfilename, 'wt') as filep: + filep.write('\n'.join(text)) return text +def _replacefunk(x): + return x.replace('_', '').replace( + '.', '').replace('@', '').replace('-', '') + + # Graph manipulations for iterable expansion def _get_valid_pathstr(pathstr): """Remove disallowed characters from path @@ -340,8 +573,7 @@ def _get_valid_pathstr(pathstr): def expand_iterables(iterables, synchronize=False): if synchronize: return synchronize_iterables(iterables) - else: - return list(walk(list(iterables.items()))) + return list(walk(list(iterables.items()))) def count_iterables(iterables, synchronize=False): @@ -352,10 +584,7 @@ def count_iterables(iterables, synchronize=False): Otherwise, the count is the product of the iterables value list sizes. """ - if synchronize: - op = max - else: - op = lambda x, y: x * y + op = max if synchronize else lambda x, y: x * y return reduce(op, [len(func()) for _, func in list(iterables.items())]) @@ -446,7 +675,7 @@ def get_levels(G): levels = {} for n in nx.topological_sort(G): levels[n] = 0 - for pred in G.predecessors_iter(n): + for pred in G.predecessors(n): levels[n] = max(levels[n], levels[pred] + 1) return levels @@ -491,9 +720,9 @@ def _merge_graphs(supergraph, nodes, subgraph, nodeid, iterables, raise Exception(("Execution graph does not have a unique set of node " "names. 
Please rerun the workflow")) edgeinfo = {} - for n in subgraph.nodes(): + for n in list(subgraph.nodes()): nidx = ids.index(n._hierarchy + n._id) - for edge in supergraph.in_edges_iter(supernodes[nidx]): + for edge in supergraph.in_edges(list(supernodes)[nidx]): # make sure edge is not part of subgraph if edge[0] not in subgraph.nodes(): if n._hierarchy + n._id not in list(edgeinfo.keys()): @@ -514,7 +743,7 @@ def _merge_graphs(supergraph, nodes, subgraph, nodeid, iterables, Gc = deepcopy(subgraph) ids = [n._hierarchy + n._id for n in Gc.nodes()] nodeidx = ids.index(nodeid) - rootnode = Gc.nodes()[nodeidx] + rootnode = list(Gc.nodes())[nodeidx] paramstr = '' for key, val in sorted(params.items()): paramstr = '{}_{}_{}'.format( @@ -524,14 +753,13 @@ def _merge_graphs(supergraph, nodes, subgraph, nodeid, iterables, logger.debug('Parameterization: paramstr=%s', paramstr) levels = get_levels(Gc) for n in Gc.nodes(): - """ - update parameterization of the node to reflect the location of - the output directory. For example, if the iterables along a - path of the directed graph consisted of the variables 'a' and - 'b', then every node in the path including and after the node - with iterable 'b' will be placed in a directory - _a_aval/_b_bval/. - """ + # update parameterization of the node to reflect the location of + # the output directory. For example, if the iterables along a + # path of the directed graph consisted of the variables 'a' and + # 'b', then every node in the path including and after the node + # with iterable 'b' will be placed in a directory + # _a_aval/_b_bval/. + path_length = levels[n] # enter as negative numbers so that earlier iterables with longer # path lengths get precedence in a sort @@ -583,7 +811,7 @@ def _identity_nodes(graph, include_iterables): to True. 
""" return [node for node in nx.topological_sort(graph) - if isinstance(node._interface, IdentityInterface) and + if isinstance(node.interface, IdentityInterface) and (include_iterables or getattr(node, 'iterables') is None)] @@ -598,7 +826,7 @@ def _remove_identity_node(graph, node): else: _propagate_root_output(graph, node, field, connections) graph.remove_nodes_from([node]) - logger.debug("Removed the identity node %s from the graph." % node) + logger.debug("Removed the identity node %s from the graph.", node) def _node_ports(graph, node): @@ -613,10 +841,10 @@ def _node_ports(graph, node): """ portinputs = {} portoutputs = {} - for u, _, d in graph.in_edges_iter(node, data=True): + for u, _, d in graph.in_edges(node, data=True): for src, dest in d['connect']: portinputs[dest] = (u, src) - for _, v, d in graph.out_edges_iter(node, data=True): + for _, v, d in graph.out_edges(node, data=True): for src, dest in d['connect']: if isinstance(src, tuple): srcport = src[0] @@ -682,22 +910,22 @@ def generate_expanded_graph(graph_in): logger.debug("PE: expanding iterables") graph_in = _remove_nonjoin_identity_nodes(graph_in, keep_iterables=True) # standardize the iterables as {(field, function)} dictionaries - for node in graph_in.nodes_iter(): + for node in graph_in.nodes(): if node.iterables: _standardize_iterables(node) allprefixes = list('abcdefghijklmnopqrstuvwxyz') # the iterable nodes inodes = _iterable_nodes(graph_in) - logger.debug("Detected iterable nodes %s" % inodes) + logger.debug("Detected iterable nodes %s", inodes) # while there is an iterable node, expand the iterable node's # subgraphs while inodes: inode = inodes[0] - logger.debug("Expanding the iterable node %s..." 
% inode) + logger.debug("Expanding the iterable node %s...", inode) # the join successor nodes of the current iterable node - jnodes = [node for node in graph_in.nodes_iter() + jnodes = [node for node in graph_in.nodes() if hasattr(node, 'joinsource') and inode.name == node.joinsource and nx.has_path(graph_in, inode, node)] @@ -709,14 +937,14 @@ def generate_expanded_graph(graph_in): for jnode in jnodes: in_edges = jedge_dict[jnode] = {} edges2remove = [] - for src, dest, data in graph_in.in_edges_iter(jnode, True): + for src, dest, data in graph_in.in_edges(jnode, True): in_edges[src.itername] = data edges2remove.append((src, dest)) for src, dest in edges2remove: graph_in.remove_edge(src, dest) - logger.debug("Excised the %s -> %s join node in-edge." - % (src, dest)) + logger.debug("Excised the %s -> %s join node in-edge.", + src, dest) if inode.itersource: # the itersource is a (node name, fields) tuple @@ -726,15 +954,15 @@ def generate_expanded_graph(graph_in): src_fields = [src_fields] # find the unique iterable source node in the graph try: - iter_src = next((node for node in graph_in.nodes_iter() + iter_src = next((node for node in graph_in.nodes() if node.name == src_name and nx.has_path(graph_in, node, inode))) except StopIteration: raise ValueError("The node %s itersource %s was not found" " among the iterable predecessor nodes" % (inode, src_name)) - logger.debug("The node %s has iterable source node %s" - % (inode, iter_src)) + logger.debug("The node %s has iterable source node %s", + inode, iter_src) # look up the iterables for this particular itersource descendant # using the iterable source ancestor values as a key iterables = {} @@ -760,7 +988,7 @@ def make_field_func(*pair): else: iterables = inode.iterables.copy() inode.iterables = None - logger.debug('node: %s iterables: %s' % (inode, iterables)) + logger.debug('node: %s iterables: %s', inode, iterables) # collect the subnodes to expand subnodes = [s for s in dfs_preorder(graph_in, inode)] @@ 
-768,7 +996,7 @@ def make_field_func(*pair): for s in subnodes: prior_prefix.extend(re.findall('\.(.)I', s._id)) prior_prefix = sorted(prior_prefix) - if not len(prior_prefix): + if not prior_prefix: iterable_prefix = 'a' else: if prior_prefix[-1] == 'z': @@ -781,7 +1009,11 @@ def make_field_func(*pair): inode._id += ('.' + iterable_prefix + 'I') # merge the iterated subgraphs - subgraph = graph_in.subgraph(subnodes) + # dj: the behaviour of .copy changes in version 2 + if LooseVersion(nx.__version__) < LooseVersion('2'): + subgraph = graph_in.subgraph(subnodes) + else: + subgraph = graph_in.subgraph(subnodes).copy() graph_in = _merge_graphs(graph_in, subnodes, subgraph, inode._hierarchy + inode._id, iterables, iterable_prefix, inode.synchronize) @@ -793,13 +1025,13 @@ def make_field_func(*pair): old_edge_dict = jedge_dict[jnode] # the edge source node replicates expansions = defaultdict(list) - for node in graph_in.nodes_iter(): - for src_id, edge_data in list(old_edge_dict.items()): + for node in graph_in.nodes(): + for src_id in list(old_edge_dict.keys()): if node.itername.startswith(src_id): expansions[src_id].append(node) for in_id, in_nodes in list(expansions.items()): logger.debug("The join node %s input %s was expanded" - " to %d nodes." % (jnode, in_id, len(in_nodes))) + " to %d nodes.", jnode, in_id, len(in_nodes)) # preserve the node iteration order by sorting on the node id for in_nodes in list(expansions.values()): in_nodes.sort(key=lambda node: node._id) @@ -839,12 +1071,12 @@ def make_field_func(*pair): if dest_field in slots: slot_field = slots[dest_field] connects[con_idx] = (src_field, slot_field) - logger.debug("Qualified the %s -> %s join field" - " %s as %s." 
% - (in_node, jnode, dest_field, slot_field)) - graph_in.add_edge(in_node, jnode, newdata) + logger.debug( + "Qualified the %s -> %s join field %s as %s.", + in_node, jnode, dest_field, slot_field) + graph_in.add_edge(in_node, jnode, **newdata) logger.debug("Connected the join node %s subgraph to the" - " expanded join point %s" % (jnode, in_node)) + " expanded join point %s", jnode, in_node) # nx.write_dot(graph_in, '%s_post.dot' % node) # the remaining iterable nodes @@ -900,7 +1132,7 @@ def _standardize_iterables(node): fields = set(node.inputs.copyable_trait_names()) # Flag indicating whether the iterables are in the alternate # synchronize form and are not converted to a standard format. - synchronize = False + # synchronize = False # OE: commented out since it is not used # A synchronize iterables node without an itersource can be in # [fields, value tuples] format rather than # [(field, value list), (field, value list), ...] @@ -975,9 +1207,9 @@ def _transpose_iterables(fields, values): if val is not None: transposed[fields[idx]][key].append(val) return list(transposed.items()) - else: - return list(zip(fields, [[v for v in list(transpose) if v is not None] - for transpose in zip(*values)])) + + return list(zip(fields, [[v for v in list(transpose) if v is not None] + for transpose in zip(*values)])) def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False, @@ -1011,8 +1243,8 @@ def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False, logger.debug('using input graph') if base_dir is None: base_dir = os.getcwd() - if not os.path.exists(base_dir): - os.makedirs(base_dir) + + makedirs(base_dir, exist_ok=True) outfname = fname_presuffix(dotfilename, suffix='_detailed.dot', use_ext=False, @@ -1020,9 +1252,10 @@ def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False, _write_detailed_dot(graph, outfname) if format != 'dot': cmd = 'dot -T%s -O %s' % (format, outfname) - res = CommandLine(cmd, 
terminal_output='allatonce').run() + res = CommandLine(cmd, terminal_output='allatonce', + resource_monitor=False).run() if res.runtime.returncode: - logger.warn('dot2png: %s', res.runtime.stderr) + logger.warning('dot2png: %s', res.runtime.stderr) pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form) simplefname = fname_presuffix(dotfilename, suffix='.dot', @@ -1031,9 +1264,10 @@ def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False, nx.drawing.nx_pydot.write_dot(pklgraph, simplefname) if format != 'dot': cmd = 'dot -T%s -O %s' % (format, simplefname) - res = CommandLine(cmd, terminal_output='allatonce').run() + res = CommandLine(cmd, terminal_output='allatonce', + resource_monitor=False).run() if res.runtime.returncode: - logger.warn('dot2png: %s', res.runtime.stderr) + logger.warning('dot2png: %s', res.runtime.stderr) if show: pos = nx.graphviz_layout(pklgraph, prog='dot') nx.draw(pklgraph, pos) @@ -1051,7 +1285,7 @@ def format_dot(dotfilename, format='png'): if format != 'dot': cmd = 'dot -T%s -O \'%s\'' % (format, dotfilename) try: - CommandLine(cmd).run() + CommandLine(cmd, resource_monitor=False).run() except IOError as ioe: if "could not be found" in str(ioe): raise IOError("Cannot draw directed graph; executable 'dot' is unavailable") @@ -1061,26 +1295,6 @@ def format_dot(dotfilename, format='png'): return dotfilename -def make_output_dir(outdir): - """Make the output_dir if it doesn't exist. 
- - Parameters - ---------- - outdir : output directory to create - - """ - # this odd approach deals with concurrent directory cureation - try: - if not os.path.exists(os.path.abspath(outdir)): - logger.debug("Creating %s", outdir) - os.makedirs(outdir) - except OSError: - logger.debug("Problem creating %s", outdir) - if not os.path.exists(outdir): - raise OSError('Could not create %s', outdir) - return outdir - - def get_all_files(infile): files = [infile] if infile.endswith(".img"): @@ -1096,7 +1310,7 @@ def walk_outputs(object): """ out = [] if isinstance(object, dict): - for key, val in sorted(object.items()): + for _, val in sorted(object.items()): if isdefined(val): out.extend(walk_outputs(val)) elif isinstance(object, (list, tuple)): @@ -1153,13 +1367,13 @@ def clean_working_directory(outputs, cwd, inputs, needed_outputs, config, for filename in needed_files: temp.extend(get_related_files(filename)) needed_files = temp - logger.debug('Needed files: %s' % (';'.join(needed_files))) - logger.debug('Needed dirs: %s' % (';'.join(needed_dirs))) + logger.debug('Needed files: %s', ';'.join(needed_files)) + logger.debug('Needed dirs: %s', ';'.join(needed_dirs)) files2remove = [] if str2bool(config['execution']['remove_unnecessary_outputs']): for f in walk_files(cwd): if f not in needed_files: - if len(needed_dirs) == 0: + if not needed_dirs: files2remove.append(f) elif not any([f.startswith(dname) for dname in needed_dirs]): files2remove.append(f) @@ -1172,7 +1386,7 @@ def clean_working_directory(outputs, cwd, inputs, needed_outputs, config, for f in walk_files(cwd): if f in input_files and f not in needed_files: files2remove.append(f) - logger.debug('Removing files: %s' % (';'.join(files2remove))) + logger.debug('Removing files: %s', ';'.join(files2remove)) for f in files2remove: os.remove(f) for key in outputs.copyable_trait_names(): @@ -1236,9 +1450,9 @@ def write_workflow_prov(graph, filename=None, format='all'): processes = [] nodes = graph.nodes() - for idx, 
node in enumerate(nodes): + for node in nodes: result = node.result - classname = node._interface.__class__.__name__ + classname = node.interface.__class__.__name__ _, hashval, _, _ = node.hash_exists() attrs = {pm.PROV["type"]: nipype_ns[classname], pm.PROV["label"]: '_'.join((classname, node.name)), @@ -1254,7 +1468,7 @@ def write_workflow_prov(graph, filename=None, format='all'): if idx < len(result.inputs): subresult.inputs = result.inputs[idx] if result.outputs: - for key, value in list(result.outputs.items()): + for key, _ in list(result.outputs.items()): values = getattr(result.outputs, key) if isdefined(values) and idx < len(values): subresult.outputs[key] = values[idx] @@ -1264,7 +1478,7 @@ def write_workflow_prov(graph, filename=None, format='all'): ps.g.add_bundle(sub_bundle) bundle_entity = ps.g.entity(sub_bundle.identifier, other_attributes={'prov:type': - pm.PROV_BUNDLE}) + pm.PROV_BUNDLE}) ps.g.wasGeneratedBy(bundle_entity, process) else: process.add_attributes({pm.PROV["type"]: nipype_ns["Node"]}) @@ -1277,25 +1491,103 @@ def write_workflow_prov(graph, filename=None, format='all'): ps.g.add_bundle(result_bundle) bundle_entity = ps.g.entity(result_bundle.identifier, other_attributes={'prov:type': - pm.PROV_BUNDLE}) + pm.PROV_BUNDLE}) ps.g.wasGeneratedBy(bundle_entity, process) processes.append(process) # add dependencies (edges) # Process->Process - for idx, edgeinfo in enumerate(graph.in_edges_iter()): - ps.g.wasStartedBy(processes[nodes.index(edgeinfo[1])], - starter=processes[nodes.index(edgeinfo[0])]) + for idx, edgeinfo in enumerate(graph.in_edges()): + ps.g.wasStartedBy(processes[list(nodes).index(edgeinfo[1])], + starter=processes[list(nodes).index(edgeinfo[0])]) # write provenance ps.write_provenance(filename, format=format) return ps.g +def write_workflow_resources(graph, filename=None, append=None): + """ + Generate a JSON file with profiling traces that can be loaded + in a pandas DataFrame or processed with JavaScript like D3.js + """ + 
import simplejson as json + + # Overwrite filename if nipype config is set + filename = config.get('monitoring', 'summary_file', filename) + + # If filename still does not make sense, store in $PWD + if not filename: + filename = os.path.join(os.getcwd(), 'resource_monitor.json') + + if append is None: + append = str2bool(config.get( + 'monitoring', 'summary_append', 'true')) + + big_dict = { + 'time': [], + 'name': [], + 'interface': [], + 'rss_GiB': [], + 'vms_GiB': [], + 'cpus': [], + 'mapnode': [], + 'params': [], + } + + # If file exists, just append new profile information + # If we append different runs, then we will see different + # "bursts" of timestamps corresponding to those executions. + if append and os.path.isfile(filename): + with open(filename, 'r' if PY3 else 'rb') as rsf: + big_dict = json.load(rsf) + + for _, node in enumerate(graph.nodes()): + nodename = node.fullname + classname = node.interface.__class__.__name__ + + params = '' + if node.parameterization: + params = '_'.join(['{}'.format(p) + for p in node.parameterization]) + + try: + rt_list = node.result.runtime + except Exception: + logger.warning('Could not access runtime info for node %s' + ' (%s interface)', nodename, classname) + continue + + if not isinstance(rt_list, list): + rt_list = [rt_list] + + for subidx, runtime in enumerate(rt_list): + try: + nsamples = len(runtime.prof_dict['time']) + except AttributeError: + logger.warning( + 'Could not retrieve profiling information for node "%s" ' + '(mapflow %d/%d).', nodename, subidx + 1, len(rt_list)) + continue + + for key in ['time', 'cpus', 'rss_GiB', 'vms_GiB']: + big_dict[key] += runtime.prof_dict[key] + + big_dict['interface'] += [classname] * nsamples + big_dict['name'] += [nodename] * nsamples + big_dict['mapnode'] += [subidx] * nsamples + big_dict['params'] += [params] * nsamples + + with open(filename, 'w' if PY3 else 'wb') as rsf: + json.dump(big_dict, rsf, ensure_ascii=False) + + return filename + + def 
topological_sort(graph, depth_first=False): """Returns a depth first sorted order if depth_first is True """ - nodesort = nx.topological_sort(graph) + nodesort = list(nx.topological_sort(graph)) if not depth_first: return nodesort, None logger.debug("Performing depth first search") diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py index 35bd575edd..e00f105c5e 100644 --- a/nipype/pipeline/engine/workflows.py +++ b/nipype/pipeline/engine/workflows.py @@ -15,52 +15,42 @@ """ from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import range, object, str, bytes, open - -# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict -from future import standard_library -standard_library.install_aliases() +from builtins import str, bytes, open +import os +import os.path as op +import sys from datetime import datetime - from copy import deepcopy import pickle -import os -import os.path as op import shutil -import sys -from warnings import warn import numpy as np import networkx as nx - from ... 
import config, logging -from ...utils.misc import (unflatten, package_check, str2bool, - getsource, create_function_from_source) -from ...interfaces.base import (traits, InputMultiPath, CommandLine, - Undefined, TraitedSpec, DynamicTraitedSpec, - Bunch, InterfaceResult, md5, Interface, - TraitDictObject, TraitListObject, isdefined) - -from ...utils.filemanip import (save_json, FileNotFoundError, - filename_to_list, list_to_filename, - copyfiles, fnames_presuffix, loadpkl, - split_filename, load_json, savepkl, - write_rst_header, write_rst_dict, - write_rst_list, to_str) -from .utils import (generate_expanded_graph, modify_paths, - export_graph, make_output_dir, write_workflow_prov, - clean_working_directory, format_dot, topological_sort, - get_print_name, merge_dict, evaluate_connect_function, - _write_inputs, format_node) +from ...utils.misc import str2bool +from ...utils.functions import (getsource, create_function_from_source) + +from ...interfaces.base import ( + traits, TraitedSpec, TraitDictObject, TraitListObject) +from ...utils.filemanip import save_json, makedirs, to_str +from .utils import ( + generate_expanded_graph, export_graph, write_workflow_prov, + write_workflow_resources, format_dot, topological_sort, + get_print_name, merge_dict, format_node +) from .base import EngineBase -from .nodes import Node, MapNode +from .nodes import MapNode + +# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict +from future import standard_library +standard_library.install_aliases() -package_check('networkx', '1.3') logger = logging.getLogger('workflow') + class Workflow(EngineBase): """Controls the setup and execution of a pipeline of processes.""" @@ -185,7 +175,7 @@ def connect(self, *args, **kwargs): # check to see which ports of destnode are already # connected. 
if not disconnect and (destnode in self._graph.nodes()): - for edge in self._graph.in_edges_iter(destnode): + for edge in self._graph.in_edges(destnode): data = self._graph.get_edge_data(*edge) for sourceinfo, destname in data['connect']: if destname not in connected_ports[destnode]: @@ -195,21 +185,21 @@ def connect(self, *args, **kwargs): # determine their inputs/outputs depending on # connection settings. Skip these modules in the check if dest in connected_ports[destnode]: - raise Exception(""" + raise Exception("""\ Trying to connect %s:%s to %s:%s but input '%s' of node '%s' is already connected. """ % (srcnode, source, destnode, dest, dest, destnode)) if not (hasattr(destnode, '_interface') and - ('.io' in str(destnode._interface.__class__) or - any(['.io' in str(val) for val in - destnode._interface.__class__.__bases__])) + ('.io' in str(destnode._interface.__class__) or + any(['.io' in str(val) for val in + destnode._interface.__class__.__bases__])) ): if not destnode._check_inputs(dest): not_found.append(['in', destnode.name, dest]) if not (hasattr(srcnode, '_interface') and - ('.io' in str(srcnode._interface.__class__) - or any(['.io' in str(val) for val in - srcnode._interface.__class__.__bases__]))): + ('.io' in str(srcnode._interface.__class__) or + any(['.io' in str(val) + for val in srcnode._interface.__class__.__bases__]))): if isinstance(source, tuple): # handles the case that source is specified # with a function @@ -296,7 +286,7 @@ def disconnect(self, *args): remove = [] for edge in conn: if edge in ed_conns: - idx = ed_conns.index(edge) + # idx = ed_conns.index(edge) remove.append((edge[0], edge[1])) logger.debug('disconnect(): remove list %s', to_str(remove)) @@ -423,9 +413,9 @@ def write_graph(self, dotfilename='graph.dot', graph2use='hierarchical', base_dir = op.join(base_dir, self.name) else: base_dir = os.getcwd() - base_dir = make_output_dir(base_dir) + base_dir = makedirs(base_dir, exist_ok=True) if graph2use in ['hierarchical', 
'colored']: - if self.name[:1].isdigit(): # these graphs break if int + if self.name[:1].isdigit(): # these graphs break if int raise ValueError('{} graph failed, workflow name cannot begin ' 'with a number'.format(graph2use)) dotfilename = op.join(base_dir, dotfilename) @@ -506,8 +496,8 @@ def export(self, filename=None, prefix="output", format="python", else: lines.append(line) # write connections - for u, _, d in flatgraph.in_edges_iter(nbunch=node, - data=True): + for u, _, d in flatgraph.in_edges(nbunch=node, + data=True): for cd in d['connect']: if isinstance(cd[0], tuple): args = list(cd[0]) @@ -557,7 +547,7 @@ def run(self, plugin=None, plugin_args=None, updatehash=False): if not isinstance(plugin, (str, bytes)): runner = plugin else: - name = 'nipype.pipeline.plugins' + name = '.'.join(__name__.split('.')[:-2] + ['plugins']) try: __import__(name) except ImportError: @@ -569,12 +559,6 @@ def run(self, plugin=None, plugin_args=None, updatehash=False): runner = plugin_mod(plugin_args=plugin_args) flatgraph = self._create_flat_graph() self.config = merge_dict(deepcopy(config._sections), self.config) - if 'crashdump_dir' in self.config: - warn(("Deprecated: workflow.config['crashdump_dir']\n" - "Please use config['execution']['crashdump_dir']")) - crash_dir = self.config['crashdump_dir'] - self.config['execution']['crashdump_dir'] = crash_dir - del self.config['crashdump_dir'] logger.info('Workflow %s settings: %s', self.name, to_str(sorted(self.config))) self._set_needed_outputs(flatgraph) execgraph = generate_expanded_graph(deepcopy(flatgraph)) @@ -594,6 +578,13 @@ def run(self, plugin=None, plugin_args=None, updatehash=False): 'workflow_provenance_%s' % datestr) logger.info('Provenance file prefix: %s' % prov_base) write_workflow_prov(execgraph, prov_base, format='all') + + if config.resource_monitor: + base_dir = self.base_dir or os.getcwd() + write_workflow_resources( + execgraph, + filename=op.join(base_dir, self.name, 'resource_monitor.json') + ) return 
execgraph # PRIVATE API AND FUNCTIONS @@ -602,8 +593,7 @@ def _write_report_info(self, workingdir, name, graph): if workingdir is None: workingdir = os.getcwd() report_dir = op.join(workingdir, name) - if not op.exists(report_dir): - os.makedirs(report_dir) + makedirs(report_dir, exist_ok=True) shutil.copyfile(op.join(op.dirname(__file__), 'report_template.html'), op.join(report_dir, 'index.html')) @@ -633,7 +623,7 @@ def _write_report_info(self, workingdir, name, graph): total=N, name='Group_%05d' % gid)) json_dict['maxN'] = maxN - for u, v in graph.in_edges_iter(): + for u, v in graph.in_edges(): json_dict['links'].append(dict(source=nodes.index(u), target=nodes.index(v), value=1)) @@ -642,7 +632,7 @@ def _write_report_info(self, workingdir, name, graph): # Avoid RuntimeWarning: divide by zero encountered in log10 num_nodes = len(nodes) if num_nodes > 0: - index_name = np.ceil(np.log10(num_nodes)).astype(int) + index_name = np.ceil(np.log10(num_nodes)).astype(int) else: index_name = 0 template = '%%0%dd_' % index_name @@ -654,7 +644,7 @@ def getname(u, i): json_dict = [] for i, node in enumerate(nodes): imports = [] - for u, v in graph.in_edges_iter(nbunch=node): + for u, v in graph.in_edges(nbunch=node): imports.append(getname(u, nodes.index(u))) json_dict.append(dict(name=getname(node, i), size=1, @@ -669,7 +659,7 @@ def _set_needed_outputs(self, graph): return for node in graph.nodes(): node.needed_outputs = [] - for edge in graph.out_edges_iter(node): + for edge in graph.out_edges(node): data = graph.get_edge_data(*edge) sourceinfo = [v1[0] if isinstance(v1, tuple) else v1 for v1, v2 in data['connect']] @@ -683,7 +673,7 @@ def _configure_exec_nodes(self, graph): """ for node in graph.nodes(): node.input_source = {} - for edge in graph.in_edges_iter(node): + for edge in graph.in_edges(node): data = graph.get_edge_data(*edge) for sourceinfo, field in data['connect']: node.input_source[field] = \ @@ -700,8 +690,13 @@ def _check_nodes(self, nodes): for node in 
nodes: if node.name in node_names: idx = node_names.index(node.name) - if node_lineage[idx] in [node._hierarchy, self.name]: - raise IOError('Duplicate node name %s found.' % node.name) + try: + this_node_lineage = node_lineage[idx] + except IndexError: + raise IOError('Duplicate node name "%s" found.' % node.name) + else: + if this_node_lineage in [node._hierarchy, self.name]: + raise IOError('Duplicate node name "%s" found.' % node.name) else: node_names.append(node.name) @@ -753,8 +748,8 @@ def _get_inputs(self): setattr(inputdict, node.name, node.inputs) else: taken_inputs = [] - for _, _, d in self._graph.in_edges_iter(nbunch=node, - data=True): + for _, _, d in self._graph.in_edges(nbunch=node, + data=True): for cd in d['connect']: taken_inputs.append(cd[1]) unconnectedinputs = TraitedSpec() @@ -785,10 +780,10 @@ def _get_outputs(self): setattr(outputdict, node.name, outputs) return outputdict - def _set_input(self, object, name, newvalue): + def _set_input(self, objekt, name, newvalue): """Trait callback function to update a node input """ - object.traits()[name].node.set_input(name, newvalue) + objekt.traits()[name].node.set_input(name, newvalue) def _set_node_input(self, node, param, source, sourceinfo): """Set inputs of a node given the edge connection""" @@ -859,7 +854,8 @@ def _generate_flatgraph(self): # use in_edges instead of in_edges_iter to allow # disconnections to take place properly. otherwise, the # edge dict is modified. 
- for u, _, d in self._graph.in_edges(nbunch=node, data=True): + # dj: added list() for networkx ver.2 + for u, _, d in list(self._graph.in_edges(nbunch=node, data=True)): logger.debug('in: connections-> %s', to_str(d['connect'])) for cd in deepcopy(d['connect']): logger.debug("in: %s", to_str(cd)) @@ -872,7 +868,8 @@ def _generate_flatgraph(self): self.disconnect(u, cd[0], node, cd[1]) self.connect(srcnode, srcout, dstnode, dstin) # do not use out_edges_iter for reasons stated in in_edges - for _, v, d in self._graph.out_edges(nbunch=node, data=True): + # dj: for ver 2 use list(out_edges) + for _, v, d in list(self._graph.out_edges(nbunch=node, data=True)): logger.debug('out: connections-> %s', to_str(d['connect'])) for cd in deepcopy(d['connect']): logger.debug("out: %s", to_str(cd)) @@ -914,13 +911,13 @@ def _get_dot(self, prefix=None, hierarchy=None, colored=False, prefix = ' ' if hierarchy is None: hierarchy = [] - colorset = ['#FFFFC8', # Y - '#0000FF', '#B4B4FF', '#E6E6FF', # B - '#FF0000', '#FFB4B4', '#FFE6E6', # R - '#00A300', '#B4FFB4', '#E6FFE6', # G - '#0000FF', '#B4B4FF'] # loop B + colorset = ['#FFFFC8', # Y + '#0000FF', '#B4B4FF', '#E6E6FF', # B + '#FF0000', '#FFB4B4', '#FFE6E6', # R + '#00A300', '#B4FFB4', '#E6FFE6', # G + '#0000FF', '#B4B4FF'] # loop B if level > len(colorset) - 2: - level = 3 # Loop back to blue + level = 3 # Loop back to blue dotlist = ['%slabel="%s";' % (prefix, self.name)] for node in nx.topological_sort(self._graph): @@ -960,7 +957,7 @@ def _get_dot(self, prefix=None, hierarchy=None, colored=False, simple_form=simple_form, level=level + 3)) dotlist.append('}') else: - for subnode in self._graph.successors_iter(node): + for subnode in self._graph.successors(node): if node._hierarchy != subnode._hierarchy: continue if not isinstance(subnode, Workflow): @@ -975,7 +972,7 @@ def _get_dot(self, prefix=None, hierarchy=None, colored=False, subnodename)) logger.debug('connection: %s', dotlist[-1]) # add between workflow connections - 
for u, v, d in self._graph.edges_iter(data=True): + for u, v, d in self._graph.edges(data=True): uname = '.'.join(hierarchy + [u.fullname]) vname = '.'.join(hierarchy + [v.fullname]) for src, dest in d['connect']: diff --git a/nipype/pipeline/plugins/API.rst b/nipype/pipeline/plugins/API.rst deleted file mode 100644 index 57ef2632bc..0000000000 --- a/nipype/pipeline/plugins/API.rst +++ /dev/null @@ -1,8 +0,0 @@ -Execution plugin API -==================== - -Current status: - -class plugin_runner(PluginBase): - - def run(graph, config, updatehash) diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py index cb2c193004..34d3abdebc 100644 --- a/nipype/pipeline/plugins/__init__.py +++ b/nipype/pipeline/plugins/__init__.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, division, unicode_literals, absolute_import from .debug import DebugPlugin from .linear import LinearPlugin @@ -19,5 +20,4 @@ from .slurm import SLURMPlugin from .slurmgraph import SLURMGraphPlugin -from .callback_log import log_nodes_cb from . import semaphore_singleton diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py index 7334e00c52..ec8c68a148 100644 --- a/nipype/pipeline/plugins/base.py +++ b/nipype/pipeline/plugins/base.py @@ -9,211 +9,88 @@ from copy import deepcopy from glob import glob import os -import getpass import shutil -from socket import gethostname import sys -import uuid -from time import strftime, sleep, time -from traceback import format_exception, format_exc -from warnings import warn +from time import sleep, time +from traceback import format_exc import numpy as np import scipy.sparse as ssp - from ... 
import logging -from ...utils.filemanip import savepkl, loadpkl, crash2txt +from ...utils.filemanip import loadpkl from ...utils.misc import str2bool from ..engine.utils import (nx, dfs_preorder, topological_sort) from ..engine import MapNode - +from .tools import report_crash, report_nodes_not_run, create_pyscript logger = logging.getLogger('workflow') -iflogger = logging.getLogger('interface') -def report_crash(node, traceback=None, hostname=None): - """Writes crash related information to a file +class PluginBase(object): """ - name = node._id - if node.result and hasattr(node.result, 'runtime') and \ - node.result.runtime: - if isinstance(node.result.runtime, list): - host = node.result.runtime[0].hostname - else: - host = node.result.runtime.hostname - else: - if hostname: - host = hostname - else: - host = gethostname() - message = ['Node %s failed to run on host %s.' % (name, - host)] - logger.error(message) - if not traceback: - exc_type, exc_value, exc_traceback = sys.exc_info() - traceback = format_exception(exc_type, - exc_value, - exc_traceback) - timeofcrash = strftime('%Y%m%d-%H%M%S') - login_name = getpass.getuser() - crashfile = 'crash-%s-%s-%s-%s' % (timeofcrash, - login_name, - name, - str(uuid.uuid4())) - crashdir = node.config['execution']['crashdump_dir'] - if crashdir is None: - crashdir = os.getcwd() - if not os.path.exists(crashdir): - os.makedirs(crashdir) - crashfile = os.path.join(crashdir, crashfile) - if node.config['execution']['crashfile_format'].lower() in ['text', 'txt']: - crashfile += '.txt' - else: - crashfile += '.pklz' - logger.info('Saving crash info to %s' % crashfile) - logger.info(''.join(traceback)) - if node.config['execution']['crashfile_format'].lower() in ['text', 'txt']: - crash2txt(crashfile, dict(node=node, traceback=traceback)) - else: - savepkl(crashfile, dict(node=node, traceback=traceback)) - return crashfile - - -def report_nodes_not_run(notrun): - """List nodes that crashed with crashfile info - - Optionally 
displays dependent nodes that weren't executed as a result of - the crash. + Base class for plugins + """ - if notrun: - logger.info("***********************************") - for info in notrun: - logger.error("could not run node: %s" % - '.'.join((info['node']._hierarchy, - info['node']._id))) - logger.info("crashfile: %s" % info['crashfile']) - logger.debug("The following dependent nodes were not run") - for subnode in info['dependents']: - logger.debug(subnode._id) - logger.info("***********************************") - raise RuntimeError(('Workflow did not execute cleanly. ' - 'Check log for details')) - - -def create_pyscript(node, updatehash=False, store_exception=True): - # pickle node - timestamp = strftime('%Y%m%d_%H%M%S') - if node._hierarchy: - suffix = '%s_%s_%s' % (timestamp, node._hierarchy, node._id) - batch_dir = os.path.join(node.base_dir, - node._hierarchy.split('.')[0], - 'batch') - else: - suffix = '%s_%s' % (timestamp, node._id) - batch_dir = os.path.join(node.base_dir, 'batch') - if not os.path.exists(batch_dir): - os.makedirs(batch_dir) - pkl_file = os.path.join(batch_dir, 'node_%s.pklz' % suffix) - savepkl(pkl_file, dict(node=node, updatehash=updatehash)) - mpl_backend = node.config["execution"]["matplotlib_backend"] - # create python script to load and trap exception - cmdstr = """import os -import sys -can_import_matplotlib = True #Silently allow matplotlib to be ignored -try: - import matplotlib - matplotlib.use('%s') -except ImportError: - can_import_matplotlib = False - pass - -from nipype import config, logging -from nipype.utils.filemanip import loadpkl, savepkl -from socket import gethostname -from traceback import format_exception -info = None -pklfile = '%s' -batchdir = '%s' -from nipype.utils.filemanip import loadpkl, savepkl -try: - if not sys.version_info < (2, 7): - from collections import OrderedDict - config_dict=%s - config.update_config(config_dict) - ## Only configure matplotlib if it was successfully imported, matplotlib is 
an optional component to nipype - if can_import_matplotlib: - config.update_matplotlib() - logging.update_logging(config) - traceback=None - cwd = os.getcwd() - info = loadpkl(pklfile) - result = info['node'].run(updatehash=info['updatehash']) -except Exception as e: - etype, eval, etr = sys.exc_info() - traceback = format_exception(etype,eval,etr) - if info is None or not os.path.exists(info['node'].output_dir()): - result = None - resultsfile = os.path.join(batchdir, 'crashdump_%s.pklz') - else: - result = info['node'].result - resultsfile = os.path.join(info['node'].output_dir(), - 'result_%%s.pklz'%%info['node'].name) -""" - if store_exception: - cmdstr += """ - savepkl(resultsfile, dict(result=result, hostname=gethostname(), - traceback=traceback)) -""" - else: - cmdstr += """ - if info is None: - savepkl(resultsfile, dict(result=result, hostname=gethostname(), - traceback=traceback)) - else: - from nipype.pipeline.plugins.base import report_crash - report_crash(info['node'], traceback, gethostname()) - raise Exception(e) -""" - cmdstr = cmdstr % (mpl_backend, pkl_file, batch_dir, node.config, suffix) - pyscript = os.path.join(batch_dir, 'pyscript_%s.py' % suffix) - with open(pyscript, 'wt') as fp: - fp.writelines(cmdstr) - return pyscript + def __init__(self, plugin_args=None): + if plugin_args is None: + plugin_args = {} + self.plugin_args = plugin_args + self._config = None + self._status_callback = plugin_args.get('status_callback') + def run(self, graph, config, updatehash=False): + """ + The core plugin member that should be implemented by + all plugins. 
-class PluginBase(object): - """Base class for plugins""" + graph: a networkx, flattened :abbr:`DAG (Directed Acyclic Graph)` + to be executed - def __init__(self, plugin_args=None): - if plugin_args and 'status_callback' in plugin_args: - self._status_callback = plugin_args['status_callback'] - else: - self._status_callback = None - return + config: a nipype.config object - def run(self, graph, config, updatehash=False): + updatehash: + + """ raise NotImplementedError class DistributedPluginBase(PluginBase): - """Execute workflow with a distribution engine + """ + Execute workflow with a distribution engine + + Relevant class attributes + ------------------------- + + procs: list (N) of underlying interface elements to be processed + proc_done: a boolean numpy array (N,) signifying whether a process has been + submitted for execution + proc_pending: a boolean numpy array (N,) signifying whether a + process is currently running. + depidx: a boolean matrix (NxN) storing the dependency structure accross + processes. Process dependencies are derived from each column. 
+ + Combinations of ``proc_done`` and ``proc_pending`` + -------------------------------------------------- + + +------------+---------------+--------------------------------+ + | proc_done | proc_pending | outcome | + +============+===============+================================+ + | True | False | Process is finished | + +------------+---------------+--------------------------------+ + | True | True | Process is currently being run | + +------------+---------------+--------------------------------+ + | False | False | Process is queued | + +------------+---------------+--------------------------------+ + | False | True | INVALID COMBINATION | + +------------+---------------+--------------------------------+ """ def __init__(self, plugin_args=None): - """Initialize runtime attributes to none - - procs: list (N) of underlying interface elements to be processed - proc_done: a boolean vector (N) signifying whether a process has been - executed - proc_pending: a boolean vector (N) signifying whether a - process is currently running. Note: A process is finished only when - both proc_done==True and - proc_pending==False - depidx: a boolean matrix (NxN) storing the dependency structure accross - processes. Process dependencies are derived from each column. 
+ """ + Initialize runtime attributes to none + """ super(DistributedPluginBase, self).__init__(plugin_args=plugin_args) self.procs = None @@ -223,32 +100,63 @@ def __init__(self, plugin_args=None): self.mapnodesubids = None self.proc_done = None self.proc_pending = None - self.max_jobs = np.inf - if plugin_args and 'max_jobs' in plugin_args: - self.max_jobs = plugin_args['max_jobs'] + self.pending_tasks = [] + self.max_jobs = self.plugin_args.get('max_jobs', np.inf) + + def _prerun_check(self, graph): + """Stub method to validate/massage graph and nodes before running""" + + def _postrun_check(self): + """Stub method to close any open resources""" def run(self, graph, config, updatehash=False): - """Executes a pre-defined pipeline using distributed approaches + """ + Executes a pre-defined pipeline using distributed approaches """ logger.info("Running in parallel.") self._config = config + poll_sleep_secs = float(config['execution']['poll_sleep_duration']) + + self._prerun_check(graph) # Generate appropriate structures for worker-manager model self._generate_dependency_list(graph) - self.pending_tasks = [] - self.readytorun = [] self.mapnodes = [] self.mapnodesubids = {} # setup polling - TODO: change to threaded model notrun = [] - while np.any(self.proc_done == False) | \ - np.any(self.proc_pending == True): + old_progress_stats = None + old_presub_stats = None + while not np.all(self.proc_done) or np.any(self.proc_pending): + loop_start = time() + # Check if a job is available (jobs with all dependencies run) + # https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + jobs_ready = np.nonzero(~self.proc_done & + (self.depidx.sum(0) == 0))[1] + + progress_stats = (len(self.proc_done), + np.sum(self.proc_done ^ self.proc_pending), + np.sum(self.proc_done & self.proc_pending), + len(jobs_ready), + len(self.pending_tasks), + np.sum(~self.proc_done & ~self.proc_pending)) + display_stats = progress_stats != old_progress_stats + if display_stats: + 
logger.debug('Progress: %d jobs, %d/%d/%d ' + '(done/running/ready), %d/%d ' + '(pending_tasks/waiting).', *progress_stats) + old_progress_stats = progress_stats toappend = [] # trigger callbacks for any pending results while self.pending_tasks: taskid, jobid = self.pending_tasks.pop() try: result = self._get_result(taskid) + except Exception: + notrun.append(self._clean_queue( + jobid, graph, result={'result': None, + 'traceback': format_exc()})) + else: if result: if result['traceback']: notrun.append(self._clean_queue(jobid, graph, @@ -258,36 +166,34 @@ def run(self, graph, config, updatehash=False): self._remove_node_dirs() self._clear_task(taskid) else: + assert self.proc_done[jobid] and \ + self.proc_pending[jobid] toappend.insert(0, (taskid, jobid)) - except Exception: - result = {'result': None, - 'traceback': format_exc()} - notrun.append(self._clean_queue(jobid, graph, - result=result)) + if toappend: self.pending_tasks.extend(toappend) + num_jobs = len(self.pending_tasks) - logger.debug('Number of pending tasks: %d' % num_jobs) + presub_stats = (num_jobs, + np.sum(self.proc_done & self.proc_pending)) + display_stats = display_stats or presub_stats != old_presub_stats + if display_stats: + logger.debug('Tasks currently running: %d. 
Pending: %d.', + *presub_stats) + old_presub_stats = presub_stats if num_jobs < self.max_jobs: - self._send_procs_to_workers(updatehash=updatehash, - graph=graph) - else: - logger.debug('Not submitting') - self._wait() + self._send_procs_to_workers(updatehash=updatehash, graph=graph) + elif display_stats: + logger.debug('Not submitting (max jobs reached)') + + sleep_til = loop_start + poll_sleep_secs + sleep(max(0, sleep_til - time())) self._remove_node_dirs() report_nodes_not_run(notrun) # close any open resources - self._close() - - def _wait(self): - sleep(float(self._config['execution']['poll_sleep_duration'])) - - def _close(self): - # close any open resources, this could raise NotImplementedError - # but I didn't want to break other plugins - return True + self._postrun_check() def _get_result(self, taskid): raise NotImplementedError @@ -296,18 +202,26 @@ def _submit_job(self, node, updatehash=False): raise NotImplementedError def _report_crash(self, node, result=None): - raise NotImplementedError + tb = None + if result is not None: + node._result = result['result'] + tb = result['traceback'] + node._traceback = tb + return report_crash(node, traceback=tb) def _clear_task(self, taskid): raise NotImplementedError def _clean_queue(self, jobid, graph, result=None): + logger.debug('Clearing %d from queue', jobid) + + if self._status_callback: + self._status_callback(self.procs[jobid], 'exception') + if str2bool(self._config['execution']['stop_on_first_crash']): raise RuntimeError("".join(result['traceback'])) crashfile = self._report_crash(self.procs[jobid], result=result) - if self._status_callback: - self._status_callback(self.procs[jobid], 'exception') if jobid in self.mapnodesubids: # remove current jobid self.proc_pending[jobid] = False @@ -325,8 +239,8 @@ def _submit_mapnode(self, jobid): self.mapnodes.append(jobid) mapnodesubids = self.procs[jobid].get_subnodes() numnodes = len(mapnodesubids) - logger.info('Adding %d jobs for mapnode %s' % (numnodes, - 
self.procs[jobid]._id)) + logger.debug('Adding %d jobs for mapnode %s', + numnodes, self.procs[jobid]._id) for i in range(numnodes): self.mapnodesubids[self.depidx.shape[0] + i] = jobid self.procs.extend(mapnodesubids) @@ -347,9 +261,11 @@ def _submit_mapnode(self, jobid): return False def _send_procs_to_workers(self, updatehash=False, graph=None): - """ Sends jobs to workers """ - while np.any(self.proc_done == False): + Sends jobs to workers + """ + + while not np.all(self.proc_done): num_jobs = len(self.pending_tasks) if np.isinf(self.max_jobs): slots = None @@ -358,15 +274,16 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): logger.debug('Slots available: %s' % slots) if (num_jobs >= self.max_jobs) or (slots == 0): break - # Check to see if a job is available - jobids = np.flatnonzero((self.proc_done == False) & - (self.depidx.sum(axis=0) == 0).__array__()) + + # Check if a job is available (jobs with all dependencies run) + # https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + jobids = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1] + if len(jobids) > 0: # send all available jobs - if slots: - logger.info('Pending[%d] Submitting[%d] jobs Slots[%d]' % (num_jobs, len(jobids[:slots]), slots)) - else: - logger.info('Pending[%d] Submitting[%d] jobs Slots[inf]' % (num_jobs, len(jobids))) + logger.info('Pending[%d] Submitting[%d] jobs Slots[%d]', + num_jobs, len(jobids[:slots]), slots or 'inf') + for jobid in jobids[:slots]: if isinstance(self.procs[jobid], MapNode): try: @@ -387,27 +304,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): (self.procs[jobid]._id, jobid)) if self._status_callback: self._status_callback(self.procs[jobid], 'start') - continue_with_submission = True - if str2bool(self.procs[jobid].config['execution'] - ['local_hash_check']): - logger.debug('checking hash locally') - try: - hash_exists, _, _, _ = self.procs[ - jobid].hash_exists() - logger.debug('Hash exists %s' % str(hash_exists)) 
- if (hash_exists and (self.procs[jobid].overwrite is False or - (self.procs[jobid].overwrite is None and not - self.procs[jobid]._interface.always_run))): - continue_with_submission = False - self._task_finished_cb(jobid) - self._remove_node_dirs() - except Exception: - self._clean_queue(jobid, graph) - self.proc_pending[jobid] = False - continue_with_submission = False - logger.debug('Finished checking hash %s' % - str(continue_with_submission)) - if continue_with_submission: + + if not self._local_hash_check(jobid, graph): if self.procs[jobid].run_without_submitting: logger.debug('Running node %s on master thread' % self.procs[jobid]) @@ -430,13 +328,40 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): else: break - def _task_finished_cb(self, jobid): + def _local_hash_check(self, jobid, graph): + if not str2bool(self.procs[jobid].config['execution'][ + 'local_hash_check']): + return False + + logger.debug('Checking hash (%d) locally', jobid) + + hash_exists, _, _, _ = self.procs[jobid].hash_exists() + overwrite = self.procs[jobid].overwrite + always_run = self.procs[jobid]._interface.always_run + + if hash_exists and (overwrite is False or + overwrite is None and not always_run): + logger.debug('Skipping cached node %s with ID %s.', + self.procs[jobid]._id, jobid) + try: + self._task_finished_cb(jobid, cached=True) + self._remove_node_dirs() + except Exception: + logger.debug('Error skipping cached node %s (%s).', + self.procs[jobid]._id, jobid) + self._clean_queue(jobid, graph) + self.proc_pending[jobid] = False + return True + return False + + def _task_finished_cb(self, jobid, cached=False): """ Extract outputs and assign to inputs of dependent tasks This is called when a job is completed. 
""" - logger.info('[Job finished] jobname: %s jobid: %d' % - (self.procs[jobid]._id, jobid)) + logger.info('[Job %d] %s (%s).', jobid, + 'Cached' if cached else 'Completed', + self.procs[jobid].fullname) if self._status_callback: self._status_callback(self.procs[jobid], 'end') # Update job and worker queues @@ -477,13 +402,13 @@ def _remove_node_dirs(self): """Removes directories whose outputs have already been used up """ if str2bool(self._config['execution']['remove_node_directories']): - for idx in np.nonzero( - (self.refidx.sum(axis=1) == 0).__array__())[0]: + indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0] + for idx in indices: if idx in self.mapnodesubids: continue if self.proc_done[idx] and (not self.proc_pending[idx]): self.refidx[idx, idx] = -1 - outdir = self.procs[idx]._output_directory() + outdir = self.procs[idx].output_dir() logger.info(('[node dependencies finished] ' 'removing node: %s from directory %s') % (self.procs[idx]._id, outdir)) @@ -583,15 +508,6 @@ def _submit_job(self, node, updatehash=False): fp.writelines(batchscript) return self._submit_batchtask(batchscriptfile, node) - def _report_crash(self, node, result=None): - if result and result['traceback']: - node._result = result['result'] - node._traceback = result['traceback'] - return report_crash(node, - traceback=result['traceback']) - else: - return report_crash(node) - def _clear_task(self, taskid): del self._pending[taskid] @@ -601,8 +517,9 @@ class GraphPluginBase(PluginBase): """ def __init__(self, plugin_args=None): - if plugin_args and 'status_callback' in plugin_args: - warn('status_callback not supported for Graph submission plugins') + if plugin_args and plugin_args.get('status_callback'): + logger.warning('status_callback not supported for Graph submission' + ' plugins') super(GraphPluginBase, self).__init__(plugin_args=plugin_args) def run(self, graph, config, updatehash=False): diff --git a/nipype/pipeline/plugins/callback_log.py 
b/nipype/pipeline/plugins/callback_log.py deleted file mode 100644 index 5ddc9eedd5..0000000000 --- a/nipype/pipeline/plugins/callback_log.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -"""Callback logger for recording workflow and node run stats -""" -from __future__ import print_function, division, unicode_literals, absolute_import - - -# Log node stats function -def log_nodes_cb(node, status): - """Function to record node run statistics to a log file as json - dictionaries - - Parameters - ---------- - node : nipype.pipeline.engine.Node - the node being logged - status : string - acceptable values are 'start', 'end'; otherwise it is - considered and error - - Returns - ------- - None - this function does not return any values, it logs the node - status info to the callback logger - """ - - # Import packages - import datetime - import logging - import json - - # Check runtime profile stats - if node.result is not None: - try: - runtime = node.result.runtime - runtime_memory_gb = runtime.runtime_memory_gb - runtime_threads = runtime.runtime_threads - except AttributeError: - runtime_memory_gb = runtime_threads = 'Unknown' - else: - runtime_memory_gb = runtime_threads = 'N/A' - - # Init variables - logger = logging.getLogger('callback') - status_dict = {'name' : node.name, - 'id' : node._id, - 'estimated_memory_gb' : node._interface.estimated_memory_gb, - 'num_threads' : node._interface.num_threads} - - # Check status and write to log - # Start - if status == 'start': - status_dict['start'] = str(datetime.datetime.now()) - # End - elif status == 'end': - status_dict['finish'] = str(datetime.datetime.now()) - status_dict['runtime_threads'] = runtime_threads - status_dict['runtime_memory_gb'] = runtime_memory_gb - # Other - else: - status_dict['finish'] = str(datetime.datetime.now()) - status_dict['error'] = True - - # Dump string to log - 
logger.debug(json.dumps(status_dict)) diff --git a/nipype/pipeline/plugins/condor.py b/nipype/pipeline/plugins/condor.py index 9b8b5c218d..0548a7afbc 100644 --- a/nipype/pipeline/plugins/condor.py +++ b/nipype/pipeline/plugins/condor.py @@ -7,7 +7,9 @@ from time import sleep from ...interfaces.base import CommandLine -from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) +from ... import logging +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('interface') class CondorPlugin(SGELikeBatchManagerBase): @@ -46,6 +48,7 @@ def __init__(self, **kwargs): def _is_pending(self, taskid): cmd = CommandLine('condor_q', + resource_monitor=False, terminal_output='allatonce') cmd.inputs.args = '%d' % taskid # check condor cluster @@ -59,6 +62,7 @@ def _is_pending(self, taskid): def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('condor_qsub', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') path = os.path.dirname(scriptfile) qsubargs = '' diff --git a/nipype/pipeline/plugins/dagman.py b/nipype/pipeline/plugins/dagman.py index 1001ab5dac..ce2a2a5592 100644 --- a/nipype/pipeline/plugins/dagman.py +++ b/nipype/pipeline/plugins/dagman.py @@ -10,7 +10,7 @@ import time from warnings import warn -from .base import (GraphPluginBase, logger) +from .base import GraphPluginBase, logger from ...interfaces.base import CommandLine @@ -154,6 +154,7 @@ def _submit_graph(self, pyfiles, dependencies, nodes): child)) # hand over DAG to condor_dagman cmd = CommandLine('condor_submit_dag', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') # needs -update_submit or re-running a workflow will fail cmd.inputs.args = '%s -update_submit %s' % (self._dagman_args, diff --git a/nipype/pipeline/plugins/ipythonx.py b/nipype/pipeline/plugins/ipythonx.py index 8cb3c4190a..d76cdfeb98 100644 --- a/nipype/pipeline/plugins/ipythonx.py +++ b/nipype/pipeline/plugins/ipythonx.py @@ -8,7 +8,7 @@ 
import sys from future.utils import raise_from -from ...interfaces.base import LooseVersion +from ... import LooseVersion from .base import (DistributedPluginBase, logger, report_crash) IPython_not_loaded = False @@ -20,8 +20,6 @@ IPython_not_loaded = True - - class IPythonXPlugin(DistributedPluginBase): """Execute workflow with ipython """ diff --git a/nipype/pipeline/plugins/lsf.py b/nipype/pipeline/plugins/lsf.py index 6e27b3ab95..5ee0483221 100644 --- a/nipype/pipeline/plugins/lsf.py +++ b/nipype/pipeline/plugins/lsf.py @@ -7,8 +7,10 @@ import re from time import sleep -from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) +from ... import logging from ...interfaces.base import CommandLine +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('interface') class LSFPlugin(SGELikeBatchManagerBase): @@ -45,6 +47,7 @@ def _is_pending(self, taskid): finished and is ready to be checked for completeness. So return True if status is either 'PEND' or 'RUN'""" cmd = CommandLine('bjobs', + resource_monitor=False, terminal_output='allatonce') cmd.inputs.args = '%d' % taskid # check lsf task @@ -60,6 +63,7 @@ def _is_pending(self, taskid): def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('bsub', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') path = os.path.dirname(scriptfile) bsubargs = '' diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py index 3994f2e1cd..86c021decd 100644 --- a/nipype/pipeline/plugins/multiproc.py +++ b/nipype/pipeline/plugins/multiproc.py @@ -7,25 +7,35 @@ http://stackoverflow.com/a/8963618/1183453 """ from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import open # Import packages from multiprocessing import Process, Pool, cpu_count, pool -import threading from traceback import format_exception import sys +from logging import INFO +import gc from copy import deepcopy import 
numpy as np - -from ... import logging, config -from ...utils.misc import str2bool +from ... import logging +from ...utils.profiler import get_system_total_memory_gb from ..engine import MapNode -from .base import (DistributedPluginBase, report_crash) +from .base import DistributedPluginBase + +try: + from textwrap import indent +except ImportError: + def indent(text, prefix): + """ A textwrap.indent replacement for Python < 3.3 """ + if not prefix: + return text + splittext = text.splitlines(True) + return prefix + prefix.join(splittext) # Init logger logger = logging.getLogger('workflow') + # Run node def run_node(node, updatehash, taskid): """Function to execute node.run(), catch and log any errors and @@ -51,8 +61,7 @@ def run_node(node, updatehash, taskid): try: result['result'] = node.run(updatehash=updatehash) except: - etype, eval, etr = sys.exc_info() - result['traceback'] = format_exception(etype, eval, etr) + result['traceback'] = format_exception(*sys.exc_info()) result['result'] = node.result # Return the result dictionary @@ -62,6 +71,7 @@ def run_node(node, updatehash, taskid): class NonDaemonProcess(Process): """A non-daemon process to support internal multiprocessing. 
""" + def _get_daemon(self): return False @@ -77,36 +87,9 @@ class NonDaemonPool(pool.Pool): Process = NonDaemonProcess -# Get total system RAM -def get_system_total_memory_gb(): - """Function to get the total RAM of the running system in GB - """ - - # Import packages - import os - import sys - - # Get memory - if 'linux' in sys.platform: - with open('/proc/meminfo', 'r') as f_in: - meminfo_lines = f_in.readlines() - mem_total_line = [line for line in meminfo_lines \ - if 'MemTotal' in line][0] - mem_total = float(mem_total_line.split()[1]) - memory_gb = mem_total/(1024.0**2) - elif 'darwin' in sys.platform: - mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1] - memory_gb = float(mem_str)/(1024.0**3) - else: - err_msg = 'System platform: %s is not supported' - raise Exception(err_msg) - - # Return memory - return memory_gb - - class MultiProcPlugin(DistributedPluginBase): - """Execute workflow with multiprocessing, not sending more jobs at once + """ + Execute workflow with multiprocessing, not sending more jobs at once than the system can support. The plugin_args input to run can be used to control the multiprocessing @@ -114,17 +97,27 @@ class MultiProcPlugin(DistributedPluginBase): should be used. When those parameters are not specified, the number of threads and memory of the system is used. - System consuming nodes should be tagged: - memory_consuming_node.interface.estimated_memory_gb = 8 - thread_consuming_node.interface.num_threads = 16 + System consuming nodes should be tagged:: + + memory_consuming_node.mem_gb = 8 + thread_consuming_node.n_procs = 16 - The default number of threads and memory for a node is 1. + The default number of threads and memory are set at node + creation, and are 1 and 0.25GB respectively. Currently supported options are: - non_daemon : boolean flag to execute as non-daemon processes - n_procs: maximum number of threads to be executed in parallel - memory_gb: maximum memory (in GB) that can be used at once. 
+ - raise_insufficient: raise error if the requested resources for + a node over the maximum `n_procs` and/or `memory_gb` + (default is ``True``). + - scheduler: sort jobs topologically (``'tsort'``, default value) + or prioritize jobs by, first, memory consumption and, second, + number of threads (``'mem_thread'`` option). + - maxtasksperchild: number of nodes to run on each process before + refreshing the worker (default: 10). """ @@ -134,207 +127,222 @@ def __init__(self, plugin_args=None): self._taskresult = {} self._task_obj = {} self._taskid = 0 - non_daemon = True - self.plugin_args = plugin_args - self.processors = cpu_count() - self.memory_gb = get_system_total_memory_gb()*0.9 # 90% of system memory - - self._timeout=2.0 - self._event = threading.Event() + # Read in options or set defaults. + non_daemon = self.plugin_args.get('non_daemon', True) + maxtasks = self.plugin_args.get('maxtasksperchild', 10) + self.processors = self.plugin_args.get('n_procs', cpu_count()) + self.memory_gb = self.plugin_args.get( + 'memory_gb', # Allocate 90% of system memory + get_system_total_memory_gb() * 0.9) + self.raise_insufficient = self.plugin_args.get('raise_insufficient', + True) + # Instantiate different thread pools for non-daemon processes + logger.debug('[MultiProc] Starting in "%sdaemon" mode (n_procs=%d, ' + 'mem_gb=%0.2f)', 'non' * int(non_daemon), self.processors, + self.memory_gb) - # Check plugin args - if self.plugin_args: - if 'non_daemon' in self.plugin_args: - non_daemon = plugin_args['non_daemon'] - if 'n_procs' in self.plugin_args: - self.processors = self.plugin_args['n_procs'] - if 'memory_gb' in self.plugin_args: - self.memory_gb = self.plugin_args['memory_gb'] - - logger.debug("MultiProcPlugin starting %d threads in pool"%(self.processors)) + NipypePool = NonDaemonPool if non_daemon else Pool + try: + self.pool = NipypePool(processes=self.processors, + maxtasksperchild=maxtasks) + except TypeError: + self.pool = NipypePool(processes=self.processors) 
- # Instantiate different thread pools for non-daemon processes - if non_daemon: - # run the execution using the non-daemon pool subclass - self.pool = NonDaemonPool(processes=self.processors) - else: - self.pool = Pool(processes=self.processors) - - def _wait(self): - if len(self.pending_tasks) > 0: - if self._config['execution']['poll_sleep_duration']: - self._timeout = float(self._config['execution']['poll_sleep_duration']) - sig_received=self._event.wait(self._timeout) - if not sig_received: - logger.debug('MultiProcPlugin timeout before signal received. Deadlock averted??') - self._event.clear() + self._stats = None def _async_callback(self, args): - self._taskresult[args['taskid']]=args - self._event.set() + self._taskresult[args['taskid']] = args def _get_result(self, taskid): - if taskid not in self._taskresult: - result=None - else: - result=self._taskresult[taskid] - return result - - def _report_crash(self, node, result=None): - if result and result['traceback']: - node._result = result['result'] - node._traceback = result['traceback'] - return report_crash(node, - traceback=result['traceback']) - else: - return report_crash(node) + return self._taskresult.get(taskid) def _clear_task(self, taskid): del self._task_obj[taskid] def _submit_job(self, node, updatehash=False): self._taskid += 1 - if hasattr(node.inputs, 'terminal_output'): - if node.inputs.terminal_output == 'stream': - node.inputs.terminal_output = 'allatonce' - - self._task_obj[self._taskid] = \ - self.pool.apply_async(run_node, - (node, updatehash, self._taskid), - callback=self._async_callback) + + # Don't allow streaming outputs + if getattr(node.interface, 'terminal_output', '') == 'stream': + node.interface.terminal_output = 'allatonce' + + self._task_obj[self._taskid] = self.pool.apply_async( + run_node, (node, updatehash, self._taskid), + callback=self._async_callback) + + logger.debug('[MultiProc] Submitted task %s (taskid=%d).', + node.fullname, self._taskid) return self._taskid - 
def _close(self): + def _prerun_check(self, graph): + """Check if any node exeeds the available resources""" + tasks_mem_gb = [] + tasks_num_th = [] + for node in graph.nodes(): + tasks_mem_gb.append(node.mem_gb) + tasks_num_th.append(node.n_procs) + + if np.any(np.array(tasks_mem_gb) > self.memory_gb): + logger.warning( + 'Some nodes exceed the total amount of memory available ' + '(%0.2fGB).', self.memory_gb) + if self.raise_insufficient: + raise RuntimeError('Insufficient resources available for job') + + if np.any(np.array(tasks_num_th) > self.processors): + logger.warning( + 'Some nodes demand for more threads than available (%d).', + self.processors) + if self.raise_insufficient: + raise RuntimeError('Insufficient resources available for job') + + def _postrun_check(self): self.pool.close() - return True - def _send_procs_to_workers(self, updatehash=False, graph=None): - """ Sends jobs to workers when system resources are available. - Check memory (gb) and cores usage before running jobs. 
+ def _check_resources(self, running_tasks): """ - executing_now = [] - - # Check to see if a job is available - currently_running_jobids = np.flatnonzero((self.proc_pending == True) & \ - (self.depidx.sum(axis=0) == 0).__array__()) + Make sure there are resources available + """ + free_memory_gb = self.memory_gb + free_processors = self.processors + for _, jobid in running_tasks: + free_memory_gb -= min(self.procs[jobid].mem_gb, free_memory_gb) + free_processors -= min(self.procs[jobid].n_procs, free_processors) - # Check available system resources by summing all threads and memory used - busy_memory_gb = 0 - busy_processors = 0 - for jobid in currently_running_jobids: - if self.procs[jobid]._interface.estimated_memory_gb <= self.memory_gb and \ - self.procs[jobid]._interface.num_threads <= self.processors: + return free_memory_gb, free_processors - busy_memory_gb += self.procs[jobid]._interface.estimated_memory_gb - busy_processors += self.procs[jobid]._interface.num_threads + def _send_procs_to_workers(self, updatehash=False, graph=None): + """ + Sends jobs to workers when system resources are available. 
+ """ - else: - raise ValueError( - "Resources required by jobid {0} ({3}GB, {4} threads) exceed what is " - "available on the system ({1}GB, {2} threads)".format( - jobid, self.memory_gb, self.processors, - self.procs[jobid]._interface.estimated_memory_gb, - self.procs[jobid]._interface.num_threads)) - - free_memory_gb = self.memory_gb - busy_memory_gb - free_processors = self.processors - busy_processors - - # Check all jobs without dependency not run - jobids = np.flatnonzero((self.proc_done == False) & \ - (self.depidx.sum(axis=0) == 0).__array__()) - - # Sort jobs ready to run first by memory and then by number of threads - # The most resource consuming jobs run first - jobids = sorted(jobids, - key=lambda item: (self.procs[item]._interface.estimated_memory_gb, - self.procs[item]._interface.num_threads)) - - if str2bool(config.get('execution', 'profile_runtime')): - logger.debug('Free memory (GB): %d, Free processors: %d', - free_memory_gb, free_processors) - - # While have enough memory and processors for first job - # Submit first job on the list + # Check to see if a job is available (jobs with all dependencies run) + # See https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + jobids = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1] + + # Check available resources by summing all threads and memory used + free_memory_gb, free_processors = self._check_resources( + self.pending_tasks) + + stats = (len(self.pending_tasks), len(jobids), free_memory_gb, + self.memory_gb, free_processors, self.processors) + if self._stats != stats: + tasks_list_msg = '' + + if logger.level <= INFO: + running_tasks = [' * %s' % self.procs[jobid].fullname + for _, jobid in self.pending_tasks] + if running_tasks: + tasks_list_msg = '\nCurrently running:\n' + tasks_list_msg += '\n'.join(running_tasks) + tasks_list_msg = indent(tasks_list_msg, ' ' * 21) + logger.info('[MultiProc] Running %d tasks, and %d jobs ready. 
Free ' + 'memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s', + len(self.pending_tasks), len(jobids), free_memory_gb, self.memory_gb, + free_processors, self.processors, tasks_list_msg) + self._stats = stats + + if free_memory_gb < 0.01 or free_processors == 0: + logger.debug('No resources available') + return + + if len(jobids) + len(self.pending_tasks) == 0: + logger.debug('No tasks are being run, and no jobs can ' + 'be submitted to the queue. Potential deadlock') + return + + jobids = self._sort_jobs(jobids, + scheduler=self.plugin_args.get('scheduler')) + + # Run garbage collector before potentially submitting jobs + gc.collect() + + # Submit jobs for jobid in jobids: - if str2bool(config.get('execution', 'profile_runtime')): - logger.debug('Next Job: %d, memory (GB): %d, threads: %d' \ - % (jobid, - self.procs[jobid]._interface.estimated_memory_gb, - self.procs[jobid]._interface.num_threads)) - - if self.procs[jobid]._interface.estimated_memory_gb <= free_memory_gb and \ - self.procs[jobid]._interface.num_threads <= free_processors: - logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid)) - executing_now.append(self.procs[jobid]) - - if isinstance(self.procs[jobid], MapNode): - try: - num_subnodes = self.procs[jobid].num_subnodes() - except Exception: - etype, eval, etr = sys.exc_info() - traceback = format_exception(etype, eval, etr) - report_crash(self.procs[jobid], traceback=traceback) - self._clean_queue(jobid, graph) - self.proc_pending[jobid] = False - continue - if num_subnodes > 1: - submit = self._submit_mapnode(jobid) - if not submit: - continue - - # change job status in appropriate queues - self.proc_done[jobid] = True - self.proc_pending[jobid] = True - - free_memory_gb -= self.procs[jobid]._interface.estimated_memory_gb - free_processors -= self.procs[jobid]._interface.num_threads - - # Send job to task manager and add to pending tasks - if self._status_callback: - self._status_callback(self.procs[jobid], 'start') - if 
str2bool(self.procs[jobid].config['execution']['local_hash_check']): - logger.debug('checking hash locally') - try: - hash_exists, _, _, _ = self.procs[ - jobid].hash_exists() - logger.debug('Hash exists %s' % str(hash_exists)) - if (hash_exists and (self.procs[jobid].overwrite == False or - (self.procs[jobid].overwrite == None and - not self.procs[jobid]._interface.always_run))): - self._task_finished_cb(jobid) - self._remove_node_dirs() - continue - except Exception: - etype, eval, etr = sys.exc_info() - traceback = format_exception(etype, eval, etr) - report_crash(self.procs[jobid], traceback=traceback) - self._clean_queue(jobid, graph) - self.proc_pending[jobid] = False + # First expand mapnodes + if isinstance(self.procs[jobid], MapNode): + try: + num_subnodes = self.procs[jobid].num_subnodes() + except Exception: + traceback = format_exception(*sys.exc_info()) + self._clean_queue( + jobid, graph, + result={'result': None, 'traceback': traceback} + ) + self.proc_pending[jobid] = False + continue + if num_subnodes > 1: + submit = self._submit_mapnode(jobid) + if not submit: continue - logger.debug('Finished checking hash') - - if self.procs[jobid].run_without_submitting: - logger.debug('Running node %s on master thread' \ - % self.procs[jobid]) - try: - self.procs[jobid].run() - except Exception: - etype, eval, etr = sys.exc_info() - traceback = format_exception(etype, eval, etr) - report_crash(self.procs[jobid], traceback=traceback) - self._task_finished_cb(jobid) - self._remove_node_dirs() - - else: - logger.debug('MultiProcPlugin submitting %s' % str(jobid)) - tid = self._submit_job(deepcopy(self.procs[jobid]), - updatehash=updatehash) - if tid is None: - self.proc_done[jobid] = False - self.proc_pending[jobid] = False - else: - self.pending_tasks.insert(0, (tid, jobid)) + + # Check requirements of this job + next_job_gb = min(self.procs[jobid].mem_gb, self.memory_gb) + next_job_th = min(self.procs[jobid].n_procs, self.processors) + + # If node does not fit, 
skip at this moment + if next_job_th > free_processors or next_job_gb > free_memory_gb: + logger.debug('Cannot allocate job %d (%0.2fGB, %d threads).', + jobid, next_job_gb, next_job_th) + continue + + free_memory_gb -= next_job_gb + free_processors -= next_job_th + logger.debug('Allocating %s ID=%d (%0.2fGB, %d threads). Free: ' + '%0.2fGB, %d threads.', self.procs[jobid].fullname, + jobid, next_job_gb, next_job_th, free_memory_gb, + free_processors) + + # change job status in appropriate queues + self.proc_done[jobid] = True + self.proc_pending[jobid] = True + + # If cached just retrieve it, don't run + if self._local_hash_check(jobid, graph): + continue + + if self.procs[jobid].run_without_submitting: + logger.debug('Running node %s on master thread', + self.procs[jobid]) + try: + self.procs[jobid].run(updatehash=updatehash) + except Exception: + traceback = format_exception(*sys.exc_info()) + self._clean_queue( + jobid, graph, + result={'result': None, 'traceback': traceback} + ) + + # Release resources + self._task_finished_cb(jobid) + self._remove_node_dirs() + free_memory_gb += next_job_gb + free_processors += next_job_th + # Display stats next loop + self._stats = None + + # Clean up any debris from running node in main process + gc.collect() + continue + + # Task should be submitted to workers + # Send job to task manager and add to pending tasks + if self._status_callback: + self._status_callback(self.procs[jobid], 'start') + tid = self._submit_job(deepcopy(self.procs[jobid]), + updatehash=updatehash) + if tid is None: + self.proc_done[jobid] = False + self.proc_pending[jobid] = False else: - break + self.pending_tasks.insert(0, (tid, jobid)) + # Display stats next loop + self._stats = None + + def _sort_jobs(self, jobids, scheduler='tsort'): + if scheduler == 'mem_thread': + return sorted(jobids, key=lambda item: ( + self.procs[item].mem_gb, self.procs[item].n_procs)) + return jobids diff --git a/nipype/pipeline/plugins/oar.py 
b/nipype/pipeline/plugins/oar.py index ca77fade1e..e3f5ef7947 100644 --- a/nipype/pipeline/plugins/oar.py +++ b/nipype/pipeline/plugins/oar.py @@ -10,9 +10,10 @@ import subprocess import simplejson as json -from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) +from ... import logging from ...interfaces.base import CommandLine - +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('interface') class OARPlugin(SGELikeBatchManagerBase): """Execute using OAR @@ -68,6 +69,7 @@ def _is_pending(self, taskid): def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('oarsub', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') path = os.path.dirname(scriptfile) oarsubargs = '' diff --git a/nipype/pipeline/plugins/pbs.py b/nipype/pipeline/plugins/pbs.py index 5288bb36cb..6154abad74 100644 --- a/nipype/pipeline/plugins/pbs.py +++ b/nipype/pipeline/plugins/pbs.py @@ -7,9 +7,11 @@ import os from time import sleep +from ... 
import logging from ...interfaces.base import CommandLine -from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('interface') class PBSPlugin(SGELikeBatchManagerBase): @@ -48,6 +50,7 @@ def _is_pending(self, taskid): result = CommandLine('qstat {}'.format(taskid), environ=dict(os.environ), terminal_output='allatonce', + resource_monitor=False, ignore_exception=True).run() stderr = result.runtime.stderr errmsg = 'Unknown Job Id' # %s' % taskid @@ -59,6 +62,7 @@ def _is_pending(self, taskid): def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('qsub', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') path = os.path.dirname(scriptfile) qsubargs = '' diff --git a/nipype/pipeline/plugins/pbsgraph.py b/nipype/pipeline/plugins/pbsgraph.py index 1aafd24e37..719b82578c 100644 --- a/nipype/pipeline/plugins/pbsgraph.py +++ b/nipype/pipeline/plugins/pbsgraph.py @@ -55,6 +55,7 @@ def _submit_graph(self, pyfiles, dependencies, nodes): qsub_args, batchscriptfile)) cmd = CommandLine('sh', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') cmd.inputs.args = '%s' % submitjobsfile cmd.run() diff --git a/nipype/pipeline/plugins/sge.py b/nipype/pipeline/plugins/sge.py index 268fecf2a9..42aa4bc915 100644 --- a/nipype/pipeline/plugins/sge.py +++ b/nipype/pipeline/plugins/sge.py @@ -15,9 +15,10 @@ import random +from ... import logging from ...interfaces.base import CommandLine -from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) - +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('interface') DEBUGGING_PREFIX = str(int(random.uniform(100, 999))) @@ -312,9 +313,9 @@ def qsub_sanitize_job_name(testjobname): Numbers and punctuation are not allowed. 
- >>> qsub_sanitize_job_name('01') # doctest: +ALLOW_UNICODE + >>> qsub_sanitize_job_name('01') 'J01' - >>> qsub_sanitize_job_name('a01') # doctest: +ALLOW_UNICODE + >>> qsub_sanitize_job_name('a01') 'a01' """ if testjobname[0].isalpha(): @@ -364,6 +365,7 @@ def _is_pending(self, taskid): def _submit_batchtask(self, scriptfile, node): cmd = CommandLine('qsub', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') path = os.path.dirname(scriptfile) qsubargs = '' diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py index dd4b8076e8..882c455450 100644 --- a/nipype/pipeline/plugins/sgegraph.py +++ b/nipype/pipeline/plugins/sgegraph.py @@ -147,6 +147,7 @@ def make_job_name(jobnumber, nodeslist): batchscript=batchscriptfile) fp.writelines(full_line) cmd = CommandLine('bash', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') cmd.inputs.args = '%s' % submitjobsfile cmd.run() diff --git a/nipype/pipeline/plugins/slurm.py b/nipype/pipeline/plugins/slurm.py index 3f83772f6a..e5b797da5d 100644 --- a/nipype/pipeline/plugins/slurm.py +++ b/nipype/pipeline/plugins/slurm.py @@ -12,9 +12,11 @@ import re from time import sleep +from ... 
import logging from ...interfaces.base import CommandLine -from .base import (SGELikeBatchManagerBase, logger, iflogger, logging) +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('interface') class SLURMPlugin(SGELikeBatchManagerBase): @@ -62,6 +64,7 @@ def _is_pending(self, taskid): # subprocess.Popen requires taskid to be a string res = CommandLine('squeue', args=' '.join(['-j', '%s' % taskid]), + resource_monitor=False, terminal_output='allatonce').run() return res.runtime.stdout.find(str(taskid)) > -1 @@ -72,6 +75,7 @@ def _submit_batchtask(self, scriptfile, node): formatting/processing """ cmd = CommandLine('sbatch', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') path = os.path.dirname(scriptfile) diff --git a/nipype/pipeline/plugins/slurmgraph.py b/nipype/pipeline/plugins/slurmgraph.py index 794a35bc84..ed571ecffe 100644 --- a/nipype/pipeline/plugins/slurmgraph.py +++ b/nipype/pipeline/plugins/slurmgraph.py @@ -146,6 +146,7 @@ def make_job_name(jobnumber, nodeslist): batchscript=batchscriptfile) fp.writelines(full_line) cmd = CommandLine('bash', environ=dict(os.environ), + resource_monitor=False, terminal_output='allatonce') cmd.inputs.args = '%s' % submitjobsfile cmd.run() diff --git a/nipype/pipeline/plugins/tests/test_base.py b/nipype/pipeline/plugins/tests/test_base.py index 82a2a4480a..f8838e691a 100644 --- a/nipype/pipeline/plugins/tests/test_base.py +++ b/nipype/pipeline/plugins/tests/test_base.py @@ -5,12 +5,6 @@ """ import numpy as np import scipy.sparse as ssp -import re - -import mock - -import nipype.pipeline.plugins.base as pb - def test_scipy_sparse(): foo = ssp.lil_matrix(np.eye(3, k=1)) @@ -18,26 +12,6 @@ def test_scipy_sparse(): goo[goo.nonzero()] = 0 assert foo[0, 1] == 0 -def test_report_crash(): - with mock.patch('pickle.dump', mock.MagicMock()) as mock_pickle_dump: - with mock.patch('nipype.pipeline.plugins.base.format_exception', mock.MagicMock()): # see iss 1517 - 
mock_pickle_dump.return_value = True - mock_node = mock.MagicMock(name='mock_node') - mock_node._id = 'an_id' - mock_node.config = { - 'execution' : { - 'crashdump_dir' : '.', - 'crashfile_format' : 'pklz', - } - } - - actual_crashfile = pb.report_crash(mock_node) - - expected_crashfile = re.compile('.*/crash-.*-an_id-[0-9a-f\-]*.pklz') - - assert expected_crashfile.match(actual_crashfile).group() == actual_crashfile - assert mock_pickle_dump.call_count == 1 - ''' Can use the following code to test that a mapnode crash continues successfully Need to put this into a nose-test with a timeout diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py index 822d13c5a9..7212ff7302 100644 --- a/nipype/pipeline/plugins/tests/test_callback.py +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -31,8 +31,10 @@ def callback(self, node, status, result=None): def test_callback_normal(tmpdir): + tmpdir.chdir() + so = Status() - wf = pe.Workflow(name='test', base_dir=str(tmpdir)) + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) f_node = pe.Node(niu.Function(function=func, input_names=[], output_names=[]), name='f_node') @@ -47,8 +49,10 @@ def test_callback_normal(tmpdir): def test_callback_exception(tmpdir): + tmpdir.chdir() + so = Status() - wf = pe.Workflow(name='test', base_dir=str(tmpdir)) + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) f_node = pe.Node(niu.Function(function=bad_func, input_names=[], output_names=[]), name='f_node') @@ -65,8 +69,10 @@ def test_callback_exception(tmpdir): assert so.statuses[1][1] == 'exception' def test_callback_multiproc_normal(tmpdir): + tmpdir.chdir() + so = Status() - wf = pe.Workflow(name='test', base_dir=str(tmpdir)) + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) f_node = pe.Node(niu.Function(function=func, input_names=[], output_names=[]), name='f_node') @@ -81,13 +87,16 @@ def test_callback_multiproc_normal(tmpdir): assert so.statuses[1][1] == 'end' def 
test_callback_multiproc_exception(tmpdir): + tmpdir.chdir() + so = Status() - wf = pe.Workflow(name='test', base_dir=str(tmpdir)) + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) f_node = pe.Node(niu.Function(function=bad_func, input_names=[], output_names=[]), name='f_node') wf.add_nodes([f_node]) - wf.config['execution']['crashdump_dir'] = wf.base_dir + wf.config['execution'] = {'crashdump_dir': wf.base_dir} + try: wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) diff --git a/nipype/pipeline/plugins/tests/test_debug.py b/nipype/pipeline/plugins/tests/test_debug.py index 2bd2003492..e7997ba7f0 100644 --- a/nipype/pipeline/plugins/tests/test_debug.py +++ b/nipype/pipeline/plugins/tests/test_debug.py @@ -34,21 +34,24 @@ def callme(node, graph): def test_debug(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=DebugTestInterface(), name='mod1') - mod2 = pe.MapNode(interface=DebugTestInterface(), - iterfield=['input1'], + mod1 = pe.Node(DebugTestInterface(), name='mod1') + mod2 = pe.MapNode(DebugTestInterface(), iterfield=['input1'], name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) pipe.base_dir = os.getcwd() mod1.inputs.input1 = 1 + run_wf = lambda: pipe.run(plugin="Debug") with pytest.raises(ValueError): run_wf() + + exc = None try: pipe.run(plugin="Debug", plugin_args={'callable': callme}) - exception_raised = False - except Exception: - exception_raised = True - assert not exception_raised + except Exception as e: + exc = e + + assert exc is None, 'unexpected exception caught' diff --git a/nipype/pipeline/plugins/tests/test_linear.py b/nipype/pipeline/plugins/tests/test_linear.py index e4df3f7db3..afb916f6eb 100644 --- a/nipype/pipeline/plugins/tests/test_linear.py +++ b/nipype/pipeline/plugins/tests/test_linear.py @@ -29,7 +29,7 @@ def _list_outputs(self): def test_run_in_series(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() pipe = 
pe.Workflow(name='pipe') mod1 = pe.Node(interface=LinearTestInterface(), name='mod1') @@ -41,6 +41,6 @@ def test_run_in_series(tmpdir): mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="Linear") names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] - node = execgraph.nodes()[names.index('pipe.mod1')] + node = list(execgraph.nodes())[names.index('pipe.mod1')] result = node.get_output('output1') assert result == [1, 1] diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py index 20ea72a929..780763405c 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc.py +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- -import logging -import os, sys -from multiprocessing import cpu_count - -import nipype.interfaces.base as nib -from nipype.utils import draw_gantt_chart +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Test the resource management of MultiProc +""" +import os import pytest -import nipype.pipeline.engine as pe -from nipype.pipeline.plugins.callback_log import log_nodes_cb -from nipype.pipeline.plugins.multiproc import get_system_total_memory_gb +from nipype.pipeline import engine as pe +from nipype.interfaces import base as nib + class InputSpec(nib.TraitedSpec): input1 = nib.traits.Int(desc='a random int') @@ -32,12 +32,13 @@ def _list_outputs(self): outputs['output1'] = [1, self.inputs.input1] return outputs + def test_run_multiproc(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() pipe = pe.Workflow(name='pipe') - mod1 = pe.Node(interface=MultiprocTestInterface(), name='mod1') - mod2 = pe.MapNode(interface=MultiprocTestInterface(), + mod1 = pe.Node(MultiprocTestInterface(), name='mod1') + mod2 = pe.MapNode(MultiprocTestInterface(), iterfield=['input1'], name='mod2') pipe.connect([(mod1, mod2, [('output1', 'input1')])]) @@ -45,8 +46,8 @@ def 
test_run_multiproc(tmpdir): mod1.inputs.input1 = 1 pipe.config['execution']['poll_sleep_duration'] = 2 execgraph = pipe.run(plugin="MultiProc") - names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] - node = execgraph.nodes()[names.index('pipe.mod1')] + names = [node.fullname for node in execgraph.nodes()] + node = list(execgraph.nodes())[names.index('pipe.mod1')] result = node.get_output('output1') assert result == [1, 1] @@ -74,67 +75,13 @@ def _list_outputs(self): return outputs -def find_metrics(nodes, last_node): - """ - """ - - # Import packages - from dateutil.parser import parse - import datetime - - start = nodes[0]['start'] - total_duration = int((last_node['finish'] - start).total_seconds()) - - total_memory = [] - total_threads = [] - for i in range(total_duration): - total_memory.append(0) - total_threads.append(0) - - now = start - for i in range(total_duration): - start_index = 0 - node_start = None - node_finish = None - - x = now - - for j in range(start_index, len(nodes)): - node_start = nodes[j]['start'] - node_finish = nodes[j]['finish'] - - if node_start < x and node_finish > x: - total_memory[i] += float(nodes[j]['estimated_memory_gb']) - total_threads[i] += int(nodes[j]['num_threads']) - start_index = j - - if node_start > x: - break - - now += datetime.timedelta(seconds=1) - - return total_memory, total_threads - -def test_no_more_memory_than_specified(): - LOG_FILENAME = 'callback.log' - my_logger = logging.getLogger('callback') - my_logger.setLevel(logging.DEBUG) - - # Add the log message handler to the logger - handler = logging.FileHandler(LOG_FILENAME) - my_logger.addHandler(handler) - - max_memory = 1 +def test_no_more_memory_than_specified(tmpdir): + tmpdir.chdir() pipe = pe.Workflow(name='pipe') - n1 = pe.Node(interface=SingleNodeTestInterface(), name='n1') - n2 = pe.Node(interface=SingleNodeTestInterface(), name='n2') - n3 = pe.Node(interface=SingleNodeTestInterface(), name='n3') - n4 = 
pe.Node(interface=SingleNodeTestInterface(), name='n4') - - n1.interface.estimated_memory_gb = 1 - n2.interface.estimated_memory_gb = 1 - n3.interface.estimated_memory_gb = 1 - n4.interface.estimated_memory_gb = 1 + n1 = pe.Node(SingleNodeTestInterface(), name='n1', mem_gb=1) + n2 = pe.Node(SingleNodeTestInterface(), name='n2', mem_gb=1) + n3 = pe.Node(SingleNodeTestInterface(), name='n3', mem_gb=1) + n4 = pe.Node(SingleNodeTestInterface(), name='n4', mem_gb=1) pipe.connect(n1, 'output1', n2, 'input1') pipe.connect(n1, 'output1', n3, 'input1') @@ -142,87 +89,49 @@ def test_no_more_memory_than_specified(): pipe.connect(n3, 'output1', n4, 'input2') n1.inputs.input1 = 1 - pipe.run(plugin='MultiProc', - plugin_args={'memory_gb': max_memory, - 'status_callback': log_nodes_cb}) - + max_memory = 0.5 + with pytest.raises(RuntimeError): + pipe.run(plugin='MultiProc', + plugin_args={'memory_gb': max_memory, + 'n_procs': 2}) - nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) - last_node = nodes[-1] - #usage in every second - memory, threads = find_metrics(nodes, last_node) - result = True - for m in memory: - if m > max_memory: - result = False - break +def test_no_more_threads_than_specified(tmpdir): + tmpdir.chdir() - assert result - - max_threads = cpu_count() - - result = True - for t in threads: - if t > max_threads: - result = False - break + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(SingleNodeTestInterface(), name='n1', n_procs=2) + n2 = pe.Node(SingleNodeTestInterface(), name='n2', n_procs=2) + n3 = pe.Node(SingleNodeTestInterface(), name='n3', n_procs=4) + n4 = pe.Node(SingleNodeTestInterface(), name='n4', n_procs=2) - assert result,\ - "using more threads than system has (threads is not specified by user)" + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 4 - os.remove(LOG_FILENAME) + max_threads = 2 + with 
pytest.raises(RuntimeError): + pipe.run(plugin='MultiProc', + plugin_args={'n_procs': max_threads}) -def test_no_more_threads_than_specified(): - LOG_FILENAME = 'callback.log' - my_logger = logging.getLogger('callback') - my_logger.setLevel(logging.DEBUG) - # Add the log message handler to the logger - handler = logging.FileHandler(LOG_FILENAME) - my_logger.addHandler(handler) +def test_hold_job_until_procs_available(tmpdir): + tmpdir.chdir() - max_threads = 4 pipe = pe.Workflow(name='pipe') - n1 = pe.Node(interface=SingleNodeTestInterface(), name='n1') - n2 = pe.Node(interface=SingleNodeTestInterface(), name='n2') - n3 = pe.Node(interface=SingleNodeTestInterface(), name='n3') - n4 = pe.Node(interface=SingleNodeTestInterface(), name='n4') - - n1.interface.num_threads = 1 - n2.interface.num_threads = 1 - n3.interface.num_threads = 4 - n4.interface.num_threads = 1 + n1 = pe.Node(SingleNodeTestInterface(), name='n1', n_procs=2) + n2 = pe.Node(SingleNodeTestInterface(), name='n2', n_procs=2) + n3 = pe.Node(SingleNodeTestInterface(), name='n3', n_procs=2) + n4 = pe.Node(SingleNodeTestInterface(), name='n4', n_procs=2) pipe.connect(n1, 'output1', n2, 'input1') pipe.connect(n1, 'output1', n3, 'input1') pipe.connect(n2, 'output1', n4, 'input1') pipe.connect(n3, 'output1', n4, 'input2') n1.inputs.input1 = 4 - pipe.config['execution']['poll_sleep_duration'] = 1 - pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads, - 'status_callback': log_nodes_cb}) - - nodes = draw_gantt_chart.log_to_dict(LOG_FILENAME) - last_node = nodes[-1] - #usage in every second - memory, threads = find_metrics(nodes, last_node) - - result = True - for t in threads: - if t > max_threads: - result = False - break - - assert result, "using more threads than specified" - - max_memory = get_system_total_memory_gb() - result = True - for m in memory: - if m > max_memory: - result = False - break - assert result,\ - "using more memory than system has (memory is not specified by user)" - - 
os.remove(LOG_FILENAME) + + max_threads = 2 + pipe.run(plugin='MultiProc', + plugin_args={'n_procs': max_threads}) diff --git a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py index f8dd22ed66..7112aa2448 100644 --- a/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py +++ b/nipype/pipeline/plugins/tests/test_multiproc_nondaemon.py @@ -123,7 +123,7 @@ def run_multiproc_nondaemon_with_flag(nondaemon_flag): 'non_daemon': nondaemon_flag}) names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] - node = execgraph.nodes()[names.index('pipe.f2')] + node = list(execgraph.nodes())[names.index('pipe.f2')] result = node.get_output('sum_out') os.chdir(cur_dir) rmtree(temp_dir) diff --git a/nipype/pipeline/plugins/tests/test_oar.py b/nipype/pipeline/plugins/tests/test_oar.py index 68dc98c344..181aff0f6f 100644 --- a/nipype/pipeline/plugins/tests/test_oar.py +++ b/nipype/pipeline/plugins/tests/test_oar.py @@ -50,7 +50,7 @@ def test_run_oar(): '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() ] - node = execgraph.nodes()[names.index('pipe.mod1')] + node = list(execgraph.nodes())[names.index('pipe.mod1')] result = node.get_output('output1') assert result == [1, 1] os.chdir(cur_dir) diff --git a/nipype/pipeline/plugins/tests/test_pbs.py b/nipype/pipeline/plugins/tests/test_pbs.py index d7b5a83528..719ffbfc72 100644 --- a/nipype/pipeline/plugins/tests/test_pbs.py +++ b/nipype/pipeline/plugins/tests/test_pbs.py @@ -48,7 +48,7 @@ def test_run_pbsgraph(): mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="PBSGraph") names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] - node = execgraph.nodes()[names.index('pipe.mod1')] + node = list(execgraph.nodes())[names.index('pipe.mod1')] result = node.get_output('output1') assert result == [1, 1] os.chdir(cur_dir) diff --git a/nipype/pipeline/plugins/tests/test_somaflow.py 
b/nipype/pipeline/plugins/tests/test_somaflow.py index f8309bf826..7449d0d3ae 100644 --- a/nipype/pipeline/plugins/tests/test_somaflow.py +++ b/nipype/pipeline/plugins/tests/test_somaflow.py @@ -34,7 +34,7 @@ def _list_outputs(self): @pytest.mark.skipif(soma_not_loaded, reason="soma not loaded") def test_run_somaflow(tmpdir): - os.chdir(str(tmpdir)) + tmpdir.chdir() pipe = pe.Workflow(name='pipe') mod1 = pe.Node(interface=SomaTestInterface(), name='mod1') @@ -46,6 +46,6 @@ def test_run_somaflow(tmpdir): mod1.inputs.input1 = 1 execgraph = pipe.run(plugin="SomaFlow") names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()] - node = execgraph.nodes()[names.index('pipe.mod1')] + node = list(execgraph.nodes())[names.index('pipe.mod1')] result = node.get_output('output1') assert result == [1, 1] diff --git a/nipype/pipeline/plugins/tests/test_tools.py b/nipype/pipeline/plugins/tests/test_tools.py new file mode 100644 index 0000000000..479cc773df --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_tools.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for the engine module +""" +import numpy as np +import scipy.sparse as ssp +import re + +import mock + +from nipype.pipeline.plugins.tools import report_crash + +def test_report_crash(): + with mock.patch('pickle.dump', mock.MagicMock()) as mock_pickle_dump: + with mock.patch('nipype.pipeline.plugins.tools.format_exception', mock.MagicMock()): # see iss 1517 + mock_pickle_dump.return_value = True + mock_node = mock.MagicMock(name='mock_node') + mock_node._id = 'an_id' + mock_node.config = { + 'execution' : { + 'crashdump_dir' : '.', + 'crashfile_format' : 'pklz', + } + } + + actual_crashfile = report_crash(mock_node) + + expected_crashfile = re.compile('.*/crash-.*-an_id-[0-9a-f\-]*.pklz') + + assert expected_crashfile.match(actual_crashfile).group() == actual_crashfile + assert 
mock_pickle_dump.call_count == 1 + +''' +Can use the following code to test that a mapnode crash continues successfully +Need to put this into a nose-test with a timeout + +import nipype.interfaces.utility as niu +import nipype.pipeline.engine as pe + +wf = pe.Workflow(name='test') + +def func(arg1): + if arg1 == 2: + raise Exception('arg cannot be ' + str(arg1)) + return arg1 + +funkynode = pe.MapNode(niu.Function(function=func, input_names=['arg1'], output_names=['out']), + iterfield=['arg1'], + name = 'functor') +funkynode.inputs.arg1 = [1,2] + +wf.add_nodes([funkynode]) +wf.base_dir = '/tmp' + +wf.run(plugin='MultiProc') +''' diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py new file mode 100644 index 0000000000..c07a8966b6 --- /dev/null +++ b/nipype/pipeline/plugins/tools.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Common graph operations for execution +""" +from __future__ import print_function, division, unicode_literals, absolute_import +from builtins import open + +import os +import getpass +from socket import gethostname +import sys +import uuid +from time import strftime +from traceback import format_exception + +from ... 
import logging +from ...utils.filemanip import savepkl, crash2txt, makedirs + +logger = logging.getLogger('workflow') + + +def report_crash(node, traceback=None, hostname=None): + """Writes crash related information to a file + """ + name = node._id + host = None + if node.result and getattr(node.result, 'runtime'): + if isinstance(node.result.runtime, list): + host = node.result.runtime[0].hostname + else: + host = node.result.runtime.hostname + + # Try everything to fill in the host + host = host or hostname or gethostname() + logger.error('Node %s failed to run on host %s.', name, host) + if not traceback: + traceback = format_exception(*sys.exc_info()) + timeofcrash = strftime('%Y%m%d-%H%M%S') + login_name = getpass.getuser() + crashfile = 'crash-%s-%s-%s-%s' % ( + timeofcrash, login_name, name, str(uuid.uuid4())) + crashdir = node.config['execution'].get('crashdump_dir', os.getcwd()) + + makedirs(crashdir, exist_ok=True) + crashfile = os.path.join(crashdir, crashfile) + + if node.config['execution']['crashfile_format'].lower() in ['text', 'txt']: + crashfile += '.txt' + else: + crashfile += '.pklz' + + logger.error('Saving crash info to %s\n%s', crashfile, ''.join(traceback)) + if crashfile.endswith('.txt'): + crash2txt(crashfile, dict(node=node, traceback=traceback)) + else: + savepkl(crashfile, dict(node=node, traceback=traceback)) + return crashfile + + +def report_nodes_not_run(notrun): + """List nodes that crashed with crashfile info + + Optionally displays dependent nodes that weren't executed as a result of + the crash. 
+ """ + if notrun: + logger.info("***********************************") + for info in notrun: + logger.error("could not run node: %s" % + '.'.join((info['node']._hierarchy, + info['node']._id))) + logger.info("crashfile: %s" % info['crashfile']) + logger.debug("The following dependent nodes were not run") + for subnode in info['dependents']: + logger.debug(subnode._id) + logger.info("***********************************") + raise RuntimeError(('Workflow did not execute cleanly. ' + 'Check log for details')) + + +def create_pyscript(node, updatehash=False, store_exception=True): + # pickle node + timestamp = strftime('%Y%m%d_%H%M%S') + if node._hierarchy: + suffix = '%s_%s_%s' % (timestamp, node._hierarchy, node._id) + batch_dir = os.path.join(node.base_dir, + node._hierarchy.split('.')[0], + 'batch') + else: + suffix = '%s_%s' % (timestamp, node._id) + batch_dir = os.path.join(node.base_dir, 'batch') + if not os.path.exists(batch_dir): + os.makedirs(batch_dir) + pkl_file = os.path.join(batch_dir, 'node_%s.pklz' % suffix) + savepkl(pkl_file, dict(node=node, updatehash=updatehash)) + mpl_backend = node.config["execution"]["matplotlib_backend"] + # create python script to load and trap exception + cmdstr = """import os +import sys + +can_import_matplotlib = True #Silently allow matplotlib to be ignored +try: + import matplotlib + matplotlib.use('%s') +except ImportError: + can_import_matplotlib = False + pass + +from nipype import config, logging +from nipype.utils.filemanip import loadpkl, savepkl +from socket import gethostname +from traceback import format_exception +info = None +pklfile = '%s' +batchdir = '%s' +from nipype.utils.filemanip import loadpkl, savepkl +try: + if not sys.version_info < (2, 7): + from collections import OrderedDict + config_dict=%s + config.update_config(config_dict) + ## Only configure matplotlib if it was successfully imported, + ## matplotlib is an optional component to nipype + if can_import_matplotlib: + config.update_matplotlib() + 
logging.update_logging(config) + traceback=None + cwd = os.getcwd() + info = loadpkl(pklfile) + result = info['node'].run(updatehash=info['updatehash']) +except Exception as e: + etype, eval, etr = sys.exc_info() + traceback = format_exception(etype,eval,etr) + if info is None or not os.path.exists(info['node'].output_dir()): + result = None + resultsfile = os.path.join(batchdir, 'crashdump_%s.pklz') + else: + result = info['node'].result + resultsfile = os.path.join(info['node'].output_dir(), + 'result_%%s.pklz'%%info['node'].name) +""" + if store_exception: + cmdstr += """ + savepkl(resultsfile, dict(result=result, hostname=gethostname(), + traceback=traceback)) +""" + else: + cmdstr += """ + if info is None: + savepkl(resultsfile, dict(result=result, hostname=gethostname(), + traceback=traceback)) + else: + from nipype.pipeline.plugins.base import report_crash + report_crash(info['node'], traceback, gethostname()) + raise Exception(e) +""" + cmdstr = cmdstr % (mpl_backend, pkl_file, batch_dir, node.config, suffix) + pyscript = os.path.join(batch_dir, 'pyscript_%s.py' % suffix) + with open(pyscript, 'wt') as fp: + fp.writelines(cmdstr) + return pyscript diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py index 2adb7ecba7..f1323750a1 100644 --- a/nipype/pkg_info.py +++ b/nipype/pkg_info.py @@ -3,14 +3,13 @@ from future import standard_library standard_library.install_aliases() +from builtins import open import configparser import os import sys import subprocess -from .info import VERSION - COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' PY3 = sys.version_info[0] >= 3 @@ -52,7 +51,8 @@ def pkg_commit_hash(pkg_path): cfg_parser = configparser.RawConfigParser() else: cfg_parser = configparser.ConfigParser() - cfg_parser.read(pth) + with open(pth, encoding='utf-8') as fp: + cfg_parser.readfp(fp) archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') if not archive_subst.startswith('$Format'): # it has been substituted return 'archive substitution', archive_subst 
@@ -86,6 +86,9 @@ def get_pkg_info(pkg_path): with named parameters of interest ''' src, hsh = pkg_commit_hash(pkg_path) + from .info import VERSION + if not PY3: + src, hsh, VERSION = src.encode(), hsh.encode(), VERSION.encode() import networkx import nibabel import numpy diff --git a/nipype/pytest.ini b/nipype/pytest.ini new file mode 100644 index 0000000000..835b6381c9 --- /dev/null +++ b/nipype/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +norecursedirs = .git build dist doc nipype/external tools examples src +addopts = --doctest-modules +doctest_optionflags = ALLOW_UNICODE NORMALIZE_WHITESPACE diff --git a/nipype/refs.py b/nipype/refs.py index 12e435316e..3b4d394136 100644 --- a/nipype/refs.py +++ b/nipype/refs.py @@ -1,4 +1,3 @@ - # Use duecredit (duecredit.org) to provide a citation to relevant work to # be cited. This does nothing, unless the user has duecredit installed, # And calls this with duecredit (as in `python -m duecredit script.py`): diff --git a/nipype/scripts/utils.py b/nipype/scripts/utils.py index bab1bdf899..e35f4d464e 100644 --- a/nipype/scripts/utils.py +++ b/nipype/scripts/utils.py @@ -71,7 +71,6 @@ def add_args_options(arg_parser, interface): args["default"] = getattr(inputs, name) args["action"] = 'store_true' - print(name, spec.trait_type) # current support is for simple trait types if not spec.inner_traits: if not spec.is_trait_type(traits.TraitCompound): diff --git a/nipype/testing/data/Fred+orig b/nipype/testing/data/Fred+orig new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/NWARP b/nipype/testing/data/NWARP new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/Q25_warp+tlrc.HEAD b/nipype/testing/data/Q25_warp+tlrc.HEAD new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/README b/nipype/testing/data/README index 825acd11e2..550854c57e 100644 --- a/nipype/testing/data/README +++ b/nipype/testing/data/README @@ -3,4 +3,4 @@ in the doctests of nipype. 
For verion 0.3 of nipype, we're using Traits and for input files, the code checks to confirm the assigned files actually exist. It doesn't matter what the files are, or even if they contain "real data", only that they exist. Again, these files -are only meant to serve as documentation in the doctests. \ No newline at end of file +are only meant to serve as documentation in the doctests. diff --git a/nipype/testing/data/bedpostxout/do_not_delete.txt b/nipype/testing/data/bedpostxout/do_not_delete.txt index a1df420e34..9c5c450dfa 100644 --- a/nipype/testing/data/bedpostxout/do_not_delete.txt +++ b/nipype/testing/data/bedpostxout/do_not_delete.txt @@ -1 +1 @@ -This file has to be here because git ignores empty folders. \ No newline at end of file +This file has to be here because git ignores empty folders. diff --git a/nipype/testing/data/ds005/filler.txt b/nipype/testing/data/ds005/filler.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/f1.1D b/nipype/testing/data/f1.1D new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/f2.1D b/nipype/testing/data/f2.1D new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/fmri_timeseries_nolabels.csv b/nipype/testing/data/fmri_timeseries_nolabels.csv index 78df6fbd0b..c0fed6c90f 100644 --- a/nipype/testing/data/fmri_timeseries_nolabels.csv +++ b/nipype/testing/data/fmri_timeseries_nolabels.csv @@ -1 +1 @@ -10125.9,10112.8,9219.5,-7.39443,-8.74936,7.28395,13.7953,32.2328,32.4809,18.958,-12.2383,-6.86466,-23.0912,-16.425,-5.70842,11.2467,-1.58574,-4.53717,-17.3842,0.912601,13.0428,2.44622,2.08875,-8.74373,-9.47217,-6.87574,-8.11158,-14.54,0.414787,6.04424,0.540389 10136.8,10115.1,9222.54,-0.120582,-1.94906,6.92247,4.75197,11.0735,0.972766,10.2285,0.717545,-1.04488,-7.64424,-2.10875,-2.44368,1.52535,-1.14131,-1.72589,-1.1247,-0.993354,2.98318,1.29855,2.0688,1.00297,0.135373,-3.25325,-3.12065,0.913296,-1.7868,1.58829,-0.735248 
10148,10122.2,9228.62,4.24336,-0.689111,5.12782,0.132862,-6.64526,-14.7952,5.19361,3.68198,2.77598,-0.691866,1.07559,1.71444,-1.30287,-2.75746,1.74208,4.75944,1.80799,-0.064464,2.37174,1.09905,3.5756,2.98064,-0.238711,0.822007,5.07188,-0.864496,-0.208741,-1.31367 10156.6,10132.2,9236.11,-0.047434,-1.79438,-0.767925,-3.78683,-2.46365,-12.9433,2.00586,-0.48292,1.16216,0.113706,-0.639879,-0.0445654,-2.82995,-2.22008,1.46544,3.70217,2.84476,-3.32792,6.701,0.982599,0.145487,0.0501163,-1.16747,-0.630382,-0.0550437,-0.0563951,0.0449386,-0.715988 10162.9,10141.8,9243.46,-0.3687,0.640608,-2.93969,-0.37466,-5.42813,-8.55527,-4.70566,-3.62351,-3.94857,0.847112,0.357187,1.39279,-3.07124,0.779726,5.12671,3.62277,2.86265,3.44378,5.49842,0.895482,-2.1777,0.14728,-0.491475,-0.0257423,-0.32504,2.28464,-0.610659,2.01955 10168.7,10149.5,9249.62,-0.272231,3.00751,-2.20783,-5.50238,-1.65733,-2.39574,-6.82249,-1.5591,-5.38806,-0.315138,2.41171,-0.227563,-0.306796,1.26618,4.45885,3.55662,3.14737,-0.0497907,2.76691,1.04757,-2.50276,3.25334,1.90194,3.54754,3.2308,0.393197,0.115407,1.88919 10175.3,10155.8,9253.09,0.271133,3.11725,-1.24188,-5.32432,6.94595,5.40219,2.63329,1.77742,-0.434798,3.20784,3.1926,-2.12653,1.4207,-0.162939,1.57116,1.20026,2.14004,-4.36978,-0.074248,0.344989,-2.79157,3.57441,2.795,6.81971,4.61981,-3.15395,-0.556388,-0.951462 10181,10160.9,9253.62,-1.52186,-1.02665,-1.31765,-8.89055,1.45638,-6.40533,-8.20284,3.42071,6.34151,7.32703,2.81444,-5.56924,-2.07761,-2.82472,1.75969,1.56549,2.59032,-4.99642,-0.861721,0.661704,1.27294,4.24609,5.72265,7.93181,6.46356,-4.54558,-2.93302,-2.55741 10182,10163.1,9253.53,-4.12759,-5.01517,-1.383,-11.7032,7.03273,-0.354258,-4.14846,2.56836,5.49077,2.70724,-0.00938943,-7.91268,-3.33257,-3.77932,-2.70035,-1.95288,1.51899,-10.5021,0.604386,1.13765,2.8031,0.719838,5.10986,5.4321,3.01561,-5.05514,-2.51591,-2.29453 
10178.9,10161.7,9255.33,-2.09727,-3.23639,-0.971464,-6.47564,-1.86208,1.47429,-8.69004,2.23012,2.64935,4.20852,-0.00802028,-4.11236,-1.54808,-1.73414,-2.21966,-2.31888,0.521142,-4.49634,-1.66003,1.37105,1.47741,-1.17943,3.52554,2.31201,0.381259,-1.24137,-0.930002,-0.860505 10176.3,10158.2,9258.8,-2.87976,-1.16821,-1.15587,-7.36873,-2.70663,3.69409,-6.23946,3.17083,3.67683,5.95472,2.6739,-2.5798,1.61294,2.31642,-4.31408,-1.6647,-0.422612,-6.13843,-0.39141,1.92345,-2.82275,-0.742784,1.68164,-0.706688,-1.87652,0.172975,1.51911,1.04727 10176.2,10155.4,9261.93,-1.79655,0.511159,-2.91648,-1.19976,-6.01265,2.43062,-4.91165,1.64787,2.485,6.04132,2.79139,1.36683,2.36631,4.70105,-3.09068,-0.875835,-2.73203,-1.04036,0.0279962,0.57264,-4.70596,0.399049,0.109101,0.540718,-2.52779,1.90878,1.47212,2.48712 10177,10154.3,9263.36,-2.06935,1.47151,-1.59814,1.1621,-8.21806,2.74994,-4.8666,1.6535,2.86737,3.56179,1.87379,3.98852,2.20191,7.00018,-2.12026,-0.322149,-0.459427,1.99009,-0.386875,-1.65524,-2.88602,2.5405,3.09752,5.52644,1.72241,3.28467,2.06659,4.48929 10176.7,10153.6,9262.97,-2.47996,0.0736981,-1.18826,-1.40068,-2.38119,-1.33094,-3.87199,0.498621,1.31667,-0.952908,0.481976,0.0885501,1.11339,4.67043,-2.37383,-2.32579,0.991108,-0.25346,2.41941,-1.44295,0.0394728,1.67752,2.73018,4.10445,2.29859,0.993454,2.7469,3.39394 10174.9,10153,9261.77,-0.957748,-0.455644,0.885525,1.7746,0.0437147,0.878291,0.0855234,-0.572903,1.39546,0.00119098,1.69176,-1.96049,0.156938,2.84845,-1.18488,-2.65197,1.35428,1.98606,1.65427,-0.643756,-1.03602,-0.0406435,-0.236011,-0.961959,1.28125,-0.464305,1.75539,1.84618 10173.4,10153.5,9261.3,-0.583682,-0.792331,1.36077,0.644185,-3.55594,-0.618864,-4.88099,-0.136266,1.51362,2.73872,3.65897,-2.63062,0.416981,0.735765,0.533665,-0.326252,1.0146,2.83848,2.16063,2.30307,-2.01136,0.638055,-0.22921,-3.19692,0.947596,-0.379132,0.678065,0.747812 
10174.5,10155.7,9262.24,-0.685336,0.856591,-2.63545,-0.959601,3.25442,0.791955,-2.20612,0.263046,-1.34292,4.47114,2.99912,-2.56858,-0.21931,-1.56389,-0.808263,0.311028,-2.34261,-0.965718,1.98615,3.50723,-1.41951,-0.258476,-1.16227,-1.73014,0.372641,-0.118946,-0.422557,-1.3986 10179.6,10157.8,9264.01,2.59538,3.68921,-1.9033,3.99249,0.109215,-1.86778,-4.51336,0.591929,-1.29086,1.52475,1.01934,0.773735,0.0652847,-3.00075,1.79923,2.1369,-2.11635,3.17035,-1.87907,2.19309,0.880052,-0.480886,-1.94369,-0.204693,1.63785,1.43004,-2.081,-3.24652 10186.9,10157.6,9265.4,2.10402,4.02633,0.884264,0.1708,-3.27208,-4.9215,-1.0364,1.60796,1.70888,-1.43476,1.10519,1.26841,0.0627916,-2.97727,1.13683,2.82663,-0.301705,-0.592683,-3.81587,-0.70989,1.60855,0.103857,-2.48043,-1.22737,-0.312858,1.31617,-1.91269,-3.98886 10192.2,10155.4,9265.29,1.6824,4.26755,1.57687,1.43194,-5.98808,-2.25097,0.153789,0.168572,0.879003,1.68604,0.75956,3.65922,-0.869793,-2.49312,0.497574,2.41553,-1.34226,-0.127659,-3.59295,-1.56547,0.88849,-0.785242,-4.24845,-5.15572,-4.81836,2.77035,-1.44493,-3.44434 10193.6,10153.7,9263.38,1.6491,4.80854,1.08823,5.10222,-5.26833,5.52263,-0.997094,-0.959485,-1.52356,6.15147,0.897033,7.60472,-1.50848,-0.576994,0.845199,3.25263,-2.21353,2.36454,-2.11918,-0.480371,1.405,-1.24949,-1.88424,-5.50221,-4.39822,4.6832,-0.575266,-0.350337 10193.7,10153.5,9260.14,0.371243,3.4575,-0.922956,2.86612,3.70316,4.4652,-2.35097,-2.08567,-4.55866,2.05406,0.20181,5.48777,-0.851734,-0.932792,0.852325,2.66059,-2.76402,-0.836483,3.32512,2.58318,3.54953,-1.82575,1.03107,-3.58566,-4.1055,2.71087,0.64122,1.16036 10193.4,10154.1,9256.45,0.655998,2.95689,-0.961572,2.95967,6.90968,-0.0847335,-1.13659,-2.64581,-3.78971,-2.43015,-0.722449,3.08777,-0.234356,-0.603156,1.30068,1.14368,-2.23215,0.241084,3.91588,3.38796,4.07024,-1.08082,1.15617,-0.375163,-2.54369,1.29418,0.795869,1.31402 
10190.3,10152.8,9253.2,2.59279,1.93007,1.93861,4.82647,-1.84288,-5.84018,-7.03235,-2.16958,-0.8999,-4.4747,-1.99497,2.40008,0.0349671,-0.825783,2.00993,-0.184404,-0.576706,6.30193,1.43455,3.63536,2.34484,0.148851,-1.22127,-0.718508,-0.716753,1.50537,0.412978,0.73252 10185.2,10148.2,9250.73,1.88291,-0.127643,2.41457,0.38457,3.28565,2.40364,1.07674,-0.352091,-0.192694,-2.80281,-2.45121,-0.746935,0.454781,-0.345492,-2.38393,-2.35152,-0.468918,-0.28004,0.207449,2.6636,-1.39254,-2.09536,-4.44811,-4.48824,-2.93117,-0.770421,1.19,0.219788 10183,10142.2,9248.93,3.78484,0.701338,-0.71552,3.48407,0.454755,4.3743,3.68099,-0.668556,-3.42636,5.52772,-1.23863,-0.405148,0.665698,1.06479,-0.0251586,-0.48849,-0.847741,1.4814,-5.36764,-0.405219,-1.51485,-3.88226,-5.12764,-5.33767,-4.3365,-1.173,0.417418,0.415356 10185.4,10138.4,9247.93,3.11727,0.196163,-2.018,0.721283,-2.5075,-1.06349,0.331823,-1.2182,-4.01712,4.78444,0.452166,-2.16432,0.55673,1.61447,1.16718,1.44415,0.569846,-0.812131,-8.14324,-2.91296,2.43154,-1.45218,-0.730675,-1.0947,-2.25658,-3.52675,-0.361214,1.09266 10188,10139,9248.05,1.52249,-1.16117,-2.4591,-2.41492,-0.35832,-7.48161,-0.0490082,-2.1421,-3.52013,0.903896,-0.958215,-5.8036,-2.36788,-0.368615,-1.88998,-1.40466,-1.28791,-4.79995,-5.58563,-3.57656,4.13739,-0.274441,1.53352,2.93946,-1.96753,-6.76034,-1.87752,-0.324793 10186.8,10142.9,9249.23,2.29541,-0.414867,0.263844,-2.42527,-9.23597,-12.7958,-5.40665,-1.3296,-0.255947,1.05195,-3.09731,-3.83996,-4.40177,-0.0123634,-1.79533,-2.22933,-1.59891,-1.58539,-4.29444,-3.24283,2.73497,0.939395,2.25632,3.98042,0.672842,-4.87272,-3.0871,0.140664 10183.8,10146.3,9250.93,1.04007,-0.107056,-0.719832,-5.17314,-6.41206,-13.4527,-3.51115,-1.82372,-1.0661,0.164654,-4.87432,-3.16371,-3.16216,0.547311,-2.31938,-3.32366,-2.59406,-3.07878,1.07584,0.135595,-0.15385,-0.198986,-1.76614,-0.364142,-1.44816,-3.17832,-0.666637,0.539005 
10182.5,10148.1,9252.57,1.58315,0.552138,-2.38854,1.84879,-2.25441,-6.8381,0.208721,-2.73312,-3.19332,-2.49192,-4.21087,0.445019,0.0651566,2.67403,-0.780414,-2.43461,-3.10543,1.48742,-0.123359,0.0321366,-2.00728,-1.30717,-5.02137,-5.05394,-3.39985,-0.233706,2.10556,1.51466 10182.7,10149.6,9253.33,0.671616,-1.8801,-5.19861,1.6691,-0.386439,-6.73637,0.390118,-1.36276,-2.8229,-3.74619,-1.53148,0.15594,0.934737,1.96014,-1.35363,-0.924511,-3.00858,0.653744,-1.84706,-3.59509,-0.247233,0.962108,-1.40552,-3.28119,-2.22432,0.0626129,2.48273,0.969888 10182.9,10150.9,9252.01,0.0166707,-2.52456,-5.48285,2.26653,-2.03587,-6.50283,-1.00325,0.264499,-1.46362,-0.822672,-1.11829,0.403605,-0.734484,-0.382999,-0.186567,1.24812,-2.13095,1.80897,-2.82131,-6.15356,2.54337,2.39696,2.51379,2.41699,0.307725,-0.195503,-0.252349,-0.890546 10182.1,10151,9248.33,-1.21698,-1.52567,-2.334,0.102378,3.74418,-1.36756,3.51501,1.50357,-1.80774,-0.855037,-2.71284,0.0746735,-1.2904,-2.37263,-0.326812,1.37779,0.0811662,-2.04277,0.452769,-4.37491,4.60025,0.785458,0.944597,2.57121,-0.443829,-1.9031,-1.78376,-2.25217 10180.2,10149.4,9243.85,-0.498632,0.815261,-1.05027,1.32586,2.65892,-5.17029,-0.588453,1.63481,-3.33979,4.4087,-1.26981,2.01576,-3.03953,-3.66687,1.33091,1.62961,0.568999,0.53543,0.477935,-1.78405,3.91722,-1.12653,-3.07327,-2.27103,-2.21119,-0.0469714,-3.05949,-3.83303 10176.1,10146.3,9240.54,-0.464849,1.25223,-1.14736,-0.645201,4.96922,-0.805424,1.85313,1.43677,-1.45072,6.22509,1.54511,2.89442,-3.56094,-4.35854,-0.476689,0.39343,-0.929162,-1.07774,0.941846,-0.57756,0.363373,-1.13491,-1.30865,-3.06369,-1.8739,2.47973,-3.19611,-5.38414 10169.3,10142.4,9238.91,2.28739,1.91951,-0.759834,1.17008,-1.10807,0.137649,-1.76481,-0.427729,-0.592675,2.50623,0.607717,4.10404,-2.20382,-5.11375,1.80008,0.383348,-3.40396,4.33491,0.605228,-0.0871236,0.185566,0.480246,2.74078,1.48145,2.07534,4.96863,-2.65852,-5.78272 
10162.1,10139,9238.14,2.03262,2.32633,0.46709,-2.26524,5.80967,5.85587,5.67759,0.185696,-0.246666,-0.787877,-0.201738,0.61348,-0.542043,-3.51173,0.345287,-0.426571,-4.01566,0.315299,2.10005,-0.391753,2.39343,1.28396,3,4.99164,5.3145,2.31592,0.0224444,-4.14279 10158.4,10136.9,9237.31,2.77556,2.83113,1.37245,1.19159,2.19923,-2.0116,3.1913,1.03754,-0.929092,0.870894,1.00256,-0.624392,-0.561338,-2.99529,2.23674,0.823539,-1.63024,3.75817,0.298891,-1.18515,4.54738,1.25951,1.91277,3.57793,5.44217,0.785618,0.025315,-3.27161 10158.5,10135.5,9236.37,0.0672571,0.761886,2.35427,-0.889999,6.73976,-1.98269,8.45302,1.1398,0.0604089,-1.15193,1.32222,-2.47069,0.131408,-3.48238,-0.669944,0.753279,3.07189,-2.04262,0.174304,-2.32107,2.83224,0.708328,3.23848,0.984911,2.384,-1.28385,-0.548071,-3.32946 10160.6,10134.8,9236.46,-0.783525,0.239203,0.00548465,1.88108,6.83171,-2.89703,7.27976,-2.71585,-1.47417,2.12383,-1.04536,-1.14095,0.145875,-4.3962,-0.139564,0.781551,3.40043,-0.28834,-0.343608,-2.36391,0.0938093,-0.36295,1.0276,-0.578692,-0.619797,-0.489157,-1.92106,-4.163 10166.1,10135,9239.02,0.124276,1.29463,-1.44975,3.21172,2.53479,-3.38317,-0.20102,-4.72755,-2.14129,5.53743,-1.24849,0.994366,0.436372,-3.09635,2.19121,1.13794,1.52365,3.0586,0.622146,-0.699363,0.103461,0.316277,-1.73095,-0.195395,0.490618,1.44514,-2.50878,-3.62472 10175.6,10136.9,9243.9,1.67228,1.70099,-0.125799,2.04051,6.74509,2.05118,7.82124,-3.08565,-1.70842,3.37127,-0.160655,1.32998,0.57087,-1.46351,1.80831,-0.585194,-0.267853,0.719624,2.12333,-0.931791,2.61407,0.519467,-1.78038,1.70819,2.66646,1.47407,-2.48388,-2.6294 10184.4,10140.5,9249.09,4.05746,1.49391,3.1491,4.74869,1.42089,-7.65297,4.6083,-1.50292,-0.681543,0.792377,-1.54194,2.19467,-1.449,-2.54459,5.38937,-0.0662613,0.683022,6.46847,-1.151,-2.09676,5.40097,0.0884146,-0.584039,0.411805,2.87021,2.70096,-3.69024,-2.72328 
10185.2,10143.8,9252.71,2.20708,-1.9117,6.2705,-1.38994,9.88462,0.984595,14.8745,1.09177,3.01497,-6.59006,-3.06879,0.864155,-0.352553,-2.42934,1.6214,-0.899998,2.90809,-2.62154,-0.748965,-1.78716,3.1828,-0.76616,1.51574,-1.80336,0.759499,1.08543,-1.48814,-0.830864 10176.5,10145.2,9254.8,3.08758,-1.24415,2.30133,1.5123,4.9996,-2.25743,5.71269,0.326257,0.862459,-5.32366,-2.15784,1.98295,-0.769376,-3.24456,1.73394,-1.18022,0.303592,1.19388,-1.18318,1.1848,-0.484859,-3.12715,-2.31674,-4.16244,-1.41399,2.32149,-1.0187,-1.70219 10164.6,10145.4,9256.92,1.59078,-1.06701,-0.557541,-2.88977,3.22953,-0.245042,-0.474481,0.0498212,-1.16809,-8.33134,-0.306573,0.38113,0.242976,-2.39828,-1.29092,-1.68013,-0.127576,-1.94114,1.03024,1.7825,-1.44807,-2.86352,-4.13379,-1.78466,1.5241,1.16147,-0.513496,-2.30027 10156.4,10145.9,9260.21,0.0333157,-1.40254,-1.63643,-2.63202,2.15792,2.8366,-1.32406,-2.25364,-4.61227,-7.74587,-1.005,0.107792,-0.131513,-2.0428,-1.28031,-1.65736,-0.0589992,-0.767749,0.0451012,-1.23948,0.334266,-2.05544,-5.74107,1.40617,2.47259,0.129519,-1.22605,-3.50154 10152.5,10145.2,9264.25,-2.23854,-3.34598,0.871046,-4.48776,-5.12246,-0.367558,-7.49548,-3.04105,-2.99035,-3.84367,-2.67766,1.19195,0.695189,-1.99211,2.38266,0.800284,2.92667,1.82052,-0.796218,-1.82753,3.43662,1.60186,-2.49788,2.02216,2.59346,0.975508,-0.397427,-2.78437 10148.6,10141.1,9267.56,-4.64613,-5.4569,3.80281,-6.22039,0.554038,5.00519,-0.395733,-3.04225,0.570141,-6.95862,-4.49105,-0.00732036,3.78285,-2.09066,1.46914,-0.873643,3.95228,-2.08532,2.8568,0.749314,1.78963,1.02579,-0.808831,-1.60113,-1.17483,0.544949,1.95805,-1.27827 10142.4,10134.6,9268.73,-4.02228,-5.3818,4.39201,-6.57399,-2.68308,-0.146626,-0.297909,-1.28233,3.72363,-10.5635,-3.46562,-0.498293,3.92457,-1.10422,0.725311,-0.888612,3.1725,-1.82837,4.64182,1.32637,-0.56378,0.781271,3.29557,-0.557202,-0.712584,0.587691,2.76212,1.05325 
10137.8,10128,9266.83,-2.98689,-3.62614,2.49614,-3.78405,5.33483,-3.24499,-1.4797,-1.49474,0.75769,-13.0722,-3.57543,-1.73535,1.13307,-2.81826,-2.67056,-2.75063,-0.407379,-1.38965,7.67619,2.2374,-2.93415,-2.1994,0.956463,-2.25511,-4.42128,-0.889014,2.30781,-0.144069 10139.6,10121.2,9261.84,-1.19244,-2.09691,-1.17019,-2.92359,1.84257,-9.64131,-8.2266,-2.48032,-2.29368,-7.41116,-3.60172,0.404837,-2.31741,-3.52505,-1.14341,-1.1367,-2.22469,2.93998,5.91064,0.841518,-1.68308,-1.06298,-0.398387,-1.68239,-3.53445,0.38234,1.02165,-0.403129 10146.2,10113.8,9255.3,-3.35595,-3.34535,-1.74811,-10.4556,3.60927,-0.776329,-3.08604,-1.29687,0.835023,-5.76979,-1.7646,-2.22816,-1.31439,-0.382083,-1.73312,-0.792276,0.206848,-4.1992,4.29806,-0.830575,-1.71405,1.40452,2.00247,0.106559,-0.768805,-1.08451,1.11784,1.22578 10152.4,10107.8,9249.87,-2.49869,-3.87311,-1.98238,-6.90342,-1.23671,2.90852,2.97754,-0.581043,2.81778,-2.71728,-1.21684,-5.07044,0.497485,2.01224,-0.365556,-1.64542,1.17956,-3.76085,-0.573467,-2.58111,-2.12663,0.378165,4.18795,1.24581,-1.36196,-2.87649,0.482267,1.63454 10154.8,10107.2,9247.27,-4.01788,-5.39388,-1.72161,-10.3153,-0.251037,-1.57831,1.61553,1.18147,5.7765,-0.599766,-1.22598,-10.0294,0.895145,2.02015,-4.45992,-2.58818,2.98391,-9.45103,-1.41902,-1.29446,-0.55725,-0.180421,6.94249,-0.594659,-3.53394,-6.50742,1.38112,1.51458 10153,10112.2,9246.76,-3.24249,-5.01072,-2.02956,-7.46567,0.0264794,-1.5224,-3.31193,1.53111,5.32332,2.5335,0.40251,-7.05633,-0.711568,2.89381,-5.39998,-1.36446,2.04786,-7.02942,-4.53297,-0.88262,-0.357391,0.595822,6.5409,-2.84395,-2.64994,-5.7378,1.39939,2.97985 10148.7,10119,9246.16,-3.96002,-4.42756,-3.26432,-8.69557,4.03628,0.616301,-3.92147,2.76458,1.652,2.17356,4.22927,-4.5247,-2.33417,3.89508,-5.29918,-0.309883,-0.288513,-8.36711,-3.09529,-0.126421,-1.8539,2.38545,3.61409,-1.26649,0.429596,-4.19612,1.45711,3.95651 
10145,10125.2,9244.17,-1.75695,-0.511195,-1.73883,-3.34742,-1.26592,5.24499,-3.03549,2.78645,-2.1334,0.220919,5.88292,0.160927,-1.7455,5.37331,-1.59599,1.91312,-0.631146,-3.16886,-2.94994,0.34822,-3.01289,2.84951,0.356135,3.47859,4.18276,-0.12287,0.984563,3.64398 10143.1,10130.2,9241.27,-1.71615,1.12867,1.04805,-6.57347,2.41341,16.2593,7.00371,0.924589,-2.71609,-6.2656,3.57183,0.37743,1.96421,5.66573,-2.3041,2.26799,0.668846,-8.32571,2.30148,2.66333,-1.75615,2.71555,1.44408,6.00224,4.85886,0.685304,3.03234,2.82015 10140.7,10134.4,9239.05,-1.25992,2.46902,-0.556969,-2.76672,5.45596,12.4649,8.36959,-2.49709,-3.8708,-1.40646,1.38854,1.37064,2.12007,3.84209,0.459629,2.15086,-1.24194,-4.15365,4.52043,5.4809,0.876317,0.656659,-1.01116,2.09458,1.65028,2.77599,3.21635,0.381243 10133.6,10137.8,9238.32,-2.22442,1.37094,-0.787327,-1.05469,3.55443,5.14715,-0.0509983,-0.0905216,0.72894,3.96149,2.38061,1.75467,3.09083,4.18358,2.79613,3.29833,0.325666,-0.671704,6.07566,7.72379,3.13564,0.655668,-2.59152,-1.76199,1.58102,4.45884,3.34631,0.480564 10121.1,10140.7,9238.2,-2.17367,-0.866588,-2.79273,0.692199,10.1863,9.97874,6.04483,2.66482,1.76948,2.61332,1.9281,-1.1243,5.03132,3.85731,-0.443337,0.284932,-0.868815,-3.31091,8.51065,6.49177,2.23459,-1.67042,-3.77735,-2.781,-0.902713,1.50205,4.04064,0.197185 10110.8,10144,9237.47,0.303664,0.966366,-2.65365,4.69141,3.98147,5.09796,4.57488,3.26927,0.562439,5.41174,1.92471,-1.15766,3.6349,2.42314,-0.0874924,-0.0560302,-1.22366,1.9914,3.44357,1.69106,1.98031,-1.32375,-0.576816,-1.03349,0.269332,-0.300454,3.28264,-0.458562 10110.3,10147.7,9235.48,1.28867,0.940385,2.1165,-0.581377,-0.643187,-2.16313,1.69237,2.47912,1.37859,3.32286,1.26412,-0.720553,2.36863,-1.25903,0.0706914,0.944374,2.2859,0.229574,1.5842,-0.12766,4.43122,1.34327,3.34673,-0.404948,2.87655,-1.67866,3.04869,-0.25307 
10116.7,10150.7,9232.33,0.394714,-0.833445,4.94793,-6.11826,9.22151,2.99358,11.1041,1.15853,2.93899,0.397365,0.0221406,-0.0976144,-1.13452,-3.42557,-3.72862,0.476803,3.69054,-8.12164,2.48493,0.363106,3.87676,0.504363,0.972674,-1.44388,2.15926,-0.828986,1.75931,-0.549928 10121.4,10152.8,9229.14,1.29508,-0.757006,3.12597,-1.6729,7.62364,-0.936804,6.48918,-1.03742,1.86227,-0.262351,-0.75051,2.31301,-4.8422,-4.5034,-2.66476,0.578808,1.27532,-2.04282,3.45288,3.01897,0.564668,-1.21876,-3.06331,-2.70583,0.257935,3.52846,-1.56111,-1.5308 10121.6,10152.4,9226.86,0.677648,0.378414,1.31475,-2.61018,4.91454,0.37514,2.86121,-0.193973,1.93324,-4.63591,1.10695,3.14457,-2.96694,-2.19304,-2.99025,0.50097,0.165722,-0.200595,6.85438,4.63234,-2.47705,0.342532,-1.30419,-0.141339,1.63084,4.32707,-1.19328,0.76139 10120.5,10149.2,9225.49,0.499478,1.88224,-2.14427,-2.77288,10.6927,1.71766,6.49787,0.43981,0.0705592,-5.13201,2.57263,1.48076,-1.20267,-0.591255,-4.74193,-1.79266,-1.46188,-3.42451,8.04316,3.54243,-2.30088,0.0710442,-2.83238,0.653942,0.240506,0.904871,0.430945,1.6283 10121.2,10144.8,9224.89,1.35965,2.80608,-1.94166,1.75583,0.26227,-8.26437,0.567312,1.6259,1.60009,0.0627174,2.62631,2.65738,-1.31444,1.36503,-0.138702,-0.303116,1.07964,0.805711,0.6712,-0.0379901,0.596301,1.49046,-2.9437,-0.0854658,1.7116,1.14138,0.19577,2.11315 10121.7,10140,9224.64,-0.625981,1.46152,0.571473,-0.708952,-3.97306,-7.60183,3.54876,2.52756,3.43643,-3.37318,1.25185,1.95327,-0.430742,1.99167,1.38528,0.439469,3.35733,-3.21518,-3.33649,-3.33716,1.63613,2.87364,0.216347,-1.19264,2.34646,1.38095,0.250252,2.26893 10117.5,10135.7,9223.59,-0.644241,3.50756,1.18011,1.32346,-4.09529,-1.15572,8.91836,0.864807,0.810206,-4.21922,0.85698,1.54667,-0.984211,1.49262,0.424346,0.272079,0.55043,-3.11065,-4.92549,-5.21789,0.616593,0.933381,0.453042,-0.907799,0.816878,0.888407,-1.07882,0.897744 
10109,10134,9221.44,1.24811,3.97674,3.11247,-1.16572,-9.20759,1.26864,10.07,0.861166,0.629341,-5.07074,1.84156,0.554677,0.501606,2.3508,-1.99158,1.42546,-0.0624237,-4.75601,-4.11731,-5.27973,3.12042,0.927954,2.01431,1.91643,2.26937,-2.42322,-1.85499,2.11246 10103,10135.6,9219.87,2.2046,4.10281,1.87105,-2.44462,-1.81059,2.73657,16.517,1.49188,0.862687,-1.50652,2.91423,-2.27191,-0.311967,3.16828,-6.05317,-0.647296,-0.600809,-9.86797,-3.317,-4.05579,3.51099,-1.77799,-1.17227,0.17711,-2.12588,-5.86398,-2.08211,1.43944 10103.9,10138.7,9220.3,3.77174,5.49059,1.2637,1.03751,-12.6254,-6.24364,0.90728,3.65224,3.71822,2.59825,4.31988,1.86088,-2.62582,4.43061,-1.00461,2.10803,1.47555,-3.28777,-8.18549,-4.31695,2.95113,-1.34785,0.676274,-1.38936,-3.04336,-1.37001,-2.35773,2.00922 10108.6,10140.8,9221.82,-0.70593,3.90046,-1.14247,-3.0764,-1.47295,-1.10809,-0.510284,3.79285,2.60078,-1.28697,3.77566,2.32766,-3.54475,2.99719,-1.20306,1.33262,-0.719923,-9.06449,-7.33119,-4.80493,-0.721145,-2.4024,1.79362,-1.97223,-5.04385,0.0875954,-1.73778,0.950888 10113.1,10142.1,9223.55,-1.06377,0.843971,-1.44889,-5.32939,2.69029,-3.83385,-5.63119,0.535717,-1.61039,-5.59267,1.26514,2.05707,-3.31026,-0.958826,1.33732,1.46551,-3.13585,-9.66605,-6.00234,-4.35532,-0.26599,-0.831562,2.98878,0.128679,-2.54674,-0.278737,-3.58409,-1.324 10120.7,10142.9,9227.01,3.56995,1.04759,3.75113,-1.7421,5.12807,3.1454,2.38504,-1.62768,-2.93793,-5.71266,-0.530001,2.84448,-2.04436,-1.31251,2.17243,2.11298,-0.867238,-7.66197,-6.87331,-3.32769,-0.373459,-0.116178,2.03689,0.379397,-0.00605166,-0.182103,-4.1657,-1.22794 10135.1,10142.1,9232.63,4.13322,3.14571,5.42112,-9.50857,6.61076,-1.5265,-1.3563,-0.229734,-0.953633,-2.39287,0.0907423,-2.25912,-2.95494,-0.622513,-0.878638,3.11006,2.20909,-12.7591,-4.65267,-0.652931,-0.508727,-0.484787,-1.43884,-3.89903,-1.68783,-1.20607,-1.47415,-0.30987 
10150.6,10139.9,9237.26,7.08686,7.1115,3.05908,-7.31514,-2.75139,-6.15754,-6.75994,1.34201,0.583247,1.72791,0.0586144,-1.05549,-2.23348,1.35232,0.957745,3.9225,0.27845,-7.28043,-8.71747,-3.21629,1.12263,-1.08286,-3.72117,-4.10901,-0.817087,-0.319549,-0.171801,1.86899 10161.3,10137.9,9238.2,5.45348,5.872,0.0360833,-8.71486,1.68904,-1.57501,-9.84544,2.70784,2.39605,-1.45535,-0.548901,-2.93743,2.31592,2.21738,-0.0678836,1.75621,-1.90485,-7.83172,-5.34721,-0.902631,2.89369,0.938874,1.08004,0.946796,3.39736,-3.2386,1.23533,3.43628 10168.7,10135,9236.89,1.9988,3.16081,-0.959961,-1.65775,15.8147,12.2058,-6.43511,1.69639,2.59198,-2.06327,-0.47323,-4.35241,3.77438,3.79233,-2.16153,-2.08622,-2.56136,-3.89096,-0.736348,5.49778,-0.475583,0.770127,3.05002,3.17719,3.81221,-4.99556,1.59718,3.01185 10178.3,10131.2,9237.28,0.818385,-0.233269,1.46873,6.63122,10.9706,17.5879,-3.54675,0.677416,3.72244,0.655626,-0.201865,-1.16835,1.57109,5.42876,-0.444523,-1.12764,-0.256929,5.62565,-1.99386,6.4084,-2.47406,1.18593,3.2834,3.0293,3.51573,-2.53776,0.959038,3.23253 10193.3,10130.2,9242.16,-2.48525,-2.35837,2.98987,5.98816,11.4719,15.9039,-4.84232,-0.825315,2.54659,1.43064,-0.659643,-2.96556,0.571285,2.41784,-2.00371,-0.757574,1.41844,6.37057,1.42823,7.71148,-4.93994,-1.54988,-0.232174,-1.34349,-1.26249,-2.05601,1.26179,0.464125 10210.2,10133.3,9250.5,-0.302459,-1.69801,0.843368,2.30597,6.15326,11.0157,-5.9274,-1.05244,-1.68469,-0.278629,-0.694935,-0.891837,1.23651,-0.21345,-0.305015,-0.0987808,0.160233,4.91775,0.166271,3.92353,-3.88399,-2.55526,0.198425,-0.923912,-1.86728,-0.552523,1.22445,1.15572 10221,10137.3,9258.6,-1.56339,-0.256664,0.840544,-1.61826,11.0061,14.4706,-2.59098,0.449882,-1.65171,-1.89163,-1.35949,-1.40198,3.60618,0.270121,-1.02351,-1.1912,0.778059,-0.110922,0.867721,2.27546,-5.20223,-2.14642,1.17716,-1.36266,-2.51971,-1.10085,2.42789,2.32548 
10222.9,10141.6,9264.61,-4.74868,-0.212232,1.05283,-1.29221,10.744,4.75459,-2.81401,0.644295,0.850172,0.179994,-3.01777,-4.30435,2.71079,-1.12735,-1.29174,-2.07496,1.34575,1.0376,2.5823,1.95702,-4.5778,-1.28586,-0.494008,-4.39926,-5.46478,-2.40477,1.70545,-0.546783 10222.5,10148.7,9269.02,-3.49502,-0.678579,-0.213247,8.06515,8.4472,0.736921,12.8231,-0.680516,1.09355,1.44143,-3.62765,-2.08929,0.194595,-2.35671,-0.392866,-2.86869,-0.655593,6.76095,0.52286,-1.94996,-0.69629,-1.94695,-3.05311,-3.36287,-5.8798,-2.04553,-0.962602,-2.08692 10226.3,10155.2,9271.48,-1.96969,-0.131236,-7.34816,10.3469,1.43629,-18.1274,6.28789,-1.94889,-4.21799,9.10578,-0.96868,-0.513386,-5.07894,-4.75252,3.07715,-1.21549,-4.62974,12.6049,-2.11208,-4.5134,4.07597,-2.26695,-5.31607,-0.080814,-4.75562,0.0499323,-2.60796,-2.05158 10230.1,10151.7,9270.27,-0.441668,1.99564,-2.24149,10.4542,-4.09391,-6.45561,-1.77752,0.712394,-1.02642,8.25875,2.54249,4.31177,-1.67116,1.28898,3.90167,2.27301,-0.292013,13.1856,-3.31394,-4.23242,0.509949,-0.582218,-1.55254,1.54596,0.383257,3.15094,0.659781,3.83919 10224.9,10138.7,9266.49,4.67287,5.1299,-1.26323,13.4301,-10.2745,-9.49416,-12.2719,-1.18436,-2.87586,6.16837,2.83569,6.07774,-2.8315,2.00898,6.40272,2.01559,-1.86315,15.8694,-4.72684,-3.25468,-2.65905,-3.311,-6.24296,-4.21139,-3.70695,4.80612,0.395122,1.76566 10212.8,10131.4,9265.67,3.01888,4.86272,2.80549,9.41976,5.08199,16.7307,3.01517,-1.39232,-0.901598,-3.17761,2.70511,2.89126,0.206015,2.09237,1.79821,0.427067,-0.286912,4.97158,1.88506,1.52106,-4.78901,-3.10639,-5.19696,-1.88352,-1.17405,1.76068,1.66502,-0.462334 10205.3,10137.3,9271.29,5.0191,6.44861,-1.029,10.2232,1.46143,6.79866,-7.1328,-3.52906,-8.32347,-3.93806,2.03961,4.301,-3.73195,-3.92217,6.44854,2.90593,-2.49697,11.4551,-0.562561,1.57056,0.711111,-0.350636,-4.25263,3.76126,3.75639,3.70316,-1.79131,-3.47622 
10205.7,10147.7,9278.59,5.83546,6.36501,-0.202118,7.16455,-12.9828,-12.4607,-27.3389,-3.33415,-9.60681,-6.26496,-0.539386,6.78879,-3.91681,-6.10831,9.8609,6.12423,0.502419,17.71,-2.72276,0.90307,5.89102,4.35576,1.47131,6.87862,9.08531,6.44279,-3.45175,-1.92878 10205.4,10153.7,9279.43,2.61204,3.79426,2.8599,4.2373,-6.30104,-6.55433,-17.9117,-2.30217,-4.33352,-8.56342,-2.54108,4.06241,-0.221565,-2.25183,3.87958,2.42384,1.7425,10.0636,-0.274803,1.38918,2.9688,2.49859,1.85002,3.57782,5.56749,4.25356,-1.57246,0.769565 10198.3,10155.2,9271.53,1.79363,-0.436721,3.46418,1.17919,-6.21503,-12.0337,-14.7144,-0.753172,-0.422946,-10.0673,-1.05729,0.16841,0.00393219,0.329848,3.06417,0.641188,1.13987,4.50086,-1.96838,-0.158451,2.22687,1.01485,-0.617827,-1.82684,0.837829,1.35672,-0.969077,2.83866 10187,10154.7,9258.9,0.357944,-3.85399,-0.403587,-0.905802,-6.94279,-16.6984,-17.7781,-0.22625,-1.87358,-4.80273,-0.208291,-3.41762,-1.38116,-0.435891,4.56144,1.47257,0.881539,4.31043,-2.35524,-0.63135,2.49929,2.73787,-0.3439,-0.967951,0.479767,-1.25236,-0.198644,2.70849 10175.5,10150.8,9245.55,-2.22289,-4.64417,-1.57873,-3.37822,-3.35046,-9.88201,-14.3071,0.168661,-0.756661,-2.69992,-1.57269,-4.61371,-0.741804,-0.794809,1.95045,1.34471,1.90438,0.670421,-1.36383,-0.0207592,1.95603,4.44548,1.70081,0.896225,1.96219,-2.68814,1.37985,1.21966 10163.9,10144.5,9233.39,-1.0609,-3.6573,-1.22008,-1.66234,-8.72059,-9.8591,-9.71449,-0.237702,2.4907,-0.383432,-2.45784,-2.52105,-0.451308,-0.95008,0.101755,0.998499,0.0147502,0.763548,-2.08901,-0.286814,2.08671,3.24587,1.98374,-1.03823,1.41551,-1.64013,0.866956,-0.452541 10152.5,10140.9,9224.11,1.58528,-1.3177,-2.21666,-0.770113,-12.1162,-14.2306,-0.877621,-0.372338,1.62768,2.76293,-0.69447,0.389726,-2.24466,-0.492948,-1.07534,1.2119,-2.84085,1.62365,-4.58137,-3.47859,2.38127,-0.58689,-1.20067,-5.12188,-1.38938,0.191315,-1.00868,-0.231626 
10144.9,10141,9218.45,2.9188,-0.174985,-4.58083,-6.94645,-12.0718,-23.1781,-6.27315,-0.364715,-3.24703,1.70145,0.993811,-0.598274,-3.56103,-0.759525,0.496704,2.46032,-1.89983,0.597576,-2.01394,-2.93857,4.73883,-0.682548,-1.34504,-3.70636,-1.23983,0.0550942,-2.01066,1.58053 10141.8,10139.7,9215.32,1.06474,0.421951,-5.29652,-9.2234,8.36446,-5.7284,0.960531,-0.909556,-4.90704,0.770291,1.54135,-5.62095,-2.20122,-1.09503,-2.35206,-0.974175,-1.0101,-7.23319,3.01594,0.768168,2.39478,-1.32615,-1.6404,1.53725,-1.51813,-3.97654,-1.7665,0.833795 10141.4,10134.3,9214.23,0.86273,1.35397,-0.657898,-4.72598,2.71892,1.93911,-8.71178,0.127278,0.812447,5.14689,3.34014,-5.47575,-0.124804,-2.70815,-0.541837,-0.600256,1.53834,-3.53843,0.0605411,2.43643,0.689316,0.936364,1.45495,3.58725,0.917646,-4.12549,-2.16127,-1.91164 10145.6,10128.8,9217.09,0.035273,1.26692,3.11502,-4.96307,-6.78084,1.02172,-8.79811,2.69846,4.94751,11.3598,6.51275,-2.0705,0.657905,-2.59061,-0.35795,1.18908,3.42851,-3.05799,-3.41004,0.806424,0.399374,2.92706,4.4301,0.273598,0.553543,-1.76552,-0.755718,-3.46001 10157.5,10128.8,9225.31,0.248702,0.312336,2.57768,-4.36878,-7.1619,-0.049009,-3.2758,2.7151,1.99544,11.1247,7.80862,3.2311,1.05086,1.13953,0.117826,1.5885,2.6575,-2.74279,-2.82058,-0.206648,1.25493,1.71967,2.81266,-4.13773,-2.45207,2.50385,0.789243,-0.268176 10170.7,10133.1,9236.11,-2.23675,-0.885477,2.34602,-6.30375,3.19378,12.3402,5.26964,2.51006,1.86666,4.33237,6.63528,4.85198,3.48519,8.46812,-2.52066,-0.634166,3.57125,-6.40349,1.46869,0.818123,-1.68738,1.2743,1.91738,-0.951766,-0.403311,4.63843,3.18061,7.04436 10176.7,10136.2,9243.78,0.782244,0.338989,-0.179665,0.677035,-11.8864,-9.98092,-16.6014,-0.0876104,-1.39338,0.511794,2.05749,5.37285,2.64871,7.7119,4.8232,-1.23349,2.56586,8.98335,0.643413,1.73431,-0.63479,2.49537,-0.600719,2.26345,1.69812,6.71431,2.31721,8.10433 
10176.8,10136.6,9245.84,-3.20567,1.13405,3.92668,-1.78597,-0.236073,-2.19382,-11.4115,3.08973,1.33702,-3.27145,0.727769,-0.100717,5.38921,8.19297,0.492232,-2.20151,5.25989,3.6589,4.08819,2.21554,-1.32513,3.54291,0.119275,3.23854,3.862,2.19948,5.28701,6.25834 10178.4,10137.4,9245.74,-5.53585,0.420645,5.85295,-4.47724,14.54,12.4497,8.36972,4.99424,2.57479,-4.3639,0.677018,-2.6813,6.67898,7.5884,-5.54187,-1.3688,4.05586,-6.15054,4.2909,-0.899213,-1.24567,1.90686,-0.469126,1.72139,5.00978,-1.65339,6.96518,3.71489 10184.8,10141.1,9247.89,-4.95644,-1.91401,3.7243,-7.95873,7.49028,6.40526,5.31843,3.53676,4.4376,-3.95261,0.746514,-2.92295,5.17495,5.09822,-5.56387,2.13589,1.74219,-7.51099,1.13636,-2.24892,-0.712168,1.40767,0.401594,-0.663717,6.22808,-1.51586,5.59537,1.86444 10195.1,10147.9,9253.27,-3.98,-3.06823,-2.05534,-6.10099,3.83685,4.55708,3.92119,0.928846,2.49159,0.0763172,1.14792,-2.88509,3.3624,3.14131,-4.76678,1.53759,-2.49281,-5.00974,0.3227,-1.57677,-2.36177,0.558465,1.76223,-0.153596,3.21585,-0.248642,3.44061,1.09292 10206.6,10155.3,9259.98,-4.64998,-1.64546,-4.6585,-6.92405,-1.23826,-1.4651,-7.80907,2.03872,0.322905,5.35637,2.9557,-1.90346,0.941137,2.90995,-2.25745,1.6362,-2.73525,-3.06893,0.361893,-0.410406,-1.95298,3.18373,4.96997,3.18307,2.09522,2.29277,1.29516,1.46329 10215.1,10159.8,9265.65,-5.64262,-2.22323,-2.32616,-8.62966,1.24852,3.53986,-7.11813,2.5704,-0.221435,0.41167,0.765415,-1.44792,2.10023,1.14341,-1.90736,0.761342,-0.0657556,-6.90094,4.60419,2.00852,-1.1143,4.44335,7.23913,4.6059,2.18355,1.92624,1.0442,1.06642 10218.9,10161,9269.98,-5.54728,-2.69742,0.623383,-4.54971,5.62832,12.115,1.60837,0.527375,0.225195,-4.35554,-1.09064,-1.69716,2.68584,-2.42078,-3.28377,-0.48855,1.46337,-7.59929,7.41232,3.78152,-1.52786,1.12019,5.14455,0.902689,0.791392,0.171231,1.01653,-2.1951 
10225.1,10161.4,9274.87,-4.18459,-1.40959,4.0543,-3.78563,4.56469,13.1486,7.4468,1.32559,4.01602,-4.26528,2.47676,-0.706977,1.49841,-2.44619,-4.48237,0.314642,3.21848,-7.78537,6.45365,2.67192,-0.518631,-0.579868,3.1551,-3.30298,0.42352,0.385421,1.09082,-3.38628 10238.6,10163.7,9281.72,0.163978,0.29531,1.39945,-1.88245,0.770367,3.01996,6.47156,0.843119,3.05229,-2.89342,3.69162,1.01002,0.156961,-1.63668,-1.88068,0.459627,0.572044,-3.8789,6.07964,1.73877,1.04155,-0.952277,-0.352698,-3.89818,-1.13337,1.63306,0.655322,-3.05775 10252.3,10168.8,9289.58,1.69242,0.803041,0.969081,-1.57571,10.1963,10.1486,9.01137,-0.23779,2.45598,-11.8335,0.764195,0.347471,0.63322,0.818036,-2.67947,-0.48707,-0.0121974,-5.92175,4.75178,1.31186,-0.59319,-0.865273,-2.13114,-0.629395,-0.22624,0.187864,0.687159,-1.38416 10258.4,10175.1,9296.44,0.693656,-1.47018,1.57507,-4.07861,13.9151,7.913,3.87705,-2.41045,1.40643,-18.8401,-3.38044,-3.78137,0.444306,-0.142111,-3.19856,-0.633983,1.26609,-6.96487,4.03731,1.86282,-0.255938,0.885239,0.576534,4.16798,1.48633,-2.91027,0.44246,-1.26861 10259.2,10179.7,9301.13,-1.11281,-2.9356,3.48279,-4.07376,14.5961,4.75668,2.95063,-2.50321,1.99968,-15.2573,-3.94817,-6.19421,0.994523,-0.409685,-3.36826,-1.30752,2.89435,-7.11783,2.3961,1.75016,-0.287404,0.839505,2.32354,3.16514,0.431073,-4.23834,0.224613,-1.13459 10258.9,10180.8,9303.2,-3.70956,-2.93593,3.76222,-6.98265,14.1006,4.36509,3.13521,0.524873,3.4745,-8.19672,-0.812591,-7.54285,2.87285,0.165482,-4.34303,-3.00502,3.10194,-11.8146,3.48326,1.87454,-2.39007,-1.71717,-0.0308325,-3.00344,-3.10099,-5.07511,0.999296,-0.291248 10259.7,10178.9,9302.61,-2.50722,-0.863499,1.6361,-7.29671,5.65875,7.35687,6.74534,2.86707,2.5541,-4.10002,1.92641,-4.21325,3.79643,1.11564,-2.85299,-3.384,0.718232,-13.5344,2.15514,-0.378278,-3.09826,-4.48668,-4.09564,-6.07121,-4.62941,-4.63714,1.35609,1.33932 
10264.3,10176.2,9300.58,-1.50986,-0.476834,0.153861,-9.03392,2.34462,9.76008,11.2624,0.958254,-0.70443,-6.3101,0.886002,-3.04957,4.20237,0.687347,-2.59931,-4.30057,-0.344332,-15.3463,3.30618,0.212706,-1.83037,-5.39362,-6.37009,-5.79293,-5.6463,-5.17005,1.45394,1.2199 10270.2,10175.5,9299.06,-1.8193,-1.62584,1.49621,-15.2891,-0.19176,0.694336,7.97111,-0.906134,-1.88497,-6.47048,-0.900237,-3.70282,1.23614,0.322582,-3.93212,-3.45866,1.71962,-16.8955,0.58688,-0.409914,-0.259588,-2.68512,-3.64588,-3.35838,-4.51583,-4.19392,0.240148,0.159851 10270.2,10179.6,9298.63,-1.90388,-3.42457,3.36972,-15.5947,6.83754,-2.72512,7.96959,-1.26132,-2.35887,-7.13988,-3.00989,-4.84946,-1.32472,-2.90407,-7.21556,-3.99747,1.63284,-18.121,1.49353,-0.486008,-0.289734,-2.44221,-2.61409,-4.74746,-6.81336,-4.22186,-0.397997,-3.01155 10263.1,10186.3,9296.94,0.1046,-2.95923,0.55802,-3.53552,11.956,6.06043,20.0157,-0.175478,-1.81809,-1.77528,-2.10279,-0.283075,-3.48288,-4.09089,-6.41457,-3.4926,-1.98205,-11.2644,1.51324,-2.56718,2.01317,-3.17178,-3.03644,-4.28621,-6.82533,-2.57386,-0.732198,-4.52782 10250.3,10186.7,9289.82,0.787893,-2.63004,-4.83671,4.59987,9.90165,5.11396,20.1712,-1.49013,-0.900383,3.2704,-1.38302,1.01612,-3.51797,-3.65748,-2.01906,-2.31487,-4.58178,-0.663723,4.99631,0.0846666,6.20019,-1.32911,-0.366123,-0.708005,-3.05462,-1.4169,-1.33549,-4.03837 10229.6,10174.2,9276.51,2.92922,1.43172,-8.45959,7.92191,9.82817,0.906035,15.1761,-5.66535,-4.80598,8.92318,-1.50732,0.863702,-4.19618,-1.72605,1.43049,-1.60336,-7.78679,7.9456,2.20311,0.976306,4.6808,-2.0774,-1.41618,1.52784,-1.00485,0.251303,-2.51818,-3.24837 10203.9,10154.8,9263.01,1.97737,4.88419,1.86761,-1.89071,16.8831,21.8027,18.6752,-2.85592,-0.407409,1.1857,1.57668,2.90834,1.42619,5.01683,-2.88862,1.13125,-1.02838,-3.77013,-1.83294,-0.874118,-1.82318,-1.06152,0.617181,1.34269,3.38069,1.15764,1.12216,1.38647 
10184.5,10141.2,9256.68,5.24597,7.64832,2.18557,1.58328,4.92602,9.28816,-0.0172234,-2.70209,-2.36954,2.63625,2.45988,6.65341,1.30855,2.45772,0.884071,4.15289,-0.306199,0.501745,-3.91598,-0.843063,-3.78083,-0.751671,-0.908618,-0.353576,1.46737,4.59599,1.10914,-1.05414 10178.9,10140.4,9258.57,8.5511,8.38576,-0.704081,10.0442,3.87995,9.53107,4.06474,-2.33977,-3.33414,3.45052,0.769206,8.44243,0.151836,-0.110094,2.50423,3.89258,-1.86971,4.86933,-2.34618,0.208276,-3.54318,-0.382483,-0.444637,3.17545,1.86638,6.31308,-0.0788599,-2.11239 10182.7,10148,9263.52,7.664,6.75263,-0.540997,5.42972,-5.04193,-7.98425,-8.29464,-0.166299,-0.588527,3.31557,0.500806,4.72146,-2.51571,-1.43305,5.52369,5.671,1.03703,8.03067,0.0463032,4.16527,0.993743,2.27,2.01907,5.48701,6.28587,6.50446,-0.915646,-0.555951 10185.6,10156.6,9266.64,4.26252,2.60407,3.65205,1.35764,1.93964,-1.71464,3.62386,0.664968,2.07164,-1.84774,-1.41728,2.03742,-1.93901,-0.955849,2.55509,2.24827,3.4143,2.08534,1.52467,4.36357,2.40504,-0.149419,1.87333,2.56701,3.76988,3.58853,-0.290298,1.53656 10182.8,10164.1,9266.99,3.44774,1.00051,3.58435,5.06036,-3.20427,-1.32409,2.16178,-1.24869,0.986594,2.68824,-3.10496,3.75494,-3.03899,-1.36189,2.85639,-0.797041,2.25309,6.84226,-1.01807,1.45026,1.64915,-1.77668,1.47461,1.32051,0.0174875,3.15498,-1.91103,0.915561 10177.6,10169.5,9265.47,2.97062,0.742454,2.19308,3.39405,-10.2555,-6.11354,-8.35604,-2.29312,-0.492631,4.2024,-2.46282,2.85236,-2.05854,-1.07623,3.34902,-1.67951,1.43015,9.72371,1.0556,1.2093,0.0329592,0.933345,2.62882,4.14907,1.43657,2.25242,-2.21302,0.424466 10175.1,10171.1,9262.53,2.78573,0.66686,2.0545,2.76769,-2.38316,1.38611,1.33538,-1.98843,-1.22362,0.719734,-1.48276,0.571928,-0.303568,1.13172,0.533248,-2.57485,0.218063,4.75694,4.12677,1.25451,-2.29974,1.77459,2.18864,5.66448,2.31972,-0.197648,-0.423422,1.24127 
10176.1,10170.7,9258.49,5.31438,0.737423,2.23937,7.15555,-6.03862,-6.93885,2.59027,-2.08985,-1.82474,1.76361,-1.51506,2.40133,-2.94977,1.13326,2.34185,-1.4691,-0.319475,6.55378,0.151184,-0.820336,-1.03183,0.737373,1.0173,1.60097,0.120988,0.706961,-1.06361,1.61191 10177.1,10171.1,9253.43,5.27989,0.124242,0.594136,6.40228,-14.4792,-17.9873,-7.83873,-2.70593,-2.84279,6.19952,-1.02819,4.22035,-3.89328,-0.655654,4.6427,-0.543649,-0.312946,7.67303,-3.34568,-2.99026,0.892734,0.193866,0.437901,-1.37172,-2.06494,3.10779,-2.09072,0.969194 10175,10171.9,9247.28,2.27598,-1.11333,-0.371999,2.70022,-5.44405,-1.24932,2.95574,-2.54561,-3.07604,2.81372,-0.48024,4.11824,2.04907,-0.370621,1.24343,-2.71039,-1.27809,-0.906837,-1.29061,-4.80376,-0.177684,-0.68347,-0.0356975,0.976652,-2.58184,2.60538,-0.53245,1.0079 10170.6,10171.1,9240.98,0.484599,0.0646839,-1.51326,2.89899,-3.4319,-0.213982,2.47953,-0.834731,-2.00581,5.72898,0.227883,2.67222,2.27602,0.0505934,1.31844,-2.26552,-2.6972,-0.975391,-0.869576,-3.70984,-1.26158,-0.292123,-0.590846,2.58737,-1.84822,1.62378,-0.526111,-0.491878 10166.9,10167.6,9236.09,0.964725,-0.0392702,-0.079079,4.19696,-8.77705,-7.3393,-5.33084,1.7816,1.00552,6.00308,-0.645333,1.80016,-0.345783,0.537513,3.29513,-0.258503,-1.94323,3.02276,-2.07851,-0.708951,-0.985472,0.42465,-0.0047685,-0.0149723,-1.37113,0.550535,-0.779034,-0.484969 10166.1,10161.5,9233.6,-0.598547,-1.76595,-1.06041,-0.952044,-3.22733,-6.25839,-1.71002,3.5389,3.14678,2.52469,-0.94774,-0.697306,-1.82073,1.8162,-0.398189,-0.0962201,-1.17773,-3.11075,-1.86249,-0.148137,-0.912351,0.0729367,0.372787,-1.52491,-1.99794,-1.67208,0.753712,1.02245 10167.9,10154.5,9233.85,1.32924,-0.579085,-4.09528,3.27081,-6.78357,-9.38603,-3.06915,1.95927,0.70163,2.46784,-0.635142,0.854662,-1.03664,2.44479,0.381434,0.976493,-2.1874,1.35415,-3.25712,-1.85514,0.202589,0.286026,0.720155,0.627719,-0.687001,-0.872865,1.21871,2.25385 
10170.4,10147.3,9236.23,1.55419,0.655793,-3.90119,3.65032,-6.92144,-3.81534,-0.829364,1.59907,-0.150104,0.588015,0.212751,1.04803,3.09472,3.79829,-0.218751,1.11779,-1.55055,0.933332,-1.25266,-2.59487,0.647035,1.39731,2.58953,2.8589,1.80309,-1.43261,2.52993,2.79953 10171.9,10139.7,9239.22,2.16966,0.513128,-2.93705,2.73804,-10.8601,-4.50483,3.76187,1.03924,-0.676839,-1.4866,-1.19577,1.6866,5.98311,3.12642,0.0885709,0.9896,-0.594518,0.533618,0.379411,-3.82145,2.32664,2.22298,3.60721,3.05218,2.2889,-1.98702,2.79897,1.35025 10172.4,10133.5,9242.05,0.627291,0.905709,1.39363,2.99372,-15.425,-9.09382,2.11414,1.04226,2.10526,-4.39506,-2.77953,2.15891,6.66724,1.70369,-0.372333,1.40462,2.59187,2.26874,-0.378224,-3.69675,3.0335,2.25396,3.10192,0.0429504,0.10951,-0.799702,2.66794,-0.282681 10173.8,10130.2,9245.36,-1.33644,1.42161,3.11004,3.93858,-17.0646,-12.116,1.67239,1.94826,5.54306,-3.85205,-1.5475,2.52019,4.33814,1.15019,-0.541069,1.99129,3.05378,4.25369,-2.76731,-2.80645,1.85733,0.988299,2.88783,-1.97077,-2.83768,1.85125,2.84766,0.389147 10176.4,10130.9,9250,-3.53503,0.391503,-0.270572,1.95882,-15.1875,-18.5758,-1.42497,2.28845,5.40786,-2.12974,1.20821,0.911564,0.2788,0.0689856,-0.00271805,2.01928,-0.20812,3.23848,-1.98612,0.0245125,0.488358,-1.18054,1.47019,-3.47437,-4.6287,2.11498,2.20934,0.993318 10178.8,10135.9,9255.56,-3.20255,-0.268054,-3.48033,2.47099,-11.3536,-16.9308,2.01776,1.40976,1.56328,0.853625,1.89586,1.47109,-1.50849,0.167668,0.627511,1.41809,-4.21425,2.05546,-2.39209,-0.416193,0.276633,-1.50971,-0.820011,-1.25927,-1.76,0.153711,0.431209,1.48315 10181.2,10144.1,9260.31,-2.49125,-0.613263,-3.86482,0.287362,-9.17309,-14.1157,3.48478,0.196793,-1.25386,2.83848,0.198147,-0.0165582,0.471677,-0.139327,-0.216901,-0.966032,-5.2193,-1.40546,-0.977273,-1.2574,1.78779,0.134179,-1.72164,0.653388,0.313432,-3.37716,-0.587605,0.861387 
10186.6,10151.1,9263.12,-0.0358474,0.714951,-5.47328,-0.875177,-17.5089,-13.8361,0.471247,0.643912,-2.41975,9.9458,0.993041,0.803296,-0.226386,0.0668295,2.19176,-1.16819,-4.40868,0.69383,-3.38706,-3.58218,3.07732,2.10253,1.79789,2.06744,1.83904,-2.15516,-1.67344,0.661882 10193.4,10152.2,9264.85,-2.78688,1.85556,-1.96216,-7.27433,-5.61022,0.625161,3.91544,2.78407,0.13042,8.01854,3.573,-2.43853,-1.07905,0.148792,-1.48277,-2.3792,0.378784,-7.05144,-1.06108,-1.76148,0.135824,1.71393,3.80312,-1.43656,0.702495,-1.95731,-0.703674,-0.33177 10196.9,10148.7,9267.46,1.41437,4.41491,0.0330121,-0.96198,-19.7539,-11.561,-5.49424,1.03618,-0.588315,13.1158,4.11913,1.82776,-4.02743,-1.24038,4.49417,2.16391,1.61464,5.33203,-6.2827,-3.22771,2.42673,4.53812,5.27571,1.95384,4.83592,2.15944,-2.23414,-0.0179182 10195.1,10146.6,9271.67,-0.599083,4.08109,5.56207,-0.651956,-1.899,4.41751,8.64946,-0.00765143,1.65381,7.40697,3.13743,0.528221,-1.17274,-0.333192,-1.34405,0.810869,3.04978,-1.96585,-3.00608,-1.02587,-0.427114,2.63482,2.33223,1.44749,2.70602,-0.508442,-0.782524,0.838544 10190.6,10149.1,9275.95,0.560997,3.32623,0.00253245,1.6273,-9.62681,-9.32197,-7.13248,-1.74244,-2.26773,10.279,2.01853,1.79006,-2.32577,-1.861,2.70102,2.63733,-0.668516,4.89049,-2.56801,1.67809,-0.682542,1.07859,-0.730879,1.04436,0.219305,1.04839,-1.30085,-0.204558 10188,10153.1,9277.72,-1.05102,1.4439,-1.2902,0.37219,3.61058,7.8905,-0.13638,-0.797121,-3.203,3.7144,-0.467361,1.43319,1.01941,-0.964803,1.27849,1.32106,-0.71757,-0.281666,1.82319,4.43107,-2.93419,-0.102775,-2.79816,1.60946,-0.350934,0.837113,0.975085,-0.206216 10189.3,10155.8,9275.17,1.71247,1.79065,-0.806826,4.2591,-1.07113,5.08033,-3.80833,-1.05846,-3.93516,4.86697,-2.48519,4.41458,1.0147,-2.04319,5.76698,3.04901,0.621182,6.18537,-0.471514,3.74338,0.0954557,1.78055,-2.23478,4.29533,3.28968,4.08665,-0.45381,-1.12752 
10190.8,10155.9,9267.91,0.0885688,1.62773,3.97676,0.475719,6.50171,12.0036,4.17355,0.0800788,0.877184,4.13283,-1.66529,2.3731,1.22312,-1.52431,1.32333,1.30085,4.02821,0.00402446,-0.278254,3.83144,-0.00616006,1.70507,0.14686,2.05675,3.75234,3.42709,-1.13997,-2.28219 10186.5,10152.6,9257.34,-0.152071,1.1051,2.98089,-3.26014,-3.23874,0.545145,-3.74253,0.650653,4.32612,4.55661,-0.349067,0.443991,-1.54712,-2.37082,1.08068,1.11666,3.19332,0.114235,-4.77887,1.03262,0.526047,1.57427,1.96416,-1.21359,2.2522,2.81775,-2.19914,-3.20958 10175.9,10146,9246.33,-2.37365,-0.801223,1.8448,-4.49245,2.73452,3.45587,0.665856,0.804743,7.15539,-1.25789,-1.25952,-2.70716,-1.07845,-2.04441,-1.93328,-1.35806,1.5978,-5.1161,-5.79834,-0.925826,-2.80177,-1.15512,-1.39234,-4.88988,-2.71874,-0.727928,-1.17586,-2.55528 10163.6,10137.3,9237.87,-0.803469,-2.78044,-0.895544,-1.96323,-0.541223,-3.95959,-1.23923,0.0489646,5.82687,-0.842944,-2.20839,-1.37161,-0.868195,-0.366623,-0.326653,-0.542204,-0.442138,-3.06811,-5.05951,-1.77693,-2.56412,-2.0747,-5.18551,-5.90628,-3.59607,-1.51359,-1.0358,-0.0442413 10154.4,10129.1,9233.99,1.23915,-3.76005,-2.64612,0.723829,-3.148,-4.96491,0.57486,-0.202117,2.21428,-0.386009,-2.61213,0.591537,-0.420445,2.51457,0.848114,0.0155665,-2.8099,-0.688955,-1.65728,-1.68576,-0.314736,-2.37588,-7.30164,-5.93878,-1.09582,-1.08092,-1.23666,3.04974 10147.7,10124.3,9234.84,0.130569,-3.33534,-5.30783,0.228073,-1.79103,-2.90284,1.72325,0.336059,-1.67646,0.805152,-2.51359,-1.68843,-1.08056,2.79024,0.667811,-0.918425,-5.25023,-0.613583,-1.21144,-3.86108,1.12026,-2.87087,-6.96217,-3.74878,-0.871173,-1.99148,-1.4983,3.13726 10141.9,10125,9238.34,-2.3342,-3.74514,-6.28736,0.247636,2.71253,3.12847,7.57994,-0.0401623,-2.07147,0.481455,-3.97685,-4.46362,-0.415913,1.42821,-0.575486,-2.68041,-4.57327,-2.24353,-2.60028,-5.84863,0.625916,-3.42977,-3.6369,-0.844099,-3.5874,-4.64335,-0.985747,1.2717 
10139.9,10130.2,9242.19,-1.31024,-4.72475,-7.14762,0.73153,1.45053,-5.53508,5.90136,-2.31863,0.194991,0.488804,-6.97821,-4.41928,-2.29074,-1.35009,0.919216,-2.89533,-3.25509,-0.799203,-1.99553,-4.14064,2.04707,-1.98553,-0.137078,-0.0166083,-4.9352,-5.40326,-1.67739,-1.42035 10146.2,10135.6,9246.04,1.48702,-3.36982,-6.22071,1.74719,2.56435,-13.0074,1.99705,-3.21561,2.91416,0.844878,-6.7988,-2.16439,-5.4962,-1.85975,2.13575,-1.59383,-2.91884,1.52462,-1.3314,-1.85117,3.6544,-0.430522,0.692754,-0.840642,-3.31251,-2.33908,-3.05762,-2.1983 10158.1,10136.1,9250.8,0.841737,-2.49661,-1.39476,-1.47649,15.6927,0.965199,10.869,-0.546861,4.02682,-3.15137,-2.65822,-1.05518,-4.77058,0.229656,-2.58261,-1.60934,-0.689737,-5.44364,-0.234473,-1.95479,2.60062,-0.769404,0.484685,-2.21476,-2.21659,-0.527818,-2.3356,-0.631119 10167.2,10131.4,9256.17,1.43756,-1.64599,0.0828565,1.10643,1.09851,-8.71597,-1.14743,1.16785,1.24835,1.69522,0.678389,1.91657,-5.73395,-1.26925,0.618759,0.671225,0.99422,2.5392,-3.14056,-3.00047,3.39733,-0.267724,0.865602,-1.72338,-1.28093,1.59131,-3.58079,-1.60917 10168.5,10125.9,9259.95,0.111755,-1.49369,1.18289,-0.284048,-1.52165,-7.82514,1.91577,2.83987,1.30957,4.34859,2.31828,0.547347,-5.35341,-2.95714,0.120479,-0.07344,1.25038,0.863374,-1.97606,-2.63292,2.99367,-1.51317,-0.192761,-1.94301,-2.34527,-0.816782,-4.15688,-3.69083 10164.7,10123.5,9260.03,2.54631,0.123647,1.85441,0.291179,-2.26534,-5.622,0.403256,2.75151,1.92159,5.45502,4.02912,0.277333,-3.49437,-2.59529,1.68451,1.03176,0.611114,1.05444,-1.37086,-0.762577,2.09659,-3.15435,-1.66892,-4.18628,-2.03484,-0.59484,-4.5361,-4.06338 10160.7,10123.9,9256.02,4.16394,1.15842,1.00215,-1.41089,3.00077,3.69915,2.12147,1.50602,1.11373,3.7783,5.12886,1.27055,-1.0735,0.163066,0.715848,1.75274,0.248762,-1.87449,-2.70607,-0.0821427,-0.982237,-3.91753,-0.603176,-5.15131,-1.55797,1.9122,-2.63806,-2.45448 
10157.6,10124.8,9249.1,1.13904,0.752742,1.28292,-3.44794,5.87463,13.5955,-3.90547,0.053564,0.392376,-2.17549,4.02652,0.800942,2.14933,0.991305,-1.00534,1.93346,1.74799,-4.3887,-2.62983,2.12002,-3.97726,-2.37985,1.92724,-3.91126,-1.80145,3.29901,0.515867,-2.07875 10155.9,10125.9,9241.01,-1.21278,1.24353,0.0902419,-1.38693,3.90257,17.0687,-1.7671,-0.621263,-0.743581,-3.56603,3.19768,0.515647,2.83626,-0.394058,-0.965446,2.53295,1.02968,-3.73706,-0.646373,4.19926,-3.90665,0.100245,2.07717,0.65145,-0.4389,3.45695,1.30478,-2.26372 10156.9,10129,9233.19,-0.519545,3.45514,-0.128203,0.470911,-4.34917,11.6069,-5.37302,-0.249794,0.0908138,-1.64961,3.7305,0.887725,1.28233,-0.50548,0.651175,4.68216,0.481759,0.131141,2.83721,7.4517,-1.51906,2.02591,0.478488,2.8447,3.96564,4.21205,0.0189546,-1.26083 10160.2,10134.9,9226.61,0.334619,3.63902,-1.33005,0.500933,-0.0390483,15.3466,3.49804,-1.22599,-0.443012,-1.29729,1.85728,0.83413,0.663791,1.08815,-1.61332,2.35978,-1.91003,-1.54128,7.06018,8.52392,-0.0931056,-0.631766,-1.8937,1.21041,3.92464,3.0125,0.582016,-0.0552563 10165.1,10142,9222.12,-0.0501124,2.72845,-2.35233,0.461804,-3.24106,3.89637,-4.4752,-1.7395,-0.658087,1.46568,0.74815,1.9358,-1.37579,1.26993,0.248403,2.1501,-1.97865,2.84403,4.93078,6.34449,2.55208,-1.66616,-1.28941,-0.85475,2.44335,3.28626,0.575625,0.0867697 10169,10147.2,9219.92,-2.57524,1.55278,1.64717,-0.408592,2.78686,3.93608,-3.35557,-1.05071,0.358949,-1.71793,1.23509,0.730307,-0.807758,0.469476,-0.799756,2.26666,1.42763,2.57756,3.31921,4.24278,2.32673,-1.92157,-0.625841,-1.7385,0.55312,2.469,0.416022,0.102824 10167.7,10149.8,9219.39,-2.61236,0.265041,4.14099,-1.10443,5.68968,5.75872,0.437178,-1.27371,-1.44794,-5.50529,0.962099,-1.7594,-0.014506,-1.47838,-2.10998,2.88166,2.32266,2.31558,3.04189,2.76494,1.13588,-2.76241,-2.5749,-1.37983,-0.132212,1.62609,0.00182996,-0.567092 
10161.2,10151.5,9219.88,-1.00231,0.225002,2.94421,2.03312,-0.355979,4.16591,-0.636307,-0.980578,-3.17075,-4.4683,-0.0413473,-0.96548,-0.194949,-0.798368,-1.08568,3.94015,1.20872,6.21739,0.493017,0.663456,-1.20346,-2.76074,-4.99576,-0.484664,1.27829,1.87168,-0.0347963,-0.649195 10155.5,10153.9,9220.83,-0.939771,0.647249,0.0634509,3.2582,-1.62031,4.0693,-0.997477,-0.169163,-4.01209,-4.20755,-1.14083,-0.040949,0.676499,1.0769,-0.637069,2.85891,0.53402,4.18699,0.666861,0.369829,-2.63692,-0.336214,-3.73798,1.47577,2.81105,-0.292838,0.0270106,-0.151526 10154.1,10157.5,9221.67,-1.65802,1.59847,-3.57612,1.52401,6.37221,4.48866,-1.46299,-0.915699,-6.98915,-0.340048,-0.952717,-2.18866,-0.811792,-0.642645,-0.622625,-0.300884,-1.00057,-1.15759,2.44751,2.6773,-1.823,1.29837,-1.91591,2.49204,1.93197,-3.59974,-1.91245,-2.4109 10154.4,10160.7,9221.98,-0.583463,-0.108757,-4.6507,-0.0693877,5.35637,4.425,-6.56889,-1.82597,-8.57191,2.85503,-1.05825,-2.33955,-3.22781,-4.76081,2.05753,-0.861931,-1.83229,-0.124382,0.503483,2.18131,1.30665,2.42826,0.824233,3.84653,2.09007,-3.3925,-4.31649,-3.96112 10153.4,10159.2,9221.68,-2.76485,-4.09131,-2.87698,-1.10712,12.5336,12.9839,-4.34652,-1.87041,-6.50663,-1.43881,-2.78497,-4.09349,-3.27711,-7.58611,-0.918956,-2.43732,-1.68029,-2.93885,1.37614,1.00354,-0.202025,0.252735,-1.35224,2.14941,-1.22668,-3.85694,-3.91196,-5.39514 10153.1,10150.6,9221.82,-3.95579,-6.11602,-1.95691,-0.571033,7.36799,2.23424,-8.23593,-1.15065,-2.89936,-3.34966,-3.42278,-4.92737,-4.22729,-7.57776,-1.53936,-2.4826,-0.485854,-2.05301,1.35048,0.235875,-0.851581,0.299046,-3.65228,0.452501,-2.53126,-4.14097,-3.0318,-6.032 10156.5,10138.1,9224.22,-1.72219,-4.81284,-2.04034,3.64429,-3.40667,-8.21149,-2.06758,-0.247629,0.240041,0.844032,-2.55693,-2.29071,-5.62686,-4.10255,0.955484,-2.58578,-0.573095,1.96046,-2.89531,-2.47853,1.00662,1.59082,-2.31097,1.60096,-0.355857,-3.59741,-2.54995,-3.16362 
10162.5,10126.5,9229.66,-1.48624,-2.31864,-1.19917,5.07688,-2.15075,-4.48733,6.81643,1.19375,3.4529,3.66948,-1.49639,-1.71619,-5.51437,-1.29231,-0.407537,-4.604,-2.54282,0.0824236,-5.27449,-4.81883,0.767691,-1.39492,-2.55861,-0.325428,-1.75464,-3.59903,-1.89829,-0.732932 10167.7,10118.7,9237.56,-1.06333,-0.880843,-0.709075,2.8371,-10.0447,-10.4348,-2.5904,3.18465,5.97115,6.33779,-0.55058,-1.01646,-4.14332,-1.6247,-0.0193591,-4.01402,-3.73144,0.38443,-5.50468,-6.41294,-0.295721,-3.62009,-2.70822,-3.1355,-4.45086,-2.10376,-1.79258,-1.22716 10172.5,10116.9,9247.18,1.551,0.130326,-0.490568,5.87654,-14.5436,-8.35183,-0.790109,3.39107,4.7174,8.28156,-0.0057788,2.6686,-1.84943,-1.48071,1.03911,-4.0934,-3.48936,2.7605,-6.22541,-8.72046,-2.487,-3.9855,-3.15508,-4.85806,-6.30628,-0.1826,-2.22861,-1.91313 10179.7,10122.6,9257.78,1.5355,1.00586,-2.46594,5.55739,-10.6179,-9.89219,1.01847,2.02002,1.55047,10.3651,1.59035,2.3257,-3.02423,-0.681756,0.379055,-4.13859,-2.86252,2.65539,-7.09955,-8.4785,-1.80811,-2.44766,-3.84586,-6.08215,-4.18234,0.309597,-3.66089,-1.78168 10188.9,10134.4,9267.84,0.423127,-1.44673,-6.16369,2.54558,-3.2605,-10.2788,1.93481,-0.460125,-1.55478,7.53447,1.04311,-2.037,-5.33297,-0.715827,-0.912315,-4.00679,-5.27357,1.32517,-7.02947,-5.6844,2.49,-1.1701,-4.14164,-4.46692,0.160721,-1.23591,-5.46575,-0.678645 10196.3,10145.5,9275.21,0.204833,-4.851,-9.24744,3.38063,-3.90706,-1.89916,-0.318999,-3.05687,-4.83175,3.88926,-1.68472,-4.52857,-6.76493,0.053409,0.356074,-2.44354,-9.25902,3.95243,-8.99635,-3.68403,4.07743,-1.41439,-4.06526,0.784286,2.50666,-1.59161,-6.31937,0.0761621 10200.4,10148.5,9278.92,-3.06966,-5.752,-6.27773,-0.452092,4.18213,13.2473,-12.0757,-4.47092,-6.49884,-5.96616,-4.08975,-9.08064,-3.65565,-1.03612,-1.9757,-2.79369,-8.22081,-3.13926,-2.68074,1.98539,-1.47914,-4.27865,-6.82097,-0.0420558,-2.72616,-3.80964,-3.69263,-2.81706 
10202.3,10144.3,9279.66,1.7621,-1.2767,-1.87182,1.61337,-6.80859,14.4514,-16.815,-2.07514,-4.63562,0.0307544,-1.49074,-2.29138,-1.18636,-1.08621,1.86862,0.689509,-4.2555,-0.913166,-4.04706,-1.13903,-2.95495,-1.4359,-3.45987,4.36607,0.619825,-1.53464,-2.06409,-2.58631 10201.6,10141.5,9277.89,2.73427,2.11183,3.79277,1.71546,-5.8859,13.3557,-11.3022,2.79327,2.37116,13.2011,3.98285,0.966107,0.039656,-0.715821,2.85166,2.34242,2.77476,-0.0888099,-4.98538,-3.4432,-1.83877,3.57211,2.68075,7.05565,6.45616,-1.54302,-1.24469,-1.49869 10196,10143.8,9273.55,-2.52737,0.202188,7.08167,-4.89952,6.71679,10.6699,0.756855,5.54471,7.25909,13.9583,6.39787,-2.37566,0.745793,-1.45474,-1.09404,0.910205,7.21143,-6.92492,-3.24203,-2.89701,-0.543452,6.07649,7.33376,6.57894,6.15484,-4.40884,0.0587056,-1.11052 10186.2,10147.8,9267.63,-4.31786,0.145523,8.74123,-1.12372,3.61382,5.90919,-2.20636,4.87121,7.93339,10.8223,5.77747,-1.02016,1.70524,-1.23974,-1.99873,1.22043,7.18349,-2.02393,-4.52471,-1.19367,-1.87015,5.60664,6.92162,5.30532,3.03549,-3.16865,1.33872,-1.3693 10178.3,10151.3,9262.07,-1.01371,-0.36759,7.07326,3.03463,-3.67644,6.41668,1.01659,3.32806,5.69645,6.11989,4.17302,3.13986,4.40199,0.31144,-2.58094,-0.0539033,4.16067,1.49299,-3.2753,-1.39228,-2.172,3.33149,4.19598,3.46064,0.616277,-0.818505,3.98959,0.698301 10177.2,10154.3,9257.94,2.09186,0.0766925,2.17884,5.08344,-13.9717,-0.882929,-3.84368,2.86526,4.57806,7.77504,4.75117,6.29349,4.58116,4.04706,1.06485,0.914494,1.84175,7.12093,-3.92066,-3.04038,-1.76589,1.29071,2.74094,1.46176,1.98937,3.12251,5.09485,3.84087 10179.4,10155.4,9254.74,0.187596,-0.882072,-0.665652,4.15319,-3.56212,6.25634,3.46947,2.99756,3.30879,0.859046,5.1349,3.91232,5.90056,6.60019,0.839946,-0.162343,-0.484405,2.65509,-1.8674,-3.50916,-5.10299,-1.60522,1.28388,-0.0295086,1.05,2.81748,5.21994,5.53563 
10178.8,10153.1,9251.26,-1.91139,-0.154839,-0.832651,7.32065,-8.14661,3.20829,-4.61065,3.9011,1.20806,1.29028,6.11631,4.24084,4.66918,7.38927,3.1094,1.72009,-0.436683,6.06925,-3.83738,-3.64103,-8.35166,-0.222316,1.74303,3.43329,2.82215,3.91599,3.2218,6.05878 10175,10149.2,9246.46,-3.00223,-0.829219,2.18951,8.12634,-8.29635,3.98254,-2.55022,3.58933,0.0476173,2.00734,2.85452,5.13863,4.39434,5.86178,1.57419,0.321093,2.11151,4.62819,-0.677836,-1.98205,-7.44972,1.36379,2.52895,5.12261,2.10196,3.15929,2.77152,6.16477 10170.8,10147.7,9240.32,-2.09934,-1.33891,3.77143,6.49402,-6.43302,-0.0826344,0.87837,1.12061,0.421557,1.06025,-1.52903,5.64507,3.68263,3.49536,1.25096,-1.4957,2.92854,4.60413,2.40658,-0.645265,-3.32217,0.987715,2.60908,1.94117,-0.424246,2.85508,2.71473,4.88469 10167.3,10148.7,9234.04,-1.71112,-2.89318,3.67043,1.66277,3.35424,4.57631,10.1924,-0.35173,1.35064,-5.80931,-1.82085,3.64176,4.57117,2.2882,0.924739,-2.41648,2.22467,2.19365,5.80375,-0.426137,-2.32705,-0.919332,2.09081,-2.34116,-2.25007,1.71251,3.40172,3.5108 10165.7,10149.1,9229.23,-1.45001,-3.05548,2.45599,-0.349391,3.71978,4.53119,5.144,-0.0754888,2.20722,-6.90377,0.948441,2.13514,3.08117,1.83942,2.86791,-0.010419,2.66035,5.23219,5.6626,-0.804354,-2.37724,-1.67323,0.673861,-3.53649,-1.59081,1.76997,2.75549,2.29186 10167.4,10147.1,9226.8,-1.49928,-2.70714,1.88393,-0.842721,-0.225431,3.25531,1.41947,0.140255,3.21042,-3.88608,1.41104,1.86088,-0.091131,0.642157,1.94581,0.307133,3.18746,6.22574,4.30938,-1.01513,-1.1936,-1.8575,-0.588364,-1.42784,-2.08205,1.85519,1.46316,1.06047 10171.1,10143.9,9226.48,-2.01672,-2.40053,3.06391,-0.0599903,-8.34303,2.94718,-5.04409,-0.199276,4.0892,-3.68083,-0.226057,2.75547,-0.686676,-0.843757,0.670264,-0.458086,3.08212,7.11729,2.84836,0.933537,-1.50789,-1.59001,0.179663,0.0589795,-2.55704,3.42709,0.775783,0.360096 
10175,10140.6,9227.89,-1.34782,-2.60865,2.14445,1.39294,-10.3608,4.5868,-8.2559,-1.78039,0.356678,-10.0047,-3.28868,2.87133,1.85333,-3.67234,1.53223,-1.27653,0.113475,6.97877,4.49731,3.38158,-3.24882,-2.09817,-0.213742,-0.816136,-3.92766,4.36792,1.46638,-0.25462 10179,10139.5,9231.01,-0.683001,-1.14693,0.835389,1.45465,-4.93888,6.92044,-3.2459,-1.76518,-2.11784,-11.5638,-3.99539,3.25477,2.97649,-3.54233,2.62301,-0.286071,-1.99677,5.44349,5.35012,2.55683,-3.04093,-1.82791,-1.42661,0.583625,-2.6178,3.43693,2.29735,-0.308687 10185.5,10142.2,9235.77,-0.0852919,0.0218383,0.522022,1.091,-4.00515,-0.71681,-2.72016,-1.24891,-1.4593,-5.53454,-2.81228,2.98724,1.40275,-1.35994,4.37674,1.00841,-2.02092,6.34309,4.01241,0.223476,0.719167,-0.617158,-1.79277,2.19906,-0.00915837,1.60933,1.1106,-0.276707 10194.7,10147.7,9242.28,-0.507821,-1.45713,1.82236,1.06383,0.990703,1.16431,3.40878,-1.35424,0.436421,-3.7364,-2.82733,0.844561,2.18188,1.42103,2.14788,-1.48658,-0.956157,3.31294,2.03859,-1.09837,2.11718,-0.147919,0.113767,0.665977,1.0134,-0.758268,0.662046,1.48327 10202.3,10153,9250.68,-0.953894,-1.28733,1.09826,0.183582,-2.63676,-4.1377,-2.89907,-0.851983,3.07691,-0.452803,-2.18838,0.00930997,2.87142,4.0314,0.911046,-1.55443,1.18147,4.24956,-2.48362,-1.23019,1.72571,2.11001,5.29268,-0.281886,3.31927,-0.100871,1.85826,4.09941 10205.4,10156.4,9259.89,-1.27754,0.134823,0.181405,0.430733,3.94306,1.54036,2.99815,-1.16285,4.70226,-4.24342,-1.81256,1.00154,4.93307,6.24027,-1.59843,-1.48742,2.34844,2.10305,-2.00905,-0.662325,0.626241,1.17997,6.74123,-1.67701,1.35772,0.491316,4.32271,6.53414 10204.9,10157.9,9267.94,0.0906612,2.16352,-0.379486,5.42194,2.73054,2.84047,-1.4914,-1.83181,4.02307,-5.15449,-0.262248,3.79351,5.21678,7.80905,0.384689,1.27337,2.9796,6.90988,1.28339,2.20996,-0.91791,-0.163496,3.78903,-1.75168,-0.655347,2.9127,4.88667,7.66747 
10203.5,10159,9273.39,2.81598,1.22437,-0.368556,7.79675,3.42922,7.94279,4.57077,-0.708312,0.0968463,-6.10539,0.906129,5.55489,5.11842,8.21484,-0.0671665,1.22889,2.37144,6.24544,4.97372,3.9233,-2.49967,0.267274,-0.310124,1.09266,-0.410233,4.04567,4.74621,8.0612 10203.2,10162.2,9275.77,5.91857,0.355765,0.897437,11.4606,-3.5509,6.21936,2.57301,-0.0103725,-3.12789,-4.93913,0.601331,6.94209,5.77388,6.93334,1.15761,0.716978,2.28439,10.4648,4.58557,4.39511,-2.76356,2.73426,-1.51427,4.03252,2.99548,5.47757,3.66414,6.66569 10203.5,10167.2,9275.21,3.60261,-0.370029,0.212296,6.53742,-1.17501,1.39057,4.60494,-1.59955,-3.36286,-6.83681,-0.619753,2.05525,7.21718,4.0699,-0.311278,-1.80144,1.07578,6.02142,4.81799,3.05296,-1.94492,1.84126,-1.66326,1.40391,1.77364,2.95825,3.1993,3.61198 10203.2,10169.7,9272.52,1.94895,1.27875,-0.411546,7.45768,-3.75161,0.551798,7.13428,-3.82068,-2.61405,-4.51085,-0.839975,-0.654388,7.59238,3.63367,1.11679,-0.895324,0.0589114,6.72608,0.605615,-0.28023,-1.84675,-0.134175,-0.468956,-1.06577,2.10307,1.19208,2.14254,2.35948 10201,10166,9269.14,-0.454618,0.774031,2.06017,2.8462,-0.622985,0.18548,5.53147,-2.50822,-2.46147,-4.96779,0.0109421,-5.95039,4.88549,1.45711,-1.36876,0.21175,1.58667,0.959389,-1.72767,-0.999701,-1.91612,-0.271218,-0.271307,-3.60937,2.2528,-2.81471,1.29832,0.342989 10196.9,10158.5,9266.51,1.16537,-1.9421,4.60098,6.66208,-8.91079,-4.05041,0.977918,-0.375912,-2.52562,-2.44083,-1.83608,-5.04574,0.870179,-2.88837,0.903319,2.45464,2.77487,7.13809,-7.32993,-2.29902,0.410437,1.61472,1.76486,-2.68616,2.88565,-3.79142,-0.830458,-1.20118 10194.1,10152.5,9265.18,-4.11534,-5.864,4.81522,5.05616,0.145339,-4.93641,2.59855,0.656712,1.10696,-4.83177,-6.68192,-7.2593,-1.01756,-6.50992,-0.623669,0.165413,3.83811,5.84041,-5.84841,-0.103661,1.98729,0.416145,1.34348,-6.16515,-2.67871,-5.57128,-1.65554,-3.26762 
10194.1,10148.4,9264.07,-6.59722,-4.92656,-2.01588,3.7417,0.726794,-18.2936,5.15057,-0.276157,1.50739,-0.538248,-8.52874,-4.00362,-4.55022,-5.27015,0.604573,-0.930054,-0.109161,8.19838,-8.17669,-2.1092,4.17484,-1.56197,-1.02102,-5.8341,-5.50376,-1.7134,-2.50895,-3.06608 10193.9,10142,9261.25,-7.62788,-2.98611,1.9356,-1.40885,17.3716,4.06957,22.1809,1.39972,5.64224,-7.94302,-5.59134,-1.45901,0.439725,1.11211,-6.73411,-3.11746,1.4598,-4.78344,-2.09513,-0.404037,0.473396,-4.22587,-2.43839,-5.70551,-5.26427,-0.515338,1.20082,0.113119 10190.4,10132.9,9256.55,-0.061965,0.47587,-3.01478,1.28661,-2.15014,-14.2047,7.89898,0.463674,0.911903,2.0883,-1.64338,3.11185,-2.21723,0.781415,-1.37312,0.396228,-1.38267,3.09944,-1.8496,-1.29836,2.6087,-3.15966,-2.03297,-3.33185,-3.23065,2.92606,0.328003,-0.0324179 10185,10126,9252.36,-0.460313,1.71643,-3.7396,-2.47922,-1.49725,-15.3645,-1.80975,0.715758,-0.981069,-0.691494,-0.794101,-0.106849,-2.08179,-0.30971,-1.53311,0.428815,-0.320026,-0.221114,2.28648,0.175576,3.04606,-1.33911,-0.290353,-5.37868,-3.63253,0.919151,0.306196,-0.421839 10178.6,10124.8,9251.04,-1.00256,1.33259,-4.2472,-1.03971,2.95821,-4.55752,1.84476,0.117356,-4.36831,-4.27268,-1.02576,-0.886254,0.661063,-0.0446314,-0.718596,-0.508343,-2.00182,-0.337999,2.57329,-0.613947,2.18595,0.685998,2.2221,-1.4549,-2.89677,-0.0111036,1.2411,0.83044 10170.8,10127.6,9252.97,-1.71108,0.0714348,-2.91875,-0.0818013,10.0027,5.28964,4.84662,0.115636,-5.97389,-2.97492,0.466922,-1.16018,3.14319,-0.484977,-0.73996,-1.40938,-2.86898,-1.18229,2.85098,1.59393,-0.709864,0.769892,0.0526875,0.667581,-4.09633,-0.130706,2.87503,0.28772 10163.4,10130.8,9256.69,-0.0482655,-0.561906,-4.41924,-1.93638,1.00001,-3.80859,-6.74655,-0.693966,-6.90741,3.83606,-0.443929,0.133173,1.32042,-4.12952,2.21239,-0.401666,-2.83084,1.48444,3.60821,4.7162,0.0479322,1.57325,-2.9423,0.781086,-3.57562,1.01359,1.5974,-1.03302 
10159.1,10132.9,9259.9,0.830676,1.38376,-3.59798,1.88876,1.90766,6.33722,1.16568,-1.88109,-5.49532,7.56995,-3.97276,2.47056,-1.10217,-4.02745,0.530141,-1.80729,-2.44923,1.11112,6.04583,5.79514,-1.61378,0.146823,-4.31812,1.65679,-0.82556,0.385538,-1.6035,-0.921055 10159.8,10135.2,9260.63,-0.16576,1.00018,-5.12473,0.442361,0.505831,-5.64864,-2.63413,-2.52592,-5.46478,4.95174,-4.3147,0.782684,-5.73615,-4.82371,0.266276,-1.86669,-4.0481,-1.31822,9.03428,5.18538,0.835431,-1.04748,-4.21294,1.0615,-0.105573,-1.22812,-5.24566,-3.63422 10165.2,10138.1,9258.46,0.205477,-0.680098,-4.46762,5.26891,1.18115,-1.68502,7.13137,-1.22722,-4.01706,-1.7858,-0.511666,3.55446,-3.85553,-2.43205,1.3525,-0.694302,-4.16672,-0.729833,7.26617,2.38627,0.742375,-2.04911,-3.24066,2.72775,2.10783,0.115275,-4.78462,-4.34396 10171.6,10139.6,9254.61,-1.51268,-2.23477,-5.13237,-3.29461,-0.317239,-10.5071,-7.94002,1.87205,-2.15615,-2.57627,4.52526,1.46446,-2.39092,-3.68309,1.44927,1.27351,-2.10555,-3.67494,7.0263,3.64847,0.370668,0.612656,-2.452,4.76347,5.31087,1.21101,-2.18927,-4.86589 10174.6,10139.6,9250.85,-0.380976,0.430706,-4.77251,1.24603,3.57465,-3.14504,-10.8805,1.4131,-3.82203,6.1265,4.05681,1.86576,-2.69539,-3.84931,0.571097,0.0445532,-3.61574,1.0929,5.45496,4.67637,-2.69117,0.376736,-3.44843,8.26613,5.44059,2.39248,-1.35143,-3.43895 10173.2,10141.8,9247.9,-0.967231,0.660605,-0.333774,0.682442,10.1733,9.80472,-4.02844,0.296976,-2.0856,1.70749,0.105393,-0.302007,-2.02762,-1.68176,-2.57321,-1.85542,-2.20576,-3.56605,7.81712,4.57148,-0.717533,0.00661063,0.070936,7.88567,3.00205,-0.188925,-1.30646,-0.417109 10169.8,10147.8,9245.05,1.57911,1.89614,-1.23894,5.44327,1.1255,2.7455,0.888702,-2.69789,-2.29535,1.37374,-2.16695,0.277041,-2.61632,-0.168021,1.19527,-0.966804,-1.39634,2.02717,6.13068,1.74285,2.61838,-0.673957,2.42798,5.71141,1.0237,-0.190537,-2.48355,-0.424022 
10166.9,10152.4,9241.4,1.48812,1.56883,0.00439658,-1.99079,-5.3945,-7.45076,-2.79497,-1.09824,0.438405,1.08335,0.567998,-2.12211,0.537132,0.235065,2.13962,0.850241,2.33283,0.11668,5.71046,0.316621,2.37782,1.5783,4.38674,4.44102,2.85837,-0.867284,0.197126,-0.632035 10166,10149.9,9237.21,3.10346,3.20745,-0.0787972,3.26164,-1.99167,1.15174,7.73898,0.388067,-1.3872,7.93093,2.89628,-0.846609,2.95243,1.10786,0.0356645,-0.191303,-1.48335,3.06518,0.833731,-2.48298,-2.62814,-0.329278,-0.0454046,4.84244,1.50962,-0.571214,2.28968,0.0896905 10169.4,10141.9,9233.72,1.54047,2.79665,0.872984,0.435893,0.341067,4.50191,6.31086,2.24353,0.0763229,5.33021,2.30696,-1.94916,2.28551,1.6759,-3.55737,-0.57595,-3.31446,-1.28349,0.109544,-0.911539,-3.08755,0.149125,-2.57658,2.65457,-0.759677,-1.72314,1.73795,1.22082 10175.5,10134.5,9231.85,3.08721,1.31195,-0.463831,-2.78365,-16.0641,-12.4959,-7.90321,1.44639,2.2521,2.09953,-0.628689,0.674957,-0.991746,0.999703,0.501374,1.08647,-1.9555,-0.457535,-1.969,0.140249,0.679574,4.05153,-1.26929,2.9472,1.23177,0.0460567,-1.18548,1.19414 10178.5,10132.3,9231.94,4.8578,-0.156201,-1.83619,3.45539,-10.5983,-4.40534,-3.25278,-1.48511,1.7839,1.07398,-3.79721,3.44697,-0.661031,-0.19397,1.51898,-2.78611,-1.58924,-1.02247,-4.03291,-0.779814,-2.72459,1.42865,-4.44874,1.96164,0.024013,0.769821,-1.68183,-1.09525 10176,10135.5,9234.24,3.98434,-2.9881,-1.82932,-3.45496,-4.37718,-1.32479,-6.81161,0.242295,3.63988,0.773917,-2.92089,1.50769,1.03257,-1.29175,0.607123,-3.32519,0.794345,-7.2134,-4.18473,-2.11878,-3.48641,2.04926,-1.83971,2.5711,1.8547,-0.444122,0.204744,-0.633906 10170.3,10141.1,9238.24,4.5574,-1.21766,-1.92884,-3.3891,-4.53289,-3.61119,-11.1428,0.87067,2.52674,6.28098,-0.916225,0.833349,-0.285056,-2.02874,2.83162,-0.822357,0.836116,-2.02452,-4.36166,-2.46534,-2.40599,3.53798,0.439996,2.8824,2.66576,-0.190266,-0.411649,-0.335746 
10164.8,10146.9,9241.73,1.14271,0.21175,2.54403,-5.97996,8.86795,9.92082,0.583279,0.92891,3.1377,1.52082,0.653327,-2.04189,-0.909795,-1.88382,-1.45444,-1.72465,2.94817,-6.9659,0.661566,-0.779148,-2.33549,3.61435,1.90115,-0.709103,0.572663,-2.44443,-1.61985,-1.24632 10161.8,10151.9,9242.42,0.429305,-0.24402,1.54324,-0.758714,1.99988,2.30697,-0.150645,-1.67843,-0.372931,2.68223,0.974669,-2.18675,-3.69726,-3.84373,0.315076,-1.61503,2.02219,-0.439987,1.5067,0.347441,-0.468043,1.85512,2.51346,-3.61534,-1.61311,-1.68631,-4.32277,-3.31289 10160.6,10154.5,9240.5,-1.6783,-2.7916,3.79283,-1.46484,1.8842,7.0456,3.61276,-2.08564,-1.14902,-3.90469,1.00738,-2.71903,-1.12392,-2.56102,-0.564502,-1.26929,2.87817,-3.80446,2.16188,1.69189,-0.17359,-0.806729,4.45158,-4.99401,-1.9224,-2.1335,-3.41399,-1.5215 10158.8,10152.9,9238.94,-1.26294,-1.55708,2.47997,-0.37092,-5.35681,-1.99801,-4.61673,-3.19995,-3.63982,-3.59422,0.268397,-1.15304,1.21312,-1.94008,2.37467,0.463918,1.03699,-0.249188,1.94821,3.1095,0.656428,-1.26258,5.17342,-2.5293,-0.911564,-0.727538,-1.60047,-0.657086 10157.1,10148.4,9241.47,-0.729297,1.90628,1.50273,8.02209,4.5029,7.25435,-0.943104,-3.87229,-5.15977,-0.605295,-0.786266,-0.00624273,3.2036,-0.99694,1.83674,-0.424322,-0.759934,4.69506,3.12589,4.93905,-1.14094,-2.37706,0.896838,-1.15642,-2.07425,-0.341439,0.651623,-1.90525 10159.3,10145.1,9249.53,-3.61489,-0.368775,4.8318,0.654323,13.8953,20.2332,9.01061,0.740005,1.06482,-1.98312,1.43178,-2.39481,5.44965,2.23927,-2.07082,1.84445,3.36316,-2.3874,5.82791,5.13504,0.331121,1.17574,4.11636,2.46863,2.53744,-2.31289,3.73605,1.261 10166.4,10146.2,9260.39,-0.690065,-0.196533,2.57149,3.28245,1.26863,3.07282,2.3288,0.343504,0.7493,7.7189,2.47287,-2.19401,1.83016,1.49389,2.04941,5.57015,1.68587,7.37325,4.33035,3.86901,3.21355,1.31074,4.30838,4.34097,4.14204,-0.792683,1.91579,1.4487 
10174.6,10153.3,9268.63,0.973864,0.288282,4.67663,-0.604468,1.35396,1.77193,6.1612,0.928573,3.56181,0.301872,1.61496,-1.94891,1.37811,1.784,-0.829802,4.5252,2.98522,2.05165,3.03006,0.33278,4.9167,0.692046,4.78248,3.89965,4.1223,-1.28055,0.902128,2.44014 10179.4,10165.9,9270.91,0.383028,0.372248,2.91142,5.26445,-4.52355,-0.481389,-1.47582,-0.0802922,4.09074,-3.4789,-1.84054,-0.641665,1.60157,2.15213,-0.406849,1.24052,1.05589,7.69175,-4.79723,-3.42058,1.48542,-2.69221,-0.604027,-2.8823,-1.41943,-0.386671,1.59434,1.71786 10180.9,10180.3,9268.76,-7.39108,-4.07938,1.96913,5.84801,-1.99672,13.1344,-8.45676,2.45664,8.74322,0.00440195,-3.70354,-4.02376,5.09873,7.07674,-2.94009,-6.27334,-2.18896,9.06615,-15.5002,-6.518,-12.659,-9.2251,-8.78964,-16.0646,-15.2285,-1.36974,7.28841,2.96689 \ No newline at end of file +10125.9,10112.8,9219.5,-7.39443,-8.74936,7.28395,13.7953,32.2328,32.4809,18.958,-12.2383,-6.86466,-23.0912,-16.425,-5.70842,11.2467,-1.58574,-4.53717,-17.3842,0.912601,13.0428,2.44622,2.08875,-8.74373,-9.47217,-6.87574,-8.11158,-14.54,0.414787,6.04424,0.540389 10136.8,10115.1,9222.54,-0.120582,-1.94906,6.92247,4.75197,11.0735,0.972766,10.2285,0.717545,-1.04488,-7.64424,-2.10875,-2.44368,1.52535,-1.14131,-1.72589,-1.1247,-0.993354,2.98318,1.29855,2.0688,1.00297,0.135373,-3.25325,-3.12065,0.913296,-1.7868,1.58829,-0.735248 10148,10122.2,9228.62,4.24336,-0.689111,5.12782,0.132862,-6.64526,-14.7952,5.19361,3.68198,2.77598,-0.691866,1.07559,1.71444,-1.30287,-2.75746,1.74208,4.75944,1.80799,-0.064464,2.37174,1.09905,3.5756,2.98064,-0.238711,0.822007,5.07188,-0.864496,-0.208741,-1.31367 10156.6,10132.2,9236.11,-0.047434,-1.79438,-0.767925,-3.78683,-2.46365,-12.9433,2.00586,-0.48292,1.16216,0.113706,-0.639879,-0.0445654,-2.82995,-2.22008,1.46544,3.70217,2.84476,-3.32792,6.701,0.982599,0.145487,0.0501163,-1.16747,-0.630382,-0.0550437,-0.0563951,0.0449386,-0.715988 
10162.9,10141.8,9243.46,-0.3687,0.640608,-2.93969,-0.37466,-5.42813,-8.55527,-4.70566,-3.62351,-3.94857,0.847112,0.357187,1.39279,-3.07124,0.779726,5.12671,3.62277,2.86265,3.44378,5.49842,0.895482,-2.1777,0.14728,-0.491475,-0.0257423,-0.32504,2.28464,-0.610659,2.01955 10168.7,10149.5,9249.62,-0.272231,3.00751,-2.20783,-5.50238,-1.65733,-2.39574,-6.82249,-1.5591,-5.38806,-0.315138,2.41171,-0.227563,-0.306796,1.26618,4.45885,3.55662,3.14737,-0.0497907,2.76691,1.04757,-2.50276,3.25334,1.90194,3.54754,3.2308,0.393197,0.115407,1.88919 10175.3,10155.8,9253.09,0.271133,3.11725,-1.24188,-5.32432,6.94595,5.40219,2.63329,1.77742,-0.434798,3.20784,3.1926,-2.12653,1.4207,-0.162939,1.57116,1.20026,2.14004,-4.36978,-0.074248,0.344989,-2.79157,3.57441,2.795,6.81971,4.61981,-3.15395,-0.556388,-0.951462 10181,10160.9,9253.62,-1.52186,-1.02665,-1.31765,-8.89055,1.45638,-6.40533,-8.20284,3.42071,6.34151,7.32703,2.81444,-5.56924,-2.07761,-2.82472,1.75969,1.56549,2.59032,-4.99642,-0.861721,0.661704,1.27294,4.24609,5.72265,7.93181,6.46356,-4.54558,-2.93302,-2.55741 10182,10163.1,9253.53,-4.12759,-5.01517,-1.383,-11.7032,7.03273,-0.354258,-4.14846,2.56836,5.49077,2.70724,-0.00938943,-7.91268,-3.33257,-3.77932,-2.70035,-1.95288,1.51899,-10.5021,0.604386,1.13765,2.8031,0.719838,5.10986,5.4321,3.01561,-5.05514,-2.51591,-2.29453 10178.9,10161.7,9255.33,-2.09727,-3.23639,-0.971464,-6.47564,-1.86208,1.47429,-8.69004,2.23012,2.64935,4.20852,-0.00802028,-4.11236,-1.54808,-1.73414,-2.21966,-2.31888,0.521142,-4.49634,-1.66003,1.37105,1.47741,-1.17943,3.52554,2.31201,0.381259,-1.24137,-0.930002,-0.860505 10176.3,10158.2,9258.8,-2.87976,-1.16821,-1.15587,-7.36873,-2.70663,3.69409,-6.23946,3.17083,3.67683,5.95472,2.6739,-2.5798,1.61294,2.31642,-4.31408,-1.6647,-0.422612,-6.13843,-0.39141,1.92345,-2.82275,-0.742784,1.68164,-0.706688,-1.87652,0.172975,1.51911,1.04727 
10176.2,10155.4,9261.93,-1.79655,0.511159,-2.91648,-1.19976,-6.01265,2.43062,-4.91165,1.64787,2.485,6.04132,2.79139,1.36683,2.36631,4.70105,-3.09068,-0.875835,-2.73203,-1.04036,0.0279962,0.57264,-4.70596,0.399049,0.109101,0.540718,-2.52779,1.90878,1.47212,2.48712 10177,10154.3,9263.36,-2.06935,1.47151,-1.59814,1.1621,-8.21806,2.74994,-4.8666,1.6535,2.86737,3.56179,1.87379,3.98852,2.20191,7.00018,-2.12026,-0.322149,-0.459427,1.99009,-0.386875,-1.65524,-2.88602,2.5405,3.09752,5.52644,1.72241,3.28467,2.06659,4.48929 10176.7,10153.6,9262.97,-2.47996,0.0736981,-1.18826,-1.40068,-2.38119,-1.33094,-3.87199,0.498621,1.31667,-0.952908,0.481976,0.0885501,1.11339,4.67043,-2.37383,-2.32579,0.991108,-0.25346,2.41941,-1.44295,0.0394728,1.67752,2.73018,4.10445,2.29859,0.993454,2.7469,3.39394 10174.9,10153,9261.77,-0.957748,-0.455644,0.885525,1.7746,0.0437147,0.878291,0.0855234,-0.572903,1.39546,0.00119098,1.69176,-1.96049,0.156938,2.84845,-1.18488,-2.65197,1.35428,1.98606,1.65427,-0.643756,-1.03602,-0.0406435,-0.236011,-0.961959,1.28125,-0.464305,1.75539,1.84618 10173.4,10153.5,9261.3,-0.583682,-0.792331,1.36077,0.644185,-3.55594,-0.618864,-4.88099,-0.136266,1.51362,2.73872,3.65897,-2.63062,0.416981,0.735765,0.533665,-0.326252,1.0146,2.83848,2.16063,2.30307,-2.01136,0.638055,-0.22921,-3.19692,0.947596,-0.379132,0.678065,0.747812 10174.5,10155.7,9262.24,-0.685336,0.856591,-2.63545,-0.959601,3.25442,0.791955,-2.20612,0.263046,-1.34292,4.47114,2.99912,-2.56858,-0.21931,-1.56389,-0.808263,0.311028,-2.34261,-0.965718,1.98615,3.50723,-1.41951,-0.258476,-1.16227,-1.73014,0.372641,-0.118946,-0.422557,-1.3986 10179.6,10157.8,9264.01,2.59538,3.68921,-1.9033,3.99249,0.109215,-1.86778,-4.51336,0.591929,-1.29086,1.52475,1.01934,0.773735,0.0652847,-3.00075,1.79923,2.1369,-2.11635,3.17035,-1.87907,2.19309,0.880052,-0.480886,-1.94369,-0.204693,1.63785,1.43004,-2.081,-3.24652 
10186.9,10157.6,9265.4,2.10402,4.02633,0.884264,0.1708,-3.27208,-4.9215,-1.0364,1.60796,1.70888,-1.43476,1.10519,1.26841,0.0627916,-2.97727,1.13683,2.82663,-0.301705,-0.592683,-3.81587,-0.70989,1.60855,0.103857,-2.48043,-1.22737,-0.312858,1.31617,-1.91269,-3.98886 10192.2,10155.4,9265.29,1.6824,4.26755,1.57687,1.43194,-5.98808,-2.25097,0.153789,0.168572,0.879003,1.68604,0.75956,3.65922,-0.869793,-2.49312,0.497574,2.41553,-1.34226,-0.127659,-3.59295,-1.56547,0.88849,-0.785242,-4.24845,-5.15572,-4.81836,2.77035,-1.44493,-3.44434 10193.6,10153.7,9263.38,1.6491,4.80854,1.08823,5.10222,-5.26833,5.52263,-0.997094,-0.959485,-1.52356,6.15147,0.897033,7.60472,-1.50848,-0.576994,0.845199,3.25263,-2.21353,2.36454,-2.11918,-0.480371,1.405,-1.24949,-1.88424,-5.50221,-4.39822,4.6832,-0.575266,-0.350337 10193.7,10153.5,9260.14,0.371243,3.4575,-0.922956,2.86612,3.70316,4.4652,-2.35097,-2.08567,-4.55866,2.05406,0.20181,5.48777,-0.851734,-0.932792,0.852325,2.66059,-2.76402,-0.836483,3.32512,2.58318,3.54953,-1.82575,1.03107,-3.58566,-4.1055,2.71087,0.64122,1.16036 10193.4,10154.1,9256.45,0.655998,2.95689,-0.961572,2.95967,6.90968,-0.0847335,-1.13659,-2.64581,-3.78971,-2.43015,-0.722449,3.08777,-0.234356,-0.603156,1.30068,1.14368,-2.23215,0.241084,3.91588,3.38796,4.07024,-1.08082,1.15617,-0.375163,-2.54369,1.29418,0.795869,1.31402 10190.3,10152.8,9253.2,2.59279,1.93007,1.93861,4.82647,-1.84288,-5.84018,-7.03235,-2.16958,-0.8999,-4.4747,-1.99497,2.40008,0.0349671,-0.825783,2.00993,-0.184404,-0.576706,6.30193,1.43455,3.63536,2.34484,0.148851,-1.22127,-0.718508,-0.716753,1.50537,0.412978,0.73252 10185.2,10148.2,9250.73,1.88291,-0.127643,2.41457,0.38457,3.28565,2.40364,1.07674,-0.352091,-0.192694,-2.80281,-2.45121,-0.746935,0.454781,-0.345492,-2.38393,-2.35152,-0.468918,-0.28004,0.207449,2.6636,-1.39254,-2.09536,-4.44811,-4.48824,-2.93117,-0.770421,1.19,0.219788 
10183,10142.2,9248.93,3.78484,0.701338,-0.71552,3.48407,0.454755,4.3743,3.68099,-0.668556,-3.42636,5.52772,-1.23863,-0.405148,0.665698,1.06479,-0.0251586,-0.48849,-0.847741,1.4814,-5.36764,-0.405219,-1.51485,-3.88226,-5.12764,-5.33767,-4.3365,-1.173,0.417418,0.415356 10185.4,10138.4,9247.93,3.11727,0.196163,-2.018,0.721283,-2.5075,-1.06349,0.331823,-1.2182,-4.01712,4.78444,0.452166,-2.16432,0.55673,1.61447,1.16718,1.44415,0.569846,-0.812131,-8.14324,-2.91296,2.43154,-1.45218,-0.730675,-1.0947,-2.25658,-3.52675,-0.361214,1.09266 10188,10139,9248.05,1.52249,-1.16117,-2.4591,-2.41492,-0.35832,-7.48161,-0.0490082,-2.1421,-3.52013,0.903896,-0.958215,-5.8036,-2.36788,-0.368615,-1.88998,-1.40466,-1.28791,-4.79995,-5.58563,-3.57656,4.13739,-0.274441,1.53352,2.93946,-1.96753,-6.76034,-1.87752,-0.324793 10186.8,10142.9,9249.23,2.29541,-0.414867,0.263844,-2.42527,-9.23597,-12.7958,-5.40665,-1.3296,-0.255947,1.05195,-3.09731,-3.83996,-4.40177,-0.0123634,-1.79533,-2.22933,-1.59891,-1.58539,-4.29444,-3.24283,2.73497,0.939395,2.25632,3.98042,0.672842,-4.87272,-3.0871,0.140664 10183.8,10146.3,9250.93,1.04007,-0.107056,-0.719832,-5.17314,-6.41206,-13.4527,-3.51115,-1.82372,-1.0661,0.164654,-4.87432,-3.16371,-3.16216,0.547311,-2.31938,-3.32366,-2.59406,-3.07878,1.07584,0.135595,-0.15385,-0.198986,-1.76614,-0.364142,-1.44816,-3.17832,-0.666637,0.539005 10182.5,10148.1,9252.57,1.58315,0.552138,-2.38854,1.84879,-2.25441,-6.8381,0.208721,-2.73312,-3.19332,-2.49192,-4.21087,0.445019,0.0651566,2.67403,-0.780414,-2.43461,-3.10543,1.48742,-0.123359,0.0321366,-2.00728,-1.30717,-5.02137,-5.05394,-3.39985,-0.233706,2.10556,1.51466 10182.7,10149.6,9253.33,0.671616,-1.8801,-5.19861,1.6691,-0.386439,-6.73637,0.390118,-1.36276,-2.8229,-3.74619,-1.53148,0.15594,0.934737,1.96014,-1.35363,-0.924511,-3.00858,0.653744,-1.84706,-3.59509,-0.247233,0.962108,-1.40552,-3.28119,-2.22432,0.0626129,2.48273,0.969888 
10182.9,10150.9,9252.01,0.0166707,-2.52456,-5.48285,2.26653,-2.03587,-6.50283,-1.00325,0.264499,-1.46362,-0.822672,-1.11829,0.403605,-0.734484,-0.382999,-0.186567,1.24812,-2.13095,1.80897,-2.82131,-6.15356,2.54337,2.39696,2.51379,2.41699,0.307725,-0.195503,-0.252349,-0.890546 10182.1,10151,9248.33,-1.21698,-1.52567,-2.334,0.102378,3.74418,-1.36756,3.51501,1.50357,-1.80774,-0.855037,-2.71284,0.0746735,-1.2904,-2.37263,-0.326812,1.37779,0.0811662,-2.04277,0.452769,-4.37491,4.60025,0.785458,0.944597,2.57121,-0.443829,-1.9031,-1.78376,-2.25217 10180.2,10149.4,9243.85,-0.498632,0.815261,-1.05027,1.32586,2.65892,-5.17029,-0.588453,1.63481,-3.33979,4.4087,-1.26981,2.01576,-3.03953,-3.66687,1.33091,1.62961,0.568999,0.53543,0.477935,-1.78405,3.91722,-1.12653,-3.07327,-2.27103,-2.21119,-0.0469714,-3.05949,-3.83303 10176.1,10146.3,9240.54,-0.464849,1.25223,-1.14736,-0.645201,4.96922,-0.805424,1.85313,1.43677,-1.45072,6.22509,1.54511,2.89442,-3.56094,-4.35854,-0.476689,0.39343,-0.929162,-1.07774,0.941846,-0.57756,0.363373,-1.13491,-1.30865,-3.06369,-1.8739,2.47973,-3.19611,-5.38414 10169.3,10142.4,9238.91,2.28739,1.91951,-0.759834,1.17008,-1.10807,0.137649,-1.76481,-0.427729,-0.592675,2.50623,0.607717,4.10404,-2.20382,-5.11375,1.80008,0.383348,-3.40396,4.33491,0.605228,-0.0871236,0.185566,0.480246,2.74078,1.48145,2.07534,4.96863,-2.65852,-5.78272 10162.1,10139,9238.14,2.03262,2.32633,0.46709,-2.26524,5.80967,5.85587,5.67759,0.185696,-0.246666,-0.787877,-0.201738,0.61348,-0.542043,-3.51173,0.345287,-0.426571,-4.01566,0.315299,2.10005,-0.391753,2.39343,1.28396,3,4.99164,5.3145,2.31592,0.0224444,-4.14279 10158.4,10136.9,9237.31,2.77556,2.83113,1.37245,1.19159,2.19923,-2.0116,3.1913,1.03754,-0.929092,0.870894,1.00256,-0.624392,-0.561338,-2.99529,2.23674,0.823539,-1.63024,3.75817,0.298891,-1.18515,4.54738,1.25951,1.91277,3.57793,5.44217,0.785618,0.025315,-3.27161 
10158.5,10135.5,9236.37,0.0672571,0.761886,2.35427,-0.889999,6.73976,-1.98269,8.45302,1.1398,0.0604089,-1.15193,1.32222,-2.47069,0.131408,-3.48238,-0.669944,0.753279,3.07189,-2.04262,0.174304,-2.32107,2.83224,0.708328,3.23848,0.984911,2.384,-1.28385,-0.548071,-3.32946 10160.6,10134.8,9236.46,-0.783525,0.239203,0.00548465,1.88108,6.83171,-2.89703,7.27976,-2.71585,-1.47417,2.12383,-1.04536,-1.14095,0.145875,-4.3962,-0.139564,0.781551,3.40043,-0.28834,-0.343608,-2.36391,0.0938093,-0.36295,1.0276,-0.578692,-0.619797,-0.489157,-1.92106,-4.163 10166.1,10135,9239.02,0.124276,1.29463,-1.44975,3.21172,2.53479,-3.38317,-0.20102,-4.72755,-2.14129,5.53743,-1.24849,0.994366,0.436372,-3.09635,2.19121,1.13794,1.52365,3.0586,0.622146,-0.699363,0.103461,0.316277,-1.73095,-0.195395,0.490618,1.44514,-2.50878,-3.62472 10175.6,10136.9,9243.9,1.67228,1.70099,-0.125799,2.04051,6.74509,2.05118,7.82124,-3.08565,-1.70842,3.37127,-0.160655,1.32998,0.57087,-1.46351,1.80831,-0.585194,-0.267853,0.719624,2.12333,-0.931791,2.61407,0.519467,-1.78038,1.70819,2.66646,1.47407,-2.48388,-2.6294 10184.4,10140.5,9249.09,4.05746,1.49391,3.1491,4.74869,1.42089,-7.65297,4.6083,-1.50292,-0.681543,0.792377,-1.54194,2.19467,-1.449,-2.54459,5.38937,-0.0662613,0.683022,6.46847,-1.151,-2.09676,5.40097,0.0884146,-0.584039,0.411805,2.87021,2.70096,-3.69024,-2.72328 10185.2,10143.8,9252.71,2.20708,-1.9117,6.2705,-1.38994,9.88462,0.984595,14.8745,1.09177,3.01497,-6.59006,-3.06879,0.864155,-0.352553,-2.42934,1.6214,-0.899998,2.90809,-2.62154,-0.748965,-1.78716,3.1828,-0.76616,1.51574,-1.80336,0.759499,1.08543,-1.48814,-0.830864 10176.5,10145.2,9254.8,3.08758,-1.24415,2.30133,1.5123,4.9996,-2.25743,5.71269,0.326257,0.862459,-5.32366,-2.15784,1.98295,-0.769376,-3.24456,1.73394,-1.18022,0.303592,1.19388,-1.18318,1.1848,-0.484859,-3.12715,-2.31674,-4.16244,-1.41399,2.32149,-1.0187,-1.70219 
10164.6,10145.4,9256.92,1.59078,-1.06701,-0.557541,-2.88977,3.22953,-0.245042,-0.474481,0.0498212,-1.16809,-8.33134,-0.306573,0.38113,0.242976,-2.39828,-1.29092,-1.68013,-0.127576,-1.94114,1.03024,1.7825,-1.44807,-2.86352,-4.13379,-1.78466,1.5241,1.16147,-0.513496,-2.30027 10156.4,10145.9,9260.21,0.0333157,-1.40254,-1.63643,-2.63202,2.15792,2.8366,-1.32406,-2.25364,-4.61227,-7.74587,-1.005,0.107792,-0.131513,-2.0428,-1.28031,-1.65736,-0.0589992,-0.767749,0.0451012,-1.23948,0.334266,-2.05544,-5.74107,1.40617,2.47259,0.129519,-1.22605,-3.50154 10152.5,10145.2,9264.25,-2.23854,-3.34598,0.871046,-4.48776,-5.12246,-0.367558,-7.49548,-3.04105,-2.99035,-3.84367,-2.67766,1.19195,0.695189,-1.99211,2.38266,0.800284,2.92667,1.82052,-0.796218,-1.82753,3.43662,1.60186,-2.49788,2.02216,2.59346,0.975508,-0.397427,-2.78437 10148.6,10141.1,9267.56,-4.64613,-5.4569,3.80281,-6.22039,0.554038,5.00519,-0.395733,-3.04225,0.570141,-6.95862,-4.49105,-0.00732036,3.78285,-2.09066,1.46914,-0.873643,3.95228,-2.08532,2.8568,0.749314,1.78963,1.02579,-0.808831,-1.60113,-1.17483,0.544949,1.95805,-1.27827 10142.4,10134.6,9268.73,-4.02228,-5.3818,4.39201,-6.57399,-2.68308,-0.146626,-0.297909,-1.28233,3.72363,-10.5635,-3.46562,-0.498293,3.92457,-1.10422,0.725311,-0.888612,3.1725,-1.82837,4.64182,1.32637,-0.56378,0.781271,3.29557,-0.557202,-0.712584,0.587691,2.76212,1.05325 10137.8,10128,9266.83,-2.98689,-3.62614,2.49614,-3.78405,5.33483,-3.24499,-1.4797,-1.49474,0.75769,-13.0722,-3.57543,-1.73535,1.13307,-2.81826,-2.67056,-2.75063,-0.407379,-1.38965,7.67619,2.2374,-2.93415,-2.1994,0.956463,-2.25511,-4.42128,-0.889014,2.30781,-0.144069 10139.6,10121.2,9261.84,-1.19244,-2.09691,-1.17019,-2.92359,1.84257,-9.64131,-8.2266,-2.48032,-2.29368,-7.41116,-3.60172,0.404837,-2.31741,-3.52505,-1.14341,-1.1367,-2.22469,2.93998,5.91064,0.841518,-1.68308,-1.06298,-0.398387,-1.68239,-3.53445,0.38234,1.02165,-0.403129 
10146.2,10113.8,9255.3,-3.35595,-3.34535,-1.74811,-10.4556,3.60927,-0.776329,-3.08604,-1.29687,0.835023,-5.76979,-1.7646,-2.22816,-1.31439,-0.382083,-1.73312,-0.792276,0.206848,-4.1992,4.29806,-0.830575,-1.71405,1.40452,2.00247,0.106559,-0.768805,-1.08451,1.11784,1.22578 10152.4,10107.8,9249.87,-2.49869,-3.87311,-1.98238,-6.90342,-1.23671,2.90852,2.97754,-0.581043,2.81778,-2.71728,-1.21684,-5.07044,0.497485,2.01224,-0.365556,-1.64542,1.17956,-3.76085,-0.573467,-2.58111,-2.12663,0.378165,4.18795,1.24581,-1.36196,-2.87649,0.482267,1.63454 10154.8,10107.2,9247.27,-4.01788,-5.39388,-1.72161,-10.3153,-0.251037,-1.57831,1.61553,1.18147,5.7765,-0.599766,-1.22598,-10.0294,0.895145,2.02015,-4.45992,-2.58818,2.98391,-9.45103,-1.41902,-1.29446,-0.55725,-0.180421,6.94249,-0.594659,-3.53394,-6.50742,1.38112,1.51458 10153,10112.2,9246.76,-3.24249,-5.01072,-2.02956,-7.46567,0.0264794,-1.5224,-3.31193,1.53111,5.32332,2.5335,0.40251,-7.05633,-0.711568,2.89381,-5.39998,-1.36446,2.04786,-7.02942,-4.53297,-0.88262,-0.357391,0.595822,6.5409,-2.84395,-2.64994,-5.7378,1.39939,2.97985 10148.7,10119,9246.16,-3.96002,-4.42756,-3.26432,-8.69557,4.03628,0.616301,-3.92147,2.76458,1.652,2.17356,4.22927,-4.5247,-2.33417,3.89508,-5.29918,-0.309883,-0.288513,-8.36711,-3.09529,-0.126421,-1.8539,2.38545,3.61409,-1.26649,0.429596,-4.19612,1.45711,3.95651 10145,10125.2,9244.17,-1.75695,-0.511195,-1.73883,-3.34742,-1.26592,5.24499,-3.03549,2.78645,-2.1334,0.220919,5.88292,0.160927,-1.7455,5.37331,-1.59599,1.91312,-0.631146,-3.16886,-2.94994,0.34822,-3.01289,2.84951,0.356135,3.47859,4.18276,-0.12287,0.984563,3.64398 10143.1,10130.2,9241.27,-1.71615,1.12867,1.04805,-6.57347,2.41341,16.2593,7.00371,0.924589,-2.71609,-6.2656,3.57183,0.37743,1.96421,5.66573,-2.3041,2.26799,0.668846,-8.32571,2.30148,2.66333,-1.75615,2.71555,1.44408,6.00224,4.85886,0.685304,3.03234,2.82015 
10140.7,10134.4,9239.05,-1.25992,2.46902,-0.556969,-2.76672,5.45596,12.4649,8.36959,-2.49709,-3.8708,-1.40646,1.38854,1.37064,2.12007,3.84209,0.459629,2.15086,-1.24194,-4.15365,4.52043,5.4809,0.876317,0.656659,-1.01116,2.09458,1.65028,2.77599,3.21635,0.381243 10133.6,10137.8,9238.32,-2.22442,1.37094,-0.787327,-1.05469,3.55443,5.14715,-0.0509983,-0.0905216,0.72894,3.96149,2.38061,1.75467,3.09083,4.18358,2.79613,3.29833,0.325666,-0.671704,6.07566,7.72379,3.13564,0.655668,-2.59152,-1.76199,1.58102,4.45884,3.34631,0.480564 10121.1,10140.7,9238.2,-2.17367,-0.866588,-2.79273,0.692199,10.1863,9.97874,6.04483,2.66482,1.76948,2.61332,1.9281,-1.1243,5.03132,3.85731,-0.443337,0.284932,-0.868815,-3.31091,8.51065,6.49177,2.23459,-1.67042,-3.77735,-2.781,-0.902713,1.50205,4.04064,0.197185 10110.8,10144,9237.47,0.303664,0.966366,-2.65365,4.69141,3.98147,5.09796,4.57488,3.26927,0.562439,5.41174,1.92471,-1.15766,3.6349,2.42314,-0.0874924,-0.0560302,-1.22366,1.9914,3.44357,1.69106,1.98031,-1.32375,-0.576816,-1.03349,0.269332,-0.300454,3.28264,-0.458562 10110.3,10147.7,9235.48,1.28867,0.940385,2.1165,-0.581377,-0.643187,-2.16313,1.69237,2.47912,1.37859,3.32286,1.26412,-0.720553,2.36863,-1.25903,0.0706914,0.944374,2.2859,0.229574,1.5842,-0.12766,4.43122,1.34327,3.34673,-0.404948,2.87655,-1.67866,3.04869,-0.25307 10116.7,10150.7,9232.33,0.394714,-0.833445,4.94793,-6.11826,9.22151,2.99358,11.1041,1.15853,2.93899,0.397365,0.0221406,-0.0976144,-1.13452,-3.42557,-3.72862,0.476803,3.69054,-8.12164,2.48493,0.363106,3.87676,0.504363,0.972674,-1.44388,2.15926,-0.828986,1.75931,-0.549928 10121.4,10152.8,9229.14,1.29508,-0.757006,3.12597,-1.6729,7.62364,-0.936804,6.48918,-1.03742,1.86227,-0.262351,-0.75051,2.31301,-4.8422,-4.5034,-2.66476,0.578808,1.27532,-2.04282,3.45288,3.01897,0.564668,-1.21876,-3.06331,-2.70583,0.257935,3.52846,-1.56111,-1.5308 
10121.6,10152.4,9226.86,0.677648,0.378414,1.31475,-2.61018,4.91454,0.37514,2.86121,-0.193973,1.93324,-4.63591,1.10695,3.14457,-2.96694,-2.19304,-2.99025,0.50097,0.165722,-0.200595,6.85438,4.63234,-2.47705,0.342532,-1.30419,-0.141339,1.63084,4.32707,-1.19328,0.76139 10120.5,10149.2,9225.49,0.499478,1.88224,-2.14427,-2.77288,10.6927,1.71766,6.49787,0.43981,0.0705592,-5.13201,2.57263,1.48076,-1.20267,-0.591255,-4.74193,-1.79266,-1.46188,-3.42451,8.04316,3.54243,-2.30088,0.0710442,-2.83238,0.653942,0.240506,0.904871,0.430945,1.6283 10121.2,10144.8,9224.89,1.35965,2.80608,-1.94166,1.75583,0.26227,-8.26437,0.567312,1.6259,1.60009,0.0627174,2.62631,2.65738,-1.31444,1.36503,-0.138702,-0.303116,1.07964,0.805711,0.6712,-0.0379901,0.596301,1.49046,-2.9437,-0.0854658,1.7116,1.14138,0.19577,2.11315 10121.7,10140,9224.64,-0.625981,1.46152,0.571473,-0.708952,-3.97306,-7.60183,3.54876,2.52756,3.43643,-3.37318,1.25185,1.95327,-0.430742,1.99167,1.38528,0.439469,3.35733,-3.21518,-3.33649,-3.33716,1.63613,2.87364,0.216347,-1.19264,2.34646,1.38095,0.250252,2.26893 10117.5,10135.7,9223.59,-0.644241,3.50756,1.18011,1.32346,-4.09529,-1.15572,8.91836,0.864807,0.810206,-4.21922,0.85698,1.54667,-0.984211,1.49262,0.424346,0.272079,0.55043,-3.11065,-4.92549,-5.21789,0.616593,0.933381,0.453042,-0.907799,0.816878,0.888407,-1.07882,0.897744 10109,10134,9221.44,1.24811,3.97674,3.11247,-1.16572,-9.20759,1.26864,10.07,0.861166,0.629341,-5.07074,1.84156,0.554677,0.501606,2.3508,-1.99158,1.42546,-0.0624237,-4.75601,-4.11731,-5.27973,3.12042,0.927954,2.01431,1.91643,2.26937,-2.42322,-1.85499,2.11246 10103,10135.6,9219.87,2.2046,4.10281,1.87105,-2.44462,-1.81059,2.73657,16.517,1.49188,0.862687,-1.50652,2.91423,-2.27191,-0.311967,3.16828,-6.05317,-0.647296,-0.600809,-9.86797,-3.317,-4.05579,3.51099,-1.77799,-1.17227,0.17711,-2.12588,-5.86398,-2.08211,1.43944 
10103.9,10138.7,9220.3,3.77174,5.49059,1.2637,1.03751,-12.6254,-6.24364,0.90728,3.65224,3.71822,2.59825,4.31988,1.86088,-2.62582,4.43061,-1.00461,2.10803,1.47555,-3.28777,-8.18549,-4.31695,2.95113,-1.34785,0.676274,-1.38936,-3.04336,-1.37001,-2.35773,2.00922 10108.6,10140.8,9221.82,-0.70593,3.90046,-1.14247,-3.0764,-1.47295,-1.10809,-0.510284,3.79285,2.60078,-1.28697,3.77566,2.32766,-3.54475,2.99719,-1.20306,1.33262,-0.719923,-9.06449,-7.33119,-4.80493,-0.721145,-2.4024,1.79362,-1.97223,-5.04385,0.0875954,-1.73778,0.950888 10113.1,10142.1,9223.55,-1.06377,0.843971,-1.44889,-5.32939,2.69029,-3.83385,-5.63119,0.535717,-1.61039,-5.59267,1.26514,2.05707,-3.31026,-0.958826,1.33732,1.46551,-3.13585,-9.66605,-6.00234,-4.35532,-0.26599,-0.831562,2.98878,0.128679,-2.54674,-0.278737,-3.58409,-1.324 10120.7,10142.9,9227.01,3.56995,1.04759,3.75113,-1.7421,5.12807,3.1454,2.38504,-1.62768,-2.93793,-5.71266,-0.530001,2.84448,-2.04436,-1.31251,2.17243,2.11298,-0.867238,-7.66197,-6.87331,-3.32769,-0.373459,-0.116178,2.03689,0.379397,-0.00605166,-0.182103,-4.1657,-1.22794 10135.1,10142.1,9232.63,4.13322,3.14571,5.42112,-9.50857,6.61076,-1.5265,-1.3563,-0.229734,-0.953633,-2.39287,0.0907423,-2.25912,-2.95494,-0.622513,-0.878638,3.11006,2.20909,-12.7591,-4.65267,-0.652931,-0.508727,-0.484787,-1.43884,-3.89903,-1.68783,-1.20607,-1.47415,-0.30987 10150.6,10139.9,9237.26,7.08686,7.1115,3.05908,-7.31514,-2.75139,-6.15754,-6.75994,1.34201,0.583247,1.72791,0.0586144,-1.05549,-2.23348,1.35232,0.957745,3.9225,0.27845,-7.28043,-8.71747,-3.21629,1.12263,-1.08286,-3.72117,-4.10901,-0.817087,-0.319549,-0.171801,1.86899 10161.3,10137.9,9238.2,5.45348,5.872,0.0360833,-8.71486,1.68904,-1.57501,-9.84544,2.70784,2.39605,-1.45535,-0.548901,-2.93743,2.31592,2.21738,-0.0678836,1.75621,-1.90485,-7.83172,-5.34721,-0.902631,2.89369,0.938874,1.08004,0.946796,3.39736,-3.2386,1.23533,3.43628 
10168.7,10135,9236.89,1.9988,3.16081,-0.959961,-1.65775,15.8147,12.2058,-6.43511,1.69639,2.59198,-2.06327,-0.47323,-4.35241,3.77438,3.79233,-2.16153,-2.08622,-2.56136,-3.89096,-0.736348,5.49778,-0.475583,0.770127,3.05002,3.17719,3.81221,-4.99556,1.59718,3.01185 10178.3,10131.2,9237.28,0.818385,-0.233269,1.46873,6.63122,10.9706,17.5879,-3.54675,0.677416,3.72244,0.655626,-0.201865,-1.16835,1.57109,5.42876,-0.444523,-1.12764,-0.256929,5.62565,-1.99386,6.4084,-2.47406,1.18593,3.2834,3.0293,3.51573,-2.53776,0.959038,3.23253 10193.3,10130.2,9242.16,-2.48525,-2.35837,2.98987,5.98816,11.4719,15.9039,-4.84232,-0.825315,2.54659,1.43064,-0.659643,-2.96556,0.571285,2.41784,-2.00371,-0.757574,1.41844,6.37057,1.42823,7.71148,-4.93994,-1.54988,-0.232174,-1.34349,-1.26249,-2.05601,1.26179,0.464125 10210.2,10133.3,9250.5,-0.302459,-1.69801,0.843368,2.30597,6.15326,11.0157,-5.9274,-1.05244,-1.68469,-0.278629,-0.694935,-0.891837,1.23651,-0.21345,-0.305015,-0.0987808,0.160233,4.91775,0.166271,3.92353,-3.88399,-2.55526,0.198425,-0.923912,-1.86728,-0.552523,1.22445,1.15572 10221,10137.3,9258.6,-1.56339,-0.256664,0.840544,-1.61826,11.0061,14.4706,-2.59098,0.449882,-1.65171,-1.89163,-1.35949,-1.40198,3.60618,0.270121,-1.02351,-1.1912,0.778059,-0.110922,0.867721,2.27546,-5.20223,-2.14642,1.17716,-1.36266,-2.51971,-1.10085,2.42789,2.32548 10222.9,10141.6,9264.61,-4.74868,-0.212232,1.05283,-1.29221,10.744,4.75459,-2.81401,0.644295,0.850172,0.179994,-3.01777,-4.30435,2.71079,-1.12735,-1.29174,-2.07496,1.34575,1.0376,2.5823,1.95702,-4.5778,-1.28586,-0.494008,-4.39926,-5.46478,-2.40477,1.70545,-0.546783 10222.5,10148.7,9269.02,-3.49502,-0.678579,-0.213247,8.06515,8.4472,0.736921,12.8231,-0.680516,1.09355,1.44143,-3.62765,-2.08929,0.194595,-2.35671,-0.392866,-2.86869,-0.655593,6.76095,0.52286,-1.94996,-0.69629,-1.94695,-3.05311,-3.36287,-5.8798,-2.04553,-0.962602,-2.08692 
10226.3,10155.2,9271.48,-1.96969,-0.131236,-7.34816,10.3469,1.43629,-18.1274,6.28789,-1.94889,-4.21799,9.10578,-0.96868,-0.513386,-5.07894,-4.75252,3.07715,-1.21549,-4.62974,12.6049,-2.11208,-4.5134,4.07597,-2.26695,-5.31607,-0.080814,-4.75562,0.0499323,-2.60796,-2.05158 10230.1,10151.7,9270.27,-0.441668,1.99564,-2.24149,10.4542,-4.09391,-6.45561,-1.77752,0.712394,-1.02642,8.25875,2.54249,4.31177,-1.67116,1.28898,3.90167,2.27301,-0.292013,13.1856,-3.31394,-4.23242,0.509949,-0.582218,-1.55254,1.54596,0.383257,3.15094,0.659781,3.83919 10224.9,10138.7,9266.49,4.67287,5.1299,-1.26323,13.4301,-10.2745,-9.49416,-12.2719,-1.18436,-2.87586,6.16837,2.83569,6.07774,-2.8315,2.00898,6.40272,2.01559,-1.86315,15.8694,-4.72684,-3.25468,-2.65905,-3.311,-6.24296,-4.21139,-3.70695,4.80612,0.395122,1.76566 10212.8,10131.4,9265.67,3.01888,4.86272,2.80549,9.41976,5.08199,16.7307,3.01517,-1.39232,-0.901598,-3.17761,2.70511,2.89126,0.206015,2.09237,1.79821,0.427067,-0.286912,4.97158,1.88506,1.52106,-4.78901,-3.10639,-5.19696,-1.88352,-1.17405,1.76068,1.66502,-0.462334 10205.3,10137.3,9271.29,5.0191,6.44861,-1.029,10.2232,1.46143,6.79866,-7.1328,-3.52906,-8.32347,-3.93806,2.03961,4.301,-3.73195,-3.92217,6.44854,2.90593,-2.49697,11.4551,-0.562561,1.57056,0.711111,-0.350636,-4.25263,3.76126,3.75639,3.70316,-1.79131,-3.47622 10205.7,10147.7,9278.59,5.83546,6.36501,-0.202118,7.16455,-12.9828,-12.4607,-27.3389,-3.33415,-9.60681,-6.26496,-0.539386,6.78879,-3.91681,-6.10831,9.8609,6.12423,0.502419,17.71,-2.72276,0.90307,5.89102,4.35576,1.47131,6.87862,9.08531,6.44279,-3.45175,-1.92878 10205.4,10153.7,9279.43,2.61204,3.79426,2.8599,4.2373,-6.30104,-6.55433,-17.9117,-2.30217,-4.33352,-8.56342,-2.54108,4.06241,-0.221565,-2.25183,3.87958,2.42384,1.7425,10.0636,-0.274803,1.38918,2.9688,2.49859,1.85002,3.57782,5.56749,4.25356,-1.57246,0.769565 
10198.3,10155.2,9271.53,1.79363,-0.436721,3.46418,1.17919,-6.21503,-12.0337,-14.7144,-0.753172,-0.422946,-10.0673,-1.05729,0.16841,0.00393219,0.329848,3.06417,0.641188,1.13987,4.50086,-1.96838,-0.158451,2.22687,1.01485,-0.617827,-1.82684,0.837829,1.35672,-0.969077,2.83866 10187,10154.7,9258.9,0.357944,-3.85399,-0.403587,-0.905802,-6.94279,-16.6984,-17.7781,-0.22625,-1.87358,-4.80273,-0.208291,-3.41762,-1.38116,-0.435891,4.56144,1.47257,0.881539,4.31043,-2.35524,-0.63135,2.49929,2.73787,-0.3439,-0.967951,0.479767,-1.25236,-0.198644,2.70849 10175.5,10150.8,9245.55,-2.22289,-4.64417,-1.57873,-3.37822,-3.35046,-9.88201,-14.3071,0.168661,-0.756661,-2.69992,-1.57269,-4.61371,-0.741804,-0.794809,1.95045,1.34471,1.90438,0.670421,-1.36383,-0.0207592,1.95603,4.44548,1.70081,0.896225,1.96219,-2.68814,1.37985,1.21966 10163.9,10144.5,9233.39,-1.0609,-3.6573,-1.22008,-1.66234,-8.72059,-9.8591,-9.71449,-0.237702,2.4907,-0.383432,-2.45784,-2.52105,-0.451308,-0.95008,0.101755,0.998499,0.0147502,0.763548,-2.08901,-0.286814,2.08671,3.24587,1.98374,-1.03823,1.41551,-1.64013,0.866956,-0.452541 10152.5,10140.9,9224.11,1.58528,-1.3177,-2.21666,-0.770113,-12.1162,-14.2306,-0.877621,-0.372338,1.62768,2.76293,-0.69447,0.389726,-2.24466,-0.492948,-1.07534,1.2119,-2.84085,1.62365,-4.58137,-3.47859,2.38127,-0.58689,-1.20067,-5.12188,-1.38938,0.191315,-1.00868,-0.231626 10144.9,10141,9218.45,2.9188,-0.174985,-4.58083,-6.94645,-12.0718,-23.1781,-6.27315,-0.364715,-3.24703,1.70145,0.993811,-0.598274,-3.56103,-0.759525,0.496704,2.46032,-1.89983,0.597576,-2.01394,-2.93857,4.73883,-0.682548,-1.34504,-3.70636,-1.23983,0.0550942,-2.01066,1.58053 10141.8,10139.7,9215.32,1.06474,0.421951,-5.29652,-9.2234,8.36446,-5.7284,0.960531,-0.909556,-4.90704,0.770291,1.54135,-5.62095,-2.20122,-1.09503,-2.35206,-0.974175,-1.0101,-7.23319,3.01594,0.768168,2.39478,-1.32615,-1.6404,1.53725,-1.51813,-3.97654,-1.7665,0.833795 
10141.4,10134.3,9214.23,0.86273,1.35397,-0.657898,-4.72598,2.71892,1.93911,-8.71178,0.127278,0.812447,5.14689,3.34014,-5.47575,-0.124804,-2.70815,-0.541837,-0.600256,1.53834,-3.53843,0.0605411,2.43643,0.689316,0.936364,1.45495,3.58725,0.917646,-4.12549,-2.16127,-1.91164 10145.6,10128.8,9217.09,0.035273,1.26692,3.11502,-4.96307,-6.78084,1.02172,-8.79811,2.69846,4.94751,11.3598,6.51275,-2.0705,0.657905,-2.59061,-0.35795,1.18908,3.42851,-3.05799,-3.41004,0.806424,0.399374,2.92706,4.4301,0.273598,0.553543,-1.76552,-0.755718,-3.46001 10157.5,10128.8,9225.31,0.248702,0.312336,2.57768,-4.36878,-7.1619,-0.049009,-3.2758,2.7151,1.99544,11.1247,7.80862,3.2311,1.05086,1.13953,0.117826,1.5885,2.6575,-2.74279,-2.82058,-0.206648,1.25493,1.71967,2.81266,-4.13773,-2.45207,2.50385,0.789243,-0.268176 10170.7,10133.1,9236.11,-2.23675,-0.885477,2.34602,-6.30375,3.19378,12.3402,5.26964,2.51006,1.86666,4.33237,6.63528,4.85198,3.48519,8.46812,-2.52066,-0.634166,3.57125,-6.40349,1.46869,0.818123,-1.68738,1.2743,1.91738,-0.951766,-0.403311,4.63843,3.18061,7.04436 10176.7,10136.2,9243.78,0.782244,0.338989,-0.179665,0.677035,-11.8864,-9.98092,-16.6014,-0.0876104,-1.39338,0.511794,2.05749,5.37285,2.64871,7.7119,4.8232,-1.23349,2.56586,8.98335,0.643413,1.73431,-0.63479,2.49537,-0.600719,2.26345,1.69812,6.71431,2.31721,8.10433 10176.8,10136.6,9245.84,-3.20567,1.13405,3.92668,-1.78597,-0.236073,-2.19382,-11.4115,3.08973,1.33702,-3.27145,0.727769,-0.100717,5.38921,8.19297,0.492232,-2.20151,5.25989,3.6589,4.08819,2.21554,-1.32513,3.54291,0.119275,3.23854,3.862,2.19948,5.28701,6.25834 10178.4,10137.4,9245.74,-5.53585,0.420645,5.85295,-4.47724,14.54,12.4497,8.36972,4.99424,2.57479,-4.3639,0.677018,-2.6813,6.67898,7.5884,-5.54187,-1.3688,4.05586,-6.15054,4.2909,-0.899213,-1.24567,1.90686,-0.469126,1.72139,5.00978,-1.65339,6.96518,3.71489 
10184.8,10141.1,9247.89,-4.95644,-1.91401,3.7243,-7.95873,7.49028,6.40526,5.31843,3.53676,4.4376,-3.95261,0.746514,-2.92295,5.17495,5.09822,-5.56387,2.13589,1.74219,-7.51099,1.13636,-2.24892,-0.712168,1.40767,0.401594,-0.663717,6.22808,-1.51586,5.59537,1.86444 10195.1,10147.9,9253.27,-3.98,-3.06823,-2.05534,-6.10099,3.83685,4.55708,3.92119,0.928846,2.49159,0.0763172,1.14792,-2.88509,3.3624,3.14131,-4.76678,1.53759,-2.49281,-5.00974,0.3227,-1.57677,-2.36177,0.558465,1.76223,-0.153596,3.21585,-0.248642,3.44061,1.09292 10206.6,10155.3,9259.98,-4.64998,-1.64546,-4.6585,-6.92405,-1.23826,-1.4651,-7.80907,2.03872,0.322905,5.35637,2.9557,-1.90346,0.941137,2.90995,-2.25745,1.6362,-2.73525,-3.06893,0.361893,-0.410406,-1.95298,3.18373,4.96997,3.18307,2.09522,2.29277,1.29516,1.46329 10215.1,10159.8,9265.65,-5.64262,-2.22323,-2.32616,-8.62966,1.24852,3.53986,-7.11813,2.5704,-0.221435,0.41167,0.765415,-1.44792,2.10023,1.14341,-1.90736,0.761342,-0.0657556,-6.90094,4.60419,2.00852,-1.1143,4.44335,7.23913,4.6059,2.18355,1.92624,1.0442,1.06642 10218.9,10161,9269.98,-5.54728,-2.69742,0.623383,-4.54971,5.62832,12.115,1.60837,0.527375,0.225195,-4.35554,-1.09064,-1.69716,2.68584,-2.42078,-3.28377,-0.48855,1.46337,-7.59929,7.41232,3.78152,-1.52786,1.12019,5.14455,0.902689,0.791392,0.171231,1.01653,-2.1951 10225.1,10161.4,9274.87,-4.18459,-1.40959,4.0543,-3.78563,4.56469,13.1486,7.4468,1.32559,4.01602,-4.26528,2.47676,-0.706977,1.49841,-2.44619,-4.48237,0.314642,3.21848,-7.78537,6.45365,2.67192,-0.518631,-0.579868,3.1551,-3.30298,0.42352,0.385421,1.09082,-3.38628 10238.6,10163.7,9281.72,0.163978,0.29531,1.39945,-1.88245,0.770367,3.01996,6.47156,0.843119,3.05229,-2.89342,3.69162,1.01002,0.156961,-1.63668,-1.88068,0.459627,0.572044,-3.8789,6.07964,1.73877,1.04155,-0.952277,-0.352698,-3.89818,-1.13337,1.63306,0.655322,-3.05775 
10252.3,10168.8,9289.58,1.69242,0.803041,0.969081,-1.57571,10.1963,10.1486,9.01137,-0.23779,2.45598,-11.8335,0.764195,0.347471,0.63322,0.818036,-2.67947,-0.48707,-0.0121974,-5.92175,4.75178,1.31186,-0.59319,-0.865273,-2.13114,-0.629395,-0.22624,0.187864,0.687159,-1.38416 10258.4,10175.1,9296.44,0.693656,-1.47018,1.57507,-4.07861,13.9151,7.913,3.87705,-2.41045,1.40643,-18.8401,-3.38044,-3.78137,0.444306,-0.142111,-3.19856,-0.633983,1.26609,-6.96487,4.03731,1.86282,-0.255938,0.885239,0.576534,4.16798,1.48633,-2.91027,0.44246,-1.26861 10259.2,10179.7,9301.13,-1.11281,-2.9356,3.48279,-4.07376,14.5961,4.75668,2.95063,-2.50321,1.99968,-15.2573,-3.94817,-6.19421,0.994523,-0.409685,-3.36826,-1.30752,2.89435,-7.11783,2.3961,1.75016,-0.287404,0.839505,2.32354,3.16514,0.431073,-4.23834,0.224613,-1.13459 10258.9,10180.8,9303.2,-3.70956,-2.93593,3.76222,-6.98265,14.1006,4.36509,3.13521,0.524873,3.4745,-8.19672,-0.812591,-7.54285,2.87285,0.165482,-4.34303,-3.00502,3.10194,-11.8146,3.48326,1.87454,-2.39007,-1.71717,-0.0308325,-3.00344,-3.10099,-5.07511,0.999296,-0.291248 10259.7,10178.9,9302.61,-2.50722,-0.863499,1.6361,-7.29671,5.65875,7.35687,6.74534,2.86707,2.5541,-4.10002,1.92641,-4.21325,3.79643,1.11564,-2.85299,-3.384,0.718232,-13.5344,2.15514,-0.378278,-3.09826,-4.48668,-4.09564,-6.07121,-4.62941,-4.63714,1.35609,1.33932 10264.3,10176.2,9300.58,-1.50986,-0.476834,0.153861,-9.03392,2.34462,9.76008,11.2624,0.958254,-0.70443,-6.3101,0.886002,-3.04957,4.20237,0.687347,-2.59931,-4.30057,-0.344332,-15.3463,3.30618,0.212706,-1.83037,-5.39362,-6.37009,-5.79293,-5.6463,-5.17005,1.45394,1.2199 10270.2,10175.5,9299.06,-1.8193,-1.62584,1.49621,-15.2891,-0.19176,0.694336,7.97111,-0.906134,-1.88497,-6.47048,-0.900237,-3.70282,1.23614,0.322582,-3.93212,-3.45866,1.71962,-16.8955,0.58688,-0.409914,-0.259588,-2.68512,-3.64588,-3.35838,-4.51583,-4.19392,0.240148,0.159851 
10270.2,10179.6,9298.63,-1.90388,-3.42457,3.36972,-15.5947,6.83754,-2.72512,7.96959,-1.26132,-2.35887,-7.13988,-3.00989,-4.84946,-1.32472,-2.90407,-7.21556,-3.99747,1.63284,-18.121,1.49353,-0.486008,-0.289734,-2.44221,-2.61409,-4.74746,-6.81336,-4.22186,-0.397997,-3.01155 10263.1,10186.3,9296.94,0.1046,-2.95923,0.55802,-3.53552,11.956,6.06043,20.0157,-0.175478,-1.81809,-1.77528,-2.10279,-0.283075,-3.48288,-4.09089,-6.41457,-3.4926,-1.98205,-11.2644,1.51324,-2.56718,2.01317,-3.17178,-3.03644,-4.28621,-6.82533,-2.57386,-0.732198,-4.52782 10250.3,10186.7,9289.82,0.787893,-2.63004,-4.83671,4.59987,9.90165,5.11396,20.1712,-1.49013,-0.900383,3.2704,-1.38302,1.01612,-3.51797,-3.65748,-2.01906,-2.31487,-4.58178,-0.663723,4.99631,0.0846666,6.20019,-1.32911,-0.366123,-0.708005,-3.05462,-1.4169,-1.33549,-4.03837 10229.6,10174.2,9276.51,2.92922,1.43172,-8.45959,7.92191,9.82817,0.906035,15.1761,-5.66535,-4.80598,8.92318,-1.50732,0.863702,-4.19618,-1.72605,1.43049,-1.60336,-7.78679,7.9456,2.20311,0.976306,4.6808,-2.0774,-1.41618,1.52784,-1.00485,0.251303,-2.51818,-3.24837 10203.9,10154.8,9263.01,1.97737,4.88419,1.86761,-1.89071,16.8831,21.8027,18.6752,-2.85592,-0.407409,1.1857,1.57668,2.90834,1.42619,5.01683,-2.88862,1.13125,-1.02838,-3.77013,-1.83294,-0.874118,-1.82318,-1.06152,0.617181,1.34269,3.38069,1.15764,1.12216,1.38647 10184.5,10141.2,9256.68,5.24597,7.64832,2.18557,1.58328,4.92602,9.28816,-0.0172234,-2.70209,-2.36954,2.63625,2.45988,6.65341,1.30855,2.45772,0.884071,4.15289,-0.306199,0.501745,-3.91598,-0.843063,-3.78083,-0.751671,-0.908618,-0.353576,1.46737,4.59599,1.10914,-1.05414 10178.9,10140.4,9258.57,8.5511,8.38576,-0.704081,10.0442,3.87995,9.53107,4.06474,-2.33977,-3.33414,3.45052,0.769206,8.44243,0.151836,-0.110094,2.50423,3.89258,-1.86971,4.86933,-2.34618,0.208276,-3.54318,-0.382483,-0.444637,3.17545,1.86638,6.31308,-0.0788599,-2.11239 
10182.7,10148,9263.52,7.664,6.75263,-0.540997,5.42972,-5.04193,-7.98425,-8.29464,-0.166299,-0.588527,3.31557,0.500806,4.72146,-2.51571,-1.43305,5.52369,5.671,1.03703,8.03067,0.0463032,4.16527,0.993743,2.27,2.01907,5.48701,6.28587,6.50446,-0.915646,-0.555951 10185.6,10156.6,9266.64,4.26252,2.60407,3.65205,1.35764,1.93964,-1.71464,3.62386,0.664968,2.07164,-1.84774,-1.41728,2.03742,-1.93901,-0.955849,2.55509,2.24827,3.4143,2.08534,1.52467,4.36357,2.40504,-0.149419,1.87333,2.56701,3.76988,3.58853,-0.290298,1.53656 10182.8,10164.1,9266.99,3.44774,1.00051,3.58435,5.06036,-3.20427,-1.32409,2.16178,-1.24869,0.986594,2.68824,-3.10496,3.75494,-3.03899,-1.36189,2.85639,-0.797041,2.25309,6.84226,-1.01807,1.45026,1.64915,-1.77668,1.47461,1.32051,0.0174875,3.15498,-1.91103,0.915561 10177.6,10169.5,9265.47,2.97062,0.742454,2.19308,3.39405,-10.2555,-6.11354,-8.35604,-2.29312,-0.492631,4.2024,-2.46282,2.85236,-2.05854,-1.07623,3.34902,-1.67951,1.43015,9.72371,1.0556,1.2093,0.0329592,0.933345,2.62882,4.14907,1.43657,2.25242,-2.21302,0.424466 10175.1,10171.1,9262.53,2.78573,0.66686,2.0545,2.76769,-2.38316,1.38611,1.33538,-1.98843,-1.22362,0.719734,-1.48276,0.571928,-0.303568,1.13172,0.533248,-2.57485,0.218063,4.75694,4.12677,1.25451,-2.29974,1.77459,2.18864,5.66448,2.31972,-0.197648,-0.423422,1.24127 10176.1,10170.7,9258.49,5.31438,0.737423,2.23937,7.15555,-6.03862,-6.93885,2.59027,-2.08985,-1.82474,1.76361,-1.51506,2.40133,-2.94977,1.13326,2.34185,-1.4691,-0.319475,6.55378,0.151184,-0.820336,-1.03183,0.737373,1.0173,1.60097,0.120988,0.706961,-1.06361,1.61191 10177.1,10171.1,9253.43,5.27989,0.124242,0.594136,6.40228,-14.4792,-17.9873,-7.83873,-2.70593,-2.84279,6.19952,-1.02819,4.22035,-3.89328,-0.655654,4.6427,-0.543649,-0.312946,7.67303,-3.34568,-2.99026,0.892734,0.193866,0.437901,-1.37172,-2.06494,3.10779,-2.09072,0.969194 
10175,10171.9,9247.28,2.27598,-1.11333,-0.371999,2.70022,-5.44405,-1.24932,2.95574,-2.54561,-3.07604,2.81372,-0.48024,4.11824,2.04907,-0.370621,1.24343,-2.71039,-1.27809,-0.906837,-1.29061,-4.80376,-0.177684,-0.68347,-0.0356975,0.976652,-2.58184,2.60538,-0.53245,1.0079 10170.6,10171.1,9240.98,0.484599,0.0646839,-1.51326,2.89899,-3.4319,-0.213982,2.47953,-0.834731,-2.00581,5.72898,0.227883,2.67222,2.27602,0.0505934,1.31844,-2.26552,-2.6972,-0.975391,-0.869576,-3.70984,-1.26158,-0.292123,-0.590846,2.58737,-1.84822,1.62378,-0.526111,-0.491878 10166.9,10167.6,9236.09,0.964725,-0.0392702,-0.079079,4.19696,-8.77705,-7.3393,-5.33084,1.7816,1.00552,6.00308,-0.645333,1.80016,-0.345783,0.537513,3.29513,-0.258503,-1.94323,3.02276,-2.07851,-0.708951,-0.985472,0.42465,-0.0047685,-0.0149723,-1.37113,0.550535,-0.779034,-0.484969 10166.1,10161.5,9233.6,-0.598547,-1.76595,-1.06041,-0.952044,-3.22733,-6.25839,-1.71002,3.5389,3.14678,2.52469,-0.94774,-0.697306,-1.82073,1.8162,-0.398189,-0.0962201,-1.17773,-3.11075,-1.86249,-0.148137,-0.912351,0.0729367,0.372787,-1.52491,-1.99794,-1.67208,0.753712,1.02245 10167.9,10154.5,9233.85,1.32924,-0.579085,-4.09528,3.27081,-6.78357,-9.38603,-3.06915,1.95927,0.70163,2.46784,-0.635142,0.854662,-1.03664,2.44479,0.381434,0.976493,-2.1874,1.35415,-3.25712,-1.85514,0.202589,0.286026,0.720155,0.627719,-0.687001,-0.872865,1.21871,2.25385 10170.4,10147.3,9236.23,1.55419,0.655793,-3.90119,3.65032,-6.92144,-3.81534,-0.829364,1.59907,-0.150104,0.588015,0.212751,1.04803,3.09472,3.79829,-0.218751,1.11779,-1.55055,0.933332,-1.25266,-2.59487,0.647035,1.39731,2.58953,2.8589,1.80309,-1.43261,2.52993,2.79953 10171.9,10139.7,9239.22,2.16966,0.513128,-2.93705,2.73804,-10.8601,-4.50483,3.76187,1.03924,-0.676839,-1.4866,-1.19577,1.6866,5.98311,3.12642,0.0885709,0.9896,-0.594518,0.533618,0.379411,-3.82145,2.32664,2.22298,3.60721,3.05218,2.2889,-1.98702,2.79897,1.35025 
10172.4,10133.5,9242.05,0.627291,0.905709,1.39363,2.99372,-15.425,-9.09382,2.11414,1.04226,2.10526,-4.39506,-2.77953,2.15891,6.66724,1.70369,-0.372333,1.40462,2.59187,2.26874,-0.378224,-3.69675,3.0335,2.25396,3.10192,0.0429504,0.10951,-0.799702,2.66794,-0.282681 10173.8,10130.2,9245.36,-1.33644,1.42161,3.11004,3.93858,-17.0646,-12.116,1.67239,1.94826,5.54306,-3.85205,-1.5475,2.52019,4.33814,1.15019,-0.541069,1.99129,3.05378,4.25369,-2.76731,-2.80645,1.85733,0.988299,2.88783,-1.97077,-2.83768,1.85125,2.84766,0.389147 10176.4,10130.9,9250,-3.53503,0.391503,-0.270572,1.95882,-15.1875,-18.5758,-1.42497,2.28845,5.40786,-2.12974,1.20821,0.911564,0.2788,0.0689856,-0.00271805,2.01928,-0.20812,3.23848,-1.98612,0.0245125,0.488358,-1.18054,1.47019,-3.47437,-4.6287,2.11498,2.20934,0.993318 10178.8,10135.9,9255.56,-3.20255,-0.268054,-3.48033,2.47099,-11.3536,-16.9308,2.01776,1.40976,1.56328,0.853625,1.89586,1.47109,-1.50849,0.167668,0.627511,1.41809,-4.21425,2.05546,-2.39209,-0.416193,0.276633,-1.50971,-0.820011,-1.25927,-1.76,0.153711,0.431209,1.48315 10181.2,10144.1,9260.31,-2.49125,-0.613263,-3.86482,0.287362,-9.17309,-14.1157,3.48478,0.196793,-1.25386,2.83848,0.198147,-0.0165582,0.471677,-0.139327,-0.216901,-0.966032,-5.2193,-1.40546,-0.977273,-1.2574,1.78779,0.134179,-1.72164,0.653388,0.313432,-3.37716,-0.587605,0.861387 10186.6,10151.1,9263.12,-0.0358474,0.714951,-5.47328,-0.875177,-17.5089,-13.8361,0.471247,0.643912,-2.41975,9.9458,0.993041,0.803296,-0.226386,0.0668295,2.19176,-1.16819,-4.40868,0.69383,-3.38706,-3.58218,3.07732,2.10253,1.79789,2.06744,1.83904,-2.15516,-1.67344,0.661882 10193.4,10152.2,9264.85,-2.78688,1.85556,-1.96216,-7.27433,-5.61022,0.625161,3.91544,2.78407,0.13042,8.01854,3.573,-2.43853,-1.07905,0.148792,-1.48277,-2.3792,0.378784,-7.05144,-1.06108,-1.76148,0.135824,1.71393,3.80312,-1.43656,0.702495,-1.95731,-0.703674,-0.33177 
10196.9,10148.7,9267.46,1.41437,4.41491,0.0330121,-0.96198,-19.7539,-11.561,-5.49424,1.03618,-0.588315,13.1158,4.11913,1.82776,-4.02743,-1.24038,4.49417,2.16391,1.61464,5.33203,-6.2827,-3.22771,2.42673,4.53812,5.27571,1.95384,4.83592,2.15944,-2.23414,-0.0179182 10195.1,10146.6,9271.67,-0.599083,4.08109,5.56207,-0.651956,-1.899,4.41751,8.64946,-0.00765143,1.65381,7.40697,3.13743,0.528221,-1.17274,-0.333192,-1.34405,0.810869,3.04978,-1.96585,-3.00608,-1.02587,-0.427114,2.63482,2.33223,1.44749,2.70602,-0.508442,-0.782524,0.838544 10190.6,10149.1,9275.95,0.560997,3.32623,0.00253245,1.6273,-9.62681,-9.32197,-7.13248,-1.74244,-2.26773,10.279,2.01853,1.79006,-2.32577,-1.861,2.70102,2.63733,-0.668516,4.89049,-2.56801,1.67809,-0.682542,1.07859,-0.730879,1.04436,0.219305,1.04839,-1.30085,-0.204558 10188,10153.1,9277.72,-1.05102,1.4439,-1.2902,0.37219,3.61058,7.8905,-0.13638,-0.797121,-3.203,3.7144,-0.467361,1.43319,1.01941,-0.964803,1.27849,1.32106,-0.71757,-0.281666,1.82319,4.43107,-2.93419,-0.102775,-2.79816,1.60946,-0.350934,0.837113,0.975085,-0.206216 10189.3,10155.8,9275.17,1.71247,1.79065,-0.806826,4.2591,-1.07113,5.08033,-3.80833,-1.05846,-3.93516,4.86697,-2.48519,4.41458,1.0147,-2.04319,5.76698,3.04901,0.621182,6.18537,-0.471514,3.74338,0.0954557,1.78055,-2.23478,4.29533,3.28968,4.08665,-0.45381,-1.12752 10190.8,10155.9,9267.91,0.0885688,1.62773,3.97676,0.475719,6.50171,12.0036,4.17355,0.0800788,0.877184,4.13283,-1.66529,2.3731,1.22312,-1.52431,1.32333,1.30085,4.02821,0.00402446,-0.278254,3.83144,-0.00616006,1.70507,0.14686,2.05675,3.75234,3.42709,-1.13997,-2.28219 10186.5,10152.6,9257.34,-0.152071,1.1051,2.98089,-3.26014,-3.23874,0.545145,-3.74253,0.650653,4.32612,4.55661,-0.349067,0.443991,-1.54712,-2.37082,1.08068,1.11666,3.19332,0.114235,-4.77887,1.03262,0.526047,1.57427,1.96416,-1.21359,2.2522,2.81775,-2.19914,-3.20958 
10175.9,10146,9246.33,-2.37365,-0.801223,1.8448,-4.49245,2.73452,3.45587,0.665856,0.804743,7.15539,-1.25789,-1.25952,-2.70716,-1.07845,-2.04441,-1.93328,-1.35806,1.5978,-5.1161,-5.79834,-0.925826,-2.80177,-1.15512,-1.39234,-4.88988,-2.71874,-0.727928,-1.17586,-2.55528 10163.6,10137.3,9237.87,-0.803469,-2.78044,-0.895544,-1.96323,-0.541223,-3.95959,-1.23923,0.0489646,5.82687,-0.842944,-2.20839,-1.37161,-0.868195,-0.366623,-0.326653,-0.542204,-0.442138,-3.06811,-5.05951,-1.77693,-2.56412,-2.0747,-5.18551,-5.90628,-3.59607,-1.51359,-1.0358,-0.0442413 10154.4,10129.1,9233.99,1.23915,-3.76005,-2.64612,0.723829,-3.148,-4.96491,0.57486,-0.202117,2.21428,-0.386009,-2.61213,0.591537,-0.420445,2.51457,0.848114,0.0155665,-2.8099,-0.688955,-1.65728,-1.68576,-0.314736,-2.37588,-7.30164,-5.93878,-1.09582,-1.08092,-1.23666,3.04974 10147.7,10124.3,9234.84,0.130569,-3.33534,-5.30783,0.228073,-1.79103,-2.90284,1.72325,0.336059,-1.67646,0.805152,-2.51359,-1.68843,-1.08056,2.79024,0.667811,-0.918425,-5.25023,-0.613583,-1.21144,-3.86108,1.12026,-2.87087,-6.96217,-3.74878,-0.871173,-1.99148,-1.4983,3.13726 10141.9,10125,9238.34,-2.3342,-3.74514,-6.28736,0.247636,2.71253,3.12847,7.57994,-0.0401623,-2.07147,0.481455,-3.97685,-4.46362,-0.415913,1.42821,-0.575486,-2.68041,-4.57327,-2.24353,-2.60028,-5.84863,0.625916,-3.42977,-3.6369,-0.844099,-3.5874,-4.64335,-0.985747,1.2717 10139.9,10130.2,9242.19,-1.31024,-4.72475,-7.14762,0.73153,1.45053,-5.53508,5.90136,-2.31863,0.194991,0.488804,-6.97821,-4.41928,-2.29074,-1.35009,0.919216,-2.89533,-3.25509,-0.799203,-1.99553,-4.14064,2.04707,-1.98553,-0.137078,-0.0166083,-4.9352,-5.40326,-1.67739,-1.42035 10146.2,10135.6,9246.04,1.48702,-3.36982,-6.22071,1.74719,2.56435,-13.0074,1.99705,-3.21561,2.91416,0.844878,-6.7988,-2.16439,-5.4962,-1.85975,2.13575,-1.59383,-2.91884,1.52462,-1.3314,-1.85117,3.6544,-0.430522,0.692754,-0.840642,-3.31251,-2.33908,-3.05762,-2.1983 
10158.1,10136.1,9250.8,0.841737,-2.49661,-1.39476,-1.47649,15.6927,0.965199,10.869,-0.546861,4.02682,-3.15137,-2.65822,-1.05518,-4.77058,0.229656,-2.58261,-1.60934,-0.689737,-5.44364,-0.234473,-1.95479,2.60062,-0.769404,0.484685,-2.21476,-2.21659,-0.527818,-2.3356,-0.631119 10167.2,10131.4,9256.17,1.43756,-1.64599,0.0828565,1.10643,1.09851,-8.71597,-1.14743,1.16785,1.24835,1.69522,0.678389,1.91657,-5.73395,-1.26925,0.618759,0.671225,0.99422,2.5392,-3.14056,-3.00047,3.39733,-0.267724,0.865602,-1.72338,-1.28093,1.59131,-3.58079,-1.60917 10168.5,10125.9,9259.95,0.111755,-1.49369,1.18289,-0.284048,-1.52165,-7.82514,1.91577,2.83987,1.30957,4.34859,2.31828,0.547347,-5.35341,-2.95714,0.120479,-0.07344,1.25038,0.863374,-1.97606,-2.63292,2.99367,-1.51317,-0.192761,-1.94301,-2.34527,-0.816782,-4.15688,-3.69083 10164.7,10123.5,9260.03,2.54631,0.123647,1.85441,0.291179,-2.26534,-5.622,0.403256,2.75151,1.92159,5.45502,4.02912,0.277333,-3.49437,-2.59529,1.68451,1.03176,0.611114,1.05444,-1.37086,-0.762577,2.09659,-3.15435,-1.66892,-4.18628,-2.03484,-0.59484,-4.5361,-4.06338 10160.7,10123.9,9256.02,4.16394,1.15842,1.00215,-1.41089,3.00077,3.69915,2.12147,1.50602,1.11373,3.7783,5.12886,1.27055,-1.0735,0.163066,0.715848,1.75274,0.248762,-1.87449,-2.70607,-0.0821427,-0.982237,-3.91753,-0.603176,-5.15131,-1.55797,1.9122,-2.63806,-2.45448 10157.6,10124.8,9249.1,1.13904,0.752742,1.28292,-3.44794,5.87463,13.5955,-3.90547,0.053564,0.392376,-2.17549,4.02652,0.800942,2.14933,0.991305,-1.00534,1.93346,1.74799,-4.3887,-2.62983,2.12002,-3.97726,-2.37985,1.92724,-3.91126,-1.80145,3.29901,0.515867,-2.07875 10155.9,10125.9,9241.01,-1.21278,1.24353,0.0902419,-1.38693,3.90257,17.0687,-1.7671,-0.621263,-0.743581,-3.56603,3.19768,0.515647,2.83626,-0.394058,-0.965446,2.53295,1.02968,-3.73706,-0.646373,4.19926,-3.90665,0.100245,2.07717,0.65145,-0.4389,3.45695,1.30478,-2.26372 
10156.9,10129,9233.19,-0.519545,3.45514,-0.128203,0.470911,-4.34917,11.6069,-5.37302,-0.249794,0.0908138,-1.64961,3.7305,0.887725,1.28233,-0.50548,0.651175,4.68216,0.481759,0.131141,2.83721,7.4517,-1.51906,2.02591,0.478488,2.8447,3.96564,4.21205,0.0189546,-1.26083 10160.2,10134.9,9226.61,0.334619,3.63902,-1.33005,0.500933,-0.0390483,15.3466,3.49804,-1.22599,-0.443012,-1.29729,1.85728,0.83413,0.663791,1.08815,-1.61332,2.35978,-1.91003,-1.54128,7.06018,8.52392,-0.0931056,-0.631766,-1.8937,1.21041,3.92464,3.0125,0.582016,-0.0552563 10165.1,10142,9222.12,-0.0501124,2.72845,-2.35233,0.461804,-3.24106,3.89637,-4.4752,-1.7395,-0.658087,1.46568,0.74815,1.9358,-1.37579,1.26993,0.248403,2.1501,-1.97865,2.84403,4.93078,6.34449,2.55208,-1.66616,-1.28941,-0.85475,2.44335,3.28626,0.575625,0.0867697 10169,10147.2,9219.92,-2.57524,1.55278,1.64717,-0.408592,2.78686,3.93608,-3.35557,-1.05071,0.358949,-1.71793,1.23509,0.730307,-0.807758,0.469476,-0.799756,2.26666,1.42763,2.57756,3.31921,4.24278,2.32673,-1.92157,-0.625841,-1.7385,0.55312,2.469,0.416022,0.102824 10167.7,10149.8,9219.39,-2.61236,0.265041,4.14099,-1.10443,5.68968,5.75872,0.437178,-1.27371,-1.44794,-5.50529,0.962099,-1.7594,-0.014506,-1.47838,-2.10998,2.88166,2.32266,2.31558,3.04189,2.76494,1.13588,-2.76241,-2.5749,-1.37983,-0.132212,1.62609,0.00182996,-0.567092 10161.2,10151.5,9219.88,-1.00231,0.225002,2.94421,2.03312,-0.355979,4.16591,-0.636307,-0.980578,-3.17075,-4.4683,-0.0413473,-0.96548,-0.194949,-0.798368,-1.08568,3.94015,1.20872,6.21739,0.493017,0.663456,-1.20346,-2.76074,-4.99576,-0.484664,1.27829,1.87168,-0.0347963,-0.649195 10155.5,10153.9,9220.83,-0.939771,0.647249,0.0634509,3.2582,-1.62031,4.0693,-0.997477,-0.169163,-4.01209,-4.20755,-1.14083,-0.040949,0.676499,1.0769,-0.637069,2.85891,0.53402,4.18699,0.666861,0.369829,-2.63692,-0.336214,-3.73798,1.47577,2.81105,-0.292838,0.0270106,-0.151526 
10154.1,10157.5,9221.67,-1.65802,1.59847,-3.57612,1.52401,6.37221,4.48866,-1.46299,-0.915699,-6.98915,-0.340048,-0.952717,-2.18866,-0.811792,-0.642645,-0.622625,-0.300884,-1.00057,-1.15759,2.44751,2.6773,-1.823,1.29837,-1.91591,2.49204,1.93197,-3.59974,-1.91245,-2.4109 10154.4,10160.7,9221.98,-0.583463,-0.108757,-4.6507,-0.0693877,5.35637,4.425,-6.56889,-1.82597,-8.57191,2.85503,-1.05825,-2.33955,-3.22781,-4.76081,2.05753,-0.861931,-1.83229,-0.124382,0.503483,2.18131,1.30665,2.42826,0.824233,3.84653,2.09007,-3.3925,-4.31649,-3.96112 10153.4,10159.2,9221.68,-2.76485,-4.09131,-2.87698,-1.10712,12.5336,12.9839,-4.34652,-1.87041,-6.50663,-1.43881,-2.78497,-4.09349,-3.27711,-7.58611,-0.918956,-2.43732,-1.68029,-2.93885,1.37614,1.00354,-0.202025,0.252735,-1.35224,2.14941,-1.22668,-3.85694,-3.91196,-5.39514 10153.1,10150.6,9221.82,-3.95579,-6.11602,-1.95691,-0.571033,7.36799,2.23424,-8.23593,-1.15065,-2.89936,-3.34966,-3.42278,-4.92737,-4.22729,-7.57776,-1.53936,-2.4826,-0.485854,-2.05301,1.35048,0.235875,-0.851581,0.299046,-3.65228,0.452501,-2.53126,-4.14097,-3.0318,-6.032 10156.5,10138.1,9224.22,-1.72219,-4.81284,-2.04034,3.64429,-3.40667,-8.21149,-2.06758,-0.247629,0.240041,0.844032,-2.55693,-2.29071,-5.62686,-4.10255,0.955484,-2.58578,-0.573095,1.96046,-2.89531,-2.47853,1.00662,1.59082,-2.31097,1.60096,-0.355857,-3.59741,-2.54995,-3.16362 10162.5,10126.5,9229.66,-1.48624,-2.31864,-1.19917,5.07688,-2.15075,-4.48733,6.81643,1.19375,3.4529,3.66948,-1.49639,-1.71619,-5.51437,-1.29231,-0.407537,-4.604,-2.54282,0.0824236,-5.27449,-4.81883,0.767691,-1.39492,-2.55861,-0.325428,-1.75464,-3.59903,-1.89829,-0.732932 10167.7,10118.7,9237.56,-1.06333,-0.880843,-0.709075,2.8371,-10.0447,-10.4348,-2.5904,3.18465,5.97115,6.33779,-0.55058,-1.01646,-4.14332,-1.6247,-0.0193591,-4.01402,-3.73144,0.38443,-5.50468,-6.41294,-0.295721,-3.62009,-2.70822,-3.1355,-4.45086,-2.10376,-1.79258,-1.22716 
10172.5,10116.9,9247.18,1.551,0.130326,-0.490568,5.87654,-14.5436,-8.35183,-0.790109,3.39107,4.7174,8.28156,-0.0057788,2.6686,-1.84943,-1.48071,1.03911,-4.0934,-3.48936,2.7605,-6.22541,-8.72046,-2.487,-3.9855,-3.15508,-4.85806,-6.30628,-0.1826,-2.22861,-1.91313 10179.7,10122.6,9257.78,1.5355,1.00586,-2.46594,5.55739,-10.6179,-9.89219,1.01847,2.02002,1.55047,10.3651,1.59035,2.3257,-3.02423,-0.681756,0.379055,-4.13859,-2.86252,2.65539,-7.09955,-8.4785,-1.80811,-2.44766,-3.84586,-6.08215,-4.18234,0.309597,-3.66089,-1.78168 10188.9,10134.4,9267.84,0.423127,-1.44673,-6.16369,2.54558,-3.2605,-10.2788,1.93481,-0.460125,-1.55478,7.53447,1.04311,-2.037,-5.33297,-0.715827,-0.912315,-4.00679,-5.27357,1.32517,-7.02947,-5.6844,2.49,-1.1701,-4.14164,-4.46692,0.160721,-1.23591,-5.46575,-0.678645 10196.3,10145.5,9275.21,0.204833,-4.851,-9.24744,3.38063,-3.90706,-1.89916,-0.318999,-3.05687,-4.83175,3.88926,-1.68472,-4.52857,-6.76493,0.053409,0.356074,-2.44354,-9.25902,3.95243,-8.99635,-3.68403,4.07743,-1.41439,-4.06526,0.784286,2.50666,-1.59161,-6.31937,0.0761621 10200.4,10148.5,9278.92,-3.06966,-5.752,-6.27773,-0.452092,4.18213,13.2473,-12.0757,-4.47092,-6.49884,-5.96616,-4.08975,-9.08064,-3.65565,-1.03612,-1.9757,-2.79369,-8.22081,-3.13926,-2.68074,1.98539,-1.47914,-4.27865,-6.82097,-0.0420558,-2.72616,-3.80964,-3.69263,-2.81706 10202.3,10144.3,9279.66,1.7621,-1.2767,-1.87182,1.61337,-6.80859,14.4514,-16.815,-2.07514,-4.63562,0.0307544,-1.49074,-2.29138,-1.18636,-1.08621,1.86862,0.689509,-4.2555,-0.913166,-4.04706,-1.13903,-2.95495,-1.4359,-3.45987,4.36607,0.619825,-1.53464,-2.06409,-2.58631 10201.6,10141.5,9277.89,2.73427,2.11183,3.79277,1.71546,-5.8859,13.3557,-11.3022,2.79327,2.37116,13.2011,3.98285,0.966107,0.039656,-0.715821,2.85166,2.34242,2.77476,-0.0888099,-4.98538,-3.4432,-1.83877,3.57211,2.68075,7.05565,6.45616,-1.54302,-1.24469,-1.49869 
10196,10143.8,9273.55,-2.52737,0.202188,7.08167,-4.89952,6.71679,10.6699,0.756855,5.54471,7.25909,13.9583,6.39787,-2.37566,0.745793,-1.45474,-1.09404,0.910205,7.21143,-6.92492,-3.24203,-2.89701,-0.543452,6.07649,7.33376,6.57894,6.15484,-4.40884,0.0587056,-1.11052 10186.2,10147.8,9267.63,-4.31786,0.145523,8.74123,-1.12372,3.61382,5.90919,-2.20636,4.87121,7.93339,10.8223,5.77747,-1.02016,1.70524,-1.23974,-1.99873,1.22043,7.18349,-2.02393,-4.52471,-1.19367,-1.87015,5.60664,6.92162,5.30532,3.03549,-3.16865,1.33872,-1.3693 10178.3,10151.3,9262.07,-1.01371,-0.36759,7.07326,3.03463,-3.67644,6.41668,1.01659,3.32806,5.69645,6.11989,4.17302,3.13986,4.40199,0.31144,-2.58094,-0.0539033,4.16067,1.49299,-3.2753,-1.39228,-2.172,3.33149,4.19598,3.46064,0.616277,-0.818505,3.98959,0.698301 10177.2,10154.3,9257.94,2.09186,0.0766925,2.17884,5.08344,-13.9717,-0.882929,-3.84368,2.86526,4.57806,7.77504,4.75117,6.29349,4.58116,4.04706,1.06485,0.914494,1.84175,7.12093,-3.92066,-3.04038,-1.76589,1.29071,2.74094,1.46176,1.98937,3.12251,5.09485,3.84087 10179.4,10155.4,9254.74,0.187596,-0.882072,-0.665652,4.15319,-3.56212,6.25634,3.46947,2.99756,3.30879,0.859046,5.1349,3.91232,5.90056,6.60019,0.839946,-0.162343,-0.484405,2.65509,-1.8674,-3.50916,-5.10299,-1.60522,1.28388,-0.0295086,1.05,2.81748,5.21994,5.53563 10178.8,10153.1,9251.26,-1.91139,-0.154839,-0.832651,7.32065,-8.14661,3.20829,-4.61065,3.9011,1.20806,1.29028,6.11631,4.24084,4.66918,7.38927,3.1094,1.72009,-0.436683,6.06925,-3.83738,-3.64103,-8.35166,-0.222316,1.74303,3.43329,2.82215,3.91599,3.2218,6.05878 10175,10149.2,9246.46,-3.00223,-0.829219,2.18951,8.12634,-8.29635,3.98254,-2.55022,3.58933,0.0476173,2.00734,2.85452,5.13863,4.39434,5.86178,1.57419,0.321093,2.11151,4.62819,-0.677836,-1.98205,-7.44972,1.36379,2.52895,5.12261,2.10196,3.15929,2.77152,6.16477 
10170.8,10147.7,9240.32,-2.09934,-1.33891,3.77143,6.49402,-6.43302,-0.0826344,0.87837,1.12061,0.421557,1.06025,-1.52903,5.64507,3.68263,3.49536,1.25096,-1.4957,2.92854,4.60413,2.40658,-0.645265,-3.32217,0.987715,2.60908,1.94117,-0.424246,2.85508,2.71473,4.88469 10167.3,10148.7,9234.04,-1.71112,-2.89318,3.67043,1.66277,3.35424,4.57631,10.1924,-0.35173,1.35064,-5.80931,-1.82085,3.64176,4.57117,2.2882,0.924739,-2.41648,2.22467,2.19365,5.80375,-0.426137,-2.32705,-0.919332,2.09081,-2.34116,-2.25007,1.71251,3.40172,3.5108 10165.7,10149.1,9229.23,-1.45001,-3.05548,2.45599,-0.349391,3.71978,4.53119,5.144,-0.0754888,2.20722,-6.90377,0.948441,2.13514,3.08117,1.83942,2.86791,-0.010419,2.66035,5.23219,5.6626,-0.804354,-2.37724,-1.67323,0.673861,-3.53649,-1.59081,1.76997,2.75549,2.29186 10167.4,10147.1,9226.8,-1.49928,-2.70714,1.88393,-0.842721,-0.225431,3.25531,1.41947,0.140255,3.21042,-3.88608,1.41104,1.86088,-0.091131,0.642157,1.94581,0.307133,3.18746,6.22574,4.30938,-1.01513,-1.1936,-1.8575,-0.588364,-1.42784,-2.08205,1.85519,1.46316,1.06047 10171.1,10143.9,9226.48,-2.01672,-2.40053,3.06391,-0.0599903,-8.34303,2.94718,-5.04409,-0.199276,4.0892,-3.68083,-0.226057,2.75547,-0.686676,-0.843757,0.670264,-0.458086,3.08212,7.11729,2.84836,0.933537,-1.50789,-1.59001,0.179663,0.0589795,-2.55704,3.42709,0.775783,0.360096 10175,10140.6,9227.89,-1.34782,-2.60865,2.14445,1.39294,-10.3608,4.5868,-8.2559,-1.78039,0.356678,-10.0047,-3.28868,2.87133,1.85333,-3.67234,1.53223,-1.27653,0.113475,6.97877,4.49731,3.38158,-3.24882,-2.09817,-0.213742,-0.816136,-3.92766,4.36792,1.46638,-0.25462 10179,10139.5,9231.01,-0.683001,-1.14693,0.835389,1.45465,-4.93888,6.92044,-3.2459,-1.76518,-2.11784,-11.5638,-3.99539,3.25477,2.97649,-3.54233,2.62301,-0.286071,-1.99677,5.44349,5.35012,2.55683,-3.04093,-1.82791,-1.42661,0.583625,-2.6178,3.43693,2.29735,-0.308687 
10185.5,10142.2,9235.77,-0.0852919,0.0218383,0.522022,1.091,-4.00515,-0.71681,-2.72016,-1.24891,-1.4593,-5.53454,-2.81228,2.98724,1.40275,-1.35994,4.37674,1.00841,-2.02092,6.34309,4.01241,0.223476,0.719167,-0.617158,-1.79277,2.19906,-0.00915837,1.60933,1.1106,-0.276707 10194.7,10147.7,9242.28,-0.507821,-1.45713,1.82236,1.06383,0.990703,1.16431,3.40878,-1.35424,0.436421,-3.7364,-2.82733,0.844561,2.18188,1.42103,2.14788,-1.48658,-0.956157,3.31294,2.03859,-1.09837,2.11718,-0.147919,0.113767,0.665977,1.0134,-0.758268,0.662046,1.48327 10202.3,10153,9250.68,-0.953894,-1.28733,1.09826,0.183582,-2.63676,-4.1377,-2.89907,-0.851983,3.07691,-0.452803,-2.18838,0.00930997,2.87142,4.0314,0.911046,-1.55443,1.18147,4.24956,-2.48362,-1.23019,1.72571,2.11001,5.29268,-0.281886,3.31927,-0.100871,1.85826,4.09941 10205.4,10156.4,9259.89,-1.27754,0.134823,0.181405,0.430733,3.94306,1.54036,2.99815,-1.16285,4.70226,-4.24342,-1.81256,1.00154,4.93307,6.24027,-1.59843,-1.48742,2.34844,2.10305,-2.00905,-0.662325,0.626241,1.17997,6.74123,-1.67701,1.35772,0.491316,4.32271,6.53414 10204.9,10157.9,9267.94,0.0906612,2.16352,-0.379486,5.42194,2.73054,2.84047,-1.4914,-1.83181,4.02307,-5.15449,-0.262248,3.79351,5.21678,7.80905,0.384689,1.27337,2.9796,6.90988,1.28339,2.20996,-0.91791,-0.163496,3.78903,-1.75168,-0.655347,2.9127,4.88667,7.66747 10203.5,10159,9273.39,2.81598,1.22437,-0.368556,7.79675,3.42922,7.94279,4.57077,-0.708312,0.0968463,-6.10539,0.906129,5.55489,5.11842,8.21484,-0.0671665,1.22889,2.37144,6.24544,4.97372,3.9233,-2.49967,0.267274,-0.310124,1.09266,-0.410233,4.04567,4.74621,8.0612 10203.2,10162.2,9275.77,5.91857,0.355765,0.897437,11.4606,-3.5509,6.21936,2.57301,-0.0103725,-3.12789,-4.93913,0.601331,6.94209,5.77388,6.93334,1.15761,0.716978,2.28439,10.4648,4.58557,4.39511,-2.76356,2.73426,-1.51427,4.03252,2.99548,5.47757,3.66414,6.66569 
10203.5,10167.2,9275.21,3.60261,-0.370029,0.212296,6.53742,-1.17501,1.39057,4.60494,-1.59955,-3.36286,-6.83681,-0.619753,2.05525,7.21718,4.0699,-0.311278,-1.80144,1.07578,6.02142,4.81799,3.05296,-1.94492,1.84126,-1.66326,1.40391,1.77364,2.95825,3.1993,3.61198 10203.2,10169.7,9272.52,1.94895,1.27875,-0.411546,7.45768,-3.75161,0.551798,7.13428,-3.82068,-2.61405,-4.51085,-0.839975,-0.654388,7.59238,3.63367,1.11679,-0.895324,0.0589114,6.72608,0.605615,-0.28023,-1.84675,-0.134175,-0.468956,-1.06577,2.10307,1.19208,2.14254,2.35948 10201,10166,9269.14,-0.454618,0.774031,2.06017,2.8462,-0.622985,0.18548,5.53147,-2.50822,-2.46147,-4.96779,0.0109421,-5.95039,4.88549,1.45711,-1.36876,0.21175,1.58667,0.959389,-1.72767,-0.999701,-1.91612,-0.271218,-0.271307,-3.60937,2.2528,-2.81471,1.29832,0.342989 10196.9,10158.5,9266.51,1.16537,-1.9421,4.60098,6.66208,-8.91079,-4.05041,0.977918,-0.375912,-2.52562,-2.44083,-1.83608,-5.04574,0.870179,-2.88837,0.903319,2.45464,2.77487,7.13809,-7.32993,-2.29902,0.410437,1.61472,1.76486,-2.68616,2.88565,-3.79142,-0.830458,-1.20118 10194.1,10152.5,9265.18,-4.11534,-5.864,4.81522,5.05616,0.145339,-4.93641,2.59855,0.656712,1.10696,-4.83177,-6.68192,-7.2593,-1.01756,-6.50992,-0.623669,0.165413,3.83811,5.84041,-5.84841,-0.103661,1.98729,0.416145,1.34348,-6.16515,-2.67871,-5.57128,-1.65554,-3.26762 10194.1,10148.4,9264.07,-6.59722,-4.92656,-2.01588,3.7417,0.726794,-18.2936,5.15057,-0.276157,1.50739,-0.538248,-8.52874,-4.00362,-4.55022,-5.27015,0.604573,-0.930054,-0.109161,8.19838,-8.17669,-2.1092,4.17484,-1.56197,-1.02102,-5.8341,-5.50376,-1.7134,-2.50895,-3.06608 10193.9,10142,9261.25,-7.62788,-2.98611,1.9356,-1.40885,17.3716,4.06957,22.1809,1.39972,5.64224,-7.94302,-5.59134,-1.45901,0.439725,1.11211,-6.73411,-3.11746,1.4598,-4.78344,-2.09513,-0.404037,0.473396,-4.22587,-2.43839,-5.70551,-5.26427,-0.515338,1.20082,0.113119 
10190.4,10132.9,9256.55,-0.061965,0.47587,-3.01478,1.28661,-2.15014,-14.2047,7.89898,0.463674,0.911903,2.0883,-1.64338,3.11185,-2.21723,0.781415,-1.37312,0.396228,-1.38267,3.09944,-1.8496,-1.29836,2.6087,-3.15966,-2.03297,-3.33185,-3.23065,2.92606,0.328003,-0.0324179 10185,10126,9252.36,-0.460313,1.71643,-3.7396,-2.47922,-1.49725,-15.3645,-1.80975,0.715758,-0.981069,-0.691494,-0.794101,-0.106849,-2.08179,-0.30971,-1.53311,0.428815,-0.320026,-0.221114,2.28648,0.175576,3.04606,-1.33911,-0.290353,-5.37868,-3.63253,0.919151,0.306196,-0.421839 10178.6,10124.8,9251.04,-1.00256,1.33259,-4.2472,-1.03971,2.95821,-4.55752,1.84476,0.117356,-4.36831,-4.27268,-1.02576,-0.886254,0.661063,-0.0446314,-0.718596,-0.508343,-2.00182,-0.337999,2.57329,-0.613947,2.18595,0.685998,2.2221,-1.4549,-2.89677,-0.0111036,1.2411,0.83044 10170.8,10127.6,9252.97,-1.71108,0.0714348,-2.91875,-0.0818013,10.0027,5.28964,4.84662,0.115636,-5.97389,-2.97492,0.466922,-1.16018,3.14319,-0.484977,-0.73996,-1.40938,-2.86898,-1.18229,2.85098,1.59393,-0.709864,0.769892,0.0526875,0.667581,-4.09633,-0.130706,2.87503,0.28772 10163.4,10130.8,9256.69,-0.0482655,-0.561906,-4.41924,-1.93638,1.00001,-3.80859,-6.74655,-0.693966,-6.90741,3.83606,-0.443929,0.133173,1.32042,-4.12952,2.21239,-0.401666,-2.83084,1.48444,3.60821,4.7162,0.0479322,1.57325,-2.9423,0.781086,-3.57562,1.01359,1.5974,-1.03302 10159.1,10132.9,9259.9,0.830676,1.38376,-3.59798,1.88876,1.90766,6.33722,1.16568,-1.88109,-5.49532,7.56995,-3.97276,2.47056,-1.10217,-4.02745,0.530141,-1.80729,-2.44923,1.11112,6.04583,5.79514,-1.61378,0.146823,-4.31812,1.65679,-0.82556,0.385538,-1.6035,-0.921055 10159.8,10135.2,9260.63,-0.16576,1.00018,-5.12473,0.442361,0.505831,-5.64864,-2.63413,-2.52592,-5.46478,4.95174,-4.3147,0.782684,-5.73615,-4.82371,0.266276,-1.86669,-4.0481,-1.31822,9.03428,5.18538,0.835431,-1.04748,-4.21294,1.0615,-0.105573,-1.22812,-5.24566,-3.63422 
10165.2,10138.1,9258.46,0.205477,-0.680098,-4.46762,5.26891,1.18115,-1.68502,7.13137,-1.22722,-4.01706,-1.7858,-0.511666,3.55446,-3.85553,-2.43205,1.3525,-0.694302,-4.16672,-0.729833,7.26617,2.38627,0.742375,-2.04911,-3.24066,2.72775,2.10783,0.115275,-4.78462,-4.34396 10171.6,10139.6,9254.61,-1.51268,-2.23477,-5.13237,-3.29461,-0.317239,-10.5071,-7.94002,1.87205,-2.15615,-2.57627,4.52526,1.46446,-2.39092,-3.68309,1.44927,1.27351,-2.10555,-3.67494,7.0263,3.64847,0.370668,0.612656,-2.452,4.76347,5.31087,1.21101,-2.18927,-4.86589 10174.6,10139.6,9250.85,-0.380976,0.430706,-4.77251,1.24603,3.57465,-3.14504,-10.8805,1.4131,-3.82203,6.1265,4.05681,1.86576,-2.69539,-3.84931,0.571097,0.0445532,-3.61574,1.0929,5.45496,4.67637,-2.69117,0.376736,-3.44843,8.26613,5.44059,2.39248,-1.35143,-3.43895 10173.2,10141.8,9247.9,-0.967231,0.660605,-0.333774,0.682442,10.1733,9.80472,-4.02844,0.296976,-2.0856,1.70749,0.105393,-0.302007,-2.02762,-1.68176,-2.57321,-1.85542,-2.20576,-3.56605,7.81712,4.57148,-0.717533,0.00661063,0.070936,7.88567,3.00205,-0.188925,-1.30646,-0.417109 10169.8,10147.8,9245.05,1.57911,1.89614,-1.23894,5.44327,1.1255,2.7455,0.888702,-2.69789,-2.29535,1.37374,-2.16695,0.277041,-2.61632,-0.168021,1.19527,-0.966804,-1.39634,2.02717,6.13068,1.74285,2.61838,-0.673957,2.42798,5.71141,1.0237,-0.190537,-2.48355,-0.424022 10166.9,10152.4,9241.4,1.48812,1.56883,0.00439658,-1.99079,-5.3945,-7.45076,-2.79497,-1.09824,0.438405,1.08335,0.567998,-2.12211,0.537132,0.235065,2.13962,0.850241,2.33283,0.11668,5.71046,0.316621,2.37782,1.5783,4.38674,4.44102,2.85837,-0.867284,0.197126,-0.632035 10166,10149.9,9237.21,3.10346,3.20745,-0.0787972,3.26164,-1.99167,1.15174,7.73898,0.388067,-1.3872,7.93093,2.89628,-0.846609,2.95243,1.10786,0.0356645,-0.191303,-1.48335,3.06518,0.833731,-2.48298,-2.62814,-0.329278,-0.0454046,4.84244,1.50962,-0.571214,2.28968,0.0896905 
10169.4,10141.9,9233.72,1.54047,2.79665,0.872984,0.435893,0.341067,4.50191,6.31086,2.24353,0.0763229,5.33021,2.30696,-1.94916,2.28551,1.6759,-3.55737,-0.57595,-3.31446,-1.28349,0.109544,-0.911539,-3.08755,0.149125,-2.57658,2.65457,-0.759677,-1.72314,1.73795,1.22082 10175.5,10134.5,9231.85,3.08721,1.31195,-0.463831,-2.78365,-16.0641,-12.4959,-7.90321,1.44639,2.2521,2.09953,-0.628689,0.674957,-0.991746,0.999703,0.501374,1.08647,-1.9555,-0.457535,-1.969,0.140249,0.679574,4.05153,-1.26929,2.9472,1.23177,0.0460567,-1.18548,1.19414 10178.5,10132.3,9231.94,4.8578,-0.156201,-1.83619,3.45539,-10.5983,-4.40534,-3.25278,-1.48511,1.7839,1.07398,-3.79721,3.44697,-0.661031,-0.19397,1.51898,-2.78611,-1.58924,-1.02247,-4.03291,-0.779814,-2.72459,1.42865,-4.44874,1.96164,0.024013,0.769821,-1.68183,-1.09525 10176,10135.5,9234.24,3.98434,-2.9881,-1.82932,-3.45496,-4.37718,-1.32479,-6.81161,0.242295,3.63988,0.773917,-2.92089,1.50769,1.03257,-1.29175,0.607123,-3.32519,0.794345,-7.2134,-4.18473,-2.11878,-3.48641,2.04926,-1.83971,2.5711,1.8547,-0.444122,0.204744,-0.633906 10170.3,10141.1,9238.24,4.5574,-1.21766,-1.92884,-3.3891,-4.53289,-3.61119,-11.1428,0.87067,2.52674,6.28098,-0.916225,0.833349,-0.285056,-2.02874,2.83162,-0.822357,0.836116,-2.02452,-4.36166,-2.46534,-2.40599,3.53798,0.439996,2.8824,2.66576,-0.190266,-0.411649,-0.335746 10164.8,10146.9,9241.73,1.14271,0.21175,2.54403,-5.97996,8.86795,9.92082,0.583279,0.92891,3.1377,1.52082,0.653327,-2.04189,-0.909795,-1.88382,-1.45444,-1.72465,2.94817,-6.9659,0.661566,-0.779148,-2.33549,3.61435,1.90115,-0.709103,0.572663,-2.44443,-1.61985,-1.24632 10161.8,10151.9,9242.42,0.429305,-0.24402,1.54324,-0.758714,1.99988,2.30697,-0.150645,-1.67843,-0.372931,2.68223,0.974669,-2.18675,-3.69726,-3.84373,0.315076,-1.61503,2.02219,-0.439987,1.5067,0.347441,-0.468043,1.85512,2.51346,-3.61534,-1.61311,-1.68631,-4.32277,-3.31289 
10160.6,10154.5,9240.5,-1.6783,-2.7916,3.79283,-1.46484,1.8842,7.0456,3.61276,-2.08564,-1.14902,-3.90469,1.00738,-2.71903,-1.12392,-2.56102,-0.564502,-1.26929,2.87817,-3.80446,2.16188,1.69189,-0.17359,-0.806729,4.45158,-4.99401,-1.9224,-2.1335,-3.41399,-1.5215 10158.8,10152.9,9238.94,-1.26294,-1.55708,2.47997,-0.37092,-5.35681,-1.99801,-4.61673,-3.19995,-3.63982,-3.59422,0.268397,-1.15304,1.21312,-1.94008,2.37467,0.463918,1.03699,-0.249188,1.94821,3.1095,0.656428,-1.26258,5.17342,-2.5293,-0.911564,-0.727538,-1.60047,-0.657086 10157.1,10148.4,9241.47,-0.729297,1.90628,1.50273,8.02209,4.5029,7.25435,-0.943104,-3.87229,-5.15977,-0.605295,-0.786266,-0.00624273,3.2036,-0.99694,1.83674,-0.424322,-0.759934,4.69506,3.12589,4.93905,-1.14094,-2.37706,0.896838,-1.15642,-2.07425,-0.341439,0.651623,-1.90525 10159.3,10145.1,9249.53,-3.61489,-0.368775,4.8318,0.654323,13.8953,20.2332,9.01061,0.740005,1.06482,-1.98312,1.43178,-2.39481,5.44965,2.23927,-2.07082,1.84445,3.36316,-2.3874,5.82791,5.13504,0.331121,1.17574,4.11636,2.46863,2.53744,-2.31289,3.73605,1.261 10166.4,10146.2,9260.39,-0.690065,-0.196533,2.57149,3.28245,1.26863,3.07282,2.3288,0.343504,0.7493,7.7189,2.47287,-2.19401,1.83016,1.49389,2.04941,5.57015,1.68587,7.37325,4.33035,3.86901,3.21355,1.31074,4.30838,4.34097,4.14204,-0.792683,1.91579,1.4487 10174.6,10153.3,9268.63,0.973864,0.288282,4.67663,-0.604468,1.35396,1.77193,6.1612,0.928573,3.56181,0.301872,1.61496,-1.94891,1.37811,1.784,-0.829802,4.5252,2.98522,2.05165,3.03006,0.33278,4.9167,0.692046,4.78248,3.89965,4.1223,-1.28055,0.902128,2.44014 10179.4,10165.9,9270.91,0.383028,0.372248,2.91142,5.26445,-4.52355,-0.481389,-1.47582,-0.0802922,4.09074,-3.4789,-1.84054,-0.641665,1.60157,2.15213,-0.406849,1.24052,1.05589,7.69175,-4.79723,-3.42058,1.48542,-2.69221,-0.604027,-2.8823,-1.41943,-0.386671,1.59434,1.71786 
10180.9,10180.3,9268.76,-7.39108,-4.07938,1.96913,5.84801,-1.99672,13.1344,-8.45676,2.45664,8.74322,0.00440195,-3.70354,-4.02376,5.09873,7.07674,-2.94009,-6.27334,-2.18896,9.06615,-15.5002,-6.518,-12.659,-9.2251,-8.78964,-16.0646,-15.2285,-1.36974,7.28841,2.96689 \ No newline at end of file diff --git a/nipype/testing/data/jsongrabber.txt b/nipype/testing/data/jsongrabber.txt index 4554d7beb4..c81d99fa2a 100644 --- a/nipype/testing/data/jsongrabber.txt +++ b/nipype/testing/data/jsongrabber.txt @@ -1 +1 @@ -{"param2": 4, "param1": "exampleStr"} \ No newline at end of file +{"param2": 4, "param1": "exampleStr"} diff --git a/nipype/testing/data/lta1.lta b/nipype/testing/data/lta1.lta new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/lta2.lta b/nipype/testing/data/lta2.lta new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/interfaces/tests/realign_json.json b/nipype/testing/data/realign_json.json similarity index 99% rename from nipype/interfaces/tests/realign_json.json rename to nipype/testing/data/realign_json.json index 12a5b41a0a..5bf1936476 100644 --- a/nipype/interfaces/tests/realign_json.json +++ b/nipype/testing/data/realign_json.json @@ -31,4 +31,4 @@ "write_mask": null, "write_which": null, "write_wrap": null -} \ No newline at end of file +} diff --git a/nipype/testing/data/smri_ants_registration_settings.json b/nipype/testing/data/smri_ants_registration_settings.json index 455a9c6ef1..53f33e33e2 100644 --- a/nipype/testing/data/smri_ants_registration_settings.json +++ b/nipype/testing/data/smri_ants_registration_settings.json @@ -84,7 +84,6 @@ true, true ], - "terminal_output": "stream", "write_composite_transform": true, "initialize_transforms_per_stage": false, "num_threads": 1, @@ -178,4 +177,4 @@ ], "dimension": 3, "collapse_output_transforms": false -} \ No newline at end of file +} diff --git a/nipype/testing/data/spminfo b/nipype/testing/data/spminfo index a24b0a57c4..32317debc4 100644 --- 
a/nipype/testing/data/spminfo +++ b/nipype/testing/data/spminfo @@ -6,9 +6,9 @@ try, end; spm_path = spm('dir'); fprintf(1, 'NIPYPE %s', spm_path); - + ,catch ME, fprintf(2,'MATLAB code threw an exception:\n'); fprintf(2,'%s\n',ME.message); if length(ME.stack) ~= 0, fprintf(2,'File:%s\nName:%s\nLine:%d\n',ME.stack.file,ME.stack.name,ME.stack.line);, end; -end; \ No newline at end of file +end; diff --git a/nipype/testing/data/struct_to_template.mat b/nipype/testing/data/struct_to_template.mat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/tbss_dir/do_not_delete.txt b/nipype/testing/data/tbss_dir/do_not_delete.txt index a1df420e34..9c5c450dfa 100644 --- a/nipype/testing/data/tbss_dir/do_not_delete.txt +++ b/nipype/testing/data/tbss_dir/do_not_delete.txt @@ -1 +1 @@ -This file has to be here because git ignores empty folders. \ No newline at end of file +This file has to be here because git ignores empty folders. diff --git a/nipype/testing/fixtures.py b/nipype/testing/fixtures.py index 2a405742f7..550346d1db 100644 --- a/nipype/testing/fixtures.py +++ b/nipype/testing/fixtures.py @@ -39,66 +39,57 @@ def nifti_image_files(outdir, filelist, shape): @pytest.fixture() def create_files_in_directory(request, tmpdir): - outdir = str(tmpdir) - cwd = os.getcwd() - os.chdir(outdir) + cwd = tmpdir.chdir() filelist = ['a.nii', 'b.nii'] - nifti_image_files(outdir, filelist, shape=(3,3,3,4)) + nifti_image_files(tmpdir.strpath, filelist, shape=(3,3,3,4)) def change_directory(): - os.chdir(cwd) + cwd.chdir() request.addfinalizer(change_directory) - return (filelist, outdir) + return (filelist, tmpdir.strpath) @pytest.fixture() def create_analyze_pair_file_in_directory(request, tmpdir): - outdir = str(tmpdir) - cwd = os.getcwd() - os.chdir(outdir) + cwd = tmpdir.chdir() filelist = ['a.hdr'] - analyze_pair_image_files(outdir, filelist, shape=(3, 3, 3, 4)) + analyze_pair_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) def 
change_directory(): - os.chdir(cwd) + cwd.chdir() request.addfinalizer(change_directory) - return (filelist, outdir) + return (filelist, tmpdir.strpath) @pytest.fixture() def create_files_in_directory_plus_dummy_file(request, tmpdir): - outdir = str(tmpdir) - cwd = os.getcwd() - os.chdir(outdir) + cwd = tmpdir.chdir() filelist = ['a.nii', 'b.nii'] - nifti_image_files(outdir, filelist, shape=(3,3,3,4)) + nifti_image_files(tmpdir.strpath, filelist, shape=(3,3,3,4)) - with open(os.path.join(outdir, 'reg.dat'), 'wt') as fp: - fp.write('dummy file') + tmpdir.join('reg.dat').write('dummy file') filelist.append('reg.dat') def change_directory(): - os.chdir(cwd) + cwd.chdir() request.addfinalizer(change_directory) - return (filelist, outdir) + return (filelist, tmpdir.strpath) @pytest.fixture() def create_surf_file_in_directory(request, tmpdir): - outdir = str(tmpdir) - cwd = os.getcwd() - os.chdir(outdir) + cwd = tmpdir.chdir() surf = 'lh.a.nii' - nifti_image_files(outdir, filelist=surf, shape=(1, 100, 1)) + nifti_image_files(tmpdir.strpath, filelist=surf, shape=(1, 100, 1)) def change_directory(): - os.chdir(cwd) + cwd.chdir() request.addfinalizer(change_directory) - return (surf, outdir) + return (surf, tmpdir.strpath) def set_output_type(fsl_output_type): @@ -115,18 +106,15 @@ def set_output_type(fsl_output_type): @pytest.fixture(params=[None]+list(Info.ftypes)) def create_files_in_directory_plus_output_type(request, tmpdir): func_prev_type = set_output_type(request.param) - - testdir = str(tmpdir) - origdir = os.getcwd() - os.chdir(testdir) + origdir = tmpdir.chdir() filelist = ['a.nii', 'b.nii'] - nifti_image_files(testdir, filelist, shape=(3,3,3,4)) + nifti_image_files(tmpdir.strpath, filelist, shape=(3,3,3,4)) out_ext = Info.output_type_to_ext(Info.output_type()) def fin(): set_output_type(func_prev_type) - os.chdir(origdir) + origdir.chdir() request.addfinalizer(fin) - return (filelist, testdir, out_ext) + return (filelist, tmpdir.strpath, out_ext) diff --git 
a/nipype/testing/tests/test_utils.py b/nipype/testing/tests/test_utils.py index e2ca3a32de..838c3d167a 100644 --- a/nipype/testing/tests/test_utils.py +++ b/nipype/testing/tests/test_utils.py @@ -17,8 +17,8 @@ def test_tempfatfs(): except (IOError, OSError): warnings.warn("Cannot mount FAT filesystems with FUSE") else: - with fatfs as tmpdir: - assert os.path.exists(tmpdir) + with fatfs as tmp_dir: + assert os.path.exists(tmp_dir) @patch('subprocess.check_call', MagicMock( side_effect=subprocess.CalledProcessError('',''))) diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 42998861e7..15dfe0f447 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -11,40 +11,44 @@ ''' from __future__ import print_function, division, unicode_literals, absolute_import import os -import shutil +import sys import errno +import atexit from warnings import warn -from io import StringIO from distutils.version import LooseVersion -from simplejson import load, dump +import configparser import numpy as np -from builtins import str, object, open +from builtins import bytes, str, object, open +from simplejson import load, dump from future import standard_library -standard_library.install_aliases() -import configparser +from .misc import str2bool from ..external import portalocker +standard_library.install_aliases() + + +CONFIG_DEPRECATIONS = { + 'profile_runtime': ('monitoring.enabled', '1.0'), + 'filemanip_level': ('logging.utils_level', '1.0'), +} NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0') -# Get home directory in platform-agnostic way -homedir = os.path.expanduser('~') -default_cfg = """ +DEFAULT_CONFIG_TPL = """\ [logging] workflow_level = INFO -filemanip_level = INFO +utils_level = INFO interface_level = INFO log_to_file = false -log_directory = %s +log_directory = {log_dir} log_size = 16384000 log_rotate = 4 [execution] create_report = true -crashdump_dir = %s -display_variable = :1 +crashdump_dir = {crashdump_dir} hash_method = 
timestamp job_finished_timeout = 5 keep_inputs = false @@ -64,11 +68,15 @@ parameterize_dirs = true poll_sleep_duration = 2 xvfb_max_wait = 10 -profile_runtime = false + +[monitoring] +enabled = false +sample_frequency = 1 +summary_append = true [check] interval = 1209600 -""" % (homedir, os.getcwd()) +""".format def mkdir_p(path): @@ -82,39 +90,59 @@ def mkdir_p(path): class NipypeConfig(object): - """Base nipype config class - """ + """Base nipype config class""" def __init__(self, *args, **kwargs): self._config = configparser.ConfigParser() + self._cwd = None + config_dir = os.path.expanduser('~/.nipype') - mkdir_p(config_dir) - old_config_file = os.path.expanduser('~/.nipype.cfg') - new_config_file = os.path.join(config_dir, 'nipype.cfg') - # To be deprecated in two releases - if os.path.exists(old_config_file): - if os.path.exists(new_config_file): - msg = ("Detected presence of both old (%s, used by versions " - "< 0.5.2) and new (%s) config files. This version will " - "proceed with the new one. 
We advise to merge settings " - "and remove old config file if you are not planning to " - "use previous releases of nipype.") % (old_config_file, - new_config_file) - warn(msg) - else: - warn("Moving old config file from: %s to %s" % (old_config_file, - new_config_file)) - shutil.move(old_config_file, new_config_file) self.data_file = os.path.join(config_dir, 'nipype.json') - self._config.readfp(StringIO(default_cfg)) - self._config.read([new_config_file, old_config_file, 'nipype.cfg']) + + self.set_default_config() + self._display = None + self._resource_monitor = None + + if os.path.exists(config_dir): + self._config.read([os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg']) + + for option in CONFIG_DEPRECATIONS: + for section in ['execution', 'logging', 'monitoring']: + if self.has_option(section, option): + new_section, new_option = CONFIG_DEPRECATIONS[option][0].split('.') + if not self.has_option(new_section, new_option): + # Warn implicit in get + self.set(new_section, new_option, self.get(section, option)) + + @property + def cwd(self): + """Cache current working directory ASAP""" + # Run getcwd only once, preventing multiproc to finish + # with error having changed to the wrong path + if self._cwd is None: + try: + self._cwd = os.getcwd() + except OSError: + warn('Trying to run Nipype from a nonexistent directory "{}".'.format( + os.getenv('PWD', 'unknown')), RuntimeWarning) + raise + return self._cwd def set_default_config(self): - self._config.readfp(StringIO(default_cfg)) + """Read default settings template and set into config object""" + default_cfg = DEFAULT_CONFIG_TPL( + log_dir=os.path.expanduser('~'), # Get $HOME in a platform-agnostic way + crashdump_dir=self.cwd # Read cached cwd + ) + + try: + self._config.read_string(default_cfg) # Python >= 3.2 + except AttributeError: + from io import StringIO + self._config.readfp(StringIO(default_cfg)) def enable_debug_mode(self): - """Enables debug configuration - """ + """Enables debug configuration""" 
self._config.set('execution', 'stop_on_first_crash', 'true') self._config.set('execution', 'remove_unnecessary_outputs', 'false') self._config.set('execution', 'keep_inputs', 'true') @@ -129,19 +157,39 @@ def set_log_dir(self, log_dir): """ self._config.set('logging', 'log_directory', log_dir) - def get(self, section, option): - return self._config.get(section, option) + def get(self, section, option, default=None): + """Get an option""" + if option in CONFIG_DEPRECATIONS: + msg = ('Config option "%s" has been deprecated as of nipype %s. Please use ' + '"%s" instead.') % (option, CONFIG_DEPRECATIONS[option][1], + CONFIG_DEPRECATIONS[option][0]) + warn(msg) + section, option = CONFIG_DEPRECATIONS[option][0].split('.') + + if self._config.has_option(section, option): + return self._config.get(section, option) + return default def set(self, section, option, value): + """Set new value on option""" if isinstance(value, bool): value = str(value) + if option in CONFIG_DEPRECATIONS: + msg = ('Config option "%s" has been deprecated as of nipype %s. 
Please use ' + '"%s" instead.') % (option, CONFIG_DEPRECATIONS[option][1], + CONFIG_DEPRECATIONS[option][0]) + warn(msg) + section, option = CONFIG_DEPRECATIONS[option][0].split('.') + return self._config.set(section, option, value) def getboolean(self, section, option): + """Get a boolean option from section""" return self._config.getboolean(section, option) def has_option(self, section, option): + """Check if option exists in section""" return self._config.has_option(section, option) @property @@ -149,6 +197,7 @@ def _sections(self): return self._config._sections def get_data(self, key): + """Read options file""" if not os.path.exists(self.data_file): return None with open(self.data_file, 'rt') as file: @@ -159,17 +208,23 @@ def get_data(self, key): return None def save_data(self, key, value): + """Store config flie""" datadict = {} if os.path.exists(self.data_file): with open(self.data_file, 'rt') as file: portalocker.lock(file, portalocker.LOCK_EX) datadict = load(file) + else: + dirname = os.path.dirname(self.data_file) + if not os.path.exists(dirname): + mkdir_p(dirname) with open(self.data_file, 'wt') as file: portalocker.lock(file, portalocker.LOCK_EX) datadict[key] = value dump(datadict, file) def update_config(self, config_dict): + """Extend internal dictionary with config_dict""" for section in ['execution', 'logging', 'check']: if section in config_dict: for key, val in list(config_dict[section].items()): @@ -177,9 +232,123 @@ def update_config(self, config_dict): self._config.set(section, key, str(val)) def update_matplotlib(self): + """Set backend on matplotlib from options""" import matplotlib matplotlib.use(self.get('execution', 'matplotlib_backend')) def enable_provenance(self): + """Sets provenance storing on""" self._config.set('execution', 'write_provenance', 'true') self._config.set('execution', 'hash_method', 'content') + + @property + def resource_monitor(self): + """Check if resource_monitor is available""" + if self._resource_monitor is not 
None: + return self._resource_monitor + + # Cache config from nipype config + self.resource_monitor = str2bool(self._config.get( + 'monitoring', 'enabled')) or False + return self._resource_monitor + + @resource_monitor.setter + def resource_monitor(self, value): + # Accept string true/false values + if isinstance(value, (str, bytes)): + value = str2bool(value.lower()) + + if value is False: + self._resource_monitor = False + elif value is True: + if not self._resource_monitor: + # Before setting self._resource_monitor check psutil availability + self._resource_monitor = False + try: + import psutil + self._resource_monitor = LooseVersion( + psutil.__version__) >= LooseVersion('5.0') + except ImportError: + pass + finally: + if not self._resource_monitor: + warn('Could not enable the resource monitor: psutil>=5.0' + ' could not be imported.') + self._config.set('monitoring', 'enabled', + ('%s' % self._resource_monitor).lower()) + + def enable_resource_monitor(self): + """Sets the resource monitor on""" + self.resource_monitor = True + + def get_display(self): + """Returns the first display available""" + + # Check if an Xorg server is listening + # import subprocess as sp + # if not hasattr(sp, 'DEVNULL'): + # setattr(sp, 'DEVNULL', os.devnull) + # x_listening = bool(sp.call('ps au | grep -v grep | grep -i xorg', + # shell=True, stdout=sp.DEVNULL)) + + if self._display is not None: + return ':%d' % self._display.new_display + + sysdisplay = None + if self._config.has_option('execution', 'display_variable'): + sysdisplay = self._config.get('execution', 'display_variable') + + sysdisplay = sysdisplay or os.getenv('DISPLAY') + if sysdisplay: + from collections import namedtuple + + def _mock(): + pass + + # Store a fake Xvfb object + ndisp = int(sysdisplay.split(':')[-1]) + Xvfb = namedtuple('Xvfb', ['new_display', 'stop']) + self._display = Xvfb(ndisp, _mock) + return sysdisplay + else: + if 'darwin' in sys.platform: + raise RuntimeError( + 'Xvfb requires root 
permissions to run in OSX. Please ' + 'make sure that an X server is listening and set the ' + 'appropriate config on either $DISPLAY or nipype\'s ' + '"display_variable" config. Valid X servers include ' + 'VNC, XQuartz, or manually started Xvfb.') + + # If $DISPLAY is empty, it confuses Xvfb so unset + if sysdisplay == '': + del os.environ['DISPLAY'] + try: + from xvfbwrapper import Xvfb + except ImportError: + raise RuntimeError( + 'A display server was required, but $DISPLAY is not defined ' + 'and Xvfb could not be imported.') + + self._display = Xvfb(nolisten='tcp') + self._display.start() + + # Older versions of xvfbwrapper used vdisplay_num + if not hasattr(self._display, 'new_display'): + setattr(self._display, 'new_display', + self._display.vdisplay_num) + + return ':%d' % self._display.new_display + + def stop_display(self): + """Closes the display if started""" + if self._display is not None: + from .. import logging + self._display.stop() + logging.getLogger('interface').debug('Closing display (if virtual)') + + +@atexit.register +def free_display(): + """Stop virtual display (if it is up)""" + from .. 
import config + config.stop_display() diff --git a/nipype/utils/docparse.py b/nipype/utils/docparse.py index ebf52d06d3..0d6bce7d45 100644 --- a/nipype/utils/docparse.py +++ b/nipype/utils/docparse.py @@ -254,6 +254,7 @@ def get_doc(cmd, opt_map, help_flag=None, trap_error=True): """ res = CommandLine('which %s' % cmd.split(' ')[0], + resource_monitor=False, terminal_output='allatonce').run() cmd_path = res.runtime.stdout.strip() if cmd_path == '': @@ -330,6 +331,7 @@ def get_params_from_doc(cmd, style='--', help_flag=None, trap_error=True): """ res = CommandLine('which %s' % cmd.split(' ')[0], + resource_monitor=False, terminal_output='allatonce').run() cmd_path = res.runtime.stdout.strip() if cmd_path == '': diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py index 4c18a66f8f..8731aa32eb 100644 --- a/nipype/utils/draw_gantt_chart.py +++ b/nipype/utils/draw_gantt_chart.py @@ -1,8 +1,9 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -"""Module to draw an html gantt chart from logfile produced by -callback_log.log_nodes_cb() +""" +Module to draw an html gantt chart from logfile produced by +``nipype.utils.profiler.log_nodes_cb()`` """ from __future__ import print_function, division, unicode_literals, absolute_import @@ -11,7 +12,6 @@ import random import datetime import simplejson as json -from dateutil import parser from builtins import str, range, open # Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict from future import standard_library @@ -102,61 +102,14 @@ def log_to_dict(logfile): ''' # Init variables - #keep track of important vars - nodes_list = [] #all the parsed nodes - unifinished_nodes = [] #all start nodes that dont have a finish yet - with open(logfile, 'r') as content: - #read file separating each line - content = content.read() - lines = content.split('\n') - - for l in lines: - #try 
to parse each line and transform in a json dict. - #if the line has a bad format, just skip - node = None - try: - node = json.loads(l) - except ValueError: - pass - - if not node: - continue - - #if it is a start node, add to unifinished nodes - if 'start' in node: - node['start'] = parser.parse(node['start']) - unifinished_nodes.append(node) - - #if it is end node, look in uninished nodes for matching start - #remove from unifinished list and add to node list - elif 'finish' in node: - node['finish'] = parser.parse(node['finish']) - #because most nodes are small, we look backwards in the unfinished list - for s in range(len(unifinished_nodes)): - aux = unifinished_nodes[s] - #found the end for node start, copy over info - if aux['id'] == node['id'] and aux['name'] == node['name'] \ - and aux['start'] < node['finish']: - node['start'] = aux['start'] - node['duration'] = \ - (node['finish'] - node['start']).total_seconds() - - unifinished_nodes.remove(aux) - nodes_list.append(node) - break - - #finished parsing - #assume nodes without finish didn't finish running. 
- #set their finish to last node run - last_node = nodes_list[-1] - for n in unifinished_nodes: - n['finish'] = last_node['finish'] - n['duration'] = (n['finish'] - n['start']).total_seconds() - nodes_list.append(n) - - # Return list of nodes - return nodes_list + # read file separating each line + lines = content.readlines() + + nodes_list = [json.loads(l) for l in lines] + + # Return list of nodes + return nodes_list def calculate_resource_timeseries(events, resource): @@ -453,7 +406,7 @@ def generate_gantt_chart(logfile, cores, minute_scale=10, ----- # import logging # import logging.handlers - # from nipype.pipeline.plugins.callback_log import log_nodes_cb + # from nipype.utils.profiler import log_nodes_cb # log_filename = 'callback.log' # logger = logging.getLogger('callback') diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 59b269e943..d87f498d00 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -5,29 +5,31 @@ """ from __future__ import print_function, division, unicode_literals, absolute_import -from builtins import str, bytes, open - -from future import standard_library -standard_library.install_aliases() import sys import pickle -import subprocess +import errno +import subprocess as sp import gzip import hashlib +import locale from hashlib import md5 import os +import os.path as op import re import shutil import posixpath import simplejson as json import numpy as np +from builtins import str, bytes, open + from .. 
import logging, config from .misc import is_container -from ..interfaces.traits_extension import isdefined +from future import standard_library +standard_library.install_aliases() -fmlogger = logging.getLogger("filemanip") +fmlogger = logging.getLogger('utils') related_filetype_sets = [ @@ -62,21 +64,21 @@ def split_filename(fname): -------- >>> from nipype.utils.filemanip import split_filename >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') - >>> pth # doctest: +ALLOW_UNICODE + >>> pth '/home/data' - >>> fname # doctest: +ALLOW_UNICODE + >>> fname 'subject' - >>> ext # doctest: +ALLOW_UNICODE + >>> ext '.nii.gz' """ special_extensions = [".nii.gz", ".tar.gz"] - pth = os.path.dirname(fname) - fname = os.path.basename(fname) + pth = op.dirname(fname) + fname = op.basename(fname) ext = None for special_ext in special_extensions: @@ -87,10 +89,11 @@ def split_filename(fname): fname = fname[:-ext_len] break if not ext: - fname, ext = os.path.splitext(fname) + fname, ext = op.splitext(fname) return pth, fname, ext + def to_str(value): """ Manipulates ordered dicts before they are hashed (Py2/3 compat.) 
@@ -102,6 +105,7 @@ def to_str(value): retval = to_str_py27(value) return retval + def to_str_py27(value): """ Encode dictionary for python 2 @@ -119,7 +123,7 @@ def to_str_py27(value): venc = to_str_py27(val) if venc.startswith(("u'", 'u"')): venc = venc[1:] - retval+= entry(kenc, venc) + retval += entry(kenc, venc) retval += '}' return retval @@ -146,6 +150,7 @@ def to_str_py27(value): retval = retval[1:] return retval + def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): """Manipulates path and name of input filename @@ -169,16 +174,22 @@ def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): >>> from nipype.utils.filemanip import fname_presuffix >>> fname = 'foo.nii.gz' - >>> fname_presuffix(fname,'pre','post','/tmp') # doctest: +ALLOW_UNICODE + >>> fname_presuffix(fname,'pre','post','/tmp') '/tmp/prefoopost.nii.gz' + >>> from nipype.interfaces.base import Undefined + >>> fname_presuffix(fname, 'pre', 'post', Undefined) == fname_presuffix(fname, 'pre', 'post') + True + """ pth, fname, ext = split_filename(fname) if not use_ext: ext = '' - if newpath and isdefined(newpath): - pth = os.path.abspath(newpath) - return os.path.join(pth, prefix + fname + suffix + ext) + + # No need for isdefined: bool(Undefined) evaluates to False + if newpath: + pth = op.abspath(newpath) + return op.join(pth, prefix + fname + suffix + ext) def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True): @@ -196,14 +207,14 @@ def hash_rename(filename, hashvalue): """ path, name, ext = split_filename(filename) newfilename = ''.join((name, '_0x', hashvalue, ext)) - return os.path.join(path, newfilename) + return op.join(path, newfilename) def check_forhash(filename): """checks if file has a hash in its filename""" if isinstance(filename, list): filename = filename[0] - path, name = os.path.split(filename) + path, name = op.split(filename) if re.search('(_0x[a-z0-9]{32})', name): hashvalue = re.findall('(_0x[a-z0-9]{32})', 
name) return True, hashvalue @@ -214,7 +225,7 @@ def check_forhash(filename): def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5): """ Computes hash of a file using 'crypto' module""" hex = None - if os.path.isfile(afile): + if op.isfile(afile): crypto_obj = crypto() with open(afile, 'rb') as fp: while True: @@ -229,7 +240,7 @@ def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5): def hash_timestamp(afile): """ Computes md5 hash of the timestamp of a file """ md5hex = None - if os.path.isfile(afile): + if op.isfile(afile): md5obj = md5() stat = os.stat(afile) md5obj.update(str(stat.st_size).encode()) @@ -248,7 +259,7 @@ def _generate_cifs_table(): On systems without a ``mount`` command, or with no CIFS mounts, returns an empty list. """ - exit_code, output = subprocess.getstatusoutput("mount") + exit_code, output = sp.getstatusoutput("mount") # Not POSIX if exit_code != 0: return [] @@ -323,7 +334,7 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, fmlogger.debug(newfile) if create_new: - while os.path.exists(newfile): + while op.exists(newfile): base, fname, ext = split_filename(newfile) s = re.search('_c[0-9]{4,4}$', fname) i = 0 @@ -353,9 +364,9 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, # copy of file (same hash) (keep) # different file (diff hash) (unlink) keep = False - if os.path.lexists(newfile): - if os.path.islink(newfile): - if all((os.readlink(newfile) == os.path.realpath(originalfile), + if op.lexists(newfile): + if op.islink(newfile): + if all((os.readlink(newfile) == op.realpath(originalfile), not use_hardlink, not copy)): keep = True elif posixpath.samefile(newfile, originalfile): @@ -366,13 +377,13 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, elif hashmethod == 'content': hashfn = hash_infile newhash = hashfn(newfile) - fmlogger.debug("File: %s already exists,%s, copy:%d" % - (newfile, newhash, copy)) + fmlogger.debug('File: %s already exists,%s, copy:%d', newfile, + 
newhash, copy) orighash = hashfn(originalfile) keep = newhash == orighash if keep: - fmlogger.debug("File: %s already exists, not overwriting, copy:%d" - % (newfile, copy)) + fmlogger.debug('File: %s already exists, not overwriting, copy:%d', + newfile, copy) else: os.unlink(newfile) @@ -383,9 +394,9 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, # ~hardlink & ~symlink => copy if not keep and use_hardlink: try: - fmlogger.debug("Linking File: %s->%s" % (newfile, originalfile)) + fmlogger.debug('Linking File: %s->%s', newfile, originalfile) # Use realpath to avoid hardlinking symlinks - os.link(os.path.realpath(originalfile), newfile) + os.link(op.realpath(originalfile), newfile) except OSError: use_hardlink = False # Disable hardlink for associated files else: @@ -393,7 +404,7 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, if not keep and not copy and os.name == 'posix': try: - fmlogger.debug("Symlinking File: %s->%s" % (newfile, originalfile)) + fmlogger.debug('Symlinking File: %s->%s', newfile, originalfile) os.symlink(originalfile, newfile) except OSError: copy = True # Disable symlink for associated files @@ -402,7 +413,7 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, if not keep: try: - fmlogger.debug("Copying File: %s->%s" % (newfile, originalfile)) + fmlogger.debug('Copying File: %s->%s', newfile, originalfile) shutil.copyfile(originalfile, newfile) except shutil.Error as e: fmlogger.warn(e.message) @@ -412,7 +423,7 @@ def copyfile(originalfile, newfile, copy=False, create_new=False, related_file_pairs = (get_related_files(f, include_this_file=False) for f in (originalfile, newfile)) for alt_ofile, alt_nfile in zip(*related_file_pairs): - if os.path.exists(alt_ofile): + if op.exists(alt_ofile): copyfile(alt_ofile, alt_nfile, copy, hashmethod=hashmethod, use_hardlink=use_hardlink, copy_related_files=False) @@ -437,7 +448,7 @@ def get_related_files(filename, include_this_file=True): if this_type 
in type_set: for related_type in type_set: if include_this_file or related_type != this_type: - related_files.append(os.path.join(path, name + related_type)) + related_files.append(op.join(path, name + related_type)) if not len(related_files): related_files = [filename] return related_files @@ -509,9 +520,9 @@ def check_depends(targets, dependencies): """ tgts = filename_to_list(targets) deps = filename_to_list(dependencies) - return all(map(os.path.exists, tgts)) and \ - min(map(os.path.getmtime, tgts)) > \ - max(list(map(os.path.getmtime, deps)) + [0]) + return all(map(op.exists, tgts)) and \ + min(map(op.getmtime, tgts)) > \ + max(list(map(op.getmtime, deps)) + [0]) def save_json(filename, data): @@ -596,6 +607,26 @@ def crash2txt(filename, record): fp.write(''.join(record['traceback'])) +def read_stream(stream, logger=None, encoding=None): + """ + Robustly reads a stream, sending a warning to a logger + if some decoding error was raised. + + >>> read_stream(bytearray([65, 0xc7, 65, 10, 66])) # doctest: +ELLIPSIS + ['A...A', 'B'] + + + """ + default_encoding = encoding or locale.getdefaultlocale()[1] or 'UTF-8' + logger = logger or fmlogger + try: + out = stream.decode(default_encoding) + except UnicodeDecodeError as err: + out = stream.decode(default_encoding, errors='replace') + logger.warning('Error decoding string: %s', err) + return out.splitlines() + + def savepkl(filename, record): if filename.endswith('pklz'): pkl_file = gzip.open(filename, 'wb') @@ -604,6 +635,7 @@ def savepkl(filename, record): pickle.dump(record, pkl_file) pkl_file.close() + rst_levels = ['=', '-', '~', '+'] @@ -624,3 +656,203 @@ def write_rst_dict(info, prefix=''): for key, value in sorted(info.items()): out.append('{}* {} : {}'.format(prefix, key, str(value))) return '\n'.join(out) + '\n\n' + + +def dist_is_editable(dist): + """Is distribution an editable install? 
+ + Parameters + ---------- + dist : string + Package name + + # Borrowed from `pip`'s' API + """ + for path_item in sys.path: + egg_link = op.join(path_item, dist + '.egg-link') + if op.isfile(egg_link): + return True + return False + + +def makedirs(path, exist_ok=False): + """ + Create path, if it doesn't exist. + + Parameters + ---------- + path : output directory to create + + """ + if not exist_ok: # The old makedirs + os.makedirs(path) + return path + + # this odd approach deals with concurrent directory cureation + if not op.exists(op.abspath(path)): + fmlogger.debug("Creating directory %s", path) + try: + os.makedirs(path) + except OSError: + fmlogger.debug("Problem creating directory %s", path) + if not op.exists(path): + raise OSError('Could not create directory %s' % path) + return path + + +def emptydirs(path, noexist_ok=False): + """ + Empty an existing directory, without deleting it. Do not + raise error if the path does not exist and noexist_ok is True. + + Parameters + ---------- + path : directory that should be empty + + """ + fmlogger.debug("Removing contents of %s", path) + + if noexist_ok and not op.exists(path): + return True + + if op.isfile(path): + raise OSError('path "%s" should be a directory' % path) + + try: + shutil.rmtree(path) + except OSError as ex: + elcont = os.listdir(path) + if ex.errno == errno.ENOTEMPTY and not elcont: + fmlogger.warning( + 'An exception was raised trying to remove old %s, but the path ' + 'seems empty. Is it an NFS mount?. Passing the exception.', path) + elif ex.errno == errno.ENOTEMPTY and elcont: + fmlogger.debug('Folder %s contents (%d items).', path, len(elcont)) + raise ex + else: + raise ex + + makedirs(path) + + +def which(cmd, env=None, pathext=None): + """ + Return the path to an executable which would be run if the given + cmd was called. If no cmd would be called, return ``None``. 
+ + Code for Python < 3.3 is based on a code snippet from + http://orip.org/2009/08/python-checking-if-executable-exists-in.html + + """ + + if pathext is None: + pathext = os.getenv('PATHEXT', '').split(os.pathsep) + pathext.insert(0, '') + + path = os.getenv("PATH", os.defpath) + if env and 'PATH' in env: + path = env.get("PATH") + + if sys.version_info >= (3, 3): + for ext in pathext: + filename = shutil.which(cmd + ext, path=path) + if filename: + return filename + return None + + for ext in pathext: + extcmd = cmd + ext + for directory in path.split(os.pathsep): + filename = op.join(directory, extcmd) + if op.exists(filename): + return filename + return None + + +def get_dependencies(name, environ): + """Return library dependencies of a dynamically linked executable + + Uses otool on darwin, ldd on linux. Currently doesn't support windows. + + """ + if sys.platform == 'darwin': + proc = sp.Popen('otool -L `which %s`' % name, + stdout=sp.PIPE, + stderr=sp.PIPE, + shell=True, + env=environ) + elif 'linux' in sys.platform: + proc = sp.Popen('ldd `which %s`' % name, + stdout=sp.PIPE, + stderr=sp.PIPE, + shell=True, + env=environ) + else: + return 'Platform %s not supported' % sys.platform + o, e = proc.communicate() + return o.rstrip() + + +def canonicalize_env(env): + """Windows requires that environment be dicts with bytes as keys and values + This function converts any unicode entries for Windows only, returning the + dictionary untouched in other environments. 
+ + Parameters + ---------- + env : dict + environment dictionary with unicode or bytes keys and values + + Returns + ------- + env : dict + Windows: environment dictionary with bytes keys and values + Other: untouched input ``env`` + """ + if os.name != 'nt': + return env + + out_env = {} + for key, val in env.items(): + if not isinstance(key, bytes): + key = key.encode('utf-8') + if not isinstance(val, bytes): + val = val.encode('utf-8') + out_env[key] = val + return out_env + + +def relpath(path, start=None): + """Return a relative version of a path""" + + try: + return op.relpath(path, start) + except AttributeError: + pass + + if start is None: + start = os.curdir + if not path: + raise ValueError("no path specified") + start_list = op.abspath(start).split(op.sep) + path_list = op.abspath(path).split(op.sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = op.splitunc(path) + unc_start, rest = op.splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError(("Cannot mix UNC and non-UNC paths " + "(%s and %s)") % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. + for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:] + if not rel_list: + return os.curdir + return op.join(*rel_list) diff --git a/nipype/utils/functions.py b/nipype/utils/functions.py new file mode 100644 index 0000000000..aa72d85009 --- /dev/null +++ b/nipype/utils/functions.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +""" +Handles custom functions used in Function interface. Future imports +are avoided to keep namespace as clear as possible. 
+""" +from builtins import next, str +from future.utils import raise_from +import inspect +from textwrap import dedent + +def getsource(function): + """Returns the source code of a function""" + return dedent(inspect.getsource(function)) + + +def create_function_from_source(function_source, imports=None): + """Return a function object from a function source + + Parameters + ---------- + function_source : unicode string + unicode string defining a function + imports : list of strings + list of import statements in string form that allow the function + to be executed in an otherwise empty namespace + """ + ns = {} + import_keys = [] + + try: + if imports is not None: + for statement in imports: + exec(statement, ns) + import_keys = list(ns.keys()) + exec(function_source, ns) + + except Exception as e: + msg = 'Error executing function\n{}\n'.format(function_source) + msg += ("Functions in connection strings have to be standalone. " + "They cannot be declared either interactively or inside " + "another function or inline in the connect string. Any " + "imports should be done inside the function.") + raise_from(RuntimeError(msg), e) + ns_funcs = list(set(ns) - set(import_keys + ['__builtins__'])) + assert len(ns_funcs) == 1, "Function or inputs are ill-defined" + func = ns[ns_funcs[0]] + return func diff --git a/nipype/utils/logger.py b/nipype/utils/logger.py index b30b50bc72..2bdc54c791 100644 --- a/nipype/utils/logger.py +++ b/nipype/utils/logger.py @@ -6,6 +6,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: import logging +from warnings import warn import os import sys from .misc import str2bool @@ -15,7 +16,6 @@ RFHandler except ImportError: # Next 2 lines are optional: issue a warning to the user - from warnings import warn warn("ConcurrentLogHandler not installed. 
Using builtin log handler") from logging.handlers import RotatingFileHandler as RFHandler @@ -33,10 +33,12 @@ def __init__(self, config): stream=sys.stdout) # logging.basicConfig(stream=sys.stdout) self._logger = logging.getLogger('workflow') + self._utlogger = logging.getLogger('utils') self._fmlogger = logging.getLogger('filemanip') self._iflogger = logging.getLogger('interface') self.loggers = {'workflow': self._logger, + 'utils': self._utlogger, 'filemanip': self._fmlogger, 'interface': self._iflogger} self._hdlr = None @@ -53,15 +55,17 @@ def enable_file_logging(self): formatter = logging.Formatter(fmt=self.fmt, datefmt=self.datefmt) hdlr.setFormatter(formatter) self._logger.addHandler(hdlr) - self._fmlogger.addHandler(hdlr) + self._utlogger.addHandler(hdlr) self._iflogger.addHandler(hdlr) + self._fmlogger.addHandler(hdlr) self._hdlr = hdlr def disable_file_logging(self): if self._hdlr: self._logger.removeHandler(self._hdlr) - self._fmlogger.removeHandler(self._hdlr) + self._utlogger.removeHandler(self._hdlr) self._iflogger.removeHandler(self._hdlr) + self._fmlogger.removeHandler(self._hdlr) self._hdlr = None def update_logging(self, config): @@ -69,14 +73,17 @@ def update_logging(self, config): self.disable_file_logging() self._logger.setLevel(logging.getLevelName(config.get('logging', 'workflow_level'))) - self._fmlogger.setLevel(logging.getLevelName(config.get('logging', - 'filemanip_level'))) + self._utlogger.setLevel(logging.getLevelName(config.get('logging', + 'utils_level'))) self._iflogger.setLevel(logging.getLevelName(config.get('logging', 'interface_level'))) if str2bool(config.get('logging', 'log_to_file')): self.enable_file_logging() def getLogger(self, name): + if name == 'filemanip': + warn('The "filemanip" logger has been deprecated and replaced by ' + 'the "utils" logger as of nipype 1.0') if name in self.loggers: return self.loggers[name] return None @@ -90,42 +97,7 @@ def logdebug_dict_differences(self, dold, dnew, prefix=""): typical use -- 
log difference for hashed_inputs """ - # First check inputs, since they usually are lists of tuples - # and dicts are required. - if isinstance(dnew, list): - dnew = dict(dnew) - if isinstance(dold, list): - dold = dict(dold) - - # Compare against hashed_inputs - # Keys: should rarely differ - new_keys = set(dnew.keys()) - old_keys = set(dold.keys()) - if len(new_keys - old_keys): - self._logger.debug("%s not previously seen: %s" - % (prefix, new_keys - old_keys)) - if len(old_keys - new_keys): - self._logger.debug("%s not presently seen: %s" - % (prefix, old_keys - new_keys)) - - # Values in common keys would differ quite often, - # so we need to join the messages together - msgs = [] - for k in new_keys.intersection(old_keys): - same = False - try: - new, old = dnew[k], dold[k] - same = new == old - if not same: - # Since JSON does not discriminate between lists and - # tuples, we might need to cast them into the same type - # as the last resort. And lets try to be more generic - same = old.__class__(new) == old - except Exception as e: - same = False - if not same: - msgs += ["%s: %r != %r" - % (k, dnew[k], dold[k])] - if len(msgs): - self._logger.debug("%s values differ in fields: %s" % (prefix, - ", ".join(msgs))) + from .misc import dict_diff + self._logger.warning("logdebug_dict_differences has been deprecated, please use " + "nipype.utils.misc.dict_diff.") + self._logger.debug(dict_diff(dold, dnew)) diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py index 552e24c435..0d5942940a 100644 --- a/nipype/utils/misc.py +++ b/nipype/utils/misc.py @@ -3,20 +3,30 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Miscellaneous utility functions """ -from __future__ import print_function, division, unicode_literals, absolute_import -from future import standard_library -standard_library.install_aliases() +from __future__ import print_function, unicode_literals, division, absolute_import from builtins import next, str -from future.utils import raise_from import sys 
import re from collections import Iterator -import inspect from distutils.version import LooseVersion -from textwrap import dedent + import numpy as np +from future.utils import raise_from +from future import standard_library +try: + from textwrap import indent as textwrap_indent +except ImportError: + def textwrap_indent(text, prefix): + """ A textwrap.indent replacement for Python < 3.3 """ + if not prefix: + return text + splittext = text.splitlines(True) + return prefix + prefix.join(splittext) + +standard_library.install_aliases() + def human_order_sorted(l): """Sorts string in human order (i.e. 'stat10' will go after 'stat2')""" @@ -66,47 +76,6 @@ def trim(docstring, marker=None): return '\n'.join(trimmed) -def getsource(function): - """Returns the source code of a function""" - src = dedent(inspect.getsource(function)) - return src - - -def create_function_from_source(function_source, imports=None): - """Return a function object from a function source - - Parameters - ---------- - function_source : pickled string - string in pickled form defining a function - imports : list of strings - list of import statements in string form that allow the function - to be executed in an otherwise empty namespace - """ - ns = {} - import_keys = [] - try: - if imports is not None: - for statement in imports: - exec(statement, ns) - import_keys = list(ns.keys()) - exec(function_source, ns) - - except Exception as e: - msg = '\nError executing function:\n %s\n' % function_source - msg += '\n'.join(["Functions in connection strings have to be standalone.", - "They cannot be declared either interactively or inside", - "another function or inline in the connect string. 
Any", - "imports should be done inside the function" - ]) - raise_from(RuntimeError(msg), e) - ns_funcs = list(set(ns) - set(import_keys + ['__builtins__'])) - assert len(ns_funcs) == 1, "Function or inputs are ill-defined" - funcname = ns_funcs[0] - func = ns[funcname] - return func - - def find_indices(condition): "Return the indices where ravel(condition) is true" res, = np.nonzero(np.ravel(condition)) @@ -188,7 +157,7 @@ def package_check(pkg_name, version=None, app=None, checker=LooseVersion, Examples -------- package_check('numpy', '1.3') - package_check('networkx', '1.0', 'tutorial1') + package_check('scipy', '0.7', 'tutorial1') """ @@ -238,11 +207,11 @@ def unflatten(in_list, prev_structure): if not isinstance(prev_structure, list): return next(in_list) - else: - out = [] - for item in prev_structure: - out.append(unflatten(in_list, item)) - return out + + out = [] + for item in prev_structure: + out.append(unflatten(in_list, item)) + return out def normalize_mc_params(params, source): @@ -270,3 +239,57 @@ def normalize_mc_params(params, source): params[-1:2:-1] = aff2euler(matrix) return params + + +def dict_diff(dold, dnew, indent=0): + """Helper to log what actually changed from old to new values of + dictionaries. + + typical use -- log difference for hashed_inputs + """ + # First check inputs, since they usually are lists of tuples + # and dicts are required. 
+ if isinstance(dnew, list): + dnew = dict(dnew) + if isinstance(dold, list): + dold = dict(dold) + + # Compare against hashed_inputs + # Keys: should rarely differ + new_keys = set(dnew.keys()) + old_keys = set(dold.keys()) + + diff = [] + if new_keys - old_keys: + diff += [" * keys not previously seen: %s" % (new_keys - old_keys)] + + if old_keys - new_keys: + diff += [" * keys not presently seen: %s" % (old_keys - new_keys)] + + # Add topical message + if diff: + diff.insert(0, "Dictionaries had differing keys:") + + diffkeys = len(diff) + + # Values in common keys would differ quite often, + # so we need to join the messages together + for k in new_keys.intersection(old_keys): + same = False + try: + new, old = dnew[k], dold[k] + same = new == old + if not same: + # Since JSON does not discriminate between lists and + # tuples, we might need to cast them into the same type + # as the last resort. And lets try to be more generic + same = old.__class__(new) == old + except Exception: + same = False + if not same: + diff += [" * %s: %r != %r" % (k, dnew[k], dold[k])] + + if len(diff) > diffkeys: + diff.insert(diffkeys, "Some dictionary entries had differing values:") + + return textwrap_indent('\n'.join(diff), ' ' * indent) diff --git a/nipype/utils/profiler.py b/nipype/utils/profiler.py new file mode 100644 index 0000000000..800b68a95f --- /dev/null +++ b/nipype/utils/profiler.py @@ -0,0 +1,372 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Utilities to keep track of performance +""" +from __future__ import print_function, division, unicode_literals, absolute_import + +import threading +from time import time +try: + import psutil +except ImportError as exc: + psutil = None + +from builtins import open, range +from .. 
import config, logging + +proflogger = logging.getLogger('utils') +resource_monitor = config.resource_monitor + +# Init variables +_MB = 1024.0**2 + + +class ResourceMonitor(threading.Thread): + """ + A ``Thread`` to monitor a specific PID with a certain frequence + to a file + """ + + def __init__(self, pid, freq=5, fname=None, python=True): + # Make sure psutil is imported + import psutil + + if freq < 0.2: + raise RuntimeError('Frequency (%0.2fs) cannot be lower than 0.2s' % freq) + + if fname is None: + fname = '.proc-%d_time-%s_freq-%0.2f' % (pid, time(), freq) + self._fname = fname + self._logfile = open(self._fname, 'w') + self._freq = freq + self._python = python + + # Leave process initialized and make first sample + self._process = psutil.Process(pid) + self._sample(cpu_interval=0.2) + + # Start thread + threading.Thread.__init__(self) + self._event = threading.Event() + + @property + def fname(self): + """Get/set the internal filename""" + return self._fname + + def stop(self): + """Stop monitoring""" + if not self._event.is_set(): + self._event.set() + self.join() + self._sample() + self._logfile.flush() + self._logfile.close() + + def _sample(self, cpu_interval=None): + cpu = 0.0 + rss = 0.0 + vms = 0.0 + try: + with self._process.oneshot(): + cpu += self._process.cpu_percent(interval=cpu_interval) + mem_info = self._process.memory_info() + rss += mem_info.rss + vms += mem_info.vms + except psutil.NoSuchProcess: + pass + + # Iterate through child processes and get number of their threads + try: + children = self._process.children(recursive=True) + except psutil.NoSuchProcess: + children = [] + + for child in children: + try: + with child.oneshot(): + cpu += child.cpu_percent() + mem_info = child.memory_info() + rss += mem_info.rss + vms += mem_info.vms + except psutil.NoSuchProcess: + pass + + print('%f,%f,%f,%f' % (time(), cpu, rss / _MB, vms / _MB), + file=self._logfile) + self._logfile.flush() + + def run(self): + """Core monitoring function, called 
by start()""" + start_time = time() + wait_til = start_time + while not self._event.is_set(): + self._sample() + wait_til += self._freq + self._event.wait(max(0, wait_til - time())) + + +# Log node stats function +def log_nodes_cb(node, status): + """Function to record node run statistics to a log file as json + dictionaries + + Parameters + ---------- + node : nipype.pipeline.engine.Node + the node being logged + status : string + acceptable values are 'start', 'end'; otherwise it is + considered and error + + Returns + ------- + None + this function does not return any values, it logs the node + status info to the callback logger + """ + + if status != 'end': + return + + # Import packages + import logging + import json + + status_dict = { + 'name': node.name, + 'id': node._id, + 'start': getattr(node.result.runtime, 'startTime'), + 'finish': getattr(node.result.runtime, 'endTime'), + 'duration': getattr(node.result.runtime, 'duration'), + 'runtime_threads': getattr( + node.result.runtime, 'cpu_percent', 'N/A'), + 'runtime_memory_gb': getattr( + node.result.runtime, 'mem_peak_gb', 'N/A'), + 'estimated_memory_gb': node.mem_gb, + 'num_threads': node.n_procs, + } + + if status_dict['start'] is None or status_dict['finish'] is None: + status_dict['error'] = True + + # Dump string to log + logging.getLogger('callback').debug(json.dumps(status_dict)) + + +# Get total system RAM +def get_system_total_memory_gb(): + """ + Function to get the total RAM of the running system in GB + """ + + # Import packages + import os + import sys + + # Get memory + if 'linux' in sys.platform: + with open('/proc/meminfo', 'r') as f_in: + meminfo_lines = f_in.readlines() + mem_total_line = [line for line in meminfo_lines + if 'MemTotal' in line][0] + mem_total = float(mem_total_line.split()[1]) + memory_gb = mem_total / (1024.0**2) + elif 'darwin' in sys.platform: + mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1] + memory_gb = float(mem_str) / (1024.0**3) + else: + 
err_msg = 'System platform: %s is not supported' + raise Exception(err_msg) + + # Return memory + return memory_gb + + +# Get max resources used for process +def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): + """ + Function to get the RAM and threads utilized by a given process + + Parameters + ---------- + pid : integer + the process ID of process to profile + mem_mb : float + the high memory watermark so far during process execution (in MB) + num_threads: int + the high thread watermark so far during process execution + + Returns + ------- + mem_mb : float + the new high memory watermark of process (MB) + num_threads : float + the new high thread watermark of process + """ + + if not resource_monitor: + raise RuntimeError('Attempted to measure resources with option ' + '"monitoring.enabled" set off.') + + try: + mem_mb = max(mem_mb, _get_ram_mb(pid, pyfunc=pyfunc)) + num_threads = max(num_threads, _get_num_threads(pid)) + except Exception as exc: + proflogger.info('Could not get resources used by process.\n%s', exc) + + return mem_mb, num_threads + + +# Get number of threads for process +def _get_num_threads(pid): + """ + Function to get the number of threads a process is using + + Parameters + ---------- + pid : integer + the process ID of process to profile + + Returns + ------- + num_threads : int + the number of threads that the process is using + + """ + + try: + proc = psutil.Process(pid) + # If process is running + if proc.status() == psutil.STATUS_RUNNING: + num_threads = proc.num_threads() + elif proc.num_threads() > 1: + tprocs = [psutil.Process(thr.id) for thr in proc.threads()] + alive_tprocs = [tproc for tproc in tprocs if tproc.status() == psutil.STATUS_RUNNING] + num_threads = len(alive_tprocs) + else: + num_threads = 1 + + child_threads = 0 + # Iterate through child processes and get number of their threads + for child in proc.children(recursive=True): + # Leaf process + if len(child.children()) == 0: + # If process is running, 
get its number of threads + if child.status() == psutil.STATUS_RUNNING: + child_thr = child.num_threads() + # If its not necessarily running, but still multi-threaded + elif child.num_threads() > 1: + # Cast each thread as a process and check for only running + tprocs = [psutil.Process(thr.id) for thr in child.threads()] + alive_tprocs = [tproc for tproc in tprocs + if tproc.status() == psutil.STATUS_RUNNING] + child_thr = len(alive_tprocs) + # Otherwise, no threads are running + else: + child_thr = 0 + # Increment child threads + child_threads += child_thr + except psutil.NoSuchProcess: + return None + + # Number of threads is max between found active children and parent + num_threads = max(child_threads, num_threads) + + # Return number of threads found + return num_threads + + +# Get ram usage of process +def _get_ram_mb(pid, pyfunc=False): + """ + Function to get the RAM usage of a process and its children + Reference: http://ftp.dev411.com/t/python/python-list/095thexx8g/\ +multiprocessing-forking-memory-usage + + Parameters + ---------- + pid : integer + the PID of the process to get RAM usage of + pyfunc : boolean (optional); default=False + a flag to indicate if the process is a python function; + when Pythons are multithreaded via multiprocess or threading, + children functions include their own memory + parents. 
if this + is set, the parent memory will removed from children memories + + + Returns + ------- + mem_mb : float + the memory RAM in MB utilized by the process PID + + """ + try: + # Init parent + parent = psutil.Process(pid) + # Get memory of parent + parent_mem = parent.memory_info().rss + mem_mb = parent_mem / _MB + # Iterate through child processes + for child in parent.children(recursive=True): + child_mem = child.memory_info().rss + if pyfunc: + child_mem -= parent_mem + mem_mb += child_mem / _MB + except psutil.NoSuchProcess: + return None + + # Return memory + return mem_mb + + +def _use_cpu(x): + ctr = 0 + while ctr < 1e7: + ctr += 1 + x * x + + +# Spin multiple threads +def _use_resources(n_procs, mem_gb): + """ + Function to execute multiple use_gb_ram functions in parallel + """ + import os + import sys + import psutil + from multiprocessing import Pool + from nipype import logging + from nipype.utils.profiler import _use_cpu + + iflogger = logging.getLogger('interface') + + # Getsize of one character string + BSIZE = sys.getsizeof(' ') - sys.getsizeof(' ') + BOFFSET = sys.getsizeof('') + _GB = 1024.0**3 + + def _use_gb_ram(mem_gb): + """A test function to consume mem_gb GB of RAM""" + num_bytes = int(mem_gb * _GB) + # Eat mem_gb GB of memory for 1 second + gb_str = ' ' * ((num_bytes - BOFFSET) // BSIZE) + assert sys.getsizeof(gb_str) == num_bytes + return gb_str + + # Measure the amount of memory this process already holds + p = psutil.Process(os.getpid()) + mem_offset = p.memory_info().rss / _GB + big_str = _use_gb_ram(mem_gb - mem_offset) + _use_cpu(5) + mem_total = p.memory_info().rss / _GB + del big_str + iflogger.info('[%d] Memory offset %0.2fGB, total %0.2fGB', + os.getpid(), mem_offset, mem_total) + + if n_procs > 1: + pool = Pool(n_procs) + pool.map(_use_cpu, range(n_procs)) + return True diff --git a/nipype/utils/provenance.py b/nipype/utils/provenance.py index 066cbb9a57..c316f67272 100644 --- a/nipype/utils/provenance.py +++ 
b/nipype/utils/provenance.py @@ -21,7 +21,7 @@ from .. import get_info, logging, __version__ from .filemanip import (md5, hashlib, hash_infile) -iflogger = logging.getLogger('interface') +logger = logging.getLogger('utils') foaf = pm.Namespace("foaf", "http://xmlns.com/foaf/0.1/") dcterms = pm.Namespace("dcterms", "http://purl.org/dc/terms/") nipype_ns = pm.Namespace("nipype", "http://nipy.org/nipype/terms/") @@ -173,7 +173,7 @@ def safe_encode(x, as_literal=True): jsonstr = json.dumps(outdict) except UnicodeDecodeError as excp: jsonstr = "Could not encode dictionary. {}".format(excp) - iflogger.warn('Prov: %s', jsonstr) + logger.warning('Prov: %s', jsonstr) if not as_literal: return jsonstr @@ -203,7 +203,7 @@ def safe_encode(x, as_literal=True): jsonstr = json.dumps(x) except UnicodeDecodeError as excp: jsonstr = "Could not encode list/tuple. {}".format(excp) - iflogger.warn('Prov: %s', jsonstr) + logger.warning('Prov: %s', jsonstr) if not as_literal: return jsonstr @@ -285,9 +285,20 @@ def prov_encode(graph, value, create_container=True): def write_provenance(results, filename='provenance', format='all'): - ps = ProvStore() - ps.add_results(results) - return ps.write_provenance(filename=filename, format=format) + prov = None + try: + ps = ProvStore() + ps.add_results(results) + prov = ps.write_provenance(filename=filename, format=format) + except Exception as e: + import traceback + err_msg = traceback.format_exc() + if getattr(e, 'args'): + err_msg += '\n\nException arguments:\n' + ', '.join(['"%s"' % arg for arg in e.args]) + logger.warning('Writing provenance failed - Exception details:\n%s', err_msg) + + return prov + class ProvStore(object): diff --git a/nipype/utils/spm_docs.py b/nipype/utils/spm_docs.py index 1b7a1a1dc4..3b9942f0af 100644 --- a/nipype/utils/spm_docs.py +++ b/nipype/utils/spm_docs.py @@ -27,7 +27,7 @@ def grab_doc(task_name): """ - cmd = matlab.MatlabCommandLine() + cmd = matlab.MatlabCommand(resource_monitor=False) # We need to tell 
Matlab where to find our spm_get_doc.m file. cwd = os.path.dirname(__file__) # Build matlab command diff --git a/nipype/utils/spm_flat_config.m b/nipype/utils/spm_flat_config.m index 8e46914667..6e489251b2 100644 --- a/nipype/utils/spm_flat_config.m +++ b/nipype/utils/spm_flat_config.m @@ -36,4 +36,4 @@ else objlist = {objlist{:} astruct}; end -end \ No newline at end of file +end diff --git a/nipype/utils/tests/test_config.py b/nipype/utils/tests/test_config.py new file mode 100644 index 0000000000..7684bdd55e --- /dev/null +++ b/nipype/utils/tests/test_config.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, division, unicode_literals, absolute_import +import os +import sys +import pytest +from nipype import config +from mock import MagicMock +from builtins import object + +try: + import xvfbwrapper + has_Xvfb = True +except ImportError: + has_Xvfb = False + +# Define mocks for xvfbwrapper. Do not forget the spec to ensure that +# hasattr() checks return False with missing attributes. 
+xvfbpatch = MagicMock(spec=['Xvfb']) +xvfbpatch.Xvfb.return_value = MagicMock(spec=['new_display', 'start', 'stop'], + new_display=2010) + +# Mock the legacy xvfbwrapper.Xvfb class (changed display attribute name) +xvfbpatch_old = MagicMock(spec=['Xvfb']) +xvfbpatch_old.Xvfb.return_value = MagicMock(spec=['vdisplay_num', 'start', 'stop'], + vdisplay_num=2010) + + +@pytest.mark.parametrize('dispnum', range(5)) +def test_display_config(monkeypatch, dispnum): + """Check that the display_variable option is used ($DISPLAY not set)""" + config._display = None + dispstr = ':%d' % dispnum + config.set('execution', 'display_variable', dispstr) + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + assert config.get_display() == config.get('execution', 'display_variable') + # Test that it was correctly cached + assert config.get_display() == config.get('execution', 'display_variable') + + +@pytest.mark.parametrize('dispnum', range(5)) +def test_display_system(monkeypatch, dispnum): + """Check that when only a $DISPLAY is defined, it is used""" + config._display = None + config._config.remove_option('execution', 'display_variable') + dispstr = ':%d' % dispnum + monkeypatch.setitem(os.environ, 'DISPLAY', dispstr) + assert config.get_display() == dispstr + # Test that it was correctly cached + assert config.get_display() == dispstr + + +def test_display_config_and_system(monkeypatch): + """Check that when only both config and $DISPLAY are defined, the config takes precedence""" + config._display = None + dispstr = ':10' + config.set('execution', 'display_variable', dispstr) + monkeypatch.setitem(os.environ, 'DISPLAY', ':0') + assert config.get_display() == dispstr + # Test that it was correctly cached + assert config.get_display() == dispstr + + +def test_display_noconfig_nosystem_patched(monkeypatch): + """Check that when no $DISPLAY nor option are specified, a virtual Xvfb is used""" + config._display = None + if config.has_option('execution', 'display_variable'): + 
config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch) + assert config.get_display() == ":2010" + # Test that it was correctly cached + assert config.get_display() == ':2010' + + +def test_display_empty_patched(monkeypatch): + """ + Check that when $DISPLAY is empty string and no option is specified, + a virtual Xvfb is used + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setitem(os.environ, 'DISPLAY', '') + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch) + assert config.get_display() == ':2010' + # Test that it was correctly cached + assert config.get_display() == ':2010' + + +def test_display_noconfig_nosystem_patched_oldxvfbwrapper(monkeypatch): + """ + Check that when no $DISPLAY nor option are specified, + a virtual Xvfb is used (with a legacy version of xvfbwrapper). + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch_old) + assert config.get_display() == ":2010" + # Test that it was correctly cached + assert config.get_display() == ':2010' + + +def test_display_empty_patched_oldxvfbwrapper(monkeypatch): + """ + Check that when $DISPLAY is empty string and no option is specified, + a virtual Xvfb is used (with a legacy version of xvfbwrapper). 
+ """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setitem(os.environ, 'DISPLAY', '') + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch_old) + assert config.get_display() == ':2010' + # Test that it was correctly cached + assert config.get_display() == ':2010' + + +def test_display_noconfig_nosystem_notinstalled(monkeypatch): + """ + Check that an exception is raised if xvfbwrapper is not installed + but necessary (no config and $DISPLAY unset) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + monkeypatch.setitem(sys.modules, 'xvfbwrapper', None) + with pytest.raises(RuntimeError): + config.get_display() + + +def test_display_empty_notinstalled(monkeypatch): + """ + Check that an exception is raised if xvfbwrapper is not installed + but necessary (no config and $DISPLAY empty) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setitem(os.environ, 'DISPLAY', '') + monkeypatch.setitem(sys.modules, 'xvfbwrapper', None) + with pytest.raises(RuntimeError): + config.get_display() + + +@pytest.mark.skipif(not has_Xvfb, reason='xvfbwrapper not installed') +def test_display_noconfig_nosystem_installed(monkeypatch): + """ + Check that actually uses xvfbwrapper when installed (not mocked) + and necessary (no config and $DISPLAY unset) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + newdisp = config.get_display() + assert int(newdisp.split(':')[-1]) > 1000 + # Test that it was correctly cached + assert 
config.get_display() == newdisp + + +@pytest.mark.skipif(not has_Xvfb, reason='xvfbwrapper not installed') +def test_display_empty_installed(monkeypatch): + """ + Check that actually uses xvfbwrapper when installed (not mocked) + and necessary (no config and $DISPLAY empty) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setitem(os.environ, 'DISPLAY', '') + newdisp = config.get_display() + assert int(newdisp.split(':')[-1]) > 1000 + # Test that it was correctly cached + assert config.get_display() == newdisp + + +def test_display_empty_macosx(monkeypatch): + """ + Check that an exception is raised if xvfbwrapper is necessary + (no config and $DISPLAY unset) but platform is OSX. See + https://github.com/nipy/nipype/issues/1400 + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', '') + + monkeypatch.setattr(sys, 'platform', 'darwin') + with pytest.raises(RuntimeError): + config.get_display() + +def test_cwd_cached(tmpdir): + """Check that changing dirs does not change nipype's cwd""" + oldcwd = config.cwd + tmpdir.chdir() + assert config.cwd == oldcwd diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 9e0f3abb78..d50bef355a 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -6,8 +6,6 @@ import os import time -from tempfile import mkstemp, mkdtemp -import shutil import warnings import pytest @@ -93,7 +91,7 @@ def _temp_analyze_files_prime(tmpdir): orig_hdr = tmpdir.join("orig_prime.hdr") orig_img.open('w+').close() orig_hdr.open('w+').close() - return str(orig_img), str(orig_hdr) + return orig_img.strpath, orig_hdr.strpath def test_copyfile(_temp_analyze_files): @@ -275,15 +273,14 @@ def test_list_to_filename(list, 
expected): assert x == expected -def test_check_depends(): +def test_check_depends(tmpdir): def touch(fname): with open(fname, 'a'): os.utime(fname, None) - tmpdir = mkdtemp() - dependencies = [os.path.join(tmpdir, str(i)) for i in range(3)] - targets = [os.path.join(tmpdir, str(i)) for i in range(3, 6)] + dependencies = [tmpdir.join(str(i)).strpath for i in range(3)] + targets = [tmpdir.join(str(i)).strpath for i in range(3, 6)] # Targets newer than dependencies for dep in dependencies: @@ -307,13 +304,11 @@ def touch(fname): else: assert False, "Should raise OSError on missing dependency" - shutil.rmtree(tmpdir) - -def test_json(): +def test_json(tmpdir): # Simple roundtrip test of json files, just a sanity check. adict = dict(a='one', c='three', b='two') - fd, name = mkstemp(suffix='.json') + name = tmpdir.join('test.json').strpath save_json(name, adict) # save_json closes the file new_dict = load_json(name) os.unlink(name) diff --git a/nipype/utils/tests/test_functions.py b/nipype/utils/tests/test_functions.py new file mode 100644 index 0000000000..1d9b9dac7a --- /dev/null +++ b/nipype/utils/tests/test_functions.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +import sys +import pytest +from nipype.utils.functions import (getsource, create_function_from_source) + +def _func1(x): + return x**3 + +def test_func_to_str(): + + def func1(x): + return x**2 + + # Should be ok with both functions! 
+ for f in _func1, func1: + f_src = getsource(f) + f_recreated = create_function_from_source(f_src) + assert f(2.3) == f_recreated(2.3) + +def test_func_to_str_err(): + bad_src = "obbledygobbledygook" + with pytest.raises(RuntimeError): create_function_from_source(bad_src) + +def _print_statement(): + try: + exec('print ""') + return True + except SyntaxError: + return False + +def test_func_string(): + def is_string(): + return isinstance('string', str) + + wrapped_func = create_function_from_source(getsource(is_string)) + assert is_string() == wrapped_func() + +@pytest.mark.skipif(sys.version_info[0] > 2, reason="breaks python 3") +def test_func_print_py2(): + wrapped_func = create_function_from_source(getsource(_print_statement)) + assert wrapped_func() diff --git a/nipype/utils/tests/test_misc.py b/nipype/utils/tests/test_misc.py index f2780a584f..1685fd645e 100644 --- a/nipype/utils/tests/test_misc.py +++ b/nipype/utils/tests/test_misc.py @@ -8,9 +8,8 @@ import pytest -from nipype.utils.misc import (container_to_string, getsource, - create_function_from_source, str2bool, flatten, - unflatten) +from nipype.utils.misc import (container_to_string, str2bool, + flatten, unflatten) def test_cont_to_str(): @@ -35,26 +34,6 @@ def test_cont_to_str(): assert (container_to_string(123) == '123') -def _func1(x): - return x**3 - - -def test_func_to_str(): - - def func1(x): - return x**2 - - # Should be ok with both functions! 
- for f in _func1, func1: - f_src = getsource(f) - f_recreated = create_function_from_source(f_src) - assert f(2.3) == f_recreated(2.3) - -def test_func_to_str_err(): - bad_src = "obbledygobbledygook" - with pytest.raises(RuntimeError): create_function_from_source(bad_src) - - @pytest.mark.parametrize("string, expected", [ ("yes", True), ("true", True), ("t", True), ("1", True), ("no", False), ("false", False), ("n", False), ("f", False), ("0", False) diff --git a/nipype/utils/tests/test_provenance.py b/nipype/utils/tests/test_provenance.py index 270774dcf5..ce35a95aac 100644 --- a/nipype/utils/tests/test_provenance.py +++ b/nipype/utils/tests/test_provenance.py @@ -21,8 +21,7 @@ def test_provenance(): assert 'echo hello' in provn def test_provenance_exists(tmpdir): - tempdir = str(tmpdir) - os.chdir(tempdir) + tmpdir.chdir() from nipype import config from nipype.interfaces.base import CommandLine provenance_state = config.get('execution', 'write_provenance') @@ -31,8 +30,7 @@ def test_provenance_exists(tmpdir): CommandLine('echo hello').run() config.set('execution', 'write_provenance', provenance_state) config.set('execution', 'hash_method', hash_state) - provenance_exists = os.path.exists(os.path.join(tempdir, 'provenance.provn')) - assert provenance_exists + assert tmpdir.join('provenance.provn').check() def test_safe_encode(): a = '\xc3\xa9lg' diff --git a/nipype/utils/tests/use_resources b/nipype/utils/tests/use_resources new file mode 100755 index 0000000000..fd2e860a1a --- /dev/null +++ b/nipype/utils/tests/use_resources @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# +# use_resources + +''' +Python script to use a certain amount of RAM on disk and number of +threads + +Usage: + use_resources -g -p +''' + +# Make main executable +if __name__ == '__main__': + + # Import packages + import argparse + from nipype.utils.profiler import _use_resources + + # Init argparser + parser = argparse.ArgumentParser(description=__doc__) + + # Add arguments + 
parser.add_argument('-g', '--num_gb', required=True, type=float, + help='Number of GB RAM to use, can be float or int') + parser.add_argument('-p', '--num_threads', required=True, type=int, + help='Number of threads to run in parallel') + + # Parse args + args = parser.parse_args() + _use_resources(args.num_threads, args.num_gb) diff --git a/nipype/workflows/data/ecc.sch b/nipype/workflows/data/ecc.sch index a7de1f2b0b..b9e8d8c3c3 100644 --- a/nipype/workflows/data/ecc.sch +++ b/nipype/workflows/data/ecc.sch @@ -3,7 +3,7 @@ setscale 4 setoption smoothing 6 setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 clear U -clear UA +clear UA clear UB clear US clear UP @@ -53,7 +53,7 @@ clear U setrow UG 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 optimise 7 UG 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 sort U -copy U UG +copy U UG # 1mm scale setscale 1 setoption smoothing 2 diff --git a/nipype/workflows/data/hmc.sch b/nipype/workflows/data/hmc.sch index 08f3e76e85..aeabcae29a 100644 --- a/nipype/workflows/data/hmc.sch +++ b/nipype/workflows/data/hmc.sch @@ -2,7 +2,7 @@ setscale 4 setoption smoothing 6 clear U -clear UA +clear UA clear UB clear US clear UP @@ -51,7 +51,7 @@ clear U setrow UG 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 optimise 7 UG 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 sort U -copy U UG +copy U UG # 1mm scale setscale 1 setoption smoothing 2 diff --git a/nipype/workflows/dmri/fsl/artifacts.py b/nipype/workflows/dmri/fsl/artifacts.py index cad1e43563..c74cbc18b4 100644 --- a/nipype/workflows/dmri/fsl/artifacts.py +++ b/nipype/workflows/dmri/fsl/artifacts.py @@ -232,7 +232,7 @@ def all_fsl_pipeline(name='fsl_all_correct', outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_mask', 'out_bvec']), name='outputnode') - def _gen_index(in_file): + def gen_index(in_file): import numpy as np import nibabel as nb import os @@ -242,6 +242,9 @@ def _gen_index(in_file): np.savetxt(out_file, np.ones((vols,)).T) return out_file + gen_idx = pe.Node(niu.Function( + input_names=['in_file'], 
output_names=['out_file'], + function=gen_index), name='gen_index') avg_b0_0 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg_pre') @@ -272,10 +275,11 @@ def _gen_index(in_file): ('topup.out_fieldcoef', 'in_topup_fieldcoef'), ('topup.out_movpar', 'in_topup_movpar')]), (bet_dwi0, ecc, [('mask_file', 'in_mask')]), + (inputnode, gen_idx, [('in_file', 'in_file')]), (inputnode, ecc, [('in_file', 'in_file'), - (('in_file', _gen_index), 'in_index'), ('in_bval', 'in_bval'), ('in_bvec', 'in_bvec')]), + (gen_idx, ecc, [('out_file', 'in_index')]), (inputnode, rot_bvec, [('in_bvec', 'in_bvec')]), (ecc, rot_bvec, [('out_parameter', 'eddy_params')]), (ecc, avg_b0_1, [('out_corrected', 'in_dwi')]), diff --git a/nipype/workflows/dmri/fsl/tests/test_dti.py b/nipype/workflows/dmri/fsl/tests/test_dti.py index 9a8ed4ca13..7c5a7a4426 100644 --- a/nipype/workflows/dmri/fsl/tests/test_dti.py +++ b/nipype/workflows/dmri/fsl/tests/test_dti.py @@ -9,15 +9,13 @@ import nipype.pipeline.engine as pe import warnings -import tempfile -import shutil from nipype.workflows.dmri.fsl.dti import create_bedpostx_pipeline from nipype.utils.filemanip import list_to_filename @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def test_create_bedpostx_pipeline(): +def test_create_bedpostx_pipeline(tmpdir): fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) mask_file = os.path.join(fsl_course_dir, "fdt2/subj1.bedpostX/nodif_brain_mask.nii.gz") @@ -72,7 +70,7 @@ def test_create_bedpostx_pipeline(): test_f1 = pe.Node(util.AssertEqual(), name="mean_f1_test") pipeline = pe.Workflow(name="test_bedpostx") - pipeline.base_dir = tempfile.mkdtemp(prefix="nipype_test_bedpostx_") + pipeline.base_dir = tmpdir.mkdir("nipype_test_bedpostx_").strpath pipeline.connect([(slice_mask, original_bedpostx, [("roi_file", "mask")]), (slice_mask, nipype_bedpostx, 
[("roi_file", "inputnode.mask")]), @@ -85,4 +83,3 @@ def test_create_bedpostx_pipeline(): ]) pipeline.run(plugin='Linear') - shutil.rmtree(pipeline.base_dir) diff --git a/nipype/workflows/dmri/fsl/tests/test_epi.py b/nipype/workflows/dmri/fsl/tests/test_epi.py index f7b349b442..eeb36ee409 100644 --- a/nipype/workflows/dmri/fsl/tests/test_epi.py +++ b/nipype/workflows/dmri/fsl/tests/test_epi.py @@ -9,14 +9,12 @@ import nipype.pipeline.engine as pe import warnings -import tempfile -import shutil from nipype.workflows.dmri.fsl.epi import create_eddy_correct_pipeline @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def test_create_eddy_correct_pipeline(): +def test_create_eddy_correct_pipeline(tmpdir): fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) dwi_file = os.path.join(fsl_course_dir, "fdt1/subj1/data.nii.gz") @@ -36,7 +34,7 @@ def test_create_eddy_correct_pipeline(): test = pe.Node(util.AssertEqual(), name="eddy_corrected_dwi_test") pipeline = pe.Workflow(name="test_eddycorrect") - pipeline.base_dir = tempfile.mkdtemp(prefix="nipype_test_eddycorrect_") + pipeline.base_dir = tmpdir.mkdir("nipype_test_eddycorrect_").strpath pipeline.connect([(trim_dwi, original_eddycorrect, [("roi_file", "in_file")]), (trim_dwi, nipype_eddycorrect, [("roi_file", "inputnode.in_file")]), @@ -45,4 +43,3 @@ def test_create_eddy_correct_pipeline(): ]) pipeline.run(plugin='Linear') - shutil.rmtree(pipeline.base_dir) diff --git a/nipype/workflows/dmri/fsl/tests/test_tbss.py b/nipype/workflows/dmri/fsl/tests/test_tbss.py index 20f7331fda..9cf2c9fe50 100644 --- a/nipype/workflows/dmri/fsl/tests/test_tbss.py +++ b/nipype/workflows/dmri/fsl/tests/test_tbss.py @@ -126,7 +126,7 @@ def _tbss_test_helper(estimate_skeleton): @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def disabled_tbss_est_skeleton(): 
+def test_disabled_tbss_est_skeleton(): _tbss_test_helper(True) # this test is disabled until we figure out what is wrong with TBSS in 5.0.9 @@ -134,5 +134,5 @@ def disabled_tbss_est_skeleton(): @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") @pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") -def disabled_tbss_est_skeleton_use_precomputed_skeleton(): +def test_disabled_tbss_est_skeleton_use_precomputed_skeleton(): _tbss_test_helper(False) diff --git a/nipype/workflows/fmri/fsl/preprocess.py b/nipype/workflows/fmri/fsl/preprocess.py index 9660f19c53..aa8ac03673 100644 --- a/nipype/workflows/fmri/fsl/preprocess.py +++ b/nipype/workflows/fmri/fsl/preprocess.py @@ -4,7 +4,6 @@ from __future__ import division import os - from ....interfaces import fsl as fsl # fsl from ....interfaces import utility as util # utility from ....pipeline import engine as pe # pypeline engine @@ -18,6 +17,28 @@ def getthreshop(thresh): return ['-thr %.10f -Tmin -bin' % (0.1 * val[1]) for val in thresh] +def pickrun(files, whichrun): + """pick file from list of files""" + + filemap = {'first': 0, 'last': -1, 'middle': len(files) // 2} + + if isinstance(files, list): + + # whichrun is given as integer + if isinstance(whichrun, int): + return files[whichrun] + # whichrun is given as string + elif isinstance(whichrun, str): + if whichrun not in filemap.keys(): + raise(KeyError, 'Sorry, whichrun must be either integer index' + 'or string in form of "first", "last" or "middle') + else: + return files[filemap[whichrun]] + else: + # in case single file name is given + return files + + def pickfirst(files): if isinstance(files, list): return files[0] @@ -401,7 +422,7 @@ def create_parallelfeat_preproc(name='featpreproc', highpass=True): return featpreproc -def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle'): +def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle', whichrun=0): """Create a FEAT preprocessing 
workflow with registration to one volume of the first run Parameters @@ -412,6 +433,7 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle') name : name of workflow (default: featpreproc) highpass : boolean (default: True) whichvol : which volume of the first run to register to ('first', 'middle', 'last', 'mean') + whichrun : which run to draw reference volume from (integer index or 'first', 'middle', 'last') Inputs:: @@ -511,8 +533,8 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle') if whichvol != 'mean': extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), iterfield=['in_file'], - name='extractref') - featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file') + name = 'extractref') + featpreproc.connect(img2float, ('out_file', pickrun, whichrun), extract_ref, 'in_file') featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), extract_ref, 't_min') featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') @@ -530,7 +552,7 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle') featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') else: motion_correct.inputs.mean_vol = True - featpreproc.connect(motion_correct, ('mean_img', pickfirst), outputnode, 'reference') + featpreproc.connect(motion_correct, ('mean_img', pickrun, whichrun), outputnode, 'reference') featpreproc.connect(motion_correct, 'par_file', outputnode, 'motion_parameters') featpreproc.connect(motion_correct, 'out_file', outputnode, 'realigned_files') @@ -550,10 +572,9 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle') Extract the mean volume of the first functional run """ - meanfunc = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean', - suffix='_mean'), + meanfunc = pe.Node(interface=fsl.ImageMaths(op_string = '-Tmean', suffix='_mean'), name='meanfunc') - featpreproc.connect(motion_correct, ('out_file', 
pickfirst), meanfunc, 'in_file') + featpreproc.connect(motion_correct, ('out_file', pickrun, whichrun), meanfunc, 'in_file') """ Strip the skull from the mean functional to generate a mask @@ -699,7 +720,7 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle') iterfield=['in_file'], name='meanfunc3') - featpreproc.connect(meanscale, ('out_file', pickfirst), meanfunc3, 'in_file') + featpreproc.connect(meanscale, ('out_file', pickrun, whichrun), meanfunc3, 'in_file') featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean') """ @@ -749,7 +770,7 @@ def create_susan_smooth(name="susan_smooth", separate_masks=True): Inputs:: inputnode.in_files : functional runs (filename or list of filenames) - inputnode.fwhm : fwhm for smoothing with SUSAN + inputnode.fwhm : fwhm for smoothing with SUSAN (float or list of floats) inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing) Outputs:: @@ -766,6 +787,19 @@ def create_susan_smooth(name="susan_smooth", separate_masks=True): >>> smooth.run() # doctest: +SKIP """ + # replaces the functionality of a "for loop" + def cartesian_product(fwhms, in_files, usans, btthresh): + from nipype.utils.filemanip import filename_to_list + # ensure all inputs are lists + in_files = filename_to_list(in_files) + fwhms = [fwhms] if isinstance(fwhms, (int, float)) else fwhms + # create cartesian product lists (s_ = single element of list) + cart_in_file = [s_in_file for s_in_file in in_files for s_fwhm in fwhms] + cart_fwhm = [s_fwhm for s_in_file in in_files for s_fwhm in fwhms] + cart_usans = [s_usans for s_usans in usans for s_fwhm in fwhms] + cart_btthresh = [s_btthresh for s_btthresh in btthresh for s_fwhm in fwhms] + + return cart_in_file, cart_fwhm, cart_usans, cart_btthresh susan_smooth = pe.Workflow(name=name) @@ -785,8 +819,15 @@ def create_susan_smooth(name="susan_smooth", separate_masks=True): functional """ + multi_inputs = pe.Node(util.Function(function=cartesian_product, + 
output_names=['cart_in_file', + 'cart_fwhm', + 'cart_usans', + 'cart_btthresh']), + name='multi_inputs') + smooth = pe.MapNode(interface=fsl.SUSAN(), - iterfield=['in_file', 'brightness_threshold', 'usans'], + iterfield=['in_file', 'brightness_threshold', 'usans', 'fwhm'], name='smooth') """ @@ -843,10 +884,17 @@ def create_susan_smooth(name="susan_smooth", separate_masks=True): """ Define a function to get the brightness threshold for SUSAN """ - susan_smooth.connect(inputnode, 'fwhm', smooth, 'fwhm') - susan_smooth.connect(inputnode, 'in_files', smooth, 'in_file') - susan_smooth.connect(median, ('out_stat', getbtthresh), smooth, 'brightness_threshold') - susan_smooth.connect(merge, ('out', getusans), smooth, 'usans') + + susan_smooth.connect([ + (inputnode, multi_inputs, [('in_files', 'in_files'), + ('fwhm', 'fwhms')]), + (median, multi_inputs, [(('out_stat', getbtthresh), 'btthresh')]), + (merge, multi_inputs, [(('out', getusans), 'usans')]), + (multi_inputs, smooth, [('cart_in_file', 'in_file'), + ('cart_fwhm', 'fwhm'), + ('cart_btthresh', 'brightness_threshold'), + ('cart_usans', 'usans')]), + ]) outputnode = pe.Node(interface=util.IdentityInterface(fields=['smoothed_files']), name='outputnode') diff --git a/nipype/workflows/fmri/fsl/tests/test_preprocess.py b/nipype/workflows/fmri/fsl/tests/test_preprocess.py new file mode 100644 index 0000000000..4f382bdc1a --- /dev/null +++ b/nipype/workflows/fmri/fsl/tests/test_preprocess.py @@ -0,0 +1,25 @@ +__author__ = 'oliver' + +from ..preprocess import create_featreg_preproc, pickrun + + +def test_pickrun(): + files = ['1', '2', '3', '4'] + assert pickrun(files, 0) == '1' + assert pickrun(files, 'first') == '1' + assert pickrun(files, -1) == '4' + assert pickrun(files, 'last') == '4' + assert pickrun(files, 'middle') == '3' + + +def test_create_featreg_preproc(): + """smoke test""" + wf = create_featreg_preproc(whichrun=0) + + # test type + import nipype + assert type(wf) == nipype.pipeline.engine.Workflow + + # test 
methods + assert wf.get_node('extractref') + assert wf._get_dot() diff --git a/nipype/workflows/fmri/spm/preprocess.py b/nipype/workflows/fmri/spm/preprocess.py index 384284434d..1a8b8cddee 100644 --- a/nipype/workflows/fmri/spm/preprocess.py +++ b/nipype/workflows/fmri/spm/preprocess.py @@ -8,7 +8,6 @@ from ....interfaces import spm as spm from ....interfaces import utility as niu from ....pipeline import engine as pe -from ....interfaces.matlab import no_matlab from ...smri.freesurfer.utils import create_getmask_flow from .... import logging @@ -141,7 +140,8 @@ def create_vbm_preproc(name='vbmpreproc'): >>> preproc = create_vbm_preproc() >>> preproc.inputs.inputspec.fwhm = 8 - >>> preproc.inputs.inputspec.structural_files = [os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] + >>> preproc.inputs.inputspec.structural_files = [ + ... os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] >>> preproc.inputs.inputspec.template_prefix = 'Template' >>> preproc.run() # doctest: +SKIP @@ -185,7 +185,9 @@ def getclass1images(class_images): class1images.extend(session[0]) return class1images - workflow.connect(dartel_template, ('segment.native_class_images', getclass1images), norm2mni, 'apply_to_files') + workflow.connect( + dartel_template, ('segment.native_class_images', getclass1images), + norm2mni, 'apply_to_files') workflow.connect(inputnode, 'fwhm', norm2mni, 'fwhm') def compute_icv(class_images): @@ -217,10 +219,11 @@ def compute_icv(class_images): "icv" ]), name="outputspec") - workflow.connect([(dartel_template, outputnode, [('outputspec.template_file', 'template_file')]), - (norm2mni, outputnode, [("normalized_files", "normalized_files")]), - (calc_icv, outputnode, [("icv", "icv")]), - ]) + workflow.connect([ + (dartel_template, outputnode, [('outputspec.template_file', 'template_file')]), + (norm2mni, outputnode, [("normalized_files", "normalized_files")]), + (calc_icv, outputnode, [("icv", "icv")]), + ]) return workflow @@ -233,7 +236,8 @@ def 
create_DARTEL_template(name='dartel_template'): ------- >>> preproc = create_DARTEL_template() - >>> preproc.inputs.inputspec.structural_files = [os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] + >>> preproc.inputs.inputspec.structural_files = [ + ... os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] >>> preproc.inputs.inputspec.template_prefix = 'Template' >>> preproc.run() # doctest: +SKIP @@ -259,24 +263,34 @@ def create_DARTEL_template(name='dartel_template'): name='segment') workflow.connect(inputnode, 'structural_files', segment, 'channel_files') - version = spm.Info.version() - if version: - spm_path = version['path'] - if version['name'] == 'SPM8': - tissue1 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 1), 2, (True, True), (False, False)) - tissue2 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 2), 2, (True, True), (False, False)) - tissue3 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 3), 2, (True, False), (False, False)) - tissue4 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 4), 3, (False, False), (False, False)) - tissue5 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 5), 4, (False, False), (False, False)) - tissue6 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 6), 2, (False, False), (False, False)) - elif version['name'] == 'SPM12': - spm_path = version['path'] + spm_info = spm.Info.getinfo() + if spm_info: + spm_path = spm_info['path'] + if spm_info['name'] == 'SPM8': + tissue1 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 1), + 2, (True, True), (False, False)) + tissue2 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 2), + 2, (True, True), (False, False)) + tissue3 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 3), + 2, (True, False), (False, False)) + tissue4 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 4), + 3, (False, False), (False, False)) + tissue5 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 5), + 4, (False, False), (False, False)) + tissue6 = ((os.path.join(spm_path, 
'toolbox/Seg/TPM.nii'), 6), + 2, (False, False), (False, False)) + elif spm_info['name'] == 'SPM12': + spm_path = spm_info['path'] tissue1 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 1), 1, (True, True), (False, False)) tissue2 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 2), 1, (True, True), (False, False)) - tissue3 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 3), 2, (True, False), (False, False)) - tissue4 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 4), 3, (False, False), (False, False)) - tissue5 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 5), 4, (False, False), (False, False)) - tissue6 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 6), 2, (False, False), (False, False)) + tissue3 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 3), + 2, (True, False), (False, False)) + tissue4 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 4), + 3, (False, False), (False, False)) + tissue5 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 5), + 4, (False, False), (False, False)) + tissue6 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 6), + 2, (False, False), (False, False)) else: logger.critical('Unsupported version of SPM') diff --git a/nipype/workflows/rsfmri/fsl/resting.py b/nipype/workflows/rsfmri/fsl/resting.py index 01da5014df..a4dc1db5af 100644 --- a/nipype/workflows/rsfmri/fsl/resting.py +++ b/nipype/workflows/rsfmri/fsl/resting.py @@ -120,7 +120,7 @@ def create_resting_preproc(name='restpreproc', base_dir=None): name='getthreshold') threshold_stddev = pe.Node(fsl.Threshold(), name='threshold') compcor = pe.Node(confounds.ACompCor(components_file="noise_components.txt", - use_regress_poly=False), + pre_filter=False), name='compcor') remove_noise = pe.Node(fsl.FilterRegressor(filter_all=True), name='remove_noise') diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py index 7ae4483b55..68e62d7ee8 100644 --- a/nipype/workflows/rsfmri/fsl/tests/test_resting.py +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -51,7 +51,7 @@ class 
TestResting(): @pytest.fixture(autouse=True) def setup_class(self, tmpdir): # setup temp folder - os.chdir(str(tmpdir)) + tmpdir.chdir() self.in_filenames = {key: os.path.abspath(value) for key, value in self.in_filenames.items()} diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 6247b04cca..0000000000 --- a/pytest.ini +++ /dev/null @@ -1,3 +0,0 @@ -[pytest] -norecursedirs = .git build dist doc nipype/external tools examples src -addopts = --doctest-modules \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index ce2adf9d09..a5ac0a5683 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,16 +1,17 @@ -numpy>=1.6.2 +numpy>=1.9.0 scipy>=0.11 -networkx>=1.7 +networkx>=1.9 traits>=4.6 python-dateutil>=1.5 nibabel>=2.1.0 future>=0.16.0 simplejson>=3.8.0 -prov>=1.5.0 +prov==1.5.0 click>=6.6.0 funcsigs configparser pytest>=3.0 mock pydotplus +pydot>=1.2.3 packaging diff --git a/rtd_requirements.txt b/rtd_requirements.txt index 1ee6c766ac..b36047b653 100644 --- a/rtd_requirements.txt +++ b/rtd_requirements.txt @@ -1,17 +1,18 @@ numpy>=1.6.2 scipy>=0.11 -networkx>=1.7 +networkx>=1.9 traits>=4.6 python-dateutil>=1.5 nibabel>=2.1.0 future>=0.16.0 simplejson>=3.8.0 -prov>=1.5.0 +prov==1.5.0 funcsigs configparser pytest>=3.0 mock pydotplus +pydot>=1.2.3 psutil matplotlib packaging diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..3c6e79cf31 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/setup.py b/setup.py index 331fa5905b..012efd2722 100755 --- a/setup.py +++ b/setup.py @@ -100,15 +100,17 @@ def main(): pjoin('testing', 'data', 'dicomdir', '*'), pjoin('testing', 'data', 'bedpostxout', '*'), pjoin('testing', 'data', 'tbss_dir', '*'), - pjoin('testing', 'data', 'brukerdir', '*'), - pjoin('testing', 'data', 'brukerdir', 'pdata', '*'), + pjoin('testing', 'data', 'brukerdir', 'fid'), pjoin('testing', 'data', 'brukerdir', 'pdata', '1', '*'), + pjoin('testing', 'data', 
'ds005', '*'), + pjoin('testing', 'data', 'realign_json.json'), pjoin('workflows', 'data', '*'), pjoin('pipeline', 'engine', 'report_template.html'), pjoin('external', 'd3.js'), - pjoin('interfaces', 'script_templates', '*'), - pjoin('interfaces', 'tests', 'realign_json.json'), + pjoin('interfaces', 'fsl', 'model_templates', '*'), pjoin('interfaces', 'tests', 'use_resources'), + 'pytest.ini', + 'conftest.py', ] # Python 3: use a locals dictionary diff --git a/tools/apigen.py b/tools/apigen.py index d3a732d881..c594042f71 100644 --- a/tools/apigen.py +++ b/tools/apigen.py @@ -103,11 +103,11 @@ def set_package_name(self, package_name): def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') - >>> docwriter._get_object_name(" def func(): ") # doctest: +ALLOW_UNICODE + >>> docwriter._get_object_name(" def func(): ") u'func' - >>> docwriter._get_object_name(" class Klass(object): ") # doctest: +ALLOW_UNICODE + >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' - >>> docwriter._get_object_name(" class Klass: ") # doctest: +ALLOW_UNICODE + >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' name = line.split()[1].split('(')[0].strip() diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py index 1e2227fadf..a798910dcb 100755 --- a/tools/build_interface_docs.py +++ b/tools/build_interface_docs.py @@ -42,7 +42,7 @@ ] docwriter.class_skip_patterns += ['AFNICommand', 'ANTS', - 'FSL', + 'FSLCommand', 'FS', 'Info', '^SPM', diff --git a/tools/interfacedocgen.py b/tools/interfacedocgen.py index 3eb7467c4b..80356d3ded 100644 --- a/tools/interfacedocgen.py +++ b/tools/interfacedocgen.py @@ -124,11 +124,11 @@ def set_package_name(self, package_name): def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') - >>> docwriter._get_object_name(" def func(): ") # doctest: +ALLOW_UNICODE + >>> docwriter._get_object_name(" def func(): ") u'func' - >>> 
docwriter._get_object_name(" class Klass(object): ") # doctest: +ALLOW_UNICODE + >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' - >>> docwriter._get_object_name(" class Klass: ") # doctest: +ALLOW_UNICODE + >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' name = line.split()[1].split('(')[0].strip()