diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index d700b59665..d0546f627f 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,3 +1,5 @@
+# Sun Jan 1 12:38:02 2023 -0500 - effigies@gmail.com - STY: Run pre-commit config on all files
+d14c1cf282a9c3b19189f490f10c35f5739e24d1
# Thu Dec 29 22:53:17 2022 -0500 - effigies@gmail.com - STY: Reduce array().astype() and similar constructs
bf298113da99079c9c7b5e1690e41879828cd472
# Thu Dec 29 22:32:46 2022 -0500 - effigies@gmail.com - STY: pyupgrade --py37-plus
@@ -9,4 +11,4 @@ bf298113da99079c9c7b5e1690e41879828cd472
# Thu Dec 29 21:32:00 2022 -0500 - effigies@gmail.com - STY: isort
0ab2856cac4d4baae7ab3e2f6d58421db55d807f
# Thu Dec 29 21:30:29 2022 -0500 - effigies@gmail.com - STY: blue
-1a8dd302ff85b1136c81d492509b80e7748339f0
\ No newline at end of file
+1a8dd302ff85b1136c81d492509b80e7748339f0
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..8c884eb2cc
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,22 @@
+exclude: '.*/data/.*'
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.1.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+ - id: check-json
+ - id: check-toml
+ - id: check-added-large-files
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ - id: check-vcs-permalinks
+ - repo: https://github.com/grantjenks/blue
+ rev: v0.9.1
+ hooks:
+ - id: blue
+ - repo: https://github.com/pycqa/isort
+ rev: 5.11.2
+ hooks:
+ - id: isort
diff --git a/Makefile b/Makefile
index 093e177c36..7d4c6666ae 100644
--- a/Makefile
+++ b/Makefile
@@ -38,7 +38,7 @@ RELEASE_VERSION ?= $(SETUPPY_VERSION)
all: build
-build:
+build:
$(PYTHON) setup.py config --noisy
$(PYTHON) setup.py build
@@ -81,6 +81,7 @@ $(WWW_DIR):
.git-blame-ignore-revs:
git log --grep "\[git-blame-ignore-rev\]" --pretty=format:"# %ad - %ae - %s%n%H" \
> .git-blame-ignore-revs
+ echo >> .git-blame-ignore-revs
#
# Tests
diff --git a/Makefile.win b/Makefile.win
index 30f8275311..00c15ea031 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -1,6 +1,6 @@
# Makefile NiBabel under Windows using a standard Python distribution
-installer:
+installer:
# now the installer
python setup.py bdist_wininst
diff --git a/bin/nib-ls b/bin/nib-ls
index f2e447d518..067efb0533 100755
--- a/bin/nib-ls
+++ b/bin/nib-ls
@@ -13,6 +13,5 @@ Output a summary table for neuroimaging files (resolution, dimensionality, etc.)
from nibabel.cmdline.ls import main
-
if __name__ == '__main__':
main()
diff --git a/bin/nib-nifti-dx b/bin/nib-nifti-dx
index b395ee1d9a..2562e0f0d8 100755
--- a/bin/nib-nifti-dx
+++ b/bin/nib-nifti-dx
@@ -7,7 +7,7 @@
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
-""" Print nifti diagnostics for header files """
+"""Print nifti diagnostics for header files"""
from nibabel.cmdline.nifti_dx import main
diff --git a/bin/parrec2nii b/bin/parrec2nii
index 27a1abca05..4a21c6d288 100755
--- a/bin/parrec2nii
+++ b/bin/parrec2nii
@@ -4,6 +4,5 @@
from nibabel.cmdline.parrec2nii import main
-
if __name__ == '__main__':
main()
diff --git a/doc/source/README.txt b/doc/source/README.txt
index 2a3d2647d6..32b5df8c09 100644
--- a/doc/source/README.txt
+++ b/doc/source/README.txt
@@ -16,7 +16,7 @@ Discover available make targets::
make help
Clean up previous build::
-
+
make clean
Build html documentation::
diff --git a/doc/source/_templates/indexsidebar.html b/doc/source/_templates/indexsidebar.html
index be655cc5f4..642bae6738 100644
--- a/doc/source/_templates/indexsidebar.html
+++ b/doc/source/_templates/indexsidebar.html
@@ -19,4 +19,3 @@
Search mailing list archive
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1e3d298fdc..04ac32483b 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -42,8 +42,9 @@
try:
import nibabel
except ImportError:
- raise RuntimeError('Need nibabel on Python PATH; consider "make htmldoc" '
- 'from nibabel root directory')
+ raise RuntimeError(
+ 'Need nibabel on Python PATH; consider "make htmldoc" from nibabel root directory'
+ )
# -- General configuration ----------------------------------------------------
@@ -55,23 +56,24 @@
fobj.write(rel['long_description'])
# Load metadata from setup.cfg
-with open(Path("../../pyproject.toml"), 'rb') as fobj:
+with open(Path('../../pyproject.toml'), 'rb') as fobj:
pyproject = tomllib.load(fobj)
-authors = pyproject["project"]["authors"][0]
+authors = pyproject['project']['authors'][0]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.doctest',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.todo',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.inheritance_diagram',
- 'sphinx.ext.autosummary',
- 'texext.math_dollar', # has to go before numpydoc
- 'numpydoc',
- 'matplotlib.sphinxext.plot_directive',
- ]
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.inheritance_diagram',
+ 'sphinx.ext.autosummary',
+ 'texext.math_dollar', # has to go before numpydoc
+ 'numpydoc',
+ 'matplotlib.sphinxext.plot_directive',
+]
# Autosummary always wants to use a `generated/` directory.
# We generate with `make api-stamp`
@@ -85,13 +87,13 @@
source_suffix = '.rst'
# The encoding of source files.
-#source_encoding = 'utf-8'
+# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
-project = u'NiBabel'
+project = 'NiBabel'
copyright = f"2006-2022, {authors['name']} <{authors['email']}>"
# The version info for the project you're documenting, acts as replacement for
@@ -105,11 +107,11 @@
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
-#language = None
+# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y, %H:%M PDT'
@@ -124,32 +126,32 @@
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# -- Sphinxext configuration --------------------------------------------------
# Set attributes for layout of inheritance diagrams
-inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"', fontsize=14,
- ratio='compress')
-inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
- color='dodgerblue1', style='filled')
+inheritance_graph_attrs = dict(rankdir='LR', size='"6.0, 8.0"', fontsize=14, ratio='compress')
+inheritance_node_attrs = dict(
+ shape='ellipse', fontsize=14, height=0.75, color='dodgerblue1', style='filled'
+)
# Flag to show todo items in rendered output
todo_include_todos = True
@@ -168,26 +170,26 @@
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
-#html_title = ''
+# html_title = ''
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@@ -196,31 +198,39 @@
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
html_index = 'index.html'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['localtoc.html', 'relations.html', 'sourcelink.html',
- 'indexsidebar.html', 'searchbox.html', 'reggie.html']}
+html_sidebars = {
+ 'index': [
+ 'localtoc.html',
+ 'relations.html',
+ 'sourcelink.html',
+ 'indexsidebar.html',
+ 'searchbox.html',
+ 'reggie.html',
+ ]
+}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {'index': 'index.html'}
+# html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
-#html_use_modindex = True
+# html_use_modindex = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
@@ -228,10 +238,10 @@
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
+# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'nibabeldoc'
@@ -241,34 +251,32 @@
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
+# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
+# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
-latex_documents = [
- ('index', 'nibabel.tex', u'NiBabel Documentation', u'NiBabel Authors',
- 'manual')]
+latex_documents = [('index', 'nibabel.tex', 'NiBabel Documentation', 'NiBabel Authors', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+# latex_preamble = ''
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_use_modindex = True
+# latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
diff --git a/doc/source/devel/biaps/biap_0001.rst b/doc/source/devel/biaps/biap_0001.rst
index 7cf2b5b0ac..659967b549 100644
--- a/doc/source/devel/biaps/biap_0001.rst
+++ b/doc/source/devel/biaps/biap_0001.rst
@@ -297,4 +297,3 @@ How about the names in the proposal? ``is_proxy``; ``unproxy=True``?
.. vim: ft=rst
-
diff --git a/doc/source/devel/biaps/biap_0003.rst b/doc/source/devel/biaps/biap_0003.rst
index 3b4bdad24e..7abb07efc1 100644
--- a/doc/source/devel/biaps/biap_0003.rst
+++ b/doc/source/devel/biaps/biap_0003.rst
@@ -41,7 +41,7 @@ From `adding nifti extensions`_:
* 4 = NIFTI_ECODE_AFNI = AFNI header attributes: The format of the AFNI
extension in the NIfTI-1.1 format is described at
http://nifti.nimh.nih.gov/nifti-1/AFNIextension1/
-* 6 = NIFTI_ECODE_COMMENT = comment: arbitrary non-NUL ASCII text, with no
+* 6 = NIFTI_ECODE_COMMENT = comment: arbitrary non-NUL ASCII text, with no
additional structure implied
* 8 = NIFTI_ECODE_XCEDE = XCEDE metadata:
http://www.nbirn.net/Resources/Users/Applications/xcede/index.htm
@@ -369,7 +369,7 @@ apply to the Cartesian product of the image axis values. For example, if the
values of ``applies_to`` == ``['slice', 'time']``, and the slice and time axes
in the array are lengths (6, 10) respectively, then the values apply to all
combinations of the 6 possible values for slice indices and the 10 possible
-values for the time indices (ie apply to all 6x10=60 values). The axis metadata
+values for the time indices (ie apply to all 6x10=60 values). The axis metadata
values in this case can be:
* a scalar. The value applies to every combination of (slice, time)
diff --git a/doc/source/devel/biaps/biap_0004.rst b/doc/source/devel/biaps/biap_0004.rst
index d8ac1569af..229025d01a 100644
--- a/doc/source/devel/biaps/biap_0004.rst
+++ b/doc/source/devel/biaps/biap_0004.rst
@@ -16,9 +16,9 @@ nibabel objects and functions.
Motivation
**********
-It is very common to convert source DICOM images to another format, typically
-Nifti, before doing any image processing. The Nifti format is significantly
-easier to work with and has wide spread compatibility. However, the vast
+It is very common to convert source DICOM images to another format, typically
+Nifti, before doing any image processing. The Nifti format is significantly
+easier to work with and has wide spread compatibility. However, the vast
amount of meta data stored in the source DICOM files will be lost.
After implementing this proposal, users will be able to preserve all of the
@@ -32,7 +32,7 @@ private elements. The meta data will then be easily accessible through the
(256, 256, 24, 8)
>>> print nii.get_meta('RepetitionTime')
3500.0
- >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx))
+ >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx))
for idx in xrange(data.shape[-1])]
>>> print echo_times
[16.4, 32.8, 49.2, 65.6, 82.0, 98.4, 114.8, 131.2]
@@ -50,25 +50,25 @@ Overview
********
dcmstack reads a series of DICOM images, works out their relationship in terms
-of slices and volumes, and compiles them into multidimensional volumes. It can
-produce the corresponding data volume and affine, or a Nifti image (with any
+of slices and volumes, and compiles them into multidimensional volumes. It can
+produce the corresponding data volume and affine, or a Nifti image (with any
additional header information set appropriately).
-In the course of the read, dcmstack creates a `DcmMeta` object for
-each input file. This object is an ordered mapping that can contain a copy
-of all the meta data in the DICOM header. By default some filtering is
-applied to reduce the chance of including PHI. The set of DcmMeta objects are
-then merged together in the same order as the image data to create a single
+In the course of the read, dcmstack creates a `DcmMeta` object for
+each input file. This object is an ordered mapping that can contain a copy
+of all the meta data in the DICOM header. By default some filtering is
+applied to reduce the chance of including PHI. The set of DcmMeta objects are
+then merged together in the same order as the image data to create a single
DcmMeta object that summarizes all of the meta data for the series.
-To summarize the meta data, each element is classified based on how the values
-repeat (e.g. const, per_slice, per_volume, etc.). Each element has a name (the
-keyword from the DICOM standard) and one or more values (the number of values
-depends on the classification and the shape of the image). Each classification's
+To summarize the meta data, each element is classified based on how the values
+repeat (e.g. const, per_slice, per_volume, etc.). Each element has a name (the
+keyword from the DICOM standard) and one or more values (the number of values
+depends on the classification and the shape of the image). Each classification's
meta data is stored stored in a separate nested dictionary.
-While creating the Nifti image output, the `DcmMeta` is stored in a
-`DcmMetaExtension` which can be added as a header extension. This extension
+While creating the Nifti image output, the `DcmMeta` is stored in a
+`DcmMetaExtension` which can be added as a header extension. This extension
simply does a JSON encoding directly on the `DcmMeta` object.
When working with these images, it's possible to keep track of the
@@ -78,7 +78,7 @@ slice, and remove information for other slices. Or when merging 3D volumes to
a 4D time series, we want to merge together the meta data too.
At the moment, dcmstack only creates Nifti images. There's no reason that this
-should be so, and the relationship of dcmstack to other spatial images should be
+should be so, and the relationship of dcmstack to other spatial images should be
more flexible.
******
@@ -105,10 +105,10 @@ wrapping the `DcmMeta` in the Extension API?
Status
------
-Resolved. We now have a separate `DcmMeta` object which inherits from
-`OrderedDict` and contains all of the functionality previously in
-`DcmMetaExtension` except those related to acting as a Nifti1Extension.
-The `DcmMetaExtension` now provides just the functionality for being
+Resolved. We now have a separate `DcmMeta` object which inherits from
+`OrderedDict` and contains all of the functionality previously in
+`DcmMetaExtension` except those related to acting as a Nifti1Extension.
+The `DcmMetaExtension` now provides just the functionality for being
a Nifti1Extension.
Keeping track of metadata when manipulating images
@@ -117,13 +117,13 @@ Keeping track of metadata when manipulating images
When slicing images, it is good to be able to keep track of the relevant DICOM
metadata for the particular slice. Or when merging images, it is good to be
able to compile the metadata across slices into the (e.g) volume metadata. Or,
-say, when coregistering an image, it is good to be able to know that the
-metadata that is per-slice no longer directly corresponds to a slice of the
-data array.
+say, when coregistering an image, it is good to be able to know that the
+metadata that is per-slice no longer directly corresponds to a slice of the
+data array.
At the moment, dcmstack deals with this by wrapping the image with DICOM meta
information in `NiftiWrapper` object : see
-https://github.com/moloney/dcmstack/blob/master/src/dcmstack/dcmmeta.py#L1185 .
+https://github.com/moloney/dcmstack/blob/d157741/src/dcmstack/dcmmeta.py#L1232.
This object accepts a Nifti image as input, that usually contains a
`DcmMetaExtension`, and has methods `get_meta` (to get metadata from extension),
`split` (for taking slice specific metadata into the split parts), `meta_valid`
@@ -146,8 +146,8 @@ Put the `DcmMeta` data into the `extra` object that is input to the
Add a `get_meta` method to `SpatialImage` that uses the to-be-defined API of the
`extra` object. Maybe, by default, this would just get keys out of the mapping.
-Define an API for the `extra` object to give back metadata that is potentially
-varying (per slice or volume). We also need a way to populate the `extra` object
+Define an API for the `extra` object to give back metadata that is potentially
+varying (per slice or volume). We also need a way to populate the `extra` object
when loading an image that has an associated `DcmMeta` object.
Use this API to get metadata. Try and make this work with functions outside the
@@ -179,57 +179,57 @@ Add `create_dcmmeta` method to the nibabel DICOM wrapper objects, that can be
specialized for each known DICOM format variation. Put the rules for slice
information etc into each class.
-For the Siemens files, we will need to make a list of elements from the private
-CSA headers that are known to be slice specific. For the multiframe DICOM files
-we should be able to do this in a programmatic manner, since the varying data
-should live in the PerFrameFunctionalSequence DICOM element. Each element that
-is reclassified should be simplified with the `DcmMeta.simplify` method so that
-it can be classified appropriately.
+For the Siemens files, we will need to make a list of elements from the private
+CSA headers that are known to be slice specific. For the multiframe DICOM files
+we should be able to do this in a programmatic manner, since the varying data
+should live in the PerFrameFunctionalSequence DICOM element. Each element that
+is reclassified should be simplified with the `DcmMeta.simplify` method so that
+it can be classified appropriately.
Meta data in nested DICOM sequences can not be independently classified
=======================================================================
-The code for summarizing meta data only works on the top level of key/value
-pairs. Any value that is a nested dataset is treated as a single entity,
-which prevents us from classifying its individual elements differently.
+The code for summarizing meta data only works on the top level of key/value
+pairs. Any value that is a nested dataset is treated as a single entity,
+which prevents us from classifying its individual elements differently.
-In a DICOM data set, any element that is a sequence contains one or more
-nested DICOM data sets. For most MRI images this is not an issue since
-they rarely contain many sequences, and the ones they do are usually small
-and relatively unimportant. However in multiframe DICOM files make heavy
+In a DICOM data set, any element that is a sequence contains one or more
+nested DICOM data sets. For most MRI images this is not an issue since
+they rarely contain many sequences, and the ones they do are usually small
+and relatively unimportant. However in multiframe DICOM files make heavy
use of nested sequences to store data.
Plan
----
-This same issue was solved for the translated Siemens CSA sub headers by
-unpacking each nested dataset by joining the keys from each level with a
-dotted notation. For example, in the `CsaSeries` subheader there is a nested
-`MrPhoenixProtocol` dataset which has an element `ulVersion` so the key we
-use after unpacking is `CsaSeries.MrPhoenixProtocol.ulVersion`.
+This same issue was solved for the translated Siemens CSA sub headers by
+unpacking each nested dataset by joining the keys from each level with a
+dotted notation. For example, in the `CsaSeries` subheader there is a nested
+`MrPhoenixProtocol` dataset which has an element `ulVersion` so the key we
+use after unpacking is `CsaSeries.MrPhoenixProtocol.ulVersion`.
-We can take the same approach for DICOM sequence elements. One additional
+We can take the same approach for DICOM sequence elements. One additional
consideration is that each of these element is actually a list of data sets,
so we would need to add an index number to the key somehow.
-The alternative is to handle nested data sets recursively in the meta data
-summarizing code. This would be fairly complex and you would no longer be
-able to refer to each element with a single string, at least not without
+The alternative is to handle nested data sets recursively in the meta data
+summarizing code. This would be fairly complex and you would no longer be
+able to refer to each element with a single string, at least not without
some mini-language for traversing the nested datasets.
Improving access to varying meta data through the Nifti
=======================================================
-Currently, when accessing varying meta data through the `get_meta` method
+Currently, when accessing varying meta data through the `get_meta` method
you can only get one value at a time::
- >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx))
+ >>> echo_times = [nii.get_meta('EchoTime', (0, 0, 0, idx))
for idx in xrange(data.shape[-1])]
-You can easily get multiple values from the `DcmMeta` object itself, but
-then you lose the capability to automatically check if the meta data is
+You can easily get multiple values from the `DcmMeta` object itself, but
+then you lose the capability to automatically check if the meta data is
valid in relation to the current image.
.. _dcmstack : https://github.com/moloney/dcmstack
-.. _DcmMetaExtension : https://github.com/moloney/dcmstack/blob/master/src/dcmstack/dcmmeta.py#L92
+.. _DcmMetaExtension : https://github.com/moloney/dcmstack/blob/d157741/src/dcmstack/dcmmeta.py#L112
.. vim: ft=rst
diff --git a/doc/source/devel/biaps/biap_0006.rst b/doc/source/devel/biaps/biap_0006.rst
index ad4a0f9b8d..16a3a4833f 100644
--- a/doc/source/devel/biaps/biap_0006.rst
+++ b/doc/source/devel/biaps/biap_0006.rst
@@ -194,7 +194,7 @@ In NIfTI:
We saw above that the MGH format refers to a volume (in our sense) as a
*frame*. ECAT has the same usage - a frame is a 3D volume. The fmristat
software uses frame in the same sense, e.g., `line 32 of example.m
-`_.
+`_.
Unfortunately DICOM appears to use "frame" to mean a 2D slice. For example,
here is the definition of a "multi-frame image"::
diff --git a/doc/source/devel/register_me.py b/doc/source/devel/register_me.py
index 76e0dbc641..017f873abf 100644
--- a/doc/source/devel/register_me.py
+++ b/doc/source/devel/register_me.py
@@ -1,7 +1,7 @@
-from os.path import join as pjoin, expanduser, abspath, dirname
-import sys
import configparser as cfp
-
+import sys
+from os.path import abspath, dirname, expanduser
+from os.path import join as pjoin
if sys.platform == 'win32':
HOME_INI = pjoin(expanduser('~'), '_dpkg', 'local.dsource')
diff --git a/doc/source/devel/spm_use.rst b/doc/source/devel/spm_use.rst
index 56c7051696..8c47cd7f5e 100644
--- a/doc/source/devel/spm_use.rst
+++ b/doc/source/devel/spm_use.rst
@@ -16,8 +16,8 @@ their call syntax is the same for example.
>> fname = 'some_image.nii';
>> vol = spm_vol(fname) % the vol struct
-
- vol =
+
+ vol =
fname: 'some_image.nii'
mat: [4x4 double]
@@ -37,7 +37,7 @@ their call syntax is the same for example.
0 0 2 -74
0 0 0 1
- >> help spm_vol
+ >> help spm_vol
Get header information etc for images.
FORMAT V = spm_vol(P)
P - a matrix of filenames.
@@ -84,7 +84,7 @@ their call syntax is the same for example.
>> vol.private
- ans =
+ ans =
NIFTI object: 1-by-1
dat: [91x109x91 file_array]
@@ -113,7 +113,7 @@ Images in SPM are always 3D. Note this behavior:
>> fname = 'functional_01.nii';
>> vol = spm_vol(fname)
- vol =
+ vol =
191x1 struct array with fields:
fname
@@ -160,7 +160,7 @@ Some simple ones:
>> new_vol.fname = new_fname;
>> spm_write_vol(new_vol, img_arr)
- ans =
+ ans =
fname: 'another_image.nii'
mat: [4x4 double]
@@ -262,7 +262,7 @@ world coordinates according to the affine looks like:
53.0000
1.0000
- >> vals = spm_sample_vol(vol, vc(1), vc(2), vc(3), hold_val)
+ >> vals = spm_sample_vol(vol, vc(1), vc(2), vc(3), hold_val)
vals =
diff --git a/doc/source/dicom/dcm2nii_algorithms.rst b/doc/source/dicom/dcm2nii_algorithms.rst
index 809ac51c51..88cd55dfcd 100644
--- a/doc/source/dicom/dcm2nii_algorithms.rst
+++ b/doc/source/dicom/dcm2nii_algorithms.rst
@@ -7,7 +7,7 @@
dcm2nii_ is an open source DICOM_ to nifti_ conversion program, written
by Chris Rorden, in Delphi (object orientated pascal). It's part of
Chris' popular mricron_ collection of programs. The source appears to
-be best found on the `mricron NITRC site`_. It's BSD_ licensed.
+be best found on the `mricron NITRC site`_. It's BSD_ licensed.
.. _mricron NITRC site: https://www.nitrc.org/projects/mricron
diff --git a/doc/source/dicom/derivations/dicom_mosaic.py b/doc/source/dicom/derivations/dicom_mosaic.py
index 074c5491c1..5def2e9490 100644
--- a/doc/source/dicom/derivations/dicom_mosaic.py
+++ b/doc/source/dicom/derivations/dicom_mosaic.py
@@ -1,22 +1,19 @@
-""" Just showing the mosaic simplification """
+"""Just showing the mosaic simplification"""
-from sympy import Matrix, Symbol, symbols, simplify
+from sympy import Matrix, Symbol, simplify, symbols
def numbered_matrix(nrows, ncols, symbol_prefix):
- return Matrix(nrows, ncols, lambda i, j: Symbol(
- symbol_prefix + '_{%d%d}' % (i+1, j+1)))
+ return Matrix(nrows, ncols, lambda i, j: Symbol(symbol_prefix + '_{%d%d}' % (i + 1, j + 1)))
def numbered_vector(nrows, symbol_prefix):
- return Matrix(nrows, 1, lambda i, j: Symbol(
- symbol_prefix + '_{%d}' % (i+1)))
+ return Matrix(nrows, 1, lambda i, j: Symbol(symbol_prefix + '_{%d}' % (i + 1)))
RS = numbered_matrix(3, 3, 'rs')
-mdc, mdr, rdc, rdr = symbols(
- 'md_{cols} md_{rows} rd_{cols} rd_{rows}')
+mdc, mdr, rdc, rdr = symbols('md_{cols} md_{rows} rd_{cols} rd_{rows}')
md_adj = Matrix((mdc - 1, mdr - 1, 0)) / -2
rd_adj = Matrix((rdc - 1, rdr - 1, 0)) / -2
@@ -24,8 +21,6 @@ def numbered_vector(nrows, symbol_prefix):
adj = -(RS * md_adj) + RS * rd_adj
adj.simplify()
-Q = RS[:, :2] * Matrix((
- (mdc - rdc) / 2,
- (mdr - rdr) / 2))
+Q = RS[:, :2] * Matrix(((mdc - rdc) / 2, (mdr - rdr) / 2))
assert simplify(adj - Q) == Matrix([0, 0, 0])
diff --git a/doc/source/dicom/derivations/spm_dicom_orient.py b/doc/source/dicom/derivations/spm_dicom_orient.py
index 936e807ce1..12b3ee99b6 100644
--- a/doc/source/dicom/derivations/spm_dicom_orient.py
+++ b/doc/source/dicom/derivations/spm_dicom_orient.py
@@ -1,4 +1,4 @@
-""" Symbolic versions of the DICOM orientation mathemeatics.
+"""Symbolic versions of the DICOM orientation mathemeatics.
Notes on the SPM orientation machinery.
@@ -8,20 +8,17 @@
"""
import numpy as np
-
import sympy
-from sympy import Matrix, Symbol, symbols, zeros, ones, eye
+from sympy import Matrix, Symbol, eye, ones, symbols, zeros
# The code below is general (independent of SPMs code)
def numbered_matrix(nrows, ncols, symbol_prefix):
- return Matrix(nrows, ncols, lambda i, j: Symbol(
- symbol_prefix + '_{%d%d}' % (i + 1, j + 1)))
+ return Matrix(nrows, ncols, lambda i, j: Symbol(symbol_prefix + '_{%d%d}' % (i + 1, j + 1)))
def numbered_vector(nrows, symbol_prefix):
- return Matrix(nrows, 1, lambda i, j: Symbol(
- symbol_prefix + '_{%d}' % (i + 1)))
+ return Matrix(nrows, 1, lambda i, j: Symbol(symbol_prefix + '_{%d}' % (i + 1)))
# premultiplication matrix to go from 0 based to 1 based indexing
@@ -46,7 +43,7 @@ def numbered_vector(nrows, symbol_prefix):
R = zeros(4, 2)
R[:3, :] = R3
-# The following is specific to the SPM algorithm.
+# The following is specific to the SPM algorithm.
x1 = ones(4, 1)
y1 = ones(4, 1)
y1[:3, :] = pos_pat_0
diff --git a/doc/source/dicom/dicom_fields.rst b/doc/source/dicom/dicom_fields.rst
index 5d2af15739..f7d2ab8490 100644
--- a/doc/source/dicom/dicom_fields.rst
+++ b/doc/source/dicom/dicom_fields.rst
@@ -15,7 +15,7 @@ because we've covered those somewhat in :ref:`dicom-orientation`.
Fields for ordering DICOM files into images
===========================================
-You'll see some discussion of this in :ref:`spm-dicom`.
+You'll see some discussion of this in :ref:`spm-dicom`.
Section 7.3.1: general series module
diff --git a/doc/source/dicom/dicom_info.rst b/doc/source/dicom/dicom_info.rst
index a1173073fe..b9883d4bfc 100644
--- a/doc/source/dicom/dicom_info.rst
+++ b/doc/source/dicom/dicom_info.rst
@@ -47,5 +47,5 @@ Here is a selected list of other tools and relevant resources:
* http://www.barre.nom.fr/medical/samples/
* http://pubimage.hcuge.ch:8080/
* Via links from the dcm2nii_ page.
-
+
.. include:: ../links_names.txt
diff --git a/doc/source/dicom/dicom_mosaic.rst b/doc/source/dicom/dicom_mosaic.rst
index 5ff0f1fcf7..789247f3ff 100644
--- a/doc/source/dicom/dicom_mosaic.rst
+++ b/doc/source/dicom/dicom_mosaic.rst
@@ -17,7 +17,7 @@ with something like::
import dicom
dcm_data = dicom.read_file('my_file.dcm')
plt.imshow(dcm_data.pixel_array)
-
+
.. image:: mosaic_grid.png
Getting the slices from the mosaic
@@ -83,7 +83,7 @@ rd_{cols})$ and the mosaic dimensions are $(md_{rows}, md_{cols})$. The
.. math::
- \mathbf{i} = \mathbf{c} + RS
+ \mathbf{i} = \mathbf{c} + RS
\begin{bmatrix} -(md_{rows}-1) / 2\\
-(md_{cols}-1) / 2\\
0 \end{bmatrix}
@@ -94,7 +94,7 @@ the true image position $\mathbf{t}$:
.. math::
- \mathbf{t} = \mathbf{i} -
+ \mathbf{t} = \mathbf{i} -
(RS \begin{bmatrix} -(md_{rows}-1) / 2\\
-(md_{cols}-1) / 2\\
0 \end{bmatrix}) +
@@ -106,11 +106,11 @@ Because of the final zero in the voxel translations, this simplifies to:
.. math::
- \mathbf{t} = \mathbf{i} +
+ \mathbf{t} = \mathbf{i} +
Q \begin{bmatrix} (md_{rows} - rd_{rowss}) / 2 \\
(md_{cols} - rd_{cols}) / 2 \end{bmatrix}
-where:
+where:
.. math::
diff --git a/doc/source/dicom/dicom_orientation.rst b/doc/source/dicom/dicom_orientation.rst
index dae0ea5c60..275b16ce78 100644
--- a/doc/source/dicom/dicom_orientation.rst
+++ b/doc/source/dicom/dicom_orientation.rst
@@ -17,7 +17,7 @@ definitions`_ (2009):
patient. The y-axis is increasing to the posterior side of the
patient. The z-axis is increasing toward the head of the patient.
-(we'll ignore the quadupeds for now).
+(we'll ignore the quadupeds for now).
In a way it's funny to call this the 'patient-based' coordinate system.
'Doctor-based coordinate system' is a better name. Think of a doctor
@@ -33,7 +33,7 @@ patient.
DICOM pixel data
================
-C.7.6.3.1.4 - Pixel Data
+C.7.6.3.1.4 - Pixel Data
Pixel Data (7FE0,0010) for this image. The order of pixels sent for
each image plane is left to right, top to bottom, i.e., the upper
left pixel (labeled 1,1) is sent first followed by the remainder of
@@ -110,21 +110,21 @@ system* - see `DICOM object definitions`_ section 3.17.1):
\begin{bmatrix} P_x\\
P_y\\
P_z\\
- 1 \end{bmatrix} =
- \begin{bmatrix} X_x\Delta{i} & Y_x\Delta{j} & 0 & S_x \\
+ 1 \end{bmatrix} =
+ \begin{bmatrix} X_x\Delta{i} & Y_x\Delta{j} & 0 & S_x \\
X_y\Delta{i} & Y_y\Delta{j} & 0 & S_y \\
X_z\Delta{i} & Y_z\Delta{j} & 0 & S_z \\
0 & 0 & 0 & 1 \end{bmatrix}
\begin{bmatrix} i\\
j\\
0\\
- 1 \end{bmatrix}
- = M
+ 1 \end{bmatrix}
+ = M
\begin{bmatrix} i\\
j\\
0\\
- 1 \end{bmatrix}
-
+ 1 \end{bmatrix}
+
Where:
#. $P_{xyz}$ : The coordinates of the voxel (i,j) in the frame's
@@ -207,20 +207,20 @@ DICOM affine formula
\begin{bmatrix} P_x\\
P_y\\
P_z\\
- 1 \end{bmatrix} =
- \begin{bmatrix} F_{11}\Delta{r} & F_{12}\Delta{c} & 0 & S_x \\
+ 1 \end{bmatrix} =
+ \begin{bmatrix} F_{11}\Delta{r} & F_{12}\Delta{c} & 0 & S_x \\
F_{21}\Delta{r} & F_{22}\Delta{c} & 0 & S_y \\
F_{31}\Delta{r} & F_{32}\Delta{c} & 0 & S_z \\
0 & 0 & 0 & 1 \end{bmatrix}
\begin{bmatrix} r\\
c\\
0\\
- 1 \end{bmatrix}
- = A
+ 1 \end{bmatrix}
+ = A
\begin{bmatrix} r\\
c\\
0\\
- 1 \end{bmatrix}
+ 1 \end{bmatrix}
Where:
@@ -258,7 +258,7 @@ In the *multi slice* case, we can assume that the
'ImageOrientationPatient' field is the same for all the slices.
We want to get the affine transformation matrix $A$ that maps from voxel
-coordinates in the DICOM file(s), to mm in the :ref:`dicom-pcs`.
+coordinates in the DICOM file(s), to mm in the :ref:`dicom-pcs`.
By voxel coordinates, we mean coordinates of form $(r, c, s)$ - the row,
column and slice indices - as for the :ref:`dicom-slice-affine`.
@@ -305,7 +305,7 @@ For the multi-slice case, we can fill in $\mathbf{k}$ by using the information
from $T^N$, because $T^N$ is the translation needed to take the
first voxel in the last (slice index = $N-1$) slice to mm space. So:
-.. math::
+.. math::
\left(\begin{smallmatrix}T^N\\1\end{smallmatrix}\right) = A \left(\begin{smallmatrix}0\\0\\N - 1\\1\end{smallmatrix}\right)
@@ -325,7 +325,7 @@ and therefore:
.. math::
A_{multi} = \left(\begin{smallmatrix}F_{{11}} \Delta{r} & F_{{12}} \Delta{c} & \frac{T^{N}_{{1}} - T^{1}_{{1}}}{N - 1} & T^{1}_{{1}}\\F_{{21}} \Delta{r} & F_{{22}} \Delta{c} & \frac{T^{N}_{{2}} - T^{1}_{{2}}}{N - 1} & T^{1}_{{2}}\\F_{{31}} \Delta{r} & F_{{32}} \Delta{c} & \frac{T^{N}_{{3}} - T^{1}_{{3}}}{N - 1} & T^{1}_{{3}}\\0 & 0 & 0 & 1\end{smallmatrix}\right)
-
+
A_{single} = \left(\begin{smallmatrix}F_{{11}} \Delta{r} & F_{{12}} \Delta{c} & \Delta{s} n_{{1}} & T^{1}_{{1}}\\F_{{21}} \Delta{r} & F_{{22}} \Delta{c} & \Delta{s} n_{{2}} & T^{1}_{{2}}\\F_{{31}} \Delta{r} & F_{{32}} \Delta{c} & \Delta{s} n_{{3}} & T^{1}_{{3}}\\0 & 0 & 0 & 1\end{smallmatrix}\right)
See :download:`derivations/spm_dicom_orient.py` for the derivations and
@@ -410,4 +410,3 @@ plus a constant.
Again, see :download:`derivations/spm_dicom_orient.py` for the derivations.
.. include:: ../links_names.txt
-
diff --git a/doc/source/dicom/siemens_csa.rst b/doc/source/dicom/siemens_csa.rst
index 7807f7b89f..9beec6150a 100644
--- a/doc/source/dicom/siemens_csa.rst
+++ b/doc/source/dicom/siemens_csa.rst
@@ -12,7 +12,7 @@ header. We'll call this the *CSA header*.
CSA header
==========
-See this Siemens `Syngo DICOM conformance`_ statement, and a GDCM_
+See this Siemens `Syngo DICOM conformance`_ statement, and a GDCM_
`Siemens header dump`_.
.. _`Siemens header dump`: http://sourceforge.net/apps/mediawiki/gdcm/index.php?title=Gdcmdump#SIEMENS_CSA_Header
@@ -38,7 +38,7 @@ same format. The fields can be of two types, CSA1 and CSA2.
Both are always little-endian, whatever the machine endian is.
The CSA2 format begins with the string 'SV10', the CSA1 format does
-not.
+not.
The code below keeps track of the position *within the CSA header
stream*. We'll call this ``csa_position``. At this point (after
@@ -81,14 +81,14 @@ At this point SPM does a check, by calculating the length of this item
If ``item_len`` is less than 0 or greater than
``csa_max_pos-csa_position`` (the remaining number of bytes to read in
the whole header) then we break from the item reading loop,
-setting the value below to ''.
+setting the value below to ''.
Then we calculate ``item_len`` rounded up to the nearest 4 byte boundary
-tp get ``next_item_pos``.
+tp get ``next_item_pos``.
-2. value : uint8, ``item_len``.
+2. value : uint8, ``item_len``.
-We set the stream position to ``next_item_pos``.
+We set the stream position to ``next_item_pos``.
CSA2
====
@@ -126,10 +126,10 @@ Now there's a different length check from CSA1. ``item_len`` is given
just by ``xx[1]``. If ``item_len`` > ``csa_max_pos - csa_position``
(the remaining bytes in the header), then we just read the remaining
bytes in the header (as above) into ``value`` below, as uint8, move the
-filepointer to the next 4 byte boundary, and give up reading.
+filepointer to the next 4 byte boundary, and give up reading.
-2. value : uint8, ``item_len``.
+2. value : uint8, ``item_len``.
-We set the stream position to the next 4 byte boundary.
+We set the stream position to the next 4 byte boundary.
.. include:: ../links_names.txt
diff --git a/doc/source/dicom/spm_dicom.rst b/doc/source/dicom/spm_dicom.rst
index 67b6bcf0ca..5b0deb1672 100644
--- a/doc/source/dicom/spm_dicom.rst
+++ b/doc/source/dicom/spm_dicom.rst
@@ -5,7 +5,7 @@
======================
These are some notes on the algorithms that SPM_ uses to convert from
-DICOM_ to nifti_. There are other notes in :ref:`dicom-mosaic`.
+DICOM_ to nifti_. There are other notes in :ref:`dicom-mosaic`.
The relevant SPM files are ``spm_dicom_headers.m``,
``spm_dicom_dict.mat`` and ``spm_dicom_convert.m``. These notes refer
@@ -29,7 +29,7 @@ written by John Ahsburner (JA). Relevant fixes are:
File opening
------------
-When opening the DICOM file, SPM (subfunction ``readdicomfile``)
+When opening the DICOM file, SPM (subfunction ``readdicomfile``)
#. opens as little endian
#. reads 4 characters starting at pos 128
@@ -76,7 +76,7 @@ explicit (as in 'explicit little endian'):
There's a check for not-even tag length. If not even:
-#. 4294967295 appears to be OK - and decoded as Inf for tag length.
+#. 4294967295 appears to be OK - and decoded as Inf for tag length.
#. 13 appears to mean 10 and is reset to be 10
#. Any other odd number is not valid and gives a tag length of 0
@@ -89,7 +89,7 @@ tag length of 13 set to tag length 10.
``spm_dicom_convert.m``
=======================
-Written by John Ashburner and Jesper Andersson.
+Written by John Ashburner and Jesper Andersson.
File categorization
-------------------
@@ -97,7 +97,7 @@ File categorization
SPM makes a special case of Siemens 'spectroscopy images'. These are
images that have 'SOPClassUID' == '1.3.12.2.1107.5.9.1' and the private
tag of (29, 1210); for these it pulls out the affine, and writes a
-volume of ones corresponding to the acquisition planes.
+volume of ones corresponding to the acquisition planes.
For images that are not spectroscopy:
@@ -111,7 +111,7 @@ For images that are not spectroscopy:
* Fields 'SeriesNumber', 'AcquisitionNumber' and 'InstanceNumber' are
set to 1 if absent.
-Next SPM distinguishes between :ref:`dicom-mosaic` and standard DICOM.
+Next SPM distinguishes between :ref:`dicom-mosaic` and standard DICOM.
Mosaic images are those with the Siemens private tag::
@@ -140,7 +140,7 @@ Take first header, put as start of first volume. For each subsequent header:
field in form of 9 integers separated by '_', where 'X' in this
string replaced by '-1' - giving 'ICE1'
-Then, for each currently identified volume:
+Then, for each currently identified volume:
#. If we have ICE1 above, and we do have 'CSAIMageHeaderInfo', with a
'name', in the first header in this volume, then extract ICE dims in
@@ -180,7 +180,7 @@ For each volume:
#. For each header in this volume, get the z coordinate by taking the
dot product of the 'ImagePositionPatient' vector and ``z_dir_cos``
(see :ref:`dicom-z-from-slice`).
-#. Sort the headers according to this estimated z coordinate.
+#. Sort the headers according to this estimated z coordinate.
#. If this volume is more than one slice, and there are any slices with
the same z coordinate (as defined above), run the
:ref:`dicom-img-resort` on this volume - on the basis that it may
@@ -214,7 +214,7 @@ that the routine is still working on - ``work_list``.
for making filenames.
#. Calculate the z coordinate as for :ref:`spm-second-pass`, for each
DICOM header.
-#. Sort the headers by 'InstanceNumber'
+#. Sort the headers by 'InstanceNumber'
#. If any headers have the same 'InstanceNumber', then discard all but
the first header with the same number. At this point the remaining
headers in ``work_list`` will have different 'InstanceNumber's, but
@@ -222,7 +222,7 @@ that the routine is still working on - ``work_list``.
#. Now sort by z coordinate
#. If there are ``N`` headers, make a ``N`` length vector of flags
``is_processed``, for which all values == False
-#. Make an output list of header lists, call it ``hdr_vol_out``, set to empty.
+#. Make an output list of header lists, call it ``hdr_vol_out``, set to empty.
#. While there are still any False elements in ``is_processed``:
#. Find first header for which corresponding ``is_processed`` is
@@ -236,7 +236,7 @@ that the routine is still working on - ``work_list``.
corresponding to ``zsind`` to ``hdr_vol_out[i]``. This assumes
that the original ``work_list`` contained two or more volumes,
each with an identical set of z coordinates.
- #. Set corresponding ``is_processed`` flag to True for all ``z_same_indices``.
+ #. Set corresponding ``is_processed`` flag to True for all ``z_same_indices``.
#. Finally, if the headers in ``work_list`` have 'InstanceNumber's that
cannot be sorted to a sequence ascending in units of 1, or if any
@@ -269,7 +269,7 @@ Then define the following matrices:
.. math::
R = \left(\begin{smallmatrix}1 & a & 1 & 0\\1 & b & 0 & 1\\1 & c & 0 & 0\\1 & d & 0 & 0\end{smallmatrix}\right)
-
+
L = \left(\begin{smallmatrix}T^{1}_{{1}} & e & F_{{11}} \Delta{r} & F_{{12}} \Delta{c}\\T^{1}_{{2}} & f & F_{{21}} \Delta{r} & F_{{22}} \Delta{c}\\T^{1}_{{3}} & g & F_{{31}} \Delta{r} & F_{{32}} \Delta{c}\\1 & h & 0 & 0\end{smallmatrix}\right)
For a volume with more than one slice (header), then $a=1; b=1, c=N, d=1$. $e, f, g$ are the values from $T^N$,
diff --git a/doc/source/gitwash/configure_git.rst b/doc/source/gitwash/configure_git.rst
index 9911d7cbb1..a19f592bd5 100644
--- a/doc/source/gitwash/configure_git.rst
+++ b/doc/source/gitwash/configure_git.rst
@@ -138,19 +138,19 @@ and it gives graph / text output something like this (but with color!)::
* 6d8e1ee - (HEAD, origin/my-fancy-feature, my-fancy-feature) NF - a fancy file (45 minutes ago) [Matthew Brett]
* d304a73 - (origin/placeholder, placeholder) Merge pull request #48 from hhuuggoo/master (2 weeks ago) [Jonathan Terhorst]
- |\
+ |\
| * 4aff2a8 - fixed bug 35, and added a test in test_bugfixes (2 weeks ago) [Hugo]
- |/
+ |/
* a7ff2e5 - Added notes on discussion/proposal made during Data Array Summit. (2 weeks ago) [Corran Webster]
* 68f6752 - Initial implementation of AxisIndexer - uses 'index_by' which needs to be changed to a call on an Axes object - this is all very sketchy right now. (2 weeks ago) [Corr
* 376adbd - Merge pull request #46 from terhorst/master (2 weeks ago) [Jonathan Terhorst]
- |\
+ |\
| * b605216 - updated joshu example to current api (3 weeks ago) [Jonathan Terhorst]
| * 2e991e8 - add testing for outer ufunc (3 weeks ago) [Jonathan Terhorst]
| * 7beda5a - prevent axis from throwing an exception if testing equality with non-axis object (3 weeks ago) [Jonathan Terhorst]
| * 65af65e - convert unit testing code to assertions (3 weeks ago) [Jonathan Terhorst]
| * 956fbab - Merge remote-tracking branch 'upstream/master' (3 weeks ago) [Jonathan Terhorst]
- | |\
+ | |\
| |/
Thanks to Yury V. Zaytsev for posting it.
diff --git a/doc/source/gitwash/development_workflow.rst b/doc/source/gitwash/development_workflow.rst
index b89db449ba..7c117cfcce 100644
--- a/doc/source/gitwash/development_workflow.rst
+++ b/doc/source/gitwash/development_workflow.rst
@@ -22,7 +22,7 @@ In what follows we'll refer to the upstream nibabel ``master`` branch, as
* Name your branch for the purpose of the changes - e.g.
``bugfix-for-issue-14`` or ``refactor-database-code``.
* If you can possibly avoid it, avoid merging trunk or any other branches into
- your feature branch while you are working.
+ your feature branch while you are working.
* If you do find yourself merging from trunk, consider :ref:`rebase-on-trunk`
* Ask on the `nibabel mailing list`_ if you get stuck.
* Ask for code review!
diff --git a/doc/source/gitwash/forking_hell.rst b/doc/source/gitwash/forking_hell.rst
index d70b28ffdf..1dd14f4618 100644
--- a/doc/source/gitwash/forking_hell.rst
+++ b/doc/source/gitwash/forking_hell.rst
@@ -30,4 +30,3 @@ Create your own forked copy of nibabel_
should find yourself at the home page for your own forked copy of nibabel_.
.. include:: links.inc
-
diff --git a/doc/source/gitwash/git_resources.rst b/doc/source/gitwash/git_resources.rst
index ba7b275e05..d18b0ef48b 100644
--- a/doc/source/gitwash/git_resources.rst
+++ b/doc/source/gitwash/git_resources.rst
@@ -9,9 +9,9 @@ Tutorials and summaries
* `github help`_ has an excellent series of how-to guides.
* `learn.github`_ has an excellent series of tutorials
-* The `pro git book`_ is a good in-depth book on git.
+* The `pro git book`_ is a good in-depth book on git.
* A `git cheat sheet`_ is a page giving summaries of common commands.
-* The `git user manual`_
+* The `git user manual`_
* The `git tutorial`_
* The `git community book`_
* `git ready`_ |emdash| a nice series of tutorials
diff --git a/doc/source/gitwash/index.rst b/doc/source/gitwash/index.rst
index 4eae7b7008..9dcc741fbc 100644
--- a/doc/source/gitwash/index.rst
+++ b/doc/source/gitwash/index.rst
@@ -14,5 +14,3 @@ Contents:
patching
git_development
git_resources
-
-
diff --git a/doc/source/gitwash/set_up_fork.rst b/doc/source/gitwash/set_up_fork.rst
index bbdb43fe8d..c4fb086bf0 100644
--- a/doc/source/gitwash/set_up_fork.rst
+++ b/doc/source/gitwash/set_up_fork.rst
@@ -65,4 +65,3 @@ Just for your own satisfaction, show yourself that you now have a new
origin git@github.com:your-user-name/nibabel.git (push)
.. include:: links.inc
-
diff --git a/doc/source/installing_data.rst b/doc/source/installing_data.rst
index c1b335fd02..ce32de2375 100644
--- a/doc/source/installing_data.rst
+++ b/doc/source/installing_data.rst
@@ -10,7 +10,7 @@ packages for some of the DICOM tests in nibabel. There are also data packages
for standard template images, and other packages for components of nipy,
including the main nipy package.
-For more details on data package design, see :ref:`data-package-design`.
+For more details on data package design, see :ref:`data-package-design`.
We haven't yet made a nice automated way of downloading and installing the
packages. For the moment you can find packages for the data and template files
@@ -78,4 +78,3 @@ with contents::
[DATA]
c:\some\path\share\nipy
-
diff --git a/doc/source/notebooks/ata_error.ipynb b/doc/source/notebooks/ata_error.ipynb
index 216f754161..5a26ed0f98 100644
--- a/doc/source/notebooks/ata_error.ipynb
+++ b/doc/source/notebooks/ata_error.ipynb
@@ -248,4 +248,4 @@
"metadata": {}
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/source/notebooks/cross_product_error.ipynb b/doc/source/notebooks/cross_product_error.ipynb
index 1889c3be7d..bcf8c23d36 100644
--- a/doc/source/notebooks/cross_product_error.ipynb
+++ b/doc/source/notebooks/cross_product_error.ipynb
@@ -151,4 +151,4 @@
"metadata": {}
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/source/old/design.txt b/doc/source/old/design.txt
index f2d30ddf56..35901977b5 100644
--- a/doc/source/old/design.txt
+++ b/doc/source/old/design.txt
@@ -46,7 +46,7 @@ by the image itself.
frame; for reading, we just cast up to a dtype that can hold all the
frame dtypes; for writing, we may just write as one type, or disallow
writing altogether.
-* array shape - ``shape``.
+* array shape - ``shape``.
* byte offset - ``offset`` at which data starts. This is not relevant
for the way we currently read MINC files for example - and may not be
relevant for ECAT files, in the sense that it may be the offset to
@@ -67,7 +67,7 @@ We think of an image as being the association of:
.. note::
- Why are the first three dimensions spatial?
+ Why are the first three dimensions spatial?
For simplicity, we want the transformation (above) to be spatial.
Because the images are always at least 3D, and the transform is
@@ -105,7 +105,7 @@ image format API - see :ref:`image-formats`
This immediately suggests the following interface::
- img = Image(data, affine=None, output_space=None,
+ img = Image(data, affine=None, output_space=None,
meta=None, format=None, filename=None)
The output space is a string
@@ -123,12 +123,12 @@ might imagine these methods::
img.load(filename, format=None) # class method
img.save(filename=None, format=None)
img.as_file(filemaker=None, format=None)
-
+
and some things that formats generally support like::
img.write_header(filename=None)
img.write_data(data=None, filename=None, slicedef=None)
-
+
``img.as_file`` returns the image as saved to disk; the image might
completely correspond to something on disk, in which case it may return
its own filename, or it might not correspond to something on disk, in
@@ -192,12 +192,12 @@ SPM Analyze adds an optional extra data file in Matlab ``.mat`` format::
some_image.mat
Of course there are rules / rules-of-thumb as to what extensions these
-various filenames can be.
+various filenames can be.
We may want to associate an image with a filename or set of filenames.
But we may also want to be able to associate images with file-like
objects, such as open files, or anything else that implements a file
-protocol.
+protocol.
The image ``format`` will know what the ``image`` needs in terms of
files. For example, a single file NIfTI image will need a single
@@ -208,7 +208,7 @@ Let's call a full specification of what the format needs a *filedef*.
For the moment, let's imagine that is a dictionary with keys ``image``,
``header``, and optional ``mat``. The values can be filenames or
file-like objects. A *filespec* is some argument or set of arguments
-that allow us to fully specify a *filedef*.
+that allow us to fully specify a *filedef*.
The simple case of a single-file NIfTI image::
@@ -221,7 +221,7 @@ tries to work out the format from the filespec.
Consider::
- img = Image(data, filespec='some_image.nii',
+ img = Image(data, filespec='some_image.nii',
format=Nifti1SingleFormat)
also OK. But::
@@ -255,7 +255,3 @@ might raise an error, on the lines of::
- or it might just assume that you mean for the image and the header to
be the same file. Perhaps that is too implicit.
-
-
-
-
diff --git a/doc/source/old/format_design.txt b/doc/source/old/format_design.txt
index ad0d23e40d..29585866a9 100644
--- a/doc/source/old/format_design.txt
+++ b/doc/source/old/format_design.txt
@@ -1,4 +1,4 @@
-.. -*- rst -*-
+.. -*- rst -*-
.. _image-formats:
@@ -15,7 +15,7 @@ diagram
Image class plays the role of the Abstraction, and the Format plays the
role of the implementor.
-The Format object provides an interface to the underlying file format.
+The Format object provides an interface to the underlying file format.
The Image has the following methods:
@@ -107,6 +107,3 @@ format-specific tasks::
fmt.set_sform(np.eye(4) * 2)
fmt.fields['descrip'] = 'some information'
fmt.to_filename('another_file.nii')
-
-
-
diff --git a/doc/source/old/orientation.txt b/doc/source/old/orientation.txt
index e74d65517f..b44a11e309 100644
--- a/doc/source/old/orientation.txt
+++ b/doc/source/old/orientation.txt
@@ -135,4 +135,3 @@ left-right by default. We chose this break from the standard because
that is what SPM does with non-affine niftis, and because it seemed more
sensible, and because it's more consistent with what we do with SPM
non-nifti images (not surprisingly).
-
diff --git a/doc/source/scripts/make_coord_examples.py b/doc/source/scripts/make_coord_examples.py
index f763b28c28..aa83fbcd84 100644
--- a/doc/source/scripts/make_coord_examples.py
+++ b/doc/source/scripts/make_coord_examples.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-""" Make graphics and example image for coordinate tutorial
+"""Make graphics and example image for coordinate tutorial
Expects MNI nonlinear template t1 and t2 images in directory of script -
specifically these files:
@@ -18,16 +18,15 @@
import math
+import matplotlib.pyplot as plt
+import nipy
+import nipy.algorithms.resample as rsm
+import nipy.core.api as nca
import numpy as np
import numpy.linalg as npl
import nibabel.eulerangles as euler
-import nipy
-import nipy.core.api as nca
-import nipy.algorithms.resample as rsm
-import matplotlib.pyplot as plt
-
T1_IMG = 'mni_icbm152_t1_tal_nlin_asym_09a.nii'
T2_IMG = 'mni_icbm152_t2_tal_nlin_asym_09a.nii'
@@ -36,7 +35,7 @@
img = nipy.load_image(img_fname)
# Set affine as for FOV, not AC
RZS = img.affine[:3, :3]
- vox_fov_center = -(np.array(img.shape) - 1) / 2.
+ vox_fov_center = -(np.array(img.shape) - 1) / 2.0
T = RZS.dot(vox_fov_center)
img.affine[:3, 3] = T
# Take stuff off the top of the full image, to emphasize FOV
@@ -63,18 +62,18 @@
epi_br = np.array((92, 70)) * 2
epi_tl = np.array((7, 63)) * 2
# Find lengths of sides
-epi_y_len = np.sqrt((np.subtract(epi_bl, epi_tl)**2).sum())
-epi_x_len = np.sqrt((np.subtract(epi_bl, epi_br)**2).sum())
+epi_y_len = np.sqrt((np.subtract(epi_bl, epi_tl) ** 2).sum())
+epi_x_len = np.sqrt((np.subtract(epi_bl, epi_br) ** 2).sum())
x, y = 0, 1
# Make a rectangular box with these sides
+
def make_ortho_box(bl, x_len, y_len):
- """ Make a box with sides parallel to the axes
- """
- return np.array((bl,
- [bl[x] + x_len, bl[y]],
- [bl[x], bl[y] + y_len],
- [bl[x] + x_len, bl[y] + y_len]))
+ """Make a box with sides parallel to the axes"""
+ return np.array(
+ (bl, [bl[x] + x_len, bl[y]], [bl[x], bl[y] + y_len], [bl[x] + x_len, bl[y] + y_len])
+ )
+
orth_epi_box = make_ortho_box(epi_bl, epi_x_len, epi_y_len)
@@ -86,8 +85,7 @@ def make_ortho_box(bl, x_len, y_len):
def plot_line(pt1, pt2, fmt='r-', label=None):
- plt.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], fmt,
- label=label)
+ plt.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], fmt, label=label)
def plot_box(box_def, fmt='r-', label=None):
@@ -103,22 +101,18 @@ def rotate_box(box_def, angle, origin):
box_def_zeroed = box_def - origin
cost = math.cos(angle)
sint = math.sin(angle)
- rot_array = np.array([[cost, -sint],
- [sint, cost]])
+ rot_array = np.array([[cost, -sint], [sint, cost]])
box_def_zeroed = np.dot(rot_array, box_def_zeroed.T).T
return box_def_zeroed + origin
def labeled_point(pt, marker, text, markersize=10, color='k'):
plt.plot(pt[0], pt[1], marker, markersize=markersize)
- plt.text(pt[0] + markersize / 2,
- pt[1] - markersize / 2,
- text,
- color=color)
+ plt.text(pt[0] + markersize / 2, pt[1] - markersize / 2, text, color=color)
def plot_localizer():
- plt.imshow(sagittal, cmap="gray", origin='lower', extent=sag_extents)
+ plt.imshow(sagittal, cmap='gray', origin='lower', extent=sag_extents)
plt.xlabel('mm from isocenter')
plt.ylabel('mm from isocenter')
@@ -126,8 +120,10 @@ def plot_localizer():
def save_plot():
# Plot using global variables
plot_localizer()
+
def vx2mm(pts):
return pts - iso_center
+
plot_box(vx2mm(rot_box), label='EPI bounding box')
plot_box(vx2mm(anat_box), 'b-', label='Structural bounding box')
labeled_point(vx2mm(epi_center), 'ro', 'EPI FOV center')
@@ -145,7 +141,7 @@ def vx2mm(pts):
anat_center = np.mean(anat_box, axis=0)
# y axis on the plot is first axis of image
sag_y, sag_x = sagittal.shape
-iso_center = (np.array([sag_x, sag_y]) - 1) / 2.
+iso_center = (np.array([sag_x, sag_y]) - 1) / 2.0
sag_extents = [-iso_center[0], iso_center[0], -iso_center[1], iso_center[1]]
# Back to image coordinates
@@ -155,7 +151,7 @@ def vx2mm(pts):
rot = np.eye(4)
rot[:3, :3] = euler.euler2mat(0, 0, -angle)
# downsample to make smaller output image
-downsamp = 1/3
+downsamp = 1 / 3
epi_scale = np.diag([downsamp, downsamp, downsamp, 1])
# template voxels to epi box image voxels
vox2epi_vox = epi_scale.dot(rot.dot(epi_trans))
@@ -165,8 +161,7 @@ def vx2mm(pts):
epi_vox_shape = np.array([data.shape[0], epi_x_len, epi_y_len]) * downsamp
# Make sure dimensions are odd by rounding up or down
# This makes the voxel center an integer index, which is convenient
-epi_vox_shape = [np.floor(d) if np.floor(d) % 2 else np.ceil(d)
- for d in epi_vox_shape]
+epi_vox_shape = [np.floor(d) if np.floor(d) % 2 else np.ceil(d) for d in epi_vox_shape]
# resample, preserving affine
epi_cmap = nca.vox2mni(epi_vox2mm)
epi = rsm.resample(t2_img, epi_cmap, np.eye(4), epi_vox_shape)
@@ -178,8 +173,7 @@ def vx2mm(pts):
anat_trans[:3, 3] = -np.array([0, anat_box[0, 0], anat_box[0, 1]])
vox2anat_vox = anat_scale.dot(anat_trans)
anat_vox2mm = t1_img.affine.dot(npl.inv(vox2anat_vox))
-anat_vox_shape = np.round(np.divide(
- [data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes))
+anat_vox_shape = np.round(np.divide([data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes))
anat_cmap = nca.vox2mni(anat_vox2mm)
anat = rsm.resample(t1_img, anat_cmap, np.eye(4), anat_vox_shape)
anat_data = anat.get_fdata()
diff --git a/doc/tools/LICENSE.txt b/doc/tools/LICENSE.txt
index 9e1d415af8..50431cd88e 100644
--- a/doc/tools/LICENSE.txt
+++ b/doc/tools/LICENSE.txt
@@ -4,4 +4,3 @@ https://www.mail-archive.com/sphinx-dev@googlegroups.com/msg02472.html
and were released under a BSD/MIT license by Fernando Perez, Matthew Brett and
the PyMVPA folks. Further cleanups by the scikit-image crew.
-
diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py
index 68d8f68749..3167362643 100644
--- a/doc/tools/apigen.py
+++ b/doc/tools/apigen.py
@@ -21,7 +21,6 @@
import os
import re
from inspect import getmodule
-
from types import BuiltinFunctionType, FunctionType
# suppress print statements (warnings for empty files)
@@ -29,20 +28,21 @@
class ApiDocWriter:
- """ Class for automatic detection and parsing of API docs
+ """Class for automatic detection and parsing of API docs
to Sphinx-parsable reST format"""
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
- def __init__(self,
- package_name,
- rst_extension='.txt',
- package_skip_patterns=None,
- module_skip_patterns=None,
- other_defines=True
- ):
- r""" Initialize package for parsing
+ def __init__(
+ self,
+ package_name,
+ rst_extension='.txt',
+ package_skip_patterns=None,
+ module_skip_patterns=None,
+ other_defines=True,
+ ):
+ r"""Initialize package for parsing
Parameters
----------
@@ -85,7 +85,7 @@ def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
- """ Set package_name
+ """Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
@@ -102,11 +102,10 @@ def set_package_name(self, package_name):
self.root_path = root_module.__path__[-1]
self.written_modules = None
- package_name = property(get_package_name, set_package_name, None,
- 'get/set package_name')
+ package_name = property(get_package_name, set_package_name, None, 'get/set package_name')
def _import(self, name):
- """ Import namespace package """
+ """Import namespace package"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
@@ -114,7 +113,7 @@ def _import(self, name):
return mod
def _get_object_name(self, line):
- """ Get second token in line
+ """Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
@@ -129,7 +128,7 @@ def _get_object_name(self, line):
return name.rstrip(':')
def _uri2path(self, uri):
- """ Convert uri to absolute filepath
+ """Convert uri to absolute filepath
Parameters
----------
@@ -171,7 +170,7 @@ def _uri2path(self, uri):
return path
def _path2uri(self, dirpath):
- """ Convert directory path to uri """
+ """Convert directory path to uri"""
package_dir = self.package_name.replace('.', os.path.sep)
relpath = dirpath.replace(self.root_path, package_dir)
if relpath.startswith(os.path.sep):
@@ -179,7 +178,7 @@ def _path2uri(self, dirpath):
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
- """ Parse module defined in *uri* """
+ """Parse module defined in *uri*"""
filename = self._uri2path(uri)
if filename is None:
print(filename, 'erk')
@@ -233,7 +232,7 @@ def _parse_module_with_import(self, uri):
return functions, classes
def _parse_lines(self, linesource):
- """ Parse lines of text for functions and classes """
+ """Parse lines of text for functions and classes"""
functions = []
classes = []
for line in linesource:
@@ -293,16 +292,16 @@ def generate_api_doc(self, uri):
head += '\n.. currentmodule:: ' + uri + '\n'
body += '\n.. currentmodule:: ' + uri + '\n\n'
for c in classes:
- body += '\n:class:`' + c + '`\n' \
- + self.rst_section_levels[3] * \
- (len(c)+9) + '\n\n'
+ body += '\n:class:`' + c + '`\n' + self.rst_section_levels[3] * (len(c) + 9) + '\n\n'
body += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
- body += ' :members:\n' \
- ' :undoc-members:\n' \
- ' :show-inheritance:\n' \
- '\n' \
+ body += (
+ ' :members:\n'
+ ' :undoc-members:\n'
+ ' :show-inheritance:\n'
+ '\n'
' .. automethod:: __init__\n\n'
+ )
head += '.. autosummary::\n\n'
for f in classes + functions:
head += ' ' + f + '\n'
@@ -317,7 +316,7 @@ def generate_api_doc(self, uri):
return head, body
def _survives_exclude(self, matchstr, match_type):
- """ Returns True if *matchstr* does not match patterns
+ """Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
@@ -358,7 +357,7 @@ def _survives_exclude(self, matchstr, match_type):
return True
def discover_modules(self):
- r""" Return module sequence discovered from ``self.package_name``
+ r"""Return module sequence discovered from ``self.package_name``
Parameters
@@ -385,22 +384,21 @@ def discover_modules(self):
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
- root_uri = self._path2uri(os.path.join(self.root_path,
- dirpath))
+ root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
# Normally, we'd only iterate over dirnames, but since
# dipy does not import a whole bunch of modules we'll
# include those here as well (the *.py filenames).
- filenames = [f[:-3] for f in filenames if
- f.endswith('.py') and not f.startswith('__init__')]
+ filenames = [
+ f[:-3] for f in filenames if f.endswith('.py') and not f.startswith('__init__')
+ ]
for filename in filenames:
package_uri = '/'.join((dirpath, filename))
for subpkg_name in dirnames + filenames:
package_uri = '.'.join((root_uri, subpkg_name))
package_path = self._uri2path(package_uri)
- if (package_path and
- self._survives_exclude(package_uri, 'package')):
+ if package_path and self._survives_exclude(package_uri, 'package'):
modules.append(package_uri)
return sorted(modules)
@@ -408,10 +406,12 @@ def discover_modules(self):
def write_modules_api(self, modules, outdir):
# upper-level modules
main_module = modules[0].split('.')[0]
- ulms = ['.'.join(m.split('.')[:2]) if m.count('.') >= 1
- else m.split('.')[0] for m in modules]
+ ulms = [
+ '.'.join(m.split('.')[:2]) if m.count('.') >= 1 else m.split('.')[0] for m in modules
+ ]
from collections import OrderedDict
+
module_by_ulm = OrderedDict()
for v, k in zip(modules, ulms):
@@ -423,12 +423,12 @@ def write_modules_api(self, modules, outdir):
written_modules = []
for ulm, mods in module_by_ulm.items():
- print(f"Generating docs for {ulm}:")
+ print(f'Generating docs for {ulm}:')
document_head = []
document_body = []
for m in mods:
- print(" -> " + m)
+ print(' -> ' + m)
head, body = self.generate_api_doc(m)
document_head.append(head)
@@ -488,20 +488,19 @@ def write_index(self, outdir, froot='gen', relative_to=None):
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
- path = os.path.join(outdir, froot+self.rst_extension)
+ path = os.path.join(outdir, froot + self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
- relpath = (
- outdir + os.path.sep).replace(relative_to + os.path.sep, '')
+ relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path, 'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
- title = "API Reference"
- w(title + "\n")
- w("=" * len(title) + "\n\n")
+ title = 'API Reference'
+ w(title + '\n')
+ w('=' * len(title) + '\n\n')
w('.. toctree::\n\n')
for f in self.written_modules:
w(f' {os.path.join(relpath, f)}\n')
diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py
index 007175a262..2fded8fbfc 100755
--- a/doc/tools/build_modref_templates.py
+++ b/doc/tools/build_modref_templates.py
@@ -2,18 +2,19 @@
"""Script to auto-generate our API docs.
"""
+import os
+import re
+
# stdlib imports
import sys
-import re
-import os
+
+# version comparison
+from distutils.version import LooseVersion as V
from os.path import join as pjoin
# local imports
from apigen import ApiDocWriter
-# version comparison
-from distutils.version import LooseVersion as V
-
# *****************************************************************************
@@ -38,7 +39,7 @@ def abort(error):
try:
__import__(package)
except ImportError as e:
- abort("Can not import " + package)
+ abort('Can not import ' + package)
module = sys.modules[package]
@@ -54,6 +55,7 @@ def abort(error):
if os.path.exists(version_file):
# Versioneer
from runpy import run_path
+
try:
source_version = run_path(version_file)['get_versions']()['version']
except (FileNotFoundError, KeyError):
@@ -64,27 +66,30 @@ def abort(error):
# Legacy fall-back
info_file = pjoin('..', package, 'info.py')
info_lines = open(info_file).readlines()
- source_version = '.'.join([v.split('=')[1].strip(" '\n.")
- for v in info_lines if re.match(
- '^_version_(major|minor|micro|extra)', v
- )])
+ source_version = '.'.join(
+ [
+ v.split('=')[1].strip(" '\n.")
+ for v in info_lines
+ if re.match('^_version_(major|minor|micro|extra)', v)
+ ]
+ )
print('***', source_version)
if source_version != installed_version:
- abort("Installed version does not match source version")
-
- docwriter = ApiDocWriter(package, rst_extension='.rst',
- other_defines=other_defines)
- docwriter.package_skip_patterns += [r'\.fixes$',
- r'\.fixes.*$',
- r'\.externals$',
- r'\.externals.*$',
- r'.*test.*$',
- r'\.info.*$',
- r'\.pkg_info.*$',
- r'\.py3k.*$',
- r'\._version.*$',
- ]
+ abort('Installed version does not match source version')
+
+ docwriter = ApiDocWriter(package, rst_extension='.rst', other_defines=other_defines)
+ docwriter.package_skip_patterns += [
+ r'\.fixes$',
+ r'\.fixes.*$',
+ r'\.externals$',
+ r'\.externals.*$',
+ r'.*test.*$',
+ r'\.info.*$',
+ r'\.pkg_info.*$',
+ r'\.py3k.*$',
+ r'\._version.*$',
+ ]
docwriter.write_api_docs(outdir)
docwriter.write_index(outdir, 'index', relative_to=outdir)
print('%d files written' % len(docwriter.written_modules))
diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py
index 1649ba62da..53f4a32bdc 100644
--- a/nibabel/tests/test_viewers.py
+++ b/nibabel/tests/test_viewers.py
@@ -56,7 +56,7 @@ def test_viewer():
with pytest.raises(
(
ValueError, # MPL3.5 and lower
- KeyError, # MPL3.6 and higher
+ KeyError, # MPL3.6 and higher
)
):
OrthoSlicer3D.cmap.fset(v, 'foo') # wrong cmap
diff --git a/nisext/__init__.py b/nisext/__init__.py
index 644503e3f7..b556c66d13 100644
--- a/nisext/__init__.py
+++ b/nisext/__init__.py
@@ -1,7 +1,6 @@
# init for sext package
-""" Setuptools extensions
+"""Setuptools extensions
nibabel uses these routines, and houses them, and installs them. nipy-proper
and dipy use them.
"""
-
diff --git a/nisext/py3builder.py b/nisext/py3builder.py
index 7bcaf2348c..24bd298364 100644
--- a/nisext/py3builder.py
+++ b/nisext/py3builder.py
@@ -1,4 +1,4 @@
-""" distutils utilities for porting to python 3 within 2-compatible tree """
+"""distutils utilities for porting to python 3 within 2-compatible tree"""
try:
@@ -6,20 +6,20 @@
except ImportError:
# 2.x - no parsing of code
from distutils.command.build_py import build_py
-else: # Python 3
+else: # Python 3
# Command to also apply 2to3 to doctests
from distutils import log
+
class build_py(build_py_2to3):
def run_2to3(self, files):
# Add doctest parsing; this stuff copied from distutils.utils in
# python 3.2 source
if not files:
return
- fixer_names, options, explicit = (self.fixer_names,
- self.options,
- self.explicit)
+ fixer_names, options, explicit = (self.fixer_names, self.options, self.explicit)
# Make this class local, to delay import of 2to3
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
diff --git a/nisext/sexts.py b/nisext/sexts.py
index 6ececdac78..b206588dec 100644
--- a/nisext/sexts.py
+++ b/nisext/sexts.py
@@ -1,19 +1,18 @@
-""" Distutils / setuptools helpers """
+"""Distutils / setuptools helpers"""
import os
-from os.path import join as pjoin, split as psplit, splitext
-
from configparser import ConfigParser
-
-from distutils.version import LooseVersion
+from distutils import log
from distutils.command.build_py import build_py
from distutils.command.install_scripts import install_scripts
-
-from distutils import log
+from distutils.version import LooseVersion
+from os.path import join as pjoin
+from os.path import split as psplit
+from os.path import splitext
def get_comrec_build(pkg_dir, build_cmd=build_py):
- """ Return extended build command class for recording commit
+ """Return extended build command class for recording commit
The extended command tries to run git to find the current commit, getting
the empty string if it fails. It then writes the commit hash into a file
@@ -47,15 +46,20 @@ def get_comrec_build(pkg_dir, build_cmd=build_py):
information at the terminal. See the ``pkg_info.py`` module in the nipy
package for an example.
"""
+
class MyBuildPy(build_cmd):
- """ Subclass to write commit data into installation tree """
+ """Subclass to write commit data into installation tree"""
+
def run(self):
build_cmd.run(self)
import subprocess
- proc = subprocess.Popen('git rev-parse --short HEAD',
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
+
+ proc = subprocess.Popen(
+ 'git rev-parse --short HEAD',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ )
repo_commit, _ = proc.communicate()
# Fix for python 3
repo_commit = str(repo_commit)
@@ -65,11 +69,12 @@ def run(self):
cfg_parser.set('commit hash', 'install_hash', repo_commit)
out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
cfg_parser.write(open(out_pth, 'wt'))
+
return MyBuildPy
def _add_append_key(in_dict, key, value):
- """ Helper for appending dependencies to setuptools args """
+ """Helper for appending dependencies to setuptools args"""
# If in_dict[key] does not exist, create it
# If in_dict[key] is a string, make it len 1 list of strings
# Append value to in_dict[key] list
@@ -81,14 +86,16 @@ def _add_append_key(in_dict, key, value):
# Dependency checks
-def package_check(pkg_name, version=None,
- optional=False,
- checker=LooseVersion,
- version_getter=None,
- messages=None,
- setuptools_args=None
- ):
- """ Check if package `pkg_name` is present and has good enough version
+def package_check(
+ pkg_name,
+ version=None,
+ optional=False,
+ checker=LooseVersion,
+ version_getter=None,
+ messages=None,
+ setuptools_args=None,
+):
+ """Check if package `pkg_name` is present and has good enough version
Has two modes of operation. If `setuptools_args` is None (the default),
raise an error for missing non-optional dependencies and log warnings for
@@ -134,42 +141,35 @@ def package_check(pkg_name, version=None,
setuptools_mode = not setuptools_args is None
optional_tf = bool(optional)
if version_getter is None:
+
def version_getter(pkg_name):
mod = __import__(pkg_name)
return mod.__version__
+
if messages is None:
messages = {}
msgs = {
- 'missing': 'Cannot import package "%s" - is it installed?',
- 'missing opt': 'Missing optional package "%s"',
- 'opt suffix': '; you may get run-time errors',
- 'version too old': 'You have version %s of package "%s"'
- ' but we need version >= %s', }
+ 'missing': 'Cannot import package "%s" - is it installed?',
+ 'missing opt': 'Missing optional package "%s"',
+ 'opt suffix': '; you may get run-time errors',
+ 'version too old': 'You have version %s of package "%s" but we need version >= %s',
+ }
msgs.update(messages)
- status, have_version = _package_status(pkg_name,
- version,
- version_getter,
- checker)
+ status, have_version = _package_status(pkg_name, version, version_getter, checker)
if status == 'satisfied':
return
if not setuptools_mode:
if status == 'missing':
if not optional_tf:
raise RuntimeError(msgs['missing'] % pkg_name)
- log.warn(msgs['missing opt'] % pkg_name +
- msgs['opt suffix'])
+ log.warn(msgs['missing opt'] % pkg_name + msgs['opt suffix'])
return
elif status == 'no-version':
raise RuntimeError(f'Cannot find version for {pkg_name}')
assert status == 'low-version'
if not optional_tf:
- raise RuntimeError(msgs['version too old'] % (have_version,
- pkg_name,
- version))
- log.warn(msgs['version too old'] % (have_version,
- pkg_name,
- version)
- + msgs['opt suffix'])
+ raise RuntimeError(msgs['version too old'] % (have_version, pkg_name, version))
+ log.warn(msgs['version too old'] % (have_version, pkg_name, version) + msgs['opt suffix'])
return
# setuptools mode
if optional_tf and not isinstance(optional, str):
@@ -180,9 +180,7 @@ def version_getter(pkg_name):
if optional_tf:
if not 'extras_require' in setuptools_args:
setuptools_args['extras_require'] = {}
- _add_append_key(setuptools_args['extras_require'],
- optional,
- dependency)
+ _add_append_key(setuptools_args['extras_require'], optional, dependency)
else:
_add_append_key(setuptools_args, 'install_requires', dependency)
@@ -203,8 +201,7 @@ def _package_status(pkg_name, version, version_getter, checker):
return 'satisfied', have_version
-BAT_TEMPLATE = \
-r"""@echo off
+BAT_TEMPLATE = r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
@@ -217,8 +214,9 @@ def _package_status(pkg_name, version, version_getter, checker):
call "%py_exe%" %pyscript% %*
"""
+
class install_scripts_bat(install_scripts):
- """ Make scripts executable on Windows
+ """Make scripts executable on Windows
Scripts are bare file names without extension on Unix, fitting (for example)
Debian rules. They identify as python scripts with the usual ``#!`` first
@@ -234,25 +232,24 @@ class install_scripts_bat(install_scripts):
example at git://github.com/matthew-brett/myscripter.git for more
background.
"""
+
def run(self):
install_scripts.run(self)
- if not os.name == "nt":
+ if not os.name == 'nt':
return
for filepath in self.get_outputs():
# If we can find an executable name in the #! top line of the script
# file, make .bat wrapper for script.
with open(filepath, 'rt') as fobj:
first_line = fobj.readline()
- if not (first_line.startswith('#!') and
- 'python' in first_line.lower()):
- log.info("No #!python executable found, skipping .bat "
- "wrapper")
+ if not (first_line.startswith('#!') and 'python' in first_line.lower()):
+ log.info('No #!python executable found, skipping .bat wrapper')
continue
pth, fname = psplit(filepath)
froot, ext = splitext(fname)
bat_file = pjoin(pth, froot + '.bat')
bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)
- log.info(f"Making {bat_file} wrapper for {filepath}")
+ log.info(f'Making {bat_file} wrapper for {filepath}')
if self.dry_run:
continue
with open(bat_file, 'wt') as fobj:
@@ -268,7 +265,7 @@ def __init__(self, vars):
def read_vars_from(ver_file):
- """ Read variables from Python text file
+ """Read variables from Python text file
Parameters
----------
diff --git a/nisext/testers.py b/nisext/testers.py
index 05b2d92a3e..07f71af696 100644
--- a/nisext/testers.py
+++ b/nisext/testers.py
@@ -1,4 +1,4 @@
-""" Test package information in various install settings
+"""Test package information in various install settings
The routines here install the package from source directories, zips or eggs, and
check these installations by running tests, checking version information,
@@ -26,28 +26,29 @@
# Run tests from binary egg
bdist-egg-tests:
$(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel")'
-
"""
import os
-import sys
-from os.path import join as pjoin, abspath
-from glob import glob
+import re
import shutil
+import sys
import tempfile
import zipfile
-import re
-from subprocess import Popen, PIPE
+from glob import glob
+from os.path import abspath
+from os.path import join as pjoin
+from subprocess import PIPE, Popen
NEEDS_SHELL = os.name != 'nt'
-PYTHON=sys.executable
+PYTHON = sys.executable
HAVE_PUTENV = hasattr(os, 'putenv')
PY_LIB_SDIR = 'pylib'
+
def back_tick(cmd, ret_err=False, as_str=True):
- """ Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
+ """Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
@@ -94,7 +95,7 @@ def back_tick(cmd, ret_err=False, as_str=True):
def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True):
- """ Run command in own process in anonymous path
+ """Run command in own process in anonymous path
Parameters
----------
@@ -127,15 +128,16 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True):
# (via `cmd`). Consider that PYTHONPATH may not be set. Because the
# command might run scripts via the shell, prepend script_dir to the
# system path also.
- paths_add = \
-r"""
+ paths_add = r"""
os.environ['PATH'] = r'"{script_dir}"' + os.path.pathsep + os.environ['PATH']
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None:
os.environ['PYTHONPATH'] = r'"{pkg_path}"'
else:
os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH
-""".format(**locals())
+""".format(
+ **locals()
+ )
if print_location:
p_loc = f'print({mod_name}.__file__);'
else:
@@ -146,14 +148,17 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True):
os.chdir(tmpdir)
with open('script.py', 'wt') as fobj:
fobj.write(
-r"""
+ r"""
import os
import sys
sys.path.insert(0, r"{pkg_path}")
{paths_add}
import {mod_name}
{p_loc}
-{cmd}""".format(**locals()))
+{cmd}""".format(
+ **locals()
+ )
+ )
res = back_tick(f'{PYTHON} script.py', ret_err=True)
finally:
os.chdir(cwd)
@@ -162,7 +167,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True):
def zip_extract_all(fname, path=None):
- """ Extract all members from zipfile
+ """Extract all members from zipfile
Deals with situation where the directory is stored in the zipfile as a name,
as well as files that have to go into this directory.
@@ -176,7 +181,7 @@ def zip_extract_all(fname, path=None):
def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'):
- """ Install package in `from_dir` to standard location in `to_dir`
+ """Install package in `from_dir` to standard location in `to_dir`
Parameters
----------
@@ -191,8 +196,7 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'):
subdirectory within `to_dir` to which scripts will be installed
"""
site_pkgs_path = os.path.join(to_dir, py_lib_sdir)
- py_lib_locs = (f' --install-purelib={site_pkgs_path} '
- f'--install-platlib={site_pkgs_path}')
+ py_lib_locs = f' --install-purelib={site_pkgs_path} ' f'--install-platlib={site_pkgs_path}'
pwd = os.path.abspath(os.getcwd())
cmd = f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}'
try:
@@ -202,10 +206,10 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'):
os.chdir(pwd)
-def install_from_zip(zip_fname, install_path, pkg_finder=None,
- py_lib_sdir=PY_LIB_SDIR,
- script_sdir='bin'):
- """ Install package from zip file `zip_fname`
+def install_from_zip(
+ zip_fname, install_path, pkg_finder=None, py_lib_sdir=PY_LIB_SDIR, script_sdir='bin'
+):
+ """Install package from zip file `zip_fname`
Parameters
----------
@@ -240,7 +244,7 @@ def install_from_zip(zip_fname, install_path, pkg_finder=None,
def contexts_print_info(mod_name, repo_path, install_path):
- """ Print result of get_info from different installation routes
+ """Print result of get_info from different installation routes
Runs installation from:
@@ -280,7 +284,7 @@ def contexts_print_info(mod_name, repo_path, install_path):
def info_from_here(mod_name):
- """ Run info context checks starting in working directory
+ """Run info context checks starting in working directory
Runs checks from current working directory, installing temporary
installations into a new temporary directory
@@ -299,7 +303,7 @@ def info_from_here(mod_name):
def tests_installed(mod_name, source_path=None):
- """ Install from `source_path` into temporary directory; run tests
+ """Install from `source_path` into temporary directory; run tests
Parameters
----------
@@ -315,21 +319,19 @@ def tests_installed(mod_name, source_path=None):
scripts_path = pjoin(install_path, 'bin')
try:
install_from_to(source_path, install_path, PY_LIB_SDIR, 'bin')
- stdout, stderr = run_mod_cmd(mod_name,
- site_pkgs_path,
- mod_name + '.test()',
- scripts_path)
+ stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, mod_name + '.test()', scripts_path)
finally:
shutil.rmtree(install_path)
print(stdout)
print(stderr)
+
# Tell nose this is not a test
tests_installed.__test__ = False
def check_installed_files(repo_mod_path, install_mod_path):
- """ Check files in `repo_mod_path` are installed at `install_mod_path`
+ """Check files in `repo_mod_path` are installed at `install_mod_path`
At the moment, all this does is check that all the ``*.py`` files in
`repo_mod_path` are installed at `install_mod_path`.
@@ -348,11 +350,11 @@ def check_installed_files(repo_mod_path, install_mod_path):
list of files that should have been installed, but have not been
installed
"""
- return missing_from(repo_mod_path, install_mod_path, filter=r"\.py$")
+ return missing_from(repo_mod_path, install_mod_path, filter=r'\.py$')
def missing_from(path0, path1, filter=None):
- """ Return filenames present in `path0` but not in `path1`
+ """Return filenames present in `path0` but not in `path1`
Parameters
----------
@@ -386,8 +388,7 @@ def missing_from(path0, path1, filter=None):
def check_files(mod_name, repo_path=None, scripts_sdir='bin'):
- """ Print library and script files not picked up during install
- """
+ """Print library and script files not picked up during install"""
if repo_path is None:
repo_path = abspath(os.getcwd())
install_path = tempfile.mkdtemp()
@@ -396,67 +397,60 @@ def check_files(mod_name, repo_path=None, scripts_sdir='bin'):
repo_bin = pjoin(repo_path, 'bin')
installed_bin = pjoin(install_path, 'bin')
try:
- zip_fname = make_dist(repo_path,
- install_path,
- 'sdist --formats=zip',
- '*.zip')
+ zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip')
pf = get_sdist_finder(mod_name)
install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, scripts_sdir)
- lib_misses = missing_from(repo_mod_path, installed_mod_path, r"\.py$")
+ lib_misses = missing_from(repo_mod_path, installed_mod_path, r'\.py$')
script_misses = missing_from(repo_bin, installed_bin)
finally:
shutil.rmtree(install_path)
if lib_misses:
- print("Missed library files: ", ', '.join(lib_misses))
+ print('Missed library files: ', ', '.join(lib_misses))
else:
- print("You got all the library files")
+ print('You got all the library files')
if script_misses:
- print("Missed script files: ", ', '.join(script_misses))
+ print('Missed script files: ', ', '.join(script_misses))
else:
- print("You got all the script files")
+ print('You got all the script files')
return len(lib_misses) > 0 or len(script_misses) > 0
def get_sdist_finder(mod_name):
- """ Return function finding sdist source directory for `mod_name`
- """
+ """Return function finding sdist source directory for `mod_name`"""
+
def pf(pth):
pkg_dirs = glob(pjoin(pth, mod_name + '-*'))
if len(pkg_dirs) != 1:
raise OSError('There must be one and only one package dir')
return pkg_dirs[0]
+
return pf
def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True):
- """ Make sdist zip, install from it, and run tests """
+ """Make sdist zip, install from it, and run tests"""
if repo_path is None:
repo_path = abspath(os.getcwd())
install_path = tempfile.mkdtemp()
try:
- zip_fname = make_dist(repo_path,
- install_path,
- 'sdist --formats=zip',
- '*.zip')
+ zip_fname = make_dist(repo_path, install_path, 'sdist --formats=zip', '*.zip')
pf = get_sdist_finder(mod_name)
install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, 'bin')
site_pkgs_path = pjoin(install_path, PY_LIB_SDIR)
script_path = pjoin(install_path, 'bin')
cmd = f"{mod_name}.test(label='{label}', doctests={doctests})"
- stdout, stderr = run_mod_cmd(mod_name,
- site_pkgs_path,
- cmd,
- script_path)
+ stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, cmd, script_path)
finally:
shutil.rmtree(install_path)
print(stdout)
print(stderr)
+
sdist_tests.__test__ = False
def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True):
- """ Make bdist_egg, unzip it, and run tests from result
+ """Make bdist_egg, unzip it, and run tests from result
We've got a problem here, because the egg does not contain the scripts, and
so, if we are testing the scripts with ``mod.test()``, we won't pick up the
@@ -472,26 +466,21 @@ def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True):
install_path = tempfile.mkdtemp()
scripts_path = pjoin(install_path, 'bin')
try:
- zip_fname = make_dist(repo_path,
- install_path,
- 'bdist_egg',
- '*.egg')
+ zip_fname = make_dist(repo_path, install_path, 'bdist_egg', '*.egg')
zip_extract_all(zip_fname, install_path)
cmd = f"{mod_name}.test(label='{label}', doctests={doctests})"
- stdout, stderr = run_mod_cmd(mod_name,
- install_path,
- cmd,
- scripts_path)
+ stdout, stderr = run_mod_cmd(mod_name, install_path, cmd, scripts_path)
finally:
shutil.rmtree(install_path)
print(stdout)
print(stderr)
+
bdist_egg_tests.__test__ = False
def make_dist(repo_path, out_dir, setup_params, zipglob):
- """ Create distutils distribution file
+ """Create distutils distribution file
Parameters
----------
@@ -525,8 +514,10 @@ def make_dist(repo_path, out_dir, setup_params, zipglob):
back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}')
zips = glob(pjoin(out_dir, zipglob))
if len(zips) != 1:
- raise OSError(f"There must be one and only one {zipglob} "
- f"file, but I found \"{': '.join(zips)}\"")
+ raise OSError(
+ f'There must be one and only one {zipglob} '
+ f"file, but I found \"{': '.join(zips)}\""
+ )
finally:
os.chdir(pwd)
return zips[0]
diff --git a/nisext/tests/test_sexts.py b/nisext/tests/test_sexts.py
index 22d6ce7a9b..f262ec5685 100644
--- a/nisext/tests/test_sexts.py
+++ b/nisext/tests/test_sexts.py
@@ -1,13 +1,13 @@
-""" Tests for nisexts.sexts module
+"""Tests for nisexts.sexts module
"""
import sys
import types
-from ..sexts import package_check
-
import pytest
+from ..sexts import package_check
+
FAKE_NAME = 'nisext_improbable'
assert FAKE_NAME not in sys.modules
FAKE_MODULE = types.ModuleType('nisext_fake')
@@ -44,10 +44,12 @@ def test_package_check_setuptools():
# If setuptools arg not None, missing package just adds it to arg
with pytest.raises(RuntimeError):
package_check(FAKE_NAME, setuptools_args=None)
+
def pkg_chk_sta(*args, **kwargs):
st_args = {}
package_check(*args, setuptools_args=st_args, **kwargs)
return st_args
+
assert pkg_chk_sta(FAKE_NAME) == {'install_requires': ['nisext_improbable']}
# Check that this gets appended to existing value
old_sta = {'install_requires': ['something']}
@@ -58,7 +60,9 @@ def pkg_chk_sta(*args, **kwargs):
package_check(FAKE_NAME, setuptools_args=old_sta)
assert old_sta == {'install_requires': ['something', 'nisext_improbable']}
# Optional, add to extras_require
- assert pkg_chk_sta(FAKE_NAME, optional='something') == {'extras_require': {'something': ['nisext_improbable']}}
+ assert pkg_chk_sta(FAKE_NAME, optional='something') == {
+ 'extras_require': {'something': ['nisext_improbable']}
+ }
# Check that this gets appended to existing value
old_sta = {'extras_require': {'something': ['amodule']}}
package_check(FAKE_NAME, optional='something', setuptools_args=old_sta)
@@ -66,8 +70,7 @@ def pkg_chk_sta(*args, **kwargs):
# That string gets converted to a list here too
old_sta = {'extras_require': {'something': 'amodule'}}
package_check(FAKE_NAME, optional='something', setuptools_args=old_sta)
- assert old_sta == {'extras_require':
- {'something': ['amodule', 'nisext_improbable']}}
+ assert old_sta == {'extras_require': {'something': ['amodule', 'nisext_improbable']}}
# But optional has to be a string if not empty and setuptools_args defined
with pytest.raises(RuntimeError):
package_check(FAKE_NAME, optional=True, setuptools_args={})
@@ -84,21 +87,20 @@ def pkg_chk_sta(*args, **kwargs):
assert pkg_chk_sta(FAKE_NAME, version='0.3') == {'install_requires': exp_spec}
# Unless optional in which case goes into extras_require
package_check(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2')
- assert pkg_chk_sta(FAKE_NAME, version='0.3', optional='afeature') == {'extras_require': {'afeature': exp_spec}}
+ assert pkg_chk_sta(FAKE_NAME, version='0.3', optional='afeature') == {
+ 'extras_require': {'afeature': exp_spec}
+ }
# Might do custom version check
- assert pkg_chk_sta(FAKE_NAME,
- version='0.2',
- version_getter=lambda x: '0.2') == {}
+ assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=lambda x: '0.2') == {}
# If the version check fails, put into requires
bad_getter = lambda x: x.not_an_attribute
exp_spec = [FAKE_NAME + '>=0.2']
- assert pkg_chk_sta(FAKE_NAME,
- version='0.2',
- version_getter=bad_getter) == {'install_requires': exp_spec}
+ assert pkg_chk_sta(FAKE_NAME, version='0.2', version_getter=bad_getter) == {
+ 'install_requires': exp_spec
+ }
# Likewise for optional dependency
- assert pkg_chk_sta(FAKE_NAME,
- version='0.2',
- optional='afeature',
- version_getter=bad_getter) == {'extras_require': {'afeature': [FAKE_NAME + '>=0.2']}}
+ assert pkg_chk_sta(
+ FAKE_NAME, version='0.2', optional='afeature', version_getter=bad_getter
+ ) == {'extras_require': {'afeature': [FAKE_NAME + '>=0.2']}}
finally:
del sys.modules[FAKE_NAME]
diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py
index c2e9e021f4..f81a40f1df 100644
--- a/nisext/tests/test_testers.py
+++ b/nisext/tests/test_testers.py
@@ -1,19 +1,19 @@
-""" Tests for testers
+"""Tests for testers
"""
import os
from os.path import dirname, pathsep
-from ..testers import back_tick, run_mod_cmd, PYTHON
-
import pytest
+from ..testers import PYTHON, back_tick, run_mod_cmd
+
def test_back_tick():
cmd = f'{PYTHON} -c "print(\'Hello\')"'
- assert back_tick(cmd) == "Hello"
- assert back_tick(cmd, ret_err=True) == ("Hello", "")
- assert back_tick(cmd, True, False) == (b"Hello", b"")
+ assert back_tick(cmd) == 'Hello'
+ assert back_tick(cmd, ret_err=True) == ('Hello', '')
+ assert back_tick(cmd, True, False) == (b'Hello', b'')
cmd = f'{PYTHON} -c "raise ValueError()"'
with pytest.raises(RuntimeError):
back_tick(cmd)
@@ -22,7 +22,7 @@ def test_back_tick():
def test_run_mod_cmd():
mod = 'os'
mod_dir = dirname(os.__file__)
- assert run_mod_cmd(mod, mod_dir, "print('Hello')", None, False) == ("Hello", "")
+ assert run_mod_cmd(mod, mod_dir, "print('Hello')", None, False) == ('Hello', '')
sout, serr = run_mod_cmd(mod, mod_dir, "print('Hello again')")
assert serr == ''
mod_file, out_str = [s.strip() for s in sout.split('\n')]
diff --git a/pyproject.toml b/pyproject.toml
index 3e1ba04449..f98e03119f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -84,14 +84,15 @@ nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"]
[tool.blue]
line_length = 99
target-version = ["py37"]
-extend-exclude = """
+force-exclude = """
(
_version.py
| nibabel/externals/
+ | versioneer.py
)
"""
[tool.isort]
profile = "black"
line_length = 99
-extend_skip = ["_version.py", "externals"]
+extend_skip = ["_version.py", "externals", "versioneer.py"]
diff --git a/setup.py b/setup.py
index 4b9bde35b0..2a2f8d8e21 100755
--- a/setup.py
+++ b/setup.py
@@ -21,10 +21,10 @@
pip install --upgrade build
python -m build
-
"""
from setuptools import setup
+
import versioneer
setup(
diff --git a/tools/bisect_nose.py b/tools/bisect_nose.py
index 3f9092564b..7036e0b9cc 100755
--- a/tools/bisect_nose.py
+++ b/tools/bisect_nose.py
@@ -1,9 +1,8 @@
#!/usr/bin/env python
-""" Utility for git-bisecting nose failures
+"""Utility for git-bisecting nose failures
"""
DESCRIP = 'Check nose output for given text, set sys exit for git bisect'
-EPILOG = \
-"""
+EPILOG = """
Imagine you've just detected a nose test failure. The failure is in a
particular test or test module - here 'test_analyze.py'. The failure *is* in
git branch ``main-master`` but it *is not* in tag ``v1.6.1``. Then you can
@@ -37,14 +36,13 @@
on the python path.
"""
import os
-import sys
+import re
import shutil
+import sys
import tempfile
-import re
-from functools import partial
-from subprocess import check_call, Popen, PIPE, CalledProcessError
-
from argparse import ArgumentParser, RawDescriptionHelpFormatter
+from functools import partial
+from subprocess import PIPE, CalledProcessError, Popen, check_call
caller = partial(check_call, shell=True)
popener = partial(Popen, stdout=PIPE, stderr=PIPE, shell=True)
@@ -63,31 +61,27 @@ def call_or_untestable(cmd):
def main():
- parser = ArgumentParser(description=DESCRIP,
- epilog=EPILOG,
- formatter_class=RawDescriptionHelpFormatter)
- parser.add_argument('test_path', type=str,
- help='Path to test')
- parser.add_argument('--error-txt', type=str,
- help='regular expression for error of interest')
- parser.add_argument('--clean', action='store_true',
- help='Clean git tree before running tests')
- parser.add_argument('--build', action='store_true',
- help='Build git tree before running tests')
+ parser = ArgumentParser(
+ description=DESCRIP, epilog=EPILOG, formatter_class=RawDescriptionHelpFormatter
+ )
+ parser.add_argument('test_path', type=str, help='Path to test')
+ parser.add_argument('--error-txt', type=str, help='regular expression for error of interest')
+ parser.add_argument('--clean', action='store_true', help='Clean git tree before running tests')
+ parser.add_argument('--build', action='store_true', help='Build git tree before running tests')
# parse the command line
args = parser.parse_args()
path = os.path.abspath(args.test_path)
if args.clean:
- print("Cleaning")
+ print('Cleaning')
call_or_untestable('git clean -fxd')
if args.build:
- print("Building")
+ print('Building')
call_or_untestable('python setup.py build_ext -i')
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
- print("Testing")
+ print('Testing')
proc = popener('nosetests ' + path)
stdout, stderr = proc.communicate()
finally:
diff --git a/tools/ci/activate.sh b/tools/ci/activate.sh
index ebef3b650b..567e13a67b 100644
--- a/tools/ci/activate.sh
+++ b/tools/ci/activate.sh
@@ -7,4 +7,3 @@ else
ls -R virtenv
false
fi
-
diff --git a/tools/dicomfs.wsgi b/tools/dicomfs.wsgi
index a5f9d30984..bd2480a647 100644
--- a/tools/dicomfs.wsgi
+++ b/tools/dicomfs.wsgi
@@ -8,11 +8,11 @@
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# Copyright (C) 2011 Christian Haselgrove
+import cgi
import sys
import traceback
-from functools import partial
import urllib
-import cgi
+from functools import partial
import jinja2
@@ -24,7 +24,7 @@ BASE_DIR = None
# default setting for whether to follow symlinks in BASE_DIR. Python 2.5 only
# accepts False for this setting, Python >= 2.6 accepts True or False
-FOLLOWLINKS=False
+FOLLOWLINKS = False
# Define routine to get studies
studies_getter = partial(dft.get_studies, followlinks=FOLLOWLINKS)
@@ -118,17 +118,18 @@ Study comments: {{ study.comments }}