Update lint.yml #2

GitHub Actions / Black failed Nov 6, 2024 in 0s

Black found 23 errors
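All 23 findings below are formatting-only diffs from Black (quote normalisation, line wrapping, operator and slice spacing); no logic changes. Running Black locally over the flagged files, e.g. `black scripts/`, should reproduce these hunks and clear the check.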

Annotations

Check failure on line 48 in /home/runner/work/linc-convert/linc-convert/scripts/utils.py


/home/runner/work/linc-convert/linc-convert/scripts/utils.py#L14-L48

     Returns
     -------
     orientation : str
         A three-letter permutation of {('R', 'L'), ('A', 'P'), ('S', 'I')}
     """
-    orientation = {
-        'coronal': 'LI',
-        'axial': 'LP',
-        'sagittal': 'PI',
-    }.get(orientation.lower(), orientation).upper()
+    orientation = (
+        {
+            "coronal": "LI",
+            "axial": "LP",
+            "sagittal": "PI",
+        }
+        .get(orientation.lower(), orientation)
+        .upper()
+    )
     if len(orientation) == 2:
-        if 'L' not in orientation and 'R' not in orientation:
-            orientation += 'R'
-        if 'P' not in orientation and 'A' not in orientation:
-            orientation += 'A'
-        if 'I' not in orientation and 'S' not in orientation:
-            orientation += 'S'
+        if "L" not in orientation and "R" not in orientation:
+            orientation += "R"
+        if "P" not in orientation and "A" not in orientation:
+            orientation += "A"
+        if "I" not in orientation and "S" not in orientation:
+            orientation += "S"
     return orientation
 
 
 def orientation_to_affine(orientation, vxw=1, vxh=1, vxd=1):
     orientation = orientation_ensure_3d(orientation)
     affine = np.zeros([4, 4])
     vx = np.asarray([vxw, vxh, vxd])
     for i in range(3):
         letter = orientation[i]
-        sign = -1 if letter in 'LPI' else 1
-        letter = {'L': 'R', 'P': 'A', 'I': 'S'}.get(letter, letter)
-        index = list('RAS').index(letter)
+        sign = -1 if letter in "LPI" else 1
+        letter = {"L": "R", "P": "A", "I": "S"}.get(letter, letter)
+        index = list("RAS").index(letter)
         affine[index, i] = sign * vx[i]
     return affine
 
 
 def center_affine(affine, shape):
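The hunk above only re-quotes and re-wraps the orientation helpers; their behaviour is unchanged. As a sketch of what they compute (assuming only numpy, and that `orientation_ensure_3d` expands the "coronal" shorthand to "LIA", as the mapping plus the fill-in rules imply):

import numpy as np

def orientation_to_affine(orientation, vxw=1, vxh=1, vxd=1):
    # same logic as the hunk above, minus the ensure-3d step
    affine = np.zeros([4, 4])
    vx = np.asarray([vxw, vxh, vxd])
    for i in range(3):
        letter = orientation[i]
        sign = -1 if letter in "LPI" else 1
        letter = {"L": "R", "P": "A", "I": "S"}.get(letter, letter)
        affine[list("RAS").index(letter), i] = sign * vx[i]
    return affine

print(orientation_to_affine("LIA", 0.01, 0.01, 0.01))
# [[-0.01  0.    0.    0.  ]   L flips the sign on the R axis,
#  [ 0.    0.    0.01  0.  ]   A keeps it positive on the A axis,
#  [ 0.   -0.01  0.    0.  ]   I flips it on the S axis; the translation
#  [ 0.    0.    0.    0.  ]]  is presumably filled in by center_affine.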

Check failure on line 282 in /home/runner/work/linc-convert/linc-convert/scripts/utils.py


/home/runner/work/linc-convert/linc-convert/scripts/utils.py#L62-L282

 
 def make_compressor(name, **prm):
     if not isinstance(name, str):
         return name
     name = name.lower()
-    if name == 'blosc':
+    if name == "blosc":
         Compressor = numcodecs.Blosc
-    elif name == 'zlib':
+    elif name == "zlib":
         Compressor = numcodecs.Zlib
     else:
-        raise ValueError('Unknown compressor', name)
+        raise ValueError("Unknown compressor", name)
     return Compressor(**prm)
 
 
 ome_valid_units = {
-    'space': [
-        'angstrom',
-        'attometer',
-        'centimeter',
-        'decimeter',
-        'exameter',
-        'femtometer',
-        'foot',
-        'gigameter',
-        'hectometer',
-        'inch',
-        'kilometer',
-        'megameter',
-        'meter',
-        'micrometer',
-        'mile',
-        'millimeter',
-        'nanometer',
-        'parsec',
-        'petameter',
-        'picometer',
-        'terameter',
-        'yard',
-        'yoctometer',
-        'yottameter',
-        'zeptometer',
-        'zettameter',
+    "space": [
+        "angstrom",
+        "attometer",
+        "centimeter",
+        "decimeter",
+        "exameter",
+        "femtometer",
+        "foot",
+        "gigameter",
+        "hectometer",
+        "inch",
+        "kilometer",
+        "megameter",
+        "meter",
+        "micrometer",
+        "mile",
+        "millimeter",
+        "nanometer",
+        "parsec",
+        "petameter",
+        "picometer",
+        "terameter",
+        "yard",
+        "yoctometer",
+        "yottameter",
+        "zeptometer",
+        "zettameter",
     ],
-    'time': [
-        'attosecond',
-        'centisecond',
-        'day',
-        'decisecond',
-        'exasecond',
-        'femtosecond',
-        'gigasecond',
-        'hectosecond',
-        'hour',
-        'kilosecond',
-        'megasecond',
-        'microsecond',
-        'millisecond',
-        'minute',
-        'nanosecond',
-        'petasecond',
-        'picosecond',
-        'second',
-        'terasecond',
-        'yoctosecond',
-        'yottasecond',
-        'zeptosecond',
-        'zettasecond',
-    ]
+    "time": [
+        "attosecond",
+        "centisecond",
+        "day",
+        "decisecond",
+        "exasecond",
+        "femtosecond",
+        "gigasecond",
+        "hectosecond",
+        "hour",
+        "kilosecond",
+        "megasecond",
+        "microsecond",
+        "millisecond",
+        "minute",
+        "nanosecond",
+        "petasecond",
+        "picosecond",
+        "second",
+        "terasecond",
+        "yoctosecond",
+        "yottasecond",
+        "zeptosecond",
+        "zettasecond",
+    ],
 }
 
 nifti_valid_units = [
-    'unknown',
-    'meter',
-    'mm',
-    'micron',
-    'sec',
-    'msec',
-    'usec',
-    'hz',
-    'ppm',
-    'rads',
+    "unknown",
+    "meter",
+    "mm",
+    "micron",
+    "sec",
+    "msec",
+    "usec",
+    "hz",
+    "ppm",
+    "rads",
 ]
 
 si_prefix_short2long = {
-    'Q': 'quetta',
-    'R': 'ronna',
-    'Y': 'yotta',
-    'Z': 'zetta',
-    'E': 'exa',
-    'P': 'peta',
-    'T': 'tera',
-    'G': 'giga',
-    'M': 'mega',
-    'K': 'kilo',
-    'k': 'kilo',
-    'H': 'hecto',
-    'h': 'hecto',
-    'D': 'deca',
-    'da': 'deca',
-    'd': 'deci',
-    'c': 'centi',
-    'm': 'milli',
-    'u': 'micro',
-    'μ': 'micro',
-    'n': 'nano',
-    'p': 'pico',
-    'f': 'femto',
-    'a': 'atto',
-    'z': 'zepto',
-    'y': 'yocto',
-    'r': 'ronto',
-    'q': 'quecto',
-}
-
-si_prefix_long2short = {
-    long: short
-    for short, long in si_prefix_short2long.items()
-}
+    "Q": "quetta",
+    "R": "ronna",
+    "Y": "yotta",
+    "Z": "zetta",
+    "E": "exa",
+    "P": "peta",
+    "T": "tera",
+    "G": "giga",
+    "M": "mega",
+    "K": "kilo",
+    "k": "kilo",
+    "H": "hecto",
+    "h": "hecto",
+    "D": "deca",
+    "da": "deca",
+    "d": "deci",
+    "c": "centi",
+    "m": "milli",
+    "u": "micro",
+    "μ": "micro",
+    "n": "nano",
+    "p": "pico",
+    "f": "femto",
+    "a": "atto",
+    "z": "zepto",
+    "y": "yocto",
+    "r": "ronto",
+    "q": "quecto",
+}
+
+si_prefix_long2short = {long: short for short, long in si_prefix_short2long.items()}
 
 
 si_prefix_exponent = {
-    'Q': 30,
-    'R': 27,
-    'Y': 24,
-    'Z': 21,
-    'E': 18,
-    'P': 15,
-    'T': 12,
-    'G': 9,
-    'M': 6,
-    'K': 3,
-    'k': 3,
-    'H': 2,
-    'h': 2,
-    'D': 1,
-    'da': 1,
-    '': 0,
-    'd': -1,
-    'c': -2,
-    'm': -3,
-    'u': -6,
-    'μ': -6,
-    'n': -9,
-    'p': -12,
-    'f': -15,
-    'a': -18,
-    'z': -21,
-    'y': -24,
-    'r': -27,
-    'q': -30,
+    "Q": 30,
+    "R": 27,
+    "Y": 24,
+    "Z": 21,
+    "E": 18,
+    "P": 15,
+    "T": 12,
+    "G": 9,
+    "M": 6,
+    "K": 3,
+    "k": 3,
+    "H": 2,
+    "h": 2,
+    "D": 1,
+    "da": 1,
+    "": 0,
+    "d": -1,
+    "c": -2,
+    "m": -3,
+    "u": -6,
+    "μ": -6,
+    "n": -9,
+    "p": -12,
+    "f": -15,
+    "a": -18,
+    "z": -21,
+    "y": -24,
+    "r": -27,
+    "q": -30,
 }
 
 
 unit_space_short2long = {
-    short + 'm': long + 'meter'
-    for short, long in si_prefix_short2long.items()
-}
-unit_space_short2long.update({
-    'm': 'meter',
-    'mi': 'mile',
-    'yd': 'yard',
-    'ft': 'foot',
-    'in': 'inch',
-    "'": 'foot',
-    '"': 'inch',
-    'Å': 'angstrom',
-    'pc': 'parsec',
-})
-unit_space_long2short = {
-    long: short
-    for short, long in unit_space_short2long.items()
-}
-unit_space_long2short['micron'] = 'u'
+    short + "m": long + "meter" for short, long in si_prefix_short2long.items()
+}
+unit_space_short2long.update(
+    {
+        "m": "meter",
+        "mi": "mile",
+        "yd": "yard",
+        "ft": "foot",
+        "in": "inch",
+        "'": "foot",
+        '"': "inch",
+        "Å": "angstrom",
+        "pc": "parsec",
+    }
+)
+unit_space_long2short = {long: short for short, long in unit_space_short2long.items()}
+unit_space_long2short["micron"] = "u"
 
 unit_time_short2long = {
-    short + 's': long + 'second'
-    for short, long in si_prefix_short2long.items()
-}
-unit_time_short2long.update({
-    'y': 'year',
-    'd': 'day',
-    'h': 'hour',
-    'm': 'minute',
-    's': 'second',
-})
-unit_time_long2short = {
-    long: short
-    for short, long in unit_time_short2long.items()
-}
+    short + "s": long + "second" for short, long in si_prefix_short2long.items()
+}
+unit_time_short2long.update(
+    {
+        "y": "year",
+        "d": "day",
+        "h": "hour",
+        "m": "minute",
+        "s": "second",
+    }
+)
+unit_time_long2short = {long: short for short, long in unit_time_short2long.items()}
 
 unit_space_scale = {
-    prefix + 'm': 10**exponent
-    for prefix, exponent in si_prefix_exponent.items()
-}
-unit_space_scale.update({
-    'mi': 1609.344,
-    'yd': 0.9144,
-    'ft': 0.3048,
-    "'": 0.3048,
-    'in': 25.4E-3,
-    '"': 25.4E-3,
-    'Å': 1E-10,
-    'pc': 3.0857E16,
-})
+    prefix + "m": 10**exponent for prefix, exponent in si_prefix_exponent.items()
+}
+unit_space_scale.update(
+    {
+        "mi": 1609.344,
+        "yd": 0.9144,
+        "ft": 0.3048,
+        "'": 0.3048,
+        "in": 25.4e-3,
+        '"': 25.4e-3,
+        "Å": 1e-10,
+        "pc": 3.0857e16,
+    }
+)
 
 unit_time_scale = {
-    prefix + 's': 10**exponent
-    for prefix, exponent in si_prefix_exponent.items()
-}
-unit_time_scale.update({
-    'y': 365.25*24*60*60,
-    'd': 24*60*60,
-    'h': 60*60,
-    'm': 60,
-})
+    prefix + "s": 10**exponent for prefix, exponent in si_prefix_exponent.items()
+}
+unit_time_scale.update(
+    {
+        "y": 365.25 * 24 * 60 * 60,
+        "d": 24 * 60 * 60,
+        "h": 60 * 60,
+        "m": 60,
+    }
+)
 
 
 def convert_unit(value, src, dst):
     src = unit_to_scale(src)
     dst = unit_to_scale(dst)
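Most of this hunk is quote normalisation over the SI-prefix and unit tables. As a sanity check of how those tables compose (excerpted here; `convert_unit`'s body is cut off in this log, so the ratio interpretation is an assumption):

si_prefix_exponent = {"m": -3, "u": -6}  # two entries from the table above
unit_space_scale = {p + "m": 10**e for p, e in si_prefix_exponent.items()}
ratio = unit_space_scale["mm"] / unit_space_scale["um"]
print(round(5 * ratio))  # 5000 -- five millimetres in micrometres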

Check failure on line 312 in /home/runner/work/linc-convert/linc-convert/scripts/utils.py


/home/runner/work/linc-convert/linc-convert/scripts/utils.py#L287-L312

         unit = unit_space_short2long[unit]
     elif unit in unit_time_short2long:
         unit = unit_time_short2long[unit]
     elif unit in si_prefix_short2long:
         unit = si_prefix_short2long[unit]
-    if unit not in (*ome_valid_units['space'], *ome_valid_units['time']):
-        raise ValueError('Unknown unit')
+    if unit not in (*ome_valid_units["space"], *ome_valid_units["time"]):
+        raise ValueError("Unknown unit")
     return unit
 
 
 def to_nifti_unit(unit):
     unit = to_ome_unit(unit)
     return {
-        'meter': 'meter',
-        'millimeter': 'mm',
-        'micrometer': 'micron',
-        'second': 'sec',
-        'millisecond': 'msec',
-        'microsecond': 'usec',
-    }.get(unit, 'unknown')
+        "meter": "meter",
+        "millimeter": "mm",
+        "micrometer": "micron",
+        "second": "sec",
+        "millisecond": "msec",
+        "microsecond": "usec",
+    }.get(unit, "unknown")
 
 
 def unit_to_scale(unit):
     if unit in unit_space_long2short:
         unit = unit_space_long2short[unit]

Check failure on line 325 in /home/runner/work/linc-convert/linc-convert/scripts/utils.py


/home/runner/work/linc-convert/linc-convert/scripts/utils.py#L318-L325

     elif unit in unit_time_scale:
         unit = unit_time_scale[unit]
     elif unit in si_prefix_exponent:
         unit = 10 ** si_prefix_exponent[unit]
     if isinstance(unit, str):
-        raise ValueError('Unknown unit', unit)
+        raise ValueError("Unknown unit", unit)
     return unit

Check failure on line 20 in /home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py#L10-L20

     scipy
     zarr
     nibabel
     cyclopts
 """
+
 import ast
 import json
 import math
 import os
 import re

Check failure on line 41 in /home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py#L29-L41

 import numpy as np
 import zarr
 from scipy.io import loadmat
 
 from utils import (
-    ceildiv, make_compressor, convert_unit, to_ome_unit, to_nifti_unit,
-    orientation_to_affine, center_affine
+    ceildiv,
+    make_compressor,
+    convert_unit,
+    to_ome_unit,
+    to_nifti_unit,
+    orientation_to_affine,
+    center_affine,
 )
 
 app = cyclopts.App(help_format="markdown")
 
 

Check failure on line 78 in /home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py#L43-L78

 
     @wraps(func)
     def wrapper(inp, out=None, **kwargs):
         if out is None:
             out = os.path.splitext(inp[0])[0]
-            out += '.nii.zarr' if kwargs.get('nii', False) else '.ome.zarr'
-        kwargs['nii'] = kwargs.get('nii', False) or out.endswith('.nii.zarr')
-        with mapmat(inp, kwargs.get('key', None)) as dat:
+            out += ".nii.zarr" if kwargs.get("nii", False) else ".ome.zarr"
+        kwargs["nii"] = kwargs.get("nii", False) or out.endswith(".nii.zarr")
+        with mapmat(inp, kwargs.get("key", None)) as dat:
             return func(dat, out, **kwargs)
 
     return wrapper
 
 
 @app.default
 @automap
 def convert(
-        inp: List[str],
-        out: Optional[str] = None,
-        *,
-        key: Optional[str] = None,
-        meta: str = None,
-        chunk: int = 128,
-        compressor: str = 'blosc',
-        compressor_opt: str = "{}",
-        max_load: int = 128,
-        max_levels: int = 5,
-        no_pool: Optional[int] = None,
-        nii: bool = False,
-        orientation: str = 'RAS',
-        center: bool = True,
+    inp: List[str],
+    out: Optional[str] = None,
+    *,
+    key: Optional[str] = None,
+    meta: str = None,
+    chunk: int = 128,
+    compressor: str = "blosc",
+    compressor_opt: str = "{}",
+    max_load: int = 128,
+    max_levels: int = 5,
+    no_pool: Optional[int] = None,
+    nii: bool = False,
+    orientation: str = "RAS",
+    center: bool = True,
 ):
     """
     This command converts OCT volumes stored in raw matlab files
     into a pyramidal OME-ZARR (or NIfTI-Zarr) hierarchy.
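For reference, `convert` is registered as the cyclopts default command, so an invocation presumably looks like `python oct_mat_to_zarr.py vol.mat --chunk 128 --nii` (hypothetical file name; the flags mirror the keyword arguments above).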
 

Check failure on line 130 in /home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py#L108-L130

     if isinstance(compressor_opt, str):
         compressor_opt = ast.literal_eval(compressor_opt)
 
     # Write OME-Zarr multiscale metadata
     if meta:
-        print('Write JSON')
-        with open(meta, 'r') as f:
+        print("Write JSON")
+        with open(meta, "r") as f:
             meta_txt = f.read()
             meta_json = make_json(meta_txt)
-        path_json = '.'.join(out.split('.')[:-2]) + '.json'
-        with open(path_json, 'w') as f:
+        path_json = ".".join(out.split(".")[:-2]) + ".json"
+        with open(path_json, "w") as f:
             json.dump(meta_json, f, indent=4)
-        vx = meta_json['PixelSize']
-        unit = meta_json['PixelSizeUnits']
+        vx = meta_json["PixelSize"]
+        unit = meta_json["PixelSizeUnits"]
     else:
         vx = [1] * 3
-        unit = 'um'
+        unit = "um"
 
     # Prepare Zarr group
     omz = zarr.storage.DirectoryStore(out)
     omz = zarr.group(store=omz, overwrite=True)
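A worked example of the sidecar-JSON path derivation in this hunk: stripping the last two extensions makes it work for both `.ome.zarr` and `.nii.zarr` outputs.

out = "sub-01_sample.ome.zarr"  # hypothetical output name
path_json = ".".join(out.split(".")[:-2]) + ".json"
print(path_json)  # sub-01_sample.json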
 

Check failure on line 342 in /home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py#L131-L342

         raise Exception("Input is not a numpy array. This is likely unexpected")
     if len(inp.shape) < 3:
         raise Exception("Input array is not 3d")
     # Prepare chunking options
     opt = {
-        'dimension_separator': r'/',
-        'order': 'F',
-        'dtype': np.dtype(inp.dtype).str,
-        'fill_value': None,
-        'compressor': make_compressor(compressor, **compressor_opt),
+        "dimension_separator": r"/",
+        "order": "F",
+        "dtype": np.dtype(inp.dtype).str,
+        "fill_value": None,
+        "compressor": make_compressor(compressor, **compressor_opt),
     }
 
     inp_chunk = [min(x, max_load) for x in inp.shape]
     nk = ceildiv(inp.shape[0], inp_chunk[0])
     nj = ceildiv(inp.shape[1], inp_chunk[1])
     ni = ceildiv(inp.shape[2], inp_chunk[2])
 
-    nblevels = min([
-        int(math.ceil(math.log2(x)))
-        for i, x in enumerate(inp.shape)
-        if i != no_pool
-    ])
+    nblevels = min(
+        [int(math.ceil(math.log2(x))) for i, x in enumerate(inp.shape) if i != no_pool]
+    )
     nblevels = min(nblevels, int(math.ceil(math.log2(max_load))))
     nblevels = min(nblevels, max_levels)
 
     # create all arrays in the group
     shape_level = inp.shape
     for level in range(nblevels):
-        opt['chunks'] = [min(x, chunk) for x in shape_level]
+        opt["chunks"] = [min(x, chunk) for x in shape_level]
         omz.create_dataset(str(level), shape=shape_level, **opt)
-        shape_level = [
-            x if i == no_pool else x // 2
-            for i, x in enumerate(shape_level)
-        ]
+        shape_level = [x if i == no_pool else x // 2 for i, x in enumerate(shape_level)]
 
     # iterate across input chunks
     for i, j, k in product(range(ni), range(nj), range(nk)):
 
         level_chunk = inp_chunk
         loaded_chunk = inp[
-                       k * level_chunk[0]:(k + 1) * level_chunk[0],
-                       j * level_chunk[1]:(j + 1) * level_chunk[1],
-                       i * level_chunk[2]:(i + 1) * level_chunk[2],
-                       ]
+            k * level_chunk[0] : (k + 1) * level_chunk[0],
+            j * level_chunk[1] : (j + 1) * level_chunk[1],
+            i * level_chunk[2] : (i + 1) * level_chunk[2],
+        ]
 
         for level in range(nblevels):
             out_level = omz[str(level)]
 
-            print(f'[{i + 1:03d}, {j + 1:03d}, {k + 1:03d}]', '/',
-                  f'[{ni:03d}, {nj:03d}, {nk:03d}]',
-                  f'({1 + level}/{nblevels})', end='\r')
+            print(
+                f"[{i + 1:03d}, {j + 1:03d}, {k + 1:03d}]",
+                "/",
+                f"[{ni:03d}, {nj:03d}, {nk:03d}]",
+                f"({1 + level}/{nblevels})",
+                end="\r",
+            )
 
             # save current chunk
             out_level[
-            k * level_chunk[0]:k * level_chunk[0] + loaded_chunk.shape[0],
-            j * level_chunk[1]:j * level_chunk[1] + loaded_chunk.shape[1],
-            i * level_chunk[2]:i * level_chunk[2] + loaded_chunk.shape[2],
+                k * level_chunk[0] : k * level_chunk[0] + loaded_chunk.shape[0],
+                j * level_chunk[1] : j * level_chunk[1] + loaded_chunk.shape[1],
+                i * level_chunk[2] : i * level_chunk[2] + loaded_chunk.shape[2],
             ] = loaded_chunk
             # ensure divisible by 2
             loaded_chunk = loaded_chunk[
                 slice(2 * (loaded_chunk.shape[0] // 2) if 0 != no_pool else None),
                 slice(2 * (loaded_chunk.shape[1] // 2) if 1 != no_pool else None),
                 slice(2 * (loaded_chunk.shape[2] // 2) if 2 != no_pool else None),
             ]
             # mean pyramid (average each 2x2x2 patch)
             if no_pool == 0:
                 loaded_chunk = (
-                    loaded_chunk[:, 0::2, 0::2] +
-                    loaded_chunk[:, 0::2, 1::2] +
-                    loaded_chunk[:, 1::2, 0::2] +
-                    loaded_chunk[:, 1::2, 1::2]
+                    loaded_chunk[:, 0::2, 0::2]
+                    + loaded_chunk[:, 0::2, 1::2]
+                    + loaded_chunk[:, 1::2, 0::2]
+                    + loaded_chunk[:, 1::2, 1::2]
                 ) / 4
             elif no_pool == 1:
                 loaded_chunk = (
-                    loaded_chunk[0::2, :, 0::2] +
-                    loaded_chunk[0::2, :, 1::2] +
-                    loaded_chunk[1::2, :, 0::2] +
-                    loaded_chunk[1::2, :, 1::2]
+                    loaded_chunk[0::2, :, 0::2]
+                    + loaded_chunk[0::2, :, 1::2]
+                    + loaded_chunk[1::2, :, 0::2]
+                    + loaded_chunk[1::2, :, 1::2]
                 ) / 4
             elif no_pool == 2:
                 loaded_chunk = (
-                    loaded_chunk[0::2, 0::2, :] +
-                    loaded_chunk[0::2, 1::2, :] +
-                    loaded_chunk[1::2, 0::2, :] +
-                    loaded_chunk[1::2, 1::2, :]
+                    loaded_chunk[0::2, 0::2, :]
+                    + loaded_chunk[0::2, 1::2, :]
+                    + loaded_chunk[1::2, 0::2, :]
+                    + loaded_chunk[1::2, 1::2, :]
                 ) / 4
             else:
                 loaded_chunk = (
-                    loaded_chunk[0::2, 0::2, 0::2] +
-                    loaded_chunk[0::2, 0::2, 1::2] +
-                    loaded_chunk[0::2, 1::2, 0::2] +
-                    loaded_chunk[0::2, 1::2, 1::2] +
-                    loaded_chunk[1::2, 0::2, 0::2] +
-                    loaded_chunk[1::2, 0::2, 1::2] +
-                    loaded_chunk[1::2, 1::2, 0::2] +
-                    loaded_chunk[1::2, 1::2, 1::2]
+                    loaded_chunk[0::2, 0::2, 0::2]
+                    + loaded_chunk[0::2, 0::2, 1::2]
+                    + loaded_chunk[0::2, 1::2, 0::2]
+                    + loaded_chunk[0::2, 1::2, 1::2]
+                    + loaded_chunk[1::2, 0::2, 0::2]
+                    + loaded_chunk[1::2, 0::2, 1::2]
+                    + loaded_chunk[1::2, 1::2, 0::2]
+                    + loaded_chunk[1::2, 1::2, 1::2]
                 ) / 8
             level_chunk = [
-                x if i == no_pool else x // 2
-                for i, x in enumerate(level_chunk)
+                x if i == no_pool else x // 2 for i, x in enumerate(level_chunk)
             ]
-    print('')
+    print("")
 
     # Write OME-Zarr multiscale metadata
-    print('Write metadata')
+    print("Write metadata")
     print(unit)
     ome_unit = to_ome_unit(unit)
-    multiscales = [{
-        'version': '0.4',
-        'axes': [
-            {"name": "z", "type": "space", "unit": ome_unit},
-            {"name": "y", "type": "space", "unit": ome_unit},
-            {"name": "x", "type": "space", "unit": ome_unit}
-        ],
-        'datasets': [],
-        'type': ('2x2x2' if no_pool is None else '2x2') + 'mean window',
-        'name': '',
-    }]
+    multiscales = [
+        {
+            "version": "0.4",
+            "axes": [
+                {"name": "z", "type": "space", "unit": ome_unit},
+                {"name": "y", "type": "space", "unit": ome_unit},
+                {"name": "x", "type": "space", "unit": ome_unit},
+            ],
+            "datasets": [],
+            "type": ("2x2x2" if no_pool is None else "2x2") + "mean window",
+            "name": "",
+        }
+    ]
 
     for n in range(nblevels):
-        multiscales[0]['datasets'].append({})
-        level = multiscales[0]['datasets'][-1]
+        multiscales[0]["datasets"].append({})
+        level = multiscales[0]["datasets"][-1]
         level["path"] = str(n)
 
         # With a moving window, the scaling factor is exactly 2, and
         # the edges of the top-left voxel are aligned
         level["coordinateTransformations"] = [
             {
                 "type": "scale",
                 "scale": [
-                    (1 if no_pool == 0 else 2 ** n) * vx[0],
-                    (1 if no_pool == 1 else 2 ** n) * vx[1],
-                    (1 if no_pool == 2 else 2 ** n) * vx[2],
-                ]
+                    (1 if no_pool == 0 else 2**n) * vx[0],
+                    (1 if no_pool == 1 else 2**n) * vx[1],
+                    (1 if no_pool == 2 else 2**n) * vx[2],
+                ],
             },
             {
                 "type": "translation",
                 "translation": [
-                    (0 if no_pool == 0 else (2 ** n - 1)) * vx[0] * 0.5,
-                    (0 if no_pool == 1 else (2 ** n - 1)) * vx[1] * 0.5,
-                    (0 if no_pool == 2 else (2 ** n - 1)) * vx[2] * 0.5,
-                ]
-            }
+                    (0 if no_pool == 0 else (2**n - 1)) * vx[0] * 0.5,
+                    (0 if no_pool == 1 else (2**n - 1)) * vx[1] * 0.5,
+                    (0 if no_pool == 2 else (2**n - 1)) * vx[2] * 0.5,
+                ],
+            },
         ]
     multiscales[0]["coordinateTransformations"] = [
-        {
-            "scale": [1.0] * 3,
-            "type": "scale"
-        }
+        {"scale": [1.0] * 3, "type": "scale"}
     ]
     omz.attrs["multiscales"] = multiscales
 
     if not nii:
-        print('done.')
+        print("done.")
         return
 
     # Write NIfTI-Zarr header
     # NOTE: we use nifti2 because dimensions typically do not fit in a short
     # TODO: we do not write the json zattrs, but it should be added in
     #       once the nifti-zarr package is released
-    shape = list(reversed(omz['0'].shape))
+    shape = list(reversed(omz["0"].shape))
     affine = orientation_to_affine(orientation, *vx[::-1])
     if center:
         affine = center_affine(affine, shape[:3])
     header = nib.Nifti2Header()
     header.set_data_shape(shape)
-    header.set_data_dtype(omz['0'].dtype)
+    header.set_data_dtype(omz["0"].dtype)
     header.set_qform(affine)
     header.set_sform(affine)
     header.set_xyzt_units(nib.nifti1.unit_codes.code[to_nifti_unit(unit)])
-    header.structarr['magic'] = b'nz2\0'
-    header = np.frombuffer(header.structarr.tobytes(), dtype='u1')
+    header.structarr["magic"] = b"nz2\0"
+    header = np.frombuffer(header.structarr.tobytes(), dtype="u1")
     opt = {
-        'chunks': [len(header)],
-        'dimension_separator': r'/',
-        'order': 'F',
-        'dtype': '|u1',
-        'fill_value': None,
-        'compressor': None,
+        "chunks": [len(header)],
+        "dimension_separator": r"/",
+        "order": "F",
+        "dtype": "|u1",
+        "fill_value": None,
+        "compressor": None,
     }
-    omz.create_dataset('nifti', data=header, shape=shape, **opt)
-    print('done.')
+    omz.create_dataset("nifti", data=header, shape=shape, **opt)
+    print("done.")
 
 
 @contextmanager
 def mapmat(fnames, key=None):
     """Load or memory-map an array stored in a .mat file"""
     loaded_data = []
 
     for fname in fnames:
         try:
             # "New" .mat file
-            f = h5py.File(fname, 'r')
+            f = h5py.File(fname, "r")
         except Exception:
             # "Old" .mat file
             f = loadmat(fname)
         if key is None:
             if len(f.keys()) > 1:
-                warn(f'More than one key in .mat file {fname}, arbitrarily loading "{f.keys[0]}"')
+                warn(
+                    f'More than one key in .mat file {fname}, arbitrarily loading "{f.keys[0]}"'
+                )
             key = f.keys()[0]
         if key not in f.keys():
             raise Exception(f"Key {key} not found in file {fname}")
 
         if len(fnames) == 1:
             yield f.get(key)
-            if hasattr(f, 'close'):
+            if hasattr(f, "close"):
                 f.close()
             break
         loaded_data.append(f.get(key))
 
     yield np.stack(loaded_data, axis=-1)
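The pyramid loop in this hunk averages each 2x2x2 patch with eight strided slices. A minimal numpy sketch of that step, assuming a toy volume with even sides:

import numpy as np

vol = np.arange(64, dtype=float).reshape(4, 4, 4)
down = (
    vol[0::2, 0::2, 0::2] + vol[0::2, 0::2, 1::2]
    + vol[0::2, 1::2, 0::2] + vol[0::2, 1::2, 1::2]
    + vol[1::2, 0::2, 0::2] + vol[1::2, 0::2, 1::2]
    + vol[1::2, 1::2, 0::2] + vol[1::2, 1::2, 1::2]
) / 8
print(down.shape)  # (2, 2, 2): each output voxel is the mean of one patch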

Check failure on line 437 in /home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/oct_mat_to_zarr.py#L359-L437

     Slice #:23
     Modality: dBI
     """
 
     def parse_value_unit(string, n=None):
-        number = r'-?(\d+\.?\d*|\d*\.?\d+)(E-?\d+)?'
-        value = 'x'.join([number] * (n or 1))
-        match = re.fullmatch(r'(?P<value>' + value + r')(?P<unit>\w*)', string)
-        value, unit = match.group('value'), match.group('unit')
-        value = list(map(float, value.split('x')))
+        number = r"-?(\d+\.?\d*|\d*\.?\d+)(E-?\d+)?"
+        value = "x".join([number] * (n or 1))
+        match = re.fullmatch(r"(?P<value>" + value + r")(?P<unit>\w*)", string)
+        value, unit = match.group("value"), match.group("unit")
+        value = list(map(float, value.split("x")))
         if n is None:
             value = value[0]
         return value, unit
 
     meta = {
-        'BodyPart': 'BRAIN',
-        'Environment': 'exvivo',
-        'SampleStaining': 'none',
+        "BodyPart": "BRAIN",
+        "Environment": "exvivo",
+        "SampleStaining": "none",
     }
 
-    for line in oct_meta.split('\n'):
-        if ':' not in line:
+    for line in oct_meta.split("\n"):
+        if ":" not in line:
             continue
 
-        key, value = line.split(':')
+        key, value = line.split(":")
         key, value = key.strip(), value.strip()
 
-        if key == 'Image medium':
+        if key == "Image medium":
             parts = value.split()
-            if 'TDE' in parts:
-                parts[parts.index('TDE')] = "2,2' Thiodiethanol (TDE)"
-            meta['SampleMedium'] = ' '.join(parts)
-
-        elif key == 'Center Wavelength':
-            value, unit = parse_value_unit(value)
-            meta['Wavelength'] = value
-            meta['WavelengthUnit'] = unit
-
-        elif key == 'Axial resolution':
-            value, unit = parse_value_unit(value)
-            meta['ResolutionAxial'] = value
-            meta['ResolutionAxialUnit'] = unit
-
-        elif key == 'Lateral resolution':
-            value, unit = parse_value_unit(value)
-            meta['ResolutionLateral'] = value
-            meta['ResolutionLateralUnit'] = unit
-
-        elif key == 'Voxel size':
+            if "TDE" in parts:
+                parts[parts.index("TDE")] = "2,2' Thiodiethanol (TDE)"
+            meta["SampleMedium"] = " ".join(parts)
+
+        elif key == "Center Wavelength":
+            value, unit = parse_value_unit(value)
+            meta["Wavelength"] = value
+            meta["WavelengthUnit"] = unit
+
+        elif key == "Axial resolution":
+            value, unit = parse_value_unit(value)
+            meta["ResolutionAxial"] = value
+            meta["ResolutionAxialUnit"] = unit
+
+        elif key == "Lateral resolution":
+            value, unit = parse_value_unit(value)
+            meta["ResolutionLateral"] = value
+            meta["ResolutionLateralUnit"] = unit
+
+        elif key == "Voxel size":
             value, unit = parse_value_unit(value, n=3)
-            meta['PixelSize'] = value
-            meta['PixelSizeUnits'] = unit
-
-        elif key == 'Depth focus range':
-            value, unit = parse_value_unit(value)
-            meta['DepthFocusRange'] = value
-            meta['DepthFocusRangeUnit'] = unit
-
-        elif key == 'Number of focuses':
-            value, unit = parse_value_unit(value)
-            meta['FocusCount'] = int(value)
-
-        elif key == 'Slice thickness':
-            value, unit = parse_value_unit(value)
-            unit = convert_unit(value, unit[:-1], 'u')
-            meta['SliceThickness'] = value
-
-        elif key == 'Number of slices':
-            value, unit = parse_value_unit(value)
-            meta['SliceCount'] = int(value)
-
-        elif key == 'Modality':
-            meta['OCTModality'] = value
+            meta["PixelSize"] = value
+            meta["PixelSizeUnits"] = unit
+
+        elif key == "Depth focus range":
+            value, unit = parse_value_unit(value)
+            meta["DepthFocusRange"] = value
+            meta["DepthFocusRangeUnit"] = unit
+
+        elif key == "Number of focuses":
+            value, unit = parse_value_unit(value)
+            meta["FocusCount"] = int(value)
+
+        elif key == "Slice thickness":
+            value, unit = parse_value_unit(value)
+            unit = convert_unit(value, unit[:-1], "u")
+            meta["SliceThickness"] = value
+
+        elif key == "Number of slices":
+            value, unit = parse_value_unit(value)
+            meta["SliceCount"] = int(value)
+
+        elif key == "Modality":
+            meta["OCTModality"] = value
 
         else:
             continue
 
     return meta
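A quick demonstration of the `parse_value_unit` regex from this hunk on a typical "Voxel size" value (the input string is hypothetical):

import re

number = r"-?(\d+\.?\d*|\d*\.?\d+)(E-?\d+)?"
value = "x".join([number] * 3)
match = re.fullmatch(r"(?P<value>" + value + r")(?P<unit>\w*)", "0.01x0.01x0.01mm")
print(match.group("value"), match.group("unit"))  # 0.01x0.01x0.01 mm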

Check failure on line 36 in /home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py#L13-L36

     tifffile
     zarr
     nibabel
     cyclopts
 """
+
 import cyclopts
 import zarr
 import os
 import re
 import ast
 import numpy as np
 import nibabel as nib
 from tifffile import TiffFile
 from glob import glob
 from typing import Optional, List
+
 # local modules
-from utils import \
-    make_compressor, ceildiv, orientation_to_affine, center_affine
+from utils import make_compressor, ceildiv, orientation_to_affine, center_affine
 
 
 app = cyclopts.App(help_format="markdown")
 
 

Check failure on line 52 in /home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py#L37-L52

 def convert(
     inp: str,
     out: str = None,
     *,
     chunk: int = 128,
-    compressor: str = 'blosc',
+    compressor: str = "blosc",
     compressor_opt: str = "{}",
     max_load: int = 512,
     nii: bool = False,
-    orientation: str = 'coronal',
+    orientation: str = "coronal",
     center: bool = True,
     thickness: Optional[float] = None,
     voxel_size: List[float] = (1, 1, 1),
 ):
     """

Check failure on line 116 in /home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py#L99-L116

 
     if max_load % 2:
         max_load += 1
 
     CHUNK_PATTERN = re.compile(
-        r'^(?P<prefix>\w*)'
-        r'_z(?P<z>[0-9]+)'
-        r'_y(?P<y>[0-9]+)'
-        r'(?P<suffix>\w*)$'
+        r"^(?P<prefix>\w*)" r"_z(?P<z>[0-9]+)" r"_y(?P<y>[0-9]+)" r"(?P<suffix>\w*)$"
     )
 
-    all_chunks_dirnames = list(sorted(glob(os.path.join(inp, '*_z*_y*'))))
+    all_chunks_dirnames = list(sorted(glob(os.path.join(inp, "*_z*_y*"))))
     all_chunks_info = dict(
         dirname=[],
         prefix=[],
         suffix=[],
         z=[],
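The implicitly concatenated raw strings above form a single pattern. On a hypothetical chunk directory name it parses as:

import re

CHUNK_PATTERN = re.compile(
    r"^(?P<prefix>\w*)_z(?P<z>[0-9]+)_y(?P<y>[0-9]+)(?P<suffix>\w*)$"
)
m = CHUNK_PATTERN.fullmatch("brain_z01_y02_stain")
print(m.group("prefix"), m.group("z"), m.group("y"), m.group("suffix"))
# brain 01 02 _stain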

Check failure on line 200 in /home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py#L126-L200

     )
 
     # parse all directory names
     for dirname in all_chunks_dirnames:
         parsed = CHUNK_PATTERN.fullmatch(os.path.basename(dirname))
-        all_chunks_info['dirname'].append(dirname)
-        all_chunks_info['prefix'].append(parsed.group('prefix'))
-        all_chunks_info['suffix'].append(parsed.group('suffix'))
-        all_chunks_info['z'].append(int(parsed.group('z')))
-        all_chunks_info['y'].append(int(parsed.group('y')))
+        all_chunks_info["dirname"].append(dirname)
+        all_chunks_info["prefix"].append(parsed.group("prefix"))
+        all_chunks_info["suffix"].append(parsed.group("suffix"))
+        all_chunks_info["z"].append(int(parsed.group("z")))
+        all_chunks_info["y"].append(int(parsed.group("y")))
 
     # default output name
     if not out:
-        out = all_chunks_info['prefix'][0] + all_chunks_info['suffix'][0]
-        out += '.nii.zarr' if nii else '.ome.zarr'
-    nii = nii or out.endswith('.nii.zarr')
+        out = all_chunks_info["prefix"][0] + all_chunks_info["suffix"][0]
+        out += ".nii.zarr" if nii else ".ome.zarr"
+    nii = nii or out.endswith(".nii.zarr")
 
     # parse all individual file names
-    nchunkz = max(all_chunks_info['z'])
-    nchunky = max(all_chunks_info['y'])
+    nchunkz = max(all_chunks_info["z"])
+    nchunky = max(all_chunks_info["y"])
     allshapes = [[(0, 0, 0) for _ in range(nchunky)] for _ in range(nchunkz)]
     nchannels = 0
     dtype = None
     for zchunk in range(nchunkz):
         for ychunk in range(nchunky):
-            for i in range(len(all_chunks_info['dirname'])):
-                if all_chunks_info['z'][i] == zchunk + 1 \
-                        and all_chunks_info['y'][i] == ychunk + 1:
+            for i in range(len(all_chunks_info["dirname"])):
+                if (
+                    all_chunks_info["z"][i] == zchunk + 1
+                    and all_chunks_info["y"][i] == ychunk + 1
+                ):
                     break
-            dirname = all_chunks_info['dirname'][i]
-            planes_filenames \
-                = list(sorted(glob(os.path.join(dirname, '*.tiff'))))
+            dirname = all_chunks_info["dirname"][i]
+            planes_filenames = list(sorted(glob(os.path.join(dirname, "*.tiff"))))
 
             PLANE_PATTERN = re.compile(
-                os.path.basename(dirname) +
-                r'_plane(?P<z>[0-9]+)'
-                r'_c(?P<c>[0-9]+)'
-                r'.tiff$'
+                os.path.basename(dirname) + r"_plane(?P<z>[0-9]+)"
+                r"_c(?P<c>[0-9]+)"
+                r".tiff$"
             )
 
             for fname in planes_filenames:
                 parsed = PLANE_PATTERN.fullmatch(os.path.basename(fname))
-                all_chunks_info['planes'][i]['fname'].append(fname)
-                all_chunks_info['planes'][i]['z'].append(int(parsed.group('z')))
-                all_chunks_info['planes'][i]['c'].append(int(parsed.group('c')))
+                all_chunks_info["planes"][i]["fname"].append(fname)
+                all_chunks_info["planes"][i]["z"].append(int(parsed.group("z")))
+                all_chunks_info["planes"][i]["c"].append(int(parsed.group("c")))
 
                 f = TiffFile(fname)
                 dtype = f.pages[0].dtype
                 yx_shape = f.pages[0].shape
-                all_chunks_info['planes'][i]['yx_shape'].append(yx_shape)
-
-            nplanes = max(all_chunks_info['planes'][i]['z'])
-            nchannels = max(nchannels, max(all_chunks_info['planes'][i]['c']))
-
-            yx_shape = set(all_chunks_info['planes'][i]['yx_shape'])
+                all_chunks_info["planes"][i]["yx_shape"].append(yx_shape)
+
+            nplanes = max(all_chunks_info["planes"][i]["z"])
+            nchannels = max(nchannels, max(all_chunks_info["planes"][i]["c"]))
+
+            yx_shape = set(all_chunks_info["planes"][i]["yx_shape"])
             if not len(yx_shape) == 1:
-                raise ValueError('Incompatible chunk shapes')
+                raise ValueError("Incompatible chunk shapes")
             yx_shape = list(yx_shape)[0]
             allshapes[zchunk][ychunk] = (nplanes, *yx_shape)
 
     # check that all chunk shapes are compatible
     for zchunk in range(nchunkz):
         if len(set(shape[1] for shape in allshapes[zchunk])) != 1:
-            raise ValueError('Incompatible Y shapes')
+            raise ValueError("Incompatible Y shapes")
     for ychunk in range(nchunky):
         if len(set(shape[ychunk][0] for shape in allshapes)) != 1:
-            raise ValueError('Incompatible Z shapes')
+            raise ValueError("Incompatible Z shapes")
     if len(set(shape[2] for subshapes in allshapes for shape in subshapes)) != 1:
-        raise ValueError('Incompatible X shapes')
+        raise ValueError("Incompatible X shapes")
 
     # compute full shape
     fullshape = [0, 0, 0]
     fullshape[0] = sum(shape[0][0] for shape in allshapes)
     fullshape[1] = sum(shape[1] for shape in allshapes[0])

Check failure on line 259 in /home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py#L203-L259

     omz = zarr.storage.DirectoryStore(out)
     omz = zarr.group(store=omz, overwrite=True)
 
     # Prepare chunking options
     opt = {
-        'chunks': [nchannels] + [chunk] * 3,
-        'dimension_separator': r'/',
-        'order': 'F',
-        'dtype': np.dtype(dtype).str,
-        'fill_value': None,
-        'compressor': make_compressor(compressor, **compressor_opt),
+        "chunks": [nchannels] + [chunk] * 3,
+        "dimension_separator": r"/",
+        "order": "F",
+        "dtype": np.dtype(dtype).str,
+        "fill_value": None,
+        "compressor": make_compressor(compressor, **compressor_opt),
     }
 
     # write first level
-    omz.create_dataset('0', shape=[nchannels, *fullshape], **opt)
-    array = omz['0']
-    print('Write level 0 with shape', [nchannels, *fullshape])
-    for i, dirname in enumerate(all_chunks_info['dirname']):
-        chunkz = all_chunks_info['z'][i] - 1
-        chunky = all_chunks_info['y'][i] - 1
-        planes = all_chunks_info['planes'][i]
-        for j, fname in enumerate(planes['fname']):
-            subz = planes['z'][j] - 1
-            subc = planes['c'][j] - 1
-            yx_shape = planes['yx_shape'][j]
+    omz.create_dataset("0", shape=[nchannels, *fullshape], **opt)
+    array = omz["0"]
+    print("Write level 0 with shape", [nchannels, *fullshape])
+    for i, dirname in enumerate(all_chunks_info["dirname"]):
+        chunkz = all_chunks_info["z"][i] - 1
+        chunky = all_chunks_info["y"][i] - 1
+        planes = all_chunks_info["planes"][i]
+        for j, fname in enumerate(planes["fname"]):
+            subz = planes["z"][j] - 1
+            subc = planes["c"][j] - 1
+            yx_shape = planes["yx_shape"][j]
 
             zstart = sum(shape[0][0] for shape in allshapes[:chunkz])
-            ystart = sum(shape[1] for subshapes in allshapes for shape in subshapes[:chunky])
-            print(f'Write plane ({subc}, {zstart + subz}, {ystart}:{ystart + yx_shape[0]})', end='\r')
+            ystart = sum(
+                shape[1] for subshapes in allshapes for shape in subshapes[:chunky]
+            )
+            print(
+                f"Write plane ({subc}, {zstart + subz}, {ystart}:{ystart + yx_shape[0]})",
+                end="\r",
+            )
             slicer = (
                 subc,
                 zstart + subz,
                 slice(ystart, ystart + yx_shape[0]),
                 slice(None),
             )
 
             f = TiffFile(fname)
             array[slicer] = f.asarray()
-    print('')
+    print("")
 
     # build pyramid using median windows
     level = 0
     while any(x > 1 for x in omz[str(level)].shape[-3:]):
         prev_array = omz[str(level)]
         prev_shape = prev_array.shape[-3:]
         level += 1
 
-        new_shape = list(map(lambda x: max(1, x//2), prev_shape))
+        new_shape = list(map(lambda x: max(1, x // 2), prev_shape))
         if all(x < chunk for x in new_shape):
             break
-        print('Compute level', level, 'with shape', new_shape)
+        print("Compute level", level, "with shape", new_shape)
         omz.create_dataset(str(level), shape=[nchannels, *new_shape], **opt)
         new_array = omz[str(level)]
 
         nz, ny, nx = prev_array.shape[-3:]
         ncz = ceildiv(nz, max_load)
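`ceildiv` itself is not shown in this log; it is presumably ceiling integer division, for which the usual Python one-liner is:

def ceildiv(a, b):
    # ceiling division via floor division on negated operands
    return -(-a // b)

print(ceildiv(1024, 512), ceildiv(1025, 512))  # 2 3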

Check failure on line 401 in /home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/lsm_tiff_to_zarr.py#L261-L401

 
         for cz in range(ncz):
             for cy in range(ncy):
                 for cx in range(ncx):
 
-                    print(f'chunk ({cz}, {cy}, {cx}) / ({ncz}, {ncy}, {ncx})',
-                          end='\r')
+                    print(f"chunk ({cz}, {cy}, {cx}) / ({ncz}, {ncy}, {ncx})", end="\r")
 
                     dat = prev_array[
                         ...,
-                        cz*max_load:(cz+1)*max_load,
-                        cy*max_load:(cy+1)*max_load,
-                        cx*max_load:(cx+1)*max_load,
+                        cz * max_load : (cz + 1) * max_load,
+                        cy * max_load : (cy + 1) * max_load,
+                        cx * max_load : (cx + 1) * max_load,
                     ]
                     crop = [0 if x == 1 else x % 2 for x in dat.shape[-3:]]
                     slicer = [slice(-1) if x else slice(None) for x in crop]
                     dat = dat[(Ellipsis, *slicer)]
                     pz, py, px = dat.shape[-3:]
 
-                    dat = dat.reshape([
-                        nchannels,
-                        max(pz//2, 1), min(pz, 2),
-                        max(py//2, 1), min(py, 2),
-                        max(px//2, 1), min(px, 2),
-                    ])
+                    dat = dat.reshape(
+                        [
+                            nchannels,
+                            max(pz // 2, 1),
+                            min(pz, 2),
+                            max(py // 2, 1),
+                            min(py, 2),
+                            max(px // 2, 1),
+                            min(px, 2),
+                        ]
+                    )
                     dat = dat.transpose([0, 1, 3, 5, 2, 4, 6])
-                    dat = dat.reshape([
-                        nchannels,
-                        max(pz//2, 1),
-                        max(py//2, 1),
-                        max(px//2, 1),
-                        -1,
-                    ])
+                    dat = dat.reshape(
+                        [
+                            nchannels,
+                            max(pz // 2, 1),
+                            max(py // 2, 1),
+                            max(px // 2, 1),
+                            -1,
+                        ]
+                    )
                     dat = np.median(dat, -1)
 
                     new_array[
                         ...,
-                        cz*max_load//2:(cz+1)*max_load//2,
-                        cy*max_load//2:(cy+1)*max_load//2,
-                        cx*max_load//2:(cx+1)*max_load//2,
+                        cz * max_load // 2 : (cz + 1) * max_load // 2,
+                        cy * max_load // 2 : (cy + 1) * max_load // 2,
+                        cx * max_load // 2 : (cx + 1) * max_load // 2,
                     ] = dat
 
-    print('')
+    print("")
     nblevel = level
 
     # Write OME-Zarr multiscale metadata
-    print('Write metadata')
-    multiscales = [{
-        'version': '0.4',
-        'axes': [
-            {"name": "z", "type": "space", "unit": "micrometer"},
-            {"name": "y", "type": "space", "unit": "micrometer"},
-            {"name": "x", "type": "space", "unit": "micrometer"}
-        ],
-        'datasets': [],
-        'type': 'median window 2x2x2',
-        'name': '',
-    }]
-    multiscales[0]['axes'].insert(0, {"name": "c", "type": "channel"})
+    print("Write metadata")
+    multiscales = [
+        {
+            "version": "0.4",
+            "axes": [
+                {"name": "z", "type": "space", "unit": "micrometer"},
+                {"name": "y", "type": "space", "unit": "micrometer"},
+                {"name": "x", "type": "space", "unit": "micrometer"},
+            ],
+            "datasets": [],
+            "type": "median window 2x2x2",
+            "name": "",
+        }
+    ]
+    multiscales[0]["axes"].insert(0, {"name": "c", "type": "channel"})
 
     voxel_size = list(map(float, reversed(voxel_size)))
     factor = [1] * 3
     for n in range(nblevel):
         shape = omz[str(n)].shape[-3:]
-        multiscales[0]['datasets'].append({})
-        level = multiscales[0]['datasets'][-1]
+        multiscales[0]["datasets"].append({})
+        level = multiscales[0]["datasets"][-1]
         level["path"] = str(n)
 
         # We made sure that the downsampling level is exactly 2
         # However, once a dimension has size 1, we stop downsampling.
         if n > 0:
-            shape_prev = omz[str(n-1)].shape[-3:]
+            shape_prev = omz[str(n - 1)].shape[-3:]
             if shape_prev[0] != shape[0]:
                 factor[0] *= 2
             if shape_prev[1] != shape[1]:
                 factor[1] *= 2
             if shape_prev[2] != shape[2]:
                 factor[2] *= 2
 
         level["coordinateTransformations"] = [
             {
                 "type": "scale",
-                "scale": [1.0] + [
+                "scale": [1.0]
+                + [
                     factor[0] * voxel_size[0],
                     factor[1] * voxel_size[1],
                     factor[2] * voxel_size[2],
-                ]
+                ],
             },
             {
                 "type": "translation",
-                "translation": [0.0] + [
+                "translation": [0.0]
+                + [
                     (factor[0] - 1) * voxel_size[0] * 0.5,
                     (factor[1] - 1) * voxel_size[1] * 0.5,
                     (factor[2] - 1) * voxel_size[2] * 0.5,
-                ]
-            }
+                ],
+            },
         ]
     multiscales[0]["coordinateTransformations"] = [
-        {
-            "scale": [1.0] * 4,
-            "type": "scale"
-        }
+        {"scale": [1.0] * 4, "type": "scale"}
     ]
     omz.attrs["multiscales"] = multiscales
 
     if not nii:
-        print('done.')
+        print("done.")
         return
 
     # Write NIfTI-Zarr header
     # NOTE: we use nifti2 because dimensions typically do not fit in a short
     # TODO: we do not write the json zattrs, but it should be added in
     #       once the nifti-zarr package is released
-    shape = list(reversed(omz['0'].shape))
-    shape = shape[:3] + [1] + shape[3:]    # insert time dimension
+    shape = list(reversed(omz["0"].shape))
+    shape = shape[:3] + [1] + shape[3:]  # insert time dimension
     affine = orientation_to_affine(orientation, *voxel_size)
     if center:
         affine = center_affine(affine, shape[:3])
     header = nib.Nifti2Header()
     header.set_data_shape(shape)
-    header.set_data_dtype(omz['0'].dtype)
+    header.set_data_dtype(omz["0"].dtype)
     header.set_qform(affine)
     header.set_sform(affine)
-    header.set_xyzt_units(nib.nifti1.unit_codes.code['micron'])
-    header.structarr['magic'] = b'nz2\0'
-    header = np.frombuffer(header.structarr.tobytes(), dtype='u1')
+    header.set_xyzt_units(nib.nifti1.unit_codes.code["micron"])
+    header.structarr["magic"] = b"nz2\0"
+    header = np.frombuffer(header.structarr.tobytes(), dtype="u1")
     opt = {
-        'chunks': [len(header)],
-        'dimension_separator': r'/',
-        'order': 'F',
-        'dtype': '|u1',
-        'fill_value': None,
-        'compressor': None,
+        "chunks": [len(header)],
+        "dimension_separator": r"/",
+        "order": "F",
+        "dtype": "|u1",
+        "fill_value": None,
+        "compressor": None,
     }
-    omz.create_dataset('nifti', data=header, shape=shape, **opt)
-    print('done.')
+    omz.create_dataset("nifti", data=header, shape=shape, **opt)
+    print("done.")
 
 
 if __name__ == "__main__":
     app()
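The median pyramid in the hunk above gathers each 2x2x2 neighbourhood via reshape/transpose before taking the median. A toy (channel, z, y, x) sketch, assuming even side lengths:

import numpy as np

nchannels, pz, py, px = 1, 4, 4, 4
dat = np.random.rand(nchannels, pz, py, px)
dat = dat.reshape([nchannels, pz // 2, 2, py // 2, 2, px // 2, 2])
dat = dat.transpose([0, 1, 3, 5, 2, 4, 6])  # move the 2x2x2 neighbours last
dat = dat.reshape([nchannels, pz // 2, py // 2, px // 2, -1])
print(np.median(dat, -1).shape)  # (1, 2, 2, 2)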

Check failure on line 21 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L11-L21

     glymur
     zarr
     nibabel
     cyclopts
 """
+
 import cyclopts
 import glymur
 import zarr
 import ast
 import numcodecs

Check failure on line 55 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L25-L55

 import numpy as np
 from glob import glob
 import nibabel as nib
 from typing import Optional
 
-HOME = '/space/aspasia/2/users/linc/000003'
+HOME = "/space/aspasia/2/users/linc/000003"
 
 # Path to LincBrain dataset
-LINCSET = os.path.join(HOME, 'sourcedata')
-LINCOUT = os.path.join(HOME, 'rawdata')
+LINCSET = os.path.join(HOME, "sourcedata")
+LINCOUT = os.path.join(HOME, "rawdata")
 app = cyclopts.App(help_format="markdown")
 
 
 @app.default
 def convert(
     inp: str = None,
     out: str = None,
     subjects: list = [],
     *,
     chunk: int = 4096,
-    compressor: str = 'blosc',
+    compressor: str = "blosc",
     compressor_opt: str = "{}",
     max_load: int = 16384,
     nii: bool = False,
-    orientation: str = 'coronal',
+    orientation: str = "coronal",
     center: bool = True,
     thickness: Optional[float] = None,
 ):
     """
     This command converts JPEG2000 files generated by MBF-Neurolucida

Check failure on line 298 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py


/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L96-L298

         Set RAS[0, 0, 0] at FOV center
     thickness
         Slice thickness
     """
     for LINCSUB in subjects:
-        print('working on subject', LINCSUB)
-        HISTO_FOLDER = os.path.join(LINCSET, f'sub-{LINCSUB}/micr')
-        OUT_FOLDER = os.path.join(LINCOUT,  f'sub-{LINCSUB}/micr')
+        print("working on subject", LINCSUB)
+        HISTO_FOLDER = os.path.join(LINCSET, f"sub-{LINCSUB}/micr")
+        OUT_FOLDER = os.path.join(LINCOUT, f"sub-{LINCSUB}/micr")
         os.makedirs(OUT_FOLDER, exist_ok=True)
-        inp_dir = list(sorted(glob(os.path.join(HISTO_FOLDER, f'*nDF.jp2'))))
-
-        start_num, end_num = 0, len(inp_dir)-1
-        out = os.path.join(OUT_FOLDER, f'sub-{LINCSUB}_sample-slice{start_num:04d}slice{end_num:04d}_stain-LY_DF')
-        out += '.nii.zarr' if nii else '.ome.zarr'
-        nii = nii or out.endswith('.nii.zarr')
+        inp_dir = list(sorted(glob(os.path.join(HISTO_FOLDER, f"*nDF.jp2"))))
+
+        start_num, end_num = 0, len(inp_dir) - 1
+        out = os.path.join(
+            OUT_FOLDER,
+            f"sub-{LINCSUB}_sample-slice{start_num:04d}slice{end_num:04d}_stain-LY_DF",
+        )
+        out += ".nii.zarr" if nii else ".ome.zarr"
+        nii = nii or out.endswith(".nii.zarr")
         print(out)
 
         if isinstance(compressor_opt, str):
             compressor_opt = ast.literal_eval(compressor_opt)
 
         # Prepare Zarr group
         omz = zarr.storage.DirectoryStore(out)
         omz = zarr.group(store=omz, overwrite=True)
 
-        nblevel, has_channel, dtype_jp2 = float('inf'), float('inf'), ''
+        nblevel, has_channel, dtype_jp2 = float("inf"), float("inf"), ""
         # get new_size
         new_height, new_width = 0, 0
         for inp in inp_dir:
             jp2 = glymur.Jp2k(inp)
             nblevel = min(nblevel, jp2.codestream.segment[2].num_res)
             has_channel = min(has_channel, jp2.ndim - 2)
             dtype_jp2 = np.dtype(jp2.dtype).str
             if jp2.shape[0] > new_height:
                 new_height = jp2.shape[0]
-            if  jp2.shape[1] > new_width:
+            if jp2.shape[1] > new_width:
                 new_width = jp2.shape[1]
-        new_size = (new_height, new_width, 3) if has_channel else (new_height, new_width) 
+        new_size = (
+            (new_height, new_width, 3) if has_channel else (new_height, new_width)
+        )
         print(len(inp_dir), new_size, nblevel, has_channel)
-
 
         # Prepare chunking options
         opt = {
-            'chunks': list(new_size[2:]) + [1] + [chunk, chunk],
-            'dimension_separator': r'/',
-            'order': 'F',
-            'dtype': dtype_jp2,
-            'fill_value': None,
-            'compressor': make_compressor(compressor, **compressor_opt),
+            "chunks": list(new_size[2:]) + [1] + [chunk, chunk],
+            "dimension_separator": r"/",
+            "order": "F",
+            "dtype": dtype_jp2,
+            "fill_value": None,
+            "compressor": make_compressor(compressor, **compressor_opt),
         }
         print(opt)
-
 
         # Write each level
         for level in range(nblevel):
             shape = [ceildiv(s, 2**level) for s in new_size[:2]]
-            shape = [new_size[2]]  + [len(inp_dir)] + [s for s in shape]
-
-            omz.create_dataset(f'{level}', shape=shape, **opt)
-            array = omz[f'{level}']
+            shape = [new_size[2]] + [len(inp_dir)] + [s for s in shape]
+
+            omz.create_dataset(f"{level}", shape=shape, **opt)
+            array = omz[f"{level}"]
 
             # Write each slice
-            for idx, inp in enumerate(inp_dir): 
+            for idx, inp in enumerate(inp_dir):
                 j2k = glymur.Jp2k(inp)
                 vxw, vxh = get_pixelsize(j2k)
                 subdat = WrappedJ2K(j2k, level=level)
                 subdat_size = subdat.shape
-                print('Convert level', level, 'with shape', shape, 'for slice', idx, 'with size', subdat_size)
+                print(
+                    "Convert level",
+                    level,
+                    "with shape",
+                    shape,
+                    "for slice",
+                    idx,
+                    "with size",
+                    subdat_size,
+                )
 
                 # centering offset used when attaching the slice to the padded canvas
-                x, y = (int((shape[-2] - subdat_size[-2])/2), int((shape[-1] - subdat_size[-1])/2))
-            
+                x, y = (
+                    int((shape[-2] - subdat_size[-2]) / 2),
+                    int((shape[-1] - subdat_size[-1]) / 2),
+                )
+
                 if max_load is None or (shape[-2] < max_load and shape[-1] < max_load):
-                    array[..., idx, :, :] = np.zeros((3, shape[-2], shape[-1]), dtype = np.uint8)
-                    array[...,  idx, x : x + subdat_size[1], y : y + subdat_size[2]] = subdat[...]
+                    array[..., idx, :, :] = np.zeros(
+                        (3, shape[-2], shape[-1]), dtype=np.uint8
+                    )
+                    array[..., idx, x : x + subdat_size[1], y : y + subdat_size[2]] = (
+                        subdat[...]
+                    )
 
                 else:
                     ni = ceildiv(shape[-2], max_load)
                     nj = ceildiv(shape[-1], max_load)
-                    
+
                     for i in range(ni):
                         for j in range(nj):
-                            print(f'\r{i+1}/{ni}, {j+1}/{nj}', end=' ')
-                            start_x, end_x = i*max_load, min((i+1)*max_load, shape[-2])
-                            start_y, end_y = j*max_load, min((j+1)*max_load, shape[-1])
-                            array[..., idx, start_x:end_x, start_y:end_y] = np.zeros((3, end_x-start_x, end_y-start_y), dtype = np.uint8) 
+                            print(f"\r{i+1}/{ni}, {j+1}/{nj}", end=" ")
+                            start_x, end_x = i * max_load, min(
+                                (i + 1) * max_load, shape[-2]
+                            )
+                            start_y, end_y = j * max_load, min(
+                                (j + 1) * max_load, shape[-1]
+                            )
+                            array[..., idx, start_x:end_x, start_y:end_y] = np.zeros(
+                                (3, end_x - start_x, end_y - start_y), dtype=np.uint8
+                            )
                             if end_x <= x or end_y <= y:
-                                continue 
+                                continue
 
                             if start_x >= subdat_size[-2] or start_y >= subdat_size[-1]:
-                                continue 
+                                continue
 
                             array[
                                 ...,
-                                idx, 
-                                x + start_x: x + min(end_x, subdat_size[-2]),
-                                y + start_y: y + min(end_y, subdat_size[-1]),
-                                ] = subdat[
+                                idx,
+                                x + start_x : x + min(end_x, subdat_size[-2]),
+                                y + start_y : y + min(end_y, subdat_size[-1]),
+                            ] = subdat[
                                 ...,
-                                start_x: min((i+1)*max_load, subdat_size[-2]),
-                                start_y: min((j+1)*max_load, subdat_size[-1]),
+                                start_x : min((i + 1) * max_load, subdat_size[-2]),
+                                start_y : min((j + 1) * max_load, subdat_size[-1]),
                             ]
-                    print('')
-            
+                    print("")
+
         # Write OME-Zarr multiscale metadata
-        print('Write metadata')
-        multiscales = [{
-            'version': '0.4',
-            'axes': [
-                {"name": "z", "type": "space", "unit": "micrometer"},
-                {"name": "y", "type": "distance", "unit": "micrometer"},
-                {"name": "x", "type": "space", "unit": "micrometer"}
-            ],
-            'datasets': [],
-            'type': 'jpeg2000',
-            'name': '',
-        }]
+        print("Write metadata")
+        multiscales = [
+            {
+                "version": "0.4",
+                "axes": [
+                    {"name": "z", "type": "space", "unit": "micrometer"},
+                    {"name": "y", "type": "distance", "unit": "micrometer"},
+                    {"name": "x", "type": "space", "unit": "micrometer"},
+                ],
+                "datasets": [],
+                "type": "jpeg2000",
+                "name": "",
+            }
+        ]
         if has_channel:
-            multiscales[0]['axes'].insert(0, {"name": "c", "type": "channel"})
+            multiscales[0]["axes"].insert(0, {"name": "c", "type": "channel"})
 
         for n in range(nblevel):
-            shape0 = omz['0'].shape[-2:]
+            shape0 = omz["0"].shape[-2:]
             shape = omz[str(n)].shape[-2:]
-            multiscales[0]['datasets'].append({})
-            level = multiscales[0]['datasets'][-1]
+            multiscales[0]["datasets"].append({})
+            level = multiscales[0]["datasets"][-1]
             level["path"] = str(n)
 
             # I assume that wavelet transforms end up aligning voxel edges
             # across levels, so the effective scaling is the shape ratio,
             # and there is a half voxel shift wrt the "center of first voxel"
             # frame
             level["coordinateTransformations"] = [
                 {
                     "type": "scale",
-                    "scale": [1.0] * has_channel + [
-                        1.0, 
-                        (shape0[0]/shape[0])*vxh,
-                        (shape0[1]/shape[1])*vxw,
-                    ]
+                    "scale": [1.0] * has_channel
+                    + [
+                        1.0,
+                        (shape0[0] / shape[0]) * vxh,
+                        (shape0[1] / shape[1]) * vxw,
+                    ],
                 },
                 {
                     "type": "translation",
-                    "translation": [0.0] * has_channel + [
-                        0.0, 
-                        (shape0[0]/shape[0] - 1)*vxh*0.5,
-                        (shape0[1]/shape[1] - 1)*vxw*0.5,
-                    ]
-                }
+                    "translation": [0.0] * has_channel
+                    + [
+                        0.0,
+                        (shape0[0] / shape[0] - 1) * vxh * 0.5,
+                        (shape0[1] / shape[1] - 1) * vxw * 0.5,
+                    ],
+                },
             ]
         multiscales[0]["coordinateTransformations"] = [
-            {
-                "scale": [1.0] * (3 + has_channel),
-                "type": "scale"
-            }
+            {"scale": [1.0] * (3 + has_channel), "type": "scale"}
         ]
         omz.attrs["multiscales"] = multiscales
 
-        
-        # Write sidecar .json file 
-        json_name = os.path.join(OUT_FOLDER, f'sub-{LINCSUB}_sample-slice{start_num:04d}slice{end_num:04d}_stain-LY_DF.json')
+        # Write sidecar .json file
+        json_name = os.path.join(
+            OUT_FOLDER,
+            f"sub-{LINCSUB}_sample-slice{start_num:04d}slice{end_num:04d}_stain-LY_DF.json",
+        )
         dic = {}
-        dic['PixelSize'] = json.dumps([vxw, vxh])
-        dic['PixelSizeUnits'] = 'um'
-        dic['SliceThickness'] = 1.2 
-        dic['SliceThicknessUnits'] = 'mm'
-        dic['SampleStaining'] = 'LY'
+        dic["PixelSize"] = json.dumps([vxw, vxh])
+        dic["PixelSizeUnits"] = "um"
+        dic["SliceThickness"] = 1.2
+        dic["SliceThicknessUnits"] = "mm"
+        dic["SampleStaining"] = "LY"
 
         with open(json_name, "w") as outfile:
-            json.dump(dic, outfile)  
-            outfile.write('\n')  
-
+            json.dump(dic, outfile)
+            outfile.write("\n")
 
 
 def orientation_ensure_3d(orientation):
-    orientation = {
-        'coronal': 'LI',
-        'axial': 'LP',
-        'sagittal': 'PI',
-    }.get(orientation.lower(), orientation).upper()
+    orientation = (
+        {
+            "coronal": "LI",
+            "axial": "LP",
+            "sagittal": "PI",
+        }
+        .get(orientation.lower(), orientation)
+        .upper()
+    )
     if len(orientation) == 2:
-        if 'L' not in orientation and 'R' not in orientation:
-            orientation += 'R'
-        if 'P' not in orientation and 'A' not in orientation:
-            orientation += 'A'
-        if 'I' not in orientation and 'S' not in orientation:
-            orientation += 'S'
+        if "L" not in orientation and "R" not in orientation:
+            orientation += "R"
+        if "P" not in orientation and "A" not in orientation:
+            orientation += "A"
+        if "I" not in orientation and "S" not in orientation:
+            orientation += "S"
     return orientation
 
 
 def orientation_to_affine(orientation, vxw=1, vxh=1, vxd=1):
     orientation = orientation_ensure_3d(orientation)
     affine = np.zeros([4, 4])
     vx = np.asarray([vxw, vxh, vxd])
     for i in range(3):
         letter = orientation[i]
-        sign = -1 if letter in 'LPI' else 1
-        letter = {'L': 'R', 'P': 'A', 'I': 'S'}.get(letter, letter)
-        index = list('RAS').index(letter)
+        sign = -1 if letter in "LPI" else 1
+        letter = {"L": "R", "P": "A", "I": "S"}.get(letter, letter)
+        index = list("RAS").index(letter)
         affine[index, i] = sign * vx[i]
     return affine
 
 
 def center_affine(affine, shape):

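Two notes on the long hunk above before the next annotation. First, Black only re-wraps the multiscales literals and never edits string contents, so the y axis's "type": "distance" passes through unchanged, even though OME-NGFF 0.4 lists "space", "time", and "channel" as the recognized axis types. Second, the scale/translation arithmetic next to the "half voxel shift" comment is easier to see with numbers; a worked sketch with hypothetical sizes:

    # Hypothetical pyramid: level 0 is 4096 x 4096 (y, x) at 0.5 um per
    # voxel; a coarser level is 1024 x 1024. These stand in for the
    # shape0/shape lookups in the hunk above.
    shape0 = (4096, 4096)
    shape = (1024, 1024)
    vxh = vxw = 0.5  # micrometers, level-0 pixel size

    scale_y = (shape0[0] / shape[0]) * vxh            # 4.0 * 0.5 = 2.0 um
    trans_y = (shape0[0] / shape[0] - 1) * vxh * 0.5  # 3.0 * 0.5 * 0.5 = 0.75 um

    # The first coarse voxel pools level-0 voxels centered at 0.0, 0.5,
    # 1.0 and 1.5 um, so its own center sits at 0.75 um: exactly the
    # half-voxel shift the comment describes.
    print(scale_y, trans_y)  # 2.0 0.75
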
Check failure on line 363 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py

See this annotation in the file changed.

@github-actions github-actions / Black

/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L352-L363

         if not isinstance(index, tuple):
             index = (index,)
         if Ellipsis not in index:
             index += (Ellipsis,)
         if any(idx is None for idx in index):
-            raise TypeError('newaxis not supported')
+            raise TypeError("newaxis not supported")
 
         # substitute ellipses
         new_index = []
         has_seen_ellipsis = False
         last_was_ellipsis = False

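This hunk and the next belong to WrappedJ2K's __getitem__, which normalizes whatever index it receives before mapping it onto a JPEG2000 read region. A runnable distillation of the normalization step shown here (the function name is mine, not the repository's):

    def normalize_index(index):
        # Coerce a bare index into a tuple and guarantee an Ellipsis so
        # downstream code can expand it to the array's full dimensionality.
        if not isinstance(index, tuple):
            index = (index,)
        if Ellipsis not in index:
            index += (Ellipsis,)
        if any(idx is None for idx in index):
            raise TypeError("newaxis not supported")
        return index

    print(normalize_index(slice(0, 2)))
    # (slice(0, 2, None), Ellipsis)
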
Check failure on line 381 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py

See this annotation in the file changed.

@github-actions github-actions / Black

/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L364-L381

         for idx in index:
             if idx is Ellipsis:
                 if not has_seen_ellipsis:
                     new_index += [slice(None)] * nb_ellipsis
                 elif not last_was_ellipsis:
-                    raise ValueError('Multiple ellipses should be contiguous')
+                    raise ValueError("Multiple ellipses should be contiguous")
                 has_seen_ellipsis = True
                 last_was_ellipsis = True
             elif not isinstance(idx, slice):
-                raise TypeError('Only slices are supported')
+                raise TypeError("Only slices are supported")
             elif idx.step not in (None, 1):
-                raise ValueError('Striding not supported')
+                raise ValueError("Striding not supported")
             else:
                 last_was_ellipsis = False
                 new_index += [idx]
         index = new_index
 

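To see what this loop produces, here is a pared-down expansion of a single Ellipsis for a 3-d array. Note that nb_ellipsis is computed earlier in the file, outside this hunk; I reconstruct it as ndim minus the number of explicit slices, and the sketch omits the original's tolerance for contiguous repeated ellipses:

    ndim = 3
    index = (slice(0, 2), Ellipsis)
    nb_ellipsis = ndim - sum(isinstance(idx, slice) for idx in index)  # 3 - 1 = 2

    new_index = []
    for idx in index:
        if idx is Ellipsis:
            new_index += [slice(None)] * nb_ellipsis  # pad with full slices
        else:
            new_index += [idx]

    print(new_index)
    # [slice(0, 2, None), slice(None, None, None), slice(None, None, None)]
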
Check failure on line 435 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py

See this annotation in the file changed.

@github-actions github-actions / Black

/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L404-L435

 
 def make_compressor(name, **prm):
     if not isinstance(name, str):
         return name
     name = name.lower()
-    if name == 'blosc':
+    if name == "blosc":
         Compressor = numcodecs.Blosc
-    elif name == 'zlib':
+    elif name == "zlib":
         Compressor = numcodecs.Zlib
     else:
-        raise ValueError('Unknown compressor', name)
+        raise ValueError("Unknown compressor", name)
     return Compressor(**prm)
 
 
 def get_pixelsize(j2k):
     # Adobe XMP metadata
     # https://en.wikipedia.org/wiki/Extensible_Metadata_Platform
-    XMP_UUID = 'BE7ACFCB97A942E89C71999491E3AFAC'
-    TAG_Images = '{http://ns.adobe.com/xap/1.0/}Images'
-    Tag_Desc = '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description'
-    Tag_PixelWidth = '{http://ns.adobe.com/xap/1.0/}PixelWidth'
-    Tag_PixelHeight = '{http://ns.adobe.com/xap/1.0/}PixelHeight'
+    XMP_UUID = "BE7ACFCB97A942E89C71999491E3AFAC"
+    TAG_Images = "{http://ns.adobe.com/xap/1.0/}Images"
+    Tag_Desc = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description"
+    Tag_PixelWidth = "{http://ns.adobe.com/xap/1.0/}PixelWidth"
+    Tag_PixelHeight = "{http://ns.adobe.com/xap/1.0/}PixelHeight"
 
     vxw = vxh = 1.0
     for box in j2k.box:
-        if getattr(box, 'uuid', None) == uuid.UUID(XMP_UUID):
+        if getattr(box, "uuid", None) == uuid.UUID(XMP_UUID):
             try:
                 images = list(box.data.iter(TAG_Images))[0]
                 desc = list(images.iter(Tag_Desc))[0]
                 vxw = float(desc.attrib[Tag_PixelWidth])
                 vxh = float(desc.attrib[Tag_PixelHeight])

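get_pixelsize, whose opening Black touches here, walks the JP2 box list for the Adobe XMP UUID box and pulls PixelWidth/PixelHeight out of the RDF Description element; the except branch falls outside the hunk. A usage sketch, with a hypothetical file name:

    import glymur

    # vxw and vxh are initialized to 1.0, so files without the XMP box
    # (or without the Neurolucida pixel-size tags) fall back to 1.0.
    j2k = glymur.Jp2k("sub-01_sample-slice0000_stain-LY_DF.jp2")  # hypothetical path
    vxw, vxh = get_pixelsize(j2k)
    print(f"pixel size: {vxw} x {vxh} (um, per the sidecar convention)")
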
Check failure on line 443 in /home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py

See this annotation in the file changed.

@github-actions github-actions / Black

/home/runner/work/linc-convert/linc-convert/scripts/jp2_to_zarr.py#L437-L443

     return vxw, vxh
 
 
 if __name__ == "__main__":
     app()
-
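
All 23 findings in this run are mechanical, so running Black locally (for example, python -m black scripts/) would bring the files in line. To preview a rewrite without touching any file, Black's public API can be called directly; the snippet below reproduces the quote normalization and magic-trailing-comma explosion seen throughout this log:

    import black

    src = "x = {'a': 1,}\n"
    print(black.format_str(src, mode=black.Mode()))
    # x = {
    #     "a": 1,
    # }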