diff --git a/.github/build.yml b/.github/workflows/build.yml
similarity index 100%
rename from .github/build.yml
rename to .github/workflows/build.yml
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8ec335c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,159 @@
+# Local debugging directory
+.debug/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# VSCode
+.vscode
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..8b63b02
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,9 @@
+repos:
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  # Ruff version.
+  rev: v0.6.4
+  hooks:
+    # Run the linter.
+    - id: ruff
+    # Run the formatter.
+    - id: ruff-format
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..62e5b43
--- /dev/null
+++ b/README.md
@@ -0,0 +1,45 @@
+# PyDePSI (Tentative name)
+
+This repository is a work in progress, in which we are developing a Python package for interferometric SAR processing. The software is inspired by the MATLAB software DePSI, but is implemented in Python and will include recent developments in the field.
+
+## Installation for development
+
+It is assumed that you have `mamba` installed. If not, you can find the installation instructions [here](https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html). Other package managers like `conda` or `venv` can be used as well.
+
+Clone this repository and `cd` into it:
+
+```bash
+git clone git@github.com:MotionbyLearning/PyDePSI.git
+cd PyDePSI
+```
+
+Create a new conda environment (here we use the example name `pydepsi-dev`) with `mamba`:
+
+```bash
+mamba create -c conda-forge -n pydepsi-dev python=3.12
+```
+
+Here we use Python 3.12, since we aim to support Python 3.10 and above.
+
+Activate the environment:
+
+```bash
+mamba activate pydepsi-dev
+```
+
+Install this package in development mode:
+
+```bash
+pip install -e .[dev,docs]
+```
+
+Finally, install the pre-commit hooks:
+```bash
+pre-commit install
+```
+
+## Useful reading material
+
+- [Python packaging user guide](https://packaging.python.org/)
+- [Testing in Python](https://docs.kedro.org/en/stable/development/automated_testing.html)
+- [Code formatting and linting](https://docs.kedro.org/en/stable/development/linting.html)
diff --git a/examples/matlab2python/calculateMatrixMult.m b/examples/matlab2python/calculateMatrixMult.m
deleted file mode 100644
index 2cd9e58..0000000
--- a/examples/matlab2python/calculateMatrixMult.m
+++ /dev/null
@@ -1,3 +0,0 @@
-function matMult = calculateMatrixMult(A,B)
-    matMult = A*B;
-end
"markdown", - "metadata": {}, - "source": [ - "For Xarrays:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a_xr = xr.array(a)\n", - "b_xr = xr.array(b)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a_xr_m = matlab.double(np.asarray(a_xr).tolist())\n", - "b_xr_m = matlab.double(np.asarray(b_xr).tolist())\n", - "# This conversion is most likely to be the bottleneck taking around 1 min for a size of (10000,8000)\n", - "# This also requires a conversion from xarray to numpy to matlab which is quite roundabout.\n", - "# You could try to save it as a mat file and then modify script to read it." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Reading and writing to file" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import scipy.io\n", - "scipy.io.savemat('test.mat', dict(a=a, b=b))\n", - "#This also works directly with xarray data arrays" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cm = eng.mytestfunc()\n", - "#We could get the result automatically or save it to a file and read it" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "c = scipy.io.loadmat('result.mat')\n", - "#This reads a dictionary file with variables and other metadata" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/matlab2python/mytestfunc.m b/examples/matlab2python/mytestfunc.m deleted file mode 100644 index a694587..0000000 --- a/examples/matlab2python/mytestfunc.m +++ /dev/null @@ -1,5 +0,0 @@ -function c = mytestfunc() - load ('test.mat'); - c = calculateMatrixMult(a,b); - save ('result.mat', 'c'); -end \ No newline at end of file diff --git a/examples/matlab2python/mytestfunc_fast.m b/examples/matlab2python/mytestfunc_fast.m deleted file mode 100644 index 6833bbd..0000000 --- a/examples/matlab2python/mytestfunc_fast.m +++ /dev/null @@ -1,5 +0,0 @@ -function c = mytestfunc() - load ('test.mat'); - c = calculateMatrixMult(a,b); - savefast ('result.mat', 'c'); -end \ No newline at end of file diff --git a/examples/matlab2python/savefast.m b/examples/matlab2python/savefast.m deleted file mode 100644 index cdbebb5..0000000 --- a/examples/matlab2python/savefast.m +++ /dev/null @@ -1,70 +0,0 @@ -function savefast(filename, varargin) -% savefast: fast saves of large arrays to .mat files -% -% Matlab's 'save' command can be very slow when saving large arrays, -% because by default Matlab attempts to use compression. This function -% provides a much faster alternative, at the cost of larger files. -% -% The syntax is identical to that of the Matlab save command. -% -% Example: -% >> ops = struct('algorithm', 'greedy'); -% >> A = int32(randi(20, 1000, 1200, 40)); -% >> B = randn(500, 1800, 60); -% >> tic; save /tmp/test ops A B; toc -% Elapsed time is 22.980294 seconds. -% >> tic; savefast /tmp/test ops A B; toc -% Elapsed time is 0.571098 seconds. - -% Copyright 2013 by Timothy E. 
diff --git a/examples/notebooks/demo_pydepsi.ipynb b/examples/notebooks/demo_pydepsi.ipynb
index 998bca9..89a3c98 100644
--- a/examples/notebooks/demo_pydepsi.ipynb
+++ b/examples/notebooks/demo_pydepsi.ipynb
@@ -1542,11 +1542,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import numpy as np\n",
+    "import os\n",
     "from pathlib import Path\n",
+    "\n",
+    "import numpy as np\n",
     "import sarxarray\n",
     "\n",
-    "import os\n",
     "cwd = Path(os.getcwd())"
    ]
   },
@@ -3109,6 +3110,7 @@
    "source": [
     "# Visualize\n",
     "from matplotlib import pyplot as plt\n",
+    "\n",
     "fig, ax = plt.subplots()\n",
     "ax.imshow(mrm)\n",
     "ax.set_aspect(2)\n",
@@ -4073,6 +4075,7 @@
    ],
    "source": [
     "from matplotlib import pyplot as plt\n",
+    "\n",
     "fig, ax = plt.subplots()\n",
     "plt.scatter(stmat.lon.data, stmat.lat.data, s=0.005)"
    ]
@@ -5024,7 +5027,7 @@
    ],
    "source": [
     "import xarray as xr\n",
-    "import stm\n",
+    "\n",
     "path_stm = Path('./stm.zarr')\n",
     "stm_demo = xr.open_zarr(path_stm)\n",
     "stm_demo"
@@ -6089,8 +6092,8 @@
    ],
    "source": [
     "# Visualize results\n",
-    "from matplotlib import pyplot as plt\n",
     "import matplotlib.cm as cm\n",
+    "from matplotlib import pyplot as plt\n",
     "\n",
     "colormap = cm.jet\n",
     "\n",
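The notebook hunks above only regroup imports into the order enforced by the ruff isort rules (`"I"` in the `pyproject.toml` below): standard library first, then third-party packages, with a blank line between groups and before the code. A minimal sketch of the convention, using the notebook's own imports:

```python
# isort-style grouping enforced by ruff's "I" rules:
# standard library first, then third-party, separated by blank lines.
import os
from pathlib import Path

import numpy as np
import sarxarray

# Code follows after a blank line.
cwd = Path(os.getcwd())
```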
diff --git a/examples/scripts/demo_pydepsi.py b/examples/scripts/demo_pydepsi.py
index 7b6da7e..dc4a866 100644
--- a/examples/scripts/demo_pydepsi.py
+++ b/examples/scripts/demo_pydepsi.py
@@ -1,83 +1,87 @@
-import numpy as np
-import xarray as xr
-import geopandas as gpd
-import socket
 import logging
+import os
+import socket
 from pathlib import Path
-from matplotlib import pyplot as plt
+
+import geopandas as gpd
+import numpy as np
 import sarxarray
-import stm
-import os
+import xarray as xr
 from dask.distributed import Client
 from dask_jobqueue import SLURMCluster
+from matplotlib import pyplot as plt
 
 
 def get_free_port():
-    # Get a non occupied port number
+    """Get an unoccupied port number."""
     sock = socket.socket()
-    sock.bind(('', 0)) # Bind a port, it will be busy now
-    freesock = sock.getsockname()[1] # get the port number
-    sock.close() # Free the port, so it can be used later
+    sock.bind(("", 0))  # Bind a port; it will be busy now
+    freesock = sock.getsockname()[1]  # Get the port number
+    sock.close()  # Free the port, so it can be used later
     return freesock
 
+
 ## Setup processing
 # Make a logger to log the stages of processing
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
-ch = logging.StreamHandler() # create console handler
+ch = logging.StreamHandler()  # Create console handler
 ch.setLevel(logging.INFO)
 logger.addHandler(ch)
 
 # Paths and files
 cwd = Path(os.getcwd())
-path_slc = Path(cwd / 'data/nl_amsterdam_s1_asc_t088') # SLC stack processed by Doris V5
-f_slc = 'cint_srd.raw' # Data file in each date folder under path_slc
-f_lat = [path_slc/ 'lat.raw'] # Geo referenced coordinates, lat
-f_lon = [path_slc/ 'lon.raw'] # Geo referenced coordinates, lon
-overwrite_zarr = True # Flag for zarr overwrite
-path_stm = Path('./stm.zarr') # Zarr output storage for STM
-path_figure = Path('./figure') # Output path for figure
-path_polygon = Path(cwd / 'data/bag_light_AMS_WGS84.gpkg') #Path to the BRP polygon of NL. Need a absolute path for cluster processing
+path_slc = Path(cwd / "data/nl_amsterdam_s1_asc_t088")  # SLC stack processed by Doris V5
+f_slc = "cint_srd.raw"  # Data file in each date folder under path_slc
+f_lat = [path_slc / "lat.raw"]  # Geo-referenced coordinates, lat
+f_lon = [path_slc / "lon.raw"]  # Geo-referenced coordinates, lon
+overwrite_zarr = True  # Flag for Zarr overwrite
+path_stm = Path("./stm.zarr")  # Zarr output storage for STM
+path_figure = Path("./figure")  # Output path for figures
+path_polygon = Path(cwd / "data/bag_light_AMS_WGS84.gpkg")  # Path to the BRP polygon of NL.
 
 # Metadata of the SLC stack
-shape = (2000, 4000) # Shape per SLC image
-dtype = np.dtype(np.float64) # Data type per image
-reading_chunks = (500, 500) # Reading chunk size
+shape = (2000, 4000)  # Shape per SLC image
+dtype = np.dtype(np.float64)  # Data type per image
+reading_chunks = (500, 500)  # Reading chunk size
 
 # Size of the subset slice; this demo will only process the slice
-azimuth_subset = range(0, 2000) # Subset slice, azimuth direction
+azimuth_subset = range(0, 2000)  # Subset slice, azimuth direction
 range_subset = range(0, 4000)  # Subset slice, range direction
 
 # Dask setup
-n_workers = 16 # number of workers
+n_workers = 16  # Number of workers
 
 # Config SlurmCluster
 freesock = get_free_port()
 cluster = SLURMCluster(
-    name='dask-worker', # Name of the Slurm job
-    queue='normal',
+    name="dask-worker",  # Name of the Slurm job
+    queue="normal",
     cores=4,
-    memory="30 GB", # Total amount of memory per job
+    memory="30 GB",  # Total amount of memory per job
     processes=1,  # Number of Python processes per job
-    walltime='1:00:00', # reserve each worker for 1 hour
-    scheduler_options={'dashboard_address': ':{}'.format(freesock)}, # Host Dashboard in a free socket
+    walltime="1:00:00",  # Reserve each worker for 1 hour
+    scheduler_options={"dashboard_address": f":{freesock}"},  # Host the dashboard on a free socket
 )
+logger.info(f"Dask dashboard hosted at port: {freesock}.")
+logger.info(
+    f"If you are forwarding Jupyter Server to a local port 8889, \
+    you can access it at: localhost:8889/proxy/{freesock}/status"
+)
-logger.info('Dask dashboard hosted at port: {}.'.format(freesock))
-logger.info('If you are forwarding Jupyter Server to a local port 8889, you can access it at: localhost:8889/proxy/{}/status'.format(freesock, freesock))
 
 
 if __name__ == "__main__":
     ## Step0: Setup environment
-    logger.info('Initializing ...')
-    # cluster.scale(jobs=n_workers) # Scale a certain number workers, each worker will appear as a Slurm job
-    # client = Client(cluster)
-
+    logger.info("Initializing ...")
+    # Scale to a certain number of workers; each worker will appear as a Slurm job
+    cluster.scale(jobs=n_workers)
+    client = Client(cluster)
+
     # Make figure directory if not exists
-    path_figure.mkdir(exist_ok=True)
-
-
+    path_figure.mkdir(exist_ok=True)
+
     ## Step1: Data loading
-    logger.info('Loading data ...')
+    logger.info("Loading data ...")
     # Build slcs lists
-    list_slcs = [p for p in path_slc.rglob('*_cint_srd.raw')]
+    list_slcs = [p for p in path_slc.rglob("*_cint_srd.raw")]
     list_slcs.sort()
 
     # Load complex data
@@ -86,34 +90,33 @@ def get_free_port():
     # Load coordinates
     lat = sarxarray.from_binary(f_lat, shape, vlabel="lat", dtype=np.float32, chunks=reading_chunks)
     lon = sarxarray.from_binary(f_lon, shape, vlabel="lon", dtype=np.float32, chunks=reading_chunks)
-    stack = stack.assign_coords(lat = (("azimuth", "range"), lat.squeeze().lat.data), lon = (("azimuth", "range"), lon.squeeze().lon.data))
-
-
+    stack = stack.assign_coords(
+        lat=(("azimuth", "range"), lat.squeeze().lat.data), lon=(("azimuth", "range"), lon.squeeze().lon.data)
+    )
+
     ## Step2: Make a spatial subset
-    logger.info('Slicing SLC stack ...')
+    logger.info("Slicing SLC stack ...")
     stack_subset = stack.sel(azimuth=azimuth_subset, range=range_subset)
 
-
     ## Step3: Make mean reflection map (MRM)
-    logger.info('Computing MRM ...')
+    logger.info("Computing MRM ...")
     mrm = stack_subset.slcstack.mrm()
     mrm = mrm.compute()
 
     fig, ax = plt.subplots()
     ax.imshow(mrm)
     ax.set_aspect(2)
-    im = mrm.plot(ax=ax, cmap='gray')
+    im = mrm.plot(ax=ax, cmap="gray")
     im.set_clim([0, 40000])
-    fig.savefig(path_figure/ 'mrm.png')
+    fig.savefig(path_figure / "mrm.png")
 
-
     ## Step4: Point selection
-    logger.info('Point selection ...')
-    stmat = stack_subset.slcstack.point_selection(threshold=4, method="amplitude_dispersion",chunks=5000)
+    logger.info("Point selection ...")
+    stmat = stack_subset.slcstack.point_selection(threshold=4, method="amplitude_dispersion", chunks=5000)
 
     fig, ax = plt.subplots()
     plt.scatter(stmat.lon.data, stmat.lat.data, s=0.005)
-    fig.savefig(path_figure / 'selected_points.png')
+    fig.savefig(path_figure / "selected_points.png")
 
     # Export point selection to Zarr
     if overwrite_zarr:
@@ -122,39 +125,39 @@ def get_free_port():
     if not path_stm.exists():
         stmat.to_zarr(path_stm)
 
-
     ## Step5: STM enrichment from Polygon file
-    logger.info('STM enrichment ...')
+    logger.info("STM enrichment ...")
 
     # Load SpaceTime Matrix from Zarr
    stm_demo = xr.open_zarr(path_stm)
-
-    # Compute the bounding box
+
+    # Compute the bounding box
     xmin, ymin, xmax, ymax = [
-        stm_demo['lon'].data.min().compute(),
-        stm_demo['lat'].data.min().compute(),
-        stm_demo['lon'].data.max().compute(),
-        stm_demo['lat'].data.max().compute(),
-        ]
+        stm_demo["lon"].data.min().compute(),
+        stm_demo["lat"].data.min().compute(),
+        stm_demo["lon"].data.max().compute(),
+        stm_demo["lat"].data.max().compute(),
+    ]
 
     polygons = gpd.read_file(path_polygon, bbox=(xmin, ymin, xmax, ymax))
     polygons.plot()
 
     # Data enrichment
-    fields_to_query = ['bouwjaar']
+    fields_to_query = ["bouwjaar"]
     stm_demo = stm_demo.stm.enrich_from_polygon(polygons, fields_to_query)
 
     # Subset by Polygons
-    stm_demo_subset = stm_demo.stm.subset(method='polygon', polygon=path_polygon)
-    bouwjaar = stm_demo_subset['bouwjaar'].compute()
+    stm_demo_subset = stm_demo.stm.subset(method="polygon", polygon=path_polygon)
+    bouwjaar = stm_demo_subset["bouwjaar"].compute()
 
     # Visualize the classes
     import matplotlib.cm as cm
+
     colormap = cm.jet
     fig, ax = plt.subplots()
     plt.title("Construction year, PS")
     plt.scatter(stm_demo_subset.lon.data, stm_demo_subset.lat.data, c=bouwjaar, s=0.002, cmap=colormap)
     plt.clim([1900, 2023])
     plt.colorbar()
-    fig.savefig(path_figure / 'construction_year.png')
-
+    fig.savefig(path_figure / "construction_year.png")
+
     ## Close the client when finishing
     client.close()
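The demo script assumes a SLURM scheduler via `dask_jobqueue.SLURMCluster`. For a quick local check of the same client wiring, a `LocalCluster` can stand in; a minimal sketch under that assumption (the worker sizing here is illustrative, not taken from the script):

```python
# Hypothetical local variant of the demo's Dask setup: LocalCluster stands in
# for SLURMCluster so the pipeline can be exercised without a SLURM scheduler.
from dask.distributed import Client, LocalCluster

if __name__ == "__main__":
    cluster = LocalCluster(n_workers=4, threads_per_worker=1)  # Illustrative sizing
    client = Client(cluster)
    print(f"Dask dashboard: {client.dashboard_link}")
    # ... run the same Step0-Step5 pipeline as in demo_pydepsi.py ...
    client.close()
    cluster.close()
```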
diff --git a/pydepsi/__init__.py b/pydepsi/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..625010d
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,86 @@
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "pydepsi"
+version = "0.1.0"
+requires-python = ">=3.10"
+dependencies = [
+    "matplotlib",
+    "sarxarray",
+    "stmtools",
+]
+description = "Python package for interferometric synthetic aperture radar (InSAR) data processing."
+readme = "README.md"
+license = {file = "LICENSE"}
+authors = [
+    {name = "Ou Ku", email = "o.ku@esciencecenter.nl"},
+]
+keywords = ["radar", "sar", "insar", "earth observation", "distributed computing"]
+classifiers = [
+    "Development Status :: 3 - Alpha",
+    "Intended Audience :: Developers",
+    "License :: OSI Approved :: Apache Software License",
+    "Natural Language :: English",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+]
+
+[project.optional-dependencies]
+dev = [
+    "pytest",
+    "pytest-cov",
+    "pycodestyle",
+    "pre-commit",
+    "ruff",
+]
+docs = [
+    "mkdocs",
+    "mkdocs-material",
+    "mkdocs-jupyter",
+    "mkdocstrings[python]",
+    "mkdocs-gen-files",
+]
+demo = [
+    "jupyterlab",
+]
+
+[tool.setuptools]
+packages = ["pydepsi"]
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+addopts = "--cov --cov-report term"
+testpaths = [
+    "tests",
+]
+
+[tool.coverage.run]
+branch = true
+source = ["pydepsi"]
+
+[tool.ruff]
+lint.select = [
+    "E",   # pycodestyle
+    "F",   # pyflakes
+    "B",   # flake8-bugbear
+    "D",   # pydocstyle
+    "I",   # isort
+    "N",   # PEP8-naming
+    "UP",  # pyupgrade (upgrade syntax to current syntax)
+    "PLE", # Pylint error https://github.com/charliermarsh/ruff#error-ple
+]
+# Docstring style
+lint.ignore = [
+    "D100", "D101", "D104", "D105", "D106", "D107", "D203", "D213", "D413",
+]
+
+line-length = 120
+exclude = ["docs", "build", "*.ipynb"]
+# Allow unused variables when underscore-prefixed.
+lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+target-version = "py310"
+
+[tool.ruff.lint.per-file-ignores]
+"tests/**" = ["D"]
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..10e7295
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,5 @@
+"""The setup script."""
+
+from setuptools import setup
+
+setup()
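`pyproject.toml` points pytest at a `tests/` directory with coverage over `pydepsi`, but no tests ship in this diff. A minimal, hypothetical test file that would satisfy that configuration (the file name and test are placeholders, not part of the diff):

```python
# tests/test_package.py (hypothetical placeholder, not part of this diff).
# Matches the pytest/coverage settings: testpaths=["tests"], source=["pydepsi"].
import pydepsi


def test_import():
    # pydepsi/__init__.py is empty at this stage, so importability is the only
    # meaningful check until real modules land.
    assert pydepsi is not None
```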