Commit

Merge pull request #19 from fpicetti/devel_fp
Preparing release 0.1.4
biondiettore authored Mar 28, 2022
2 parents 7dc13d7 + 1a7cdae commit df8a5d3
Showing 26 changed files with 233 additions and 173 deletions.
24 changes: 21 additions & 3 deletions CHANGELOG.md
@@ -1,8 +1,26 @@
# 0.1.0
* First official release.
# 0.1.4
* Added support for F-contiguous arrays
* Added [PyLops](https://pylops.readthedocs.io/en/stable/) interface [operators](ea8505947c926e376a6def40b1fccfbadf3940d2)
* Added plot utilities
* Added [`AxInfo`](tutorials/ax_info.py) class for handling physical vectors
* Added Padding operators different from ZeroPad
* Improvements on VectorTorch methods and attributes
* Added FISTA solver wrapper
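
A minimal usage sketch of two of the additions above: F-contiguous backing arrays and the new `Padding` operator. It assumes that `VectorNumpy` and `Padding` are importable from `occamypy.numpy`, as the `__init__.py` updates later in this commit suggest; treat it as illustrative rather than canonical.

```python
# Illustrative sketch only: the import path is an assumption based on the
# __init__.py diffs in this commit.
import numpy as np
from occamypy.numpy import VectorNumpy, Padding

# 0.1.4 accepts F-contiguous (Fortran-ordered) arrays as backing storage
x = VectorNumpy(np.asfortranarray(np.random.rand(10, 20)))

# Padding generalizes ZeroPad: pad 2 samples per side of dim 0 and
# 3 samples per side of dim 1, replicating the edge values
P = Padding(x, pad=((2, 2), (3, 3)), mode="edge")

y = VectorNumpy(np.zeros((14, 26)))  # output vector with the padded shape
P.forward(False, x, y)               # y holds the edge-padded model
```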

# 0.1.3
* Fix circular imports

# 0.1.2
* Added `__getitem__()` method to vector class
* Added PyTorch FFT operators
* Fix convolution in PyTorch
* Added a number of utilities

# 0.1.1
* Derivative operators are now agnostic to the computation engine
* Added Dask Blocky Operator
* fixed rand() in VectorNumpy
* fixed `rand()` in VectorNumpy
* added kwargs for Dask Operators

# 0.1.0
* First official release.
2 changes: 1 addition & 1 deletion LICENSE.md
@@ -4,7 +4,7 @@ Redistribution and use in source and binary forms, with or without modification,
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Stanford University, nor of Politecnico of Milan, nor the name of Stanford Exploration Project, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
* If the software is used to develop scientific or technical material that is published in any peer-reviewed papers, conference abstracts or similar publications, the recipient agrees to acknowledge the authors of the source code in a manner consistent with industry practice by citing the provided references.
* The authors would appreciate being notified of any errors found in the supplied code by opening a GIT issue or by emailing: ettore88@stanford.edu or [email protected] or [email protected] or [email protected] or [email protected]
* The authors would appreciate being notified of any errors found in the supplied code by opening a GIT issue or by emailing: ebiondi@caltech.edu or [email protected] or [email protected] or [email protected] or [email protected]

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

2 changes: 1 addition & 1 deletion occamypy/__version__.py
@@ -1 +1 @@
__version__ = "0.1.3"
__version__ = "0.1.4"
1 change: 1 addition & 0 deletions occamypy/cupy/operator/__init__.py
@@ -3,5 +3,6 @@
__all__ = [
"GaussianFilter",
"ConvND",
"Padding",
"ZeroPad",
]
37 changes: 22 additions & 15 deletions occamypy/cupy/operator/signal.py
@@ -106,19 +106,23 @@ def adjoint(self, add, model, data):
return


def ZeroPad(domain, pad):
if isinstance(domain, VectorCupy):
return _ZeroPadIC(domain, pad)
elif isinstance(domain, superVector):
def Padding(model, pad, mode: str = "constant"):
if isinstance(model, VectorCupy):
return _Padding(model, pad, mode)
elif isinstance(model, superVector):
# TODO add the possibility to have different padding for each sub-vector
return Dstack([_ZeroPadIC(v, pad) for v in domain.vecs])
return Dstack([_Padding(v, pad, mode) for v in model.vecs])
else:
raise ValueError("ERROR! Provided domain has to be either vector or superVector")


def _pad_vectorIC(vec, pad):
def ZeroPad(model, pad):
return Padding(model, pad, mode="constant")


def _pad_VectorCupy(vec, pad):
if not isinstance(vec, VectorCupy):
raise ValueError("ERROR! Provided vector must be of vectorCcupy type")
raise ValueError("ERROR! Provided vector has to be a VectorCupy")
if len(vec.shape) != len(pad):
raise ValueError("Dimensions of vector and padding mismatch!")

@@ -129,42 +133,45 @@ def _pad_vectorIC(vec, pad):
raise ValueError("ERROR! For now only vectorCupy is supported!")


class _ZeroPadIC(Operator):
class _Padding(Operator):

def __init__(self, domain, pad):
""" Zero Pad operator.
def __init__(self, domain, pad, mode: str = "constant"):
"""Padding operator.
To pad 2 values to each side of the first dim, and 3 values to each side of the second dim, use:
pad=((2,2), (3,3))
:param domain: VectorCupy class
:param pad: scalar or sequence of scalars
Number of samples to pad in each dimension.
If a single scalar is provided, it is assigned to every dimension.
:param mode: str
Padding mode (see https://docs.cupy.dev/en/stable/reference/generated/cupy.pad.html)
"""
if isinstance(domain, VectorCupy):
self.dims = domain.shape
pad = [(pad, pad)] * len(self.dims) if pad is cp.isscalar else list(pad)
if (cp.array(pad) < 0).any():
raise ValueError('Padding must be positive or zero')
self.pad = pad
super(_ZeroPadIC, self).__init__(domain, _pad_vectorIC(domain, self.pad))
self.mode = mode
super(_Padding, self).__init__(domain, _pad_VectorCupy(domain, self.pad))

def __str__(self):
return "ZeroPad "
return "Padding "

def forward(self, add, model, data):
"""Zero padding"""
"""Padding"""
self.checkDomainRange(model, data)
if add:
temp = data.clone()
y = cp.pad(model.getNdArray(), self.pad, mode='constant')
y = cp.pad(model.getNdArray(), self.pad, mode=self.mode)
data.getNdArray()[:] = y
if add:
data.scaleAdd(temp, 1., 1.)
return

def adjoint(self, add, model, data):
"""Extract non-zero subsequence"""
"""Extract original subsequence"""
self.checkDomainRange(model, data)
if add:
temp = model.clone()
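
A hedged usage sketch of the CuPy variant above, assuming `VectorCupy` and `Padding` are importable from `occamypy.cupy` (the `__all__` update earlier in this commit lists `Padding`) and that `VectorCupy` wraps an existing CuPy array in the same way `VectorNumpy` does:

```python
# Sketch under the stated assumptions; not the library's documented example.
import cupy as cp
from occamypy.cupy import VectorCupy, Padding

x = VectorCupy(cp.ones((8, 8)))
# one (before, after) pair per dimension, as in the docstring above
P = Padding(x, pad=((2, 2), (3, 3)), mode="reflect")

y = VectorCupy(cp.zeros((12, 14)))  # output vector with the padded shape
P.forward(False, x, y)              # reflect-padded output
P.adjoint(False, x, y)              # extracts the original 8x8 window back into x
```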
2 changes: 2 additions & 0 deletions occamypy/dask/operator.py
@@ -65,6 +65,7 @@ def _add_from_NdArray(vecObj, NdArray):
vecObj.getNdArray()[:] += NdArray
return


def _check_dask_error(futures):
"""Function to check error on futures related to Dask operators"""
for idx, fut in enumerate(futures):
@@ -73,6 +74,7 @@ def _check_dask_error(futures):
print(fut.result())
return


class DaskOperator(Operator):
"""
Class to apply multiple operators in parallel through Dask and DaskVectors
13 changes: 9 additions & 4 deletions occamypy/dask/vector.py
@@ -24,17 +24,20 @@ def _copy_from_NdArray(vecObj, NdArray):
vecObj.getNdArray()[:] = NdArray
return


# Functions to scatter/gather large arrays
def _copy_chunk_data(arr, chunk, shift):
nele = chunk.size
arr.ravel()[shift:shift+nele] = chunk
arr.ravel()[shift:shift + nele] = chunk
return


def _gather_chunk_data(arr, nele, shift):
# chunk = np.copy(arr.ravel()[shift:shift+nele])
chunk = arr.ravel()[shift:shift+nele]
chunk = arr.ravel()[shift:shift + nele]
return chunk


def scatter_large_data(arr, wrkId, client, buffer=27000000):
"""Function to scatter large array to worker by chunks"""
shape = arr.shape
@@ -44,7 +47,7 @@ def scatter_large_data(arr, wrkId, client, buffer=27000000):
daskD.wait(arrD)
shift = 0
while shift < nele:
chunk = client.scatter(arr.ravel()[shift:shift+buffer], workers=[wrkId])
chunk = client.scatter(arr.ravel()[shift:shift + buffer], workers=[wrkId])
daskD.wait(client.submit(_copy_chunk_data, arrD, chunk, shift, workers=[wrkId], pure=False))
shift += buffer
return arrD
@@ -56,11 +59,13 @@ def _call_getNdArray(vecObj):
res = vecObj.getNdArray()
return res


def _call_getDtype(vecObj):
"""Function to call getNdArray method"""
res = vecObj.getNdArray().dtype
return res


def _call_shape(vecObj):
"""Function to return shape attribute"""
res = vecObj.shape
@@ -281,7 +286,7 @@ def __init__(self, dask_client, **kwargs):
vec_tmplt = kwargs.get("vector_template")
self.chunks = kwargs.get("chunks")
# Spreading chunks across available workers
self. chunks = [np.sum(ix) for ix in np.array_split(self.chunks, N_wrk)]
self.chunks = [np.sum(ix) for ix in np.array_split(self.chunks, N_wrk)]
# Checking if an SepVector was passed (by getting Hypercube)
hyper = False
if SepVector:
1 change: 1 addition & 0 deletions occamypy/numpy/__init__.py
@@ -5,6 +5,7 @@
"VectorNumpy",
"ConvND",
"GaussianFilter",
"Padding",
"ZeroPad",
"FFT",
]
3 changes: 2 additions & 1 deletion occamypy/numpy/operator/__init__.py
@@ -1,9 +1,10 @@
from .signal_processing import *
from .signal import *
from .transform import *

__all__ = [
"ConvND",
"GaussianFilter",
"Padding",
"ZeroPad",
"FFT",
]
occamypy/numpy/operator/signal_processing.py → occamypy/numpy/operator/signal.py
@@ -100,29 +100,33 @@ def adjoint(self, add, model, data):
return


def ZeroPad(model, pad):
def Padding(model, pad, mode: str = "constant"):
if isinstance(model, VectorNumpy):
return _ZeroPadIC(model, pad)
return _Padding(model, pad, mode)
elif isinstance(model, superVector):
# TODO add the possibility to have different padding for each sub-vector
return Dstack([_ZeroPadIC(v, pad) for v in model.vecs])
return Dstack([_Padding(v, pad, mode) for v in model.vecs])
else:
raise ValueError("ERROR! Provided domain has to be either vector or superVector")


def _pad_vectorIC(vec, pad):
def ZeroPad(model, pad):
return Padding(model, pad, mode="constant")


def _pad_VectorNumpy(vec, pad):
if not isinstance(vec, VectorNumpy):
raise ValueError("ERROR! Provided vector must be of vectorIC type")
raise ValueError("ERROR! Provided vector must be a VectorNumpy")
if len(vec.shape) != len(pad):
raise ValueError("Dimensions of vector and padding mismatch!")

vec_new_shape = tuple(np.asarray(vec.shape) + [sum(pad[_]) for _ in range(len(pad))])
return VectorNumpy(np.empty(vec_new_shape, dtype=vec.getNdArray().dtype))


class _ZeroPadIC(Operator):
class _Padding(Operator):

def __init__(self, model, pad):
def __init__(self, model: VectorNumpy, pad, mode: str = "constant"):
""" Zero Pad operator.
To pad 2 values to each side of the first dim, and 3 values to each side of the second dim, use:
@@ -131,31 +135,33 @@ def __init__(self, model, pad):
:param pad: scalar or sequence of scalars
Number of samples to pad in each dimension.
If a single scalar is provided, it is assigned to every dimension.
:param mode: str
Padding mode (see https://numpy.org/doc/stable/reference/generated/numpy.pad.html)
"""
if isinstance(model, VectorNumpy):
self.dims = model.shape
pad = [(pad, pad)] * len(self.dims) if pad is np.isscalar else list(pad)
if (np.array(pad) < 0).any():
raise ValueError('Padding must be positive or zero')
self.pad = pad
super(_ZeroPadIC, self).__init__(model, _pad_vectorIC(model, self.pad))
self.dims = model.shape
pad = [(pad, pad)] * len(self.dims) if pad is np.isscalar else list(pad)
if (np.array(pad) < 0).any():
raise ValueError('Padding must be positive or zero')
self.pad = pad
self.mode = mode
super(_Padding, self).__init__(model, _pad_VectorNumpy(model, self.pad))

def __str__(self):
return "ZeroPad "
return "Padding "

def forward(self, add, model, data):
"""Zero padding"""
"""Padding"""
self.checkDomainRange(model, data)
if add:
temp = data.clone()
y = np.pad(model.arr, self.pad, mode='constant')
y = np.pad(model.arr, self.pad, mode=self.mode)
data.arr = y
if add:
data.scaleAdd(temp, 1., 1.)
return

def adjoint(self, add, model, data):
"""Extract non-zero subsequence"""
"""Extract original subsequence"""
self.checkDomainRange(model, data)
if add:
temp = model.clone()
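
For the NumPy backend, `ZeroPad` is now a thin wrapper around `Padding` with `mode="constant"`, as the diff above shows. A short sketch, assuming the names listed in `occamypy/numpy/__init__.py` above resolve as written; the `dotTest` call uses the signature visible in the `operator/base.py` diff below:

```python
# Hedged sketch: ZeroPad forwards to Padding(mode="constant"), per the diff above.
import numpy as np
from occamypy.numpy import VectorNumpy, Padding, ZeroPad

x = VectorNumpy(np.random.rand(16, 16))

P_zero = ZeroPad(x, pad=((4, 4), (4, 4)))               # zero padding
P_edge = Padding(x, pad=((4, 4), (4, 4)), mode="edge")  # replicate border samples

# Cropping is the exact adjoint of zero padding, so the dot-product test
# should pass for P_zero (it generally will not for non-constant modes).
P_zero.dotTest(verbose=True)
```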
2 changes: 1 addition & 1 deletion occamypy/operator/base.py
@@ -232,7 +232,7 @@ def dotTest(self, verbose=False, tol=1e-4):
def _process_complex(x):
if isinstance(x, complex):
x = np.conj(x)
elif isinstance(x, torch.Tensor) and x.dtype in [torch.complex32, torch.complex64, torch.complex128]:
elif isinstance(x, torch.Tensor) and x.dtype in [torch.complex64, torch.complex128]:
x = x.real
return x

2 changes: 1 addition & 1 deletion occamypy/operator/matrix.py
@@ -21,7 +21,7 @@ def __init__(self, matrix: Vector, domain: Vector, range: Vector, outcore=False)
if matrix.shape[0] != range.size:
raise ValueError

super().__init__(domain, range)
super(Matrix, self).__init__(domain=domain, range=range)
self.backend = get_backend(matrix)
self.matrix_type = get_vector_type(matrix)

2 changes: 1 addition & 1 deletion occamypy/operator/nonlinear.py
@@ -107,7 +107,7 @@ def __add__(self, other): # self + other

class NonlinearComb(NonlinearOperator):
"""
Combination of non-linear opeartors: f(g(m))
Combination of non-linear operators: f(g(m))
"""

def __init__(self, f, g):
1 change: 1 addition & 0 deletions occamypy/solver/__init__.py
@@ -19,6 +19,7 @@
"TNewton",
"MCMC",
"ISTA",
"FISTA",
"ISTC",
"SplitBregman",
"Stepper",