diff --git a/make/makefile b/make/makefile
index 6b71b70c792..e334b1c2c01 100644
--- a/make/makefile
+++ b/make/makefile
@@ -5,37 +5,36 @@ include ./makedefaults
# Define the source file directories
SOURCEDIR1=../src
-SOURCEDIR2=../src/Distributed
-SOURCEDIR3=../src/Exchange
-SOURCEDIR4=../src/Model
-SOURCEDIR5=../src/Model/Connection
-SOURCEDIR6=../src/Model/Geometry
-SOURCEDIR7=../src/Model/GroundWaterEnergy
-SOURCEDIR8=../src/Model/GroundWaterFlow
-SOURCEDIR9=../src/Model/GroundWaterTransport
-SOURCEDIR10=../src/Model/ModelUtilities
-SOURCEDIR11=../src/Model/TransportModel
-SOURCEDIR12=../src/Solution
-SOURCEDIR13=../src/Solution/LinearMethods
-SOURCEDIR14=../src/Solution/PETSc
-SOURCEDIR15=../src/Timing
-SOURCEDIR16=../src/Utilities
+SOURCEDIR2=../src/Model
+SOURCEDIR3=../src/Model/TransportModel
+SOURCEDIR4=../src/Model/GroundWaterFlow
+SOURCEDIR5=../src/Model/Geometry
+SOURCEDIR6=../src/Model/ModelUtilities
+SOURCEDIR7=../src/Model/GroundWaterTransport
+SOURCEDIR8=../src/Model/Connection
+SOURCEDIR9=../src/Distributed
+SOURCEDIR10=../src/Utilities
+SOURCEDIR11=../src/Utilities/Idm
+SOURCEDIR12=../src/Utilities/Idm/mf6blockfile
+SOURCEDIR13=../src/Utilities/Idm/selector
+SOURCEDIR14=../src/Utilities/Vector
+SOURCEDIR15=../src/Utilities/Matrix
+SOURCEDIR16=../src/Utilities/Observation
SOURCEDIR17=../src/Utilities/ArrayRead
-SOURCEDIR18=../src/Utilities/Idm
-SOURCEDIR19=../src/Utilities/Idm/mf6blockfile
-SOURCEDIR20=../src/Utilities/Idm/selector
-SOURCEDIR21=../src/Utilities/Libraries
-SOURCEDIR22=../src/Utilities/Libraries/blas
-SOURCEDIR23=../src/Utilities/Libraries/daglib
-SOURCEDIR24=../src/Utilities/Libraries/rcm
-SOURCEDIR25=../src/Utilities/Libraries/sparsekit
-SOURCEDIR26=../src/Utilities/Libraries/sparskit2
-SOURCEDIR27=../src/Utilities/Matrix
-SOURCEDIR28=../src/Utilities/Memory
-SOURCEDIR29=../src/Utilities/Observation
-SOURCEDIR30=../src/Utilities/OutputControl
-SOURCEDIR31=../src/Utilities/TimeSeries
-SOURCEDIR32=../src/Utilities/Vector
+SOURCEDIR18=../src/Utilities/OutputControl
+SOURCEDIR19=../src/Utilities/Libraries
+SOURCEDIR20=../src/Utilities/Libraries/blas
+SOURCEDIR21=../src/Utilities/Libraries/rcm
+SOURCEDIR22=../src/Utilities/Libraries/sparsekit
+SOURCEDIR23=../src/Utilities/Libraries/sparskit2
+SOURCEDIR24=../src/Utilities/Libraries/daglib
+SOURCEDIR25=../src/Utilities/Memory
+SOURCEDIR26=../src/Utilities/TimeSeries
+SOURCEDIR27=../src/Timing
+SOURCEDIR28=../src/Solution
+SOURCEDIR29=../src/Solution/PETSc
+SOURCEDIR30=../src/Solution/LinearMethods
+SOURCEDIR31=../src/Exchange
VPATH = \
${SOURCEDIR1} \
diff --git a/msvs/mf6core.vfproj b/msvs/mf6core.vfproj
index a82c0ae076c..32f92daee02 100644
--- a/msvs/mf6core.vfproj
+++ b/msvs/mf6core.vfproj
@@ -54,6 +54,11 @@
+
+
+
+
+
@@ -64,6 +69,11 @@
+
+
+
+
+
diff --git a/pymake/excludefiles.txt b/pymake/excludefiles.txt
index 33694f4ea5d..c2677d02b3d 100644
--- a/pymake/excludefiles.txt
+++ b/pymake/excludefiles.txt
@@ -5,6 +5,8 @@
../src/Utilities/Matrix/PetscMatrix.F90
../src/Utilities/Vector/PetscVector.F90
../src/Distributed/MpiMessageBuilder.f90
+../src/Distributed/MpiMessageCache.f90
../src/Distributed/MpiRouter.f90
../src/Distributed/MpiRunControl.F90
+../src/Distributed/MpiUnitCache.f90
../src/Distributed/MpiWorld.f90
diff --git a/src/Distributed/MpiMessageBuilder.f90 b/src/Distributed/MpiMessageBuilder.f90
index b92165923f8..8c9be363329 100644
--- a/src/Distributed/MpiMessageBuilder.f90
+++ b/src/Distributed/MpiMessageBuilder.f90
@@ -42,7 +42,6 @@ module MpiMessageBuilderModule
procedure, private :: create_vdc_snd_map
procedure, private :: create_vdc_snd_body
procedure, private :: create_vdc_rcv_body
- procedure, private :: create_element_map
end type
contains
@@ -114,7 +113,7 @@ subroutine create_header_snd(this, rank, stage, hdrs_snd_type)
class(MpiMessageBuilderType) :: this
integer(I4B) :: rank
integer(I4B) :: stage
- integer :: hdrs_snd_type
+ integer, intent(out) :: hdrs_snd_type
! local
integer(I4B) :: i, offset, nr_types
class(VirtualDataContainerType), pointer :: vdc
@@ -186,7 +185,7 @@ end subroutine create_header_snd
subroutine create_header_rcv(this, hdr_rcv_type)
class(MpiMessageBuilderType) :: this
- integer :: hdr_rcv_type
+ integer, intent(out) :: hdr_rcv_type
! local
integer :: ierr
@@ -203,7 +202,7 @@ subroutine create_map_snd(this, rank, stage, map_snd_type)
class(MpiMessageBuilderType) :: this
integer(I4B) :: rank
integer(I4B) :: stage
- integer :: map_snd_type
+ integer, intent(out) :: map_snd_type
! local
integer(I4B) :: i, offset, nr_types
class(VirtualDataContainerType), pointer :: vdc
@@ -280,7 +279,7 @@ subroutine create_map_rcv(this, rcv_map, nr_headers, map_rcv_type)
class(MpiMessageBuilderType) :: this
type(VdcReceiverMapsType), dimension(:) :: rcv_map
integer(I4B) :: nr_headers
- integer :: map_rcv_type
+ integer, intent(out) :: map_rcv_type
! local
integer(I4B) :: i, j, nr_elems, type_cnt
integer :: ierr, max_nr_maps
@@ -323,7 +322,7 @@ subroutine create_body_rcv(this, rank, stage, body_rcv_type)
class(MpiMessageBuilderType) :: this
integer(I4B) :: rank
integer(I4B) :: stage
- integer :: body_rcv_type
+ integer, intent(out) :: body_rcv_type
! local
integer(I4B) :: i, nr_types, offset
class(VirtualDataContainerType), pointer :: vdc
@@ -400,7 +399,7 @@ subroutine create_body_snd(this, rank, stage, headers, maps, body_snd_type)
integer(I4B) :: stage
type(VdcHeaderType), dimension(:) :: headers
type(VdcReceiverMapsType), dimension(:) :: maps
- integer :: body_snd_type
+ integer, intent(out) :: body_snd_type
! local
integer(I4B) :: i, nr_headers
class(VirtualDataContainerType), pointer :: vdc
@@ -627,33 +626,6 @@ function create_vdc_snd_body(this, vdc, vdc_maps, rank, stage) result(new_type)
end function create_vdc_snd_body
- !> @brief Temp. function to generate a dummy (complete) map
- !<
- function create_element_map(this, rank, vdc, vd) result(el_map)
- use MemoryManagerModule, only: get_mem_shape, get_mem_rank
- use ConstantsModule, only: MAXMEMRANK
- class(MpiMessageBuilderType) :: this
- integer(I4B) :: rank
- class(VirtualDataContainerType), pointer :: vdc
- class(VirtualDataType), pointer :: vd
- integer(I4B), dimension(:), pointer, contiguous :: el_map
- ! local
- integer(I4B), dimension(MAXMEMRANK) :: mem_shp
- integer(I4B) :: i, nrow, mem_rank
-
- el_map => null()
- call get_mem_rank(vd%virtual_mt%name, vd%virtual_mt%path, mem_rank)
- call get_mem_shape(vd%virtual_mt%name, vd%virtual_mt%path, mem_shp)
- if (mem_rank > 0) then
- nrow = mem_shp(mem_rank)
- allocate (el_map(nrow))
- do i = 1, nrow
- el_map(i) = i - 1
- end do
- end if
-
- end function create_element_map
-
function get_vdc_from_hdr(this, header) result(vdc)
class(MpiMessageBuilderType) :: this
type(VdcHeaderType) :: header
diff --git a/src/Distributed/MpiMessageCache.f90 b/src/Distributed/MpiMessageCache.f90
new file mode 100644
index 00000000000..222a4f2ac27
--- /dev/null
+++ b/src/Distributed/MpiMessageCache.f90
@@ -0,0 +1,130 @@
+module MpiMessageCacheModule
+ use KindModule, only: I4B
+ use SimStagesModule, only: NR_SIM_STAGES
+ use ListModule
+ use STLVecIntModule
+ use MpiUnitCacheModule
+ implicit none
+ private
+
+ ! the message types for caching during a simulation stage:
+ integer(I4B), public, parameter :: MPI_BDY_RCV = 1 !< receiving data (body) from ranks
+ integer(I4B), public, parameter :: MPI_BDY_SND = 2 !< sending data (body) to ranks
+ integer(I4B), public, parameter :: NR_MSG_TYPES = 2 !< the total number of message types to be cached
+
+ ! expose this from the unit cache module
+ public :: NO_CACHED_VALUE
+
+ !> @brief Facility to cache the constructed MPI datatypes.
+ !! This will avoid having to construct them over and over
+ !! again for the communication inside the timestep loop.
+ !! This class deals with separate caches for different
+ !! units (solutions or global) and for different types of
+ !< messages within the communication stage.
+ type, public :: MpiMessageCacheType
+ type(STLVecInt) :: cached_ids !< a vector with ids for the cached units (solution ids)
+ type(ListType) :: unit_caches !< a list with caches per unit
+ contains
+ procedure :: init => mmc_init
+ procedure :: get => mmc_get
+ procedure :: put => mmc_put
+ procedure :: destroy => mmc_destroy
+ end type MpiMessageCacheType
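+
+  ! Usage sketch: after init(), put() stores a committed MPI datatype
+  ! under (unit, rank, stage, msg_id); get() returns it again, or
+  ! NO_CACHED_VALUE when that combination has not been cached yet.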
+
+contains
+
+  !> @brief Initialize the MPI type cache system.
+ !<
+ subroutine mmc_init(this)
+ class(MpiMessageCacheType) :: this !< the message cache
+
+ call this%cached_ids%init()
+
+ end subroutine mmc_init
+
+  !> @brief Get the cached mpi datatype for the given
+  !! unit, rank, stage, and message type. Returns
+ !< NO_CACHED_VALUE when not in cache.
+ function mmc_get(this, unit, rank, stage, msg_id) result(mpi_type)
+ class(MpiMessageCacheType) :: this !< the message cache
+ integer(I4B) :: unit !< the unit (solution or global)
+ integer(I4B) :: rank !< the rank of the MPI process to communicate with
+ integer(I4B) :: stage !< the simulation stage at which the message is sent
+ integer(I4B) :: msg_id !< the message type as an integer between 1 and NR_MSG_TYPES (see above for predefined values)
+ integer :: mpi_type !< the resulting mpi datatype
+ ! local
+ integer(I4B) :: unit_idx
+ class(*), pointer :: obj_ptr
+
+ mpi_type = NO_CACHED_VALUE
+
+ unit_idx = this%cached_ids%get_index(unit)
+ if (unit_idx == -1) return ! not cached
+
+ obj_ptr => this%unit_caches%GetItem(unit_idx)
+ select type (obj_ptr)
+ class is (MpiUnitCacheType)
+ mpi_type = obj_ptr%get_cached(rank, stage, msg_id)
+ end select
+
+ end function mmc_get
+
+ !> @brief Put the mpi datatype for this particular unit,
+ !! rank, and stage in cache. The datatype should be
+ !< committed to the type database externally.
+ subroutine mmc_put(this, unit, rank, stage, msg_id, mpi_type)
+ class(MpiMessageCacheType) :: this !< the message cache
+ integer(I4B) :: unit !< the unit (solution or global)
+ integer(I4B) :: rank !< the rank of the MPI process to communicate with
+ integer(I4B) :: stage !< the simulation stage at which the message is sent
+ integer(I4B) :: msg_id !< the message type as an integer between 1 and NR_MSG_TYPES (see above for predefined values)
+ integer :: mpi_type !< the mpi datatype to cache
+ ! local
+ integer(I4B) :: unit_idx
+ type(MpiUnitCacheType), pointer :: new_cache
+ class(*), pointer :: obj_ptr
+
+ unit_idx = this%cached_ids%get_index(unit)
+ if (unit_idx == -1) then
+ ! add to vector with cached unit ids
+ call this%cached_ids%push_back(unit)
+ ! create and add unit cache
+ allocate (new_cache)
+ call new_cache%init(NR_SIM_STAGES, NR_MSG_TYPES)
+ obj_ptr => new_cache
+ call this%unit_caches%Add(obj_ptr)
+ unit_idx = this%cached_ids%size
+ end if
+
+ ! get the cache for this unit
+ obj_ptr => this%unit_caches%GetItem(unit_idx)
+ select type (obj_ptr)
+ class is (MpiUnitCacheType)
+ call obj_ptr%cache(rank, stage, msg_id, mpi_type)
+ end select
+
+ end subroutine mmc_put
+
+  !> @brief Destroy the MPI type cache system.
+ !<
+ subroutine mmc_destroy(this)
+ class(MpiMessageCacheType) :: this !< the message cache
+ ! local
+ integer(I4B) :: i
+ class(*), pointer :: obj_ptr
+
+ ! clear caches
+ do i = 1, this%cached_ids%size
+ obj_ptr => this%unit_caches%GetItem(i)
+ select type (obj_ptr)
+ class is (MpiUnitCacheType)
+ call obj_ptr%destroy()
+ end select
+ end do
+ call this%unit_caches%Clear(destroy=.true.)
+
+ call this%cached_ids%destroy()
+
+ end subroutine mmc_destroy
+
+end module
diff --git a/src/Distributed/MpiRouter.f90 b/src/Distributed/MpiRouter.f90
index e3cc403b832..b4fcd6c1eff 100644
--- a/src/Distributed/MpiRouter.f90
+++ b/src/Distributed/MpiRouter.f90
@@ -11,6 +11,7 @@ module MpiRouterModule
use VirtualExchangeModule, only: VirtualExchangeType
use VirtualSolutionModule
use MpiMessageBuilderModule
+ use MpiMessageCacheModule
use MpiWorldModule
use mpi
implicit none
@@ -27,6 +28,7 @@ module MpiRouterModule
type(VdcPtrType), dimension(:), pointer :: rte_models => null() !< the currently active models to be routed
type(VdcPtrType), dimension(:), pointer :: rte_exchanges => null() !< the currently active exchanges to be routed
type(MpiMessageBuilderType) :: message_builder
+ type(MpiMessageCacheType) :: msg_cache
type(MpiWorldType), pointer :: mpi_world => null()
integer(I4B) :: imon !< the output file unit for the mpi monitor
logical(LGP) :: enable_monitor !< when true, log diagnostics
@@ -38,11 +40,14 @@ module MpiRouterModule
! private
procedure, private :: activate
procedure, private :: deactivate
- procedure, private :: mr_update_senders
- procedure, private :: mr_update_senders_sln
- procedure, private :: mr_update_receivers
- procedure, private :: mr_update_receivers_sln
- procedure, private :: mr_route_active
+ procedure, private :: update_senders
+ procedure, private :: update_senders_sln
+ procedure, private :: update_receivers
+ procedure, private :: update_receivers_sln
+ procedure, private :: route_active
+ procedure, private :: is_cached
+ procedure, private :: compose_messages
+ procedure, private :: load_messages
end type MpiRouterType
contains
@@ -73,8 +78,9 @@ subroutine mr_initialize(this)
! to log or not to log
this%enable_monitor = .false.
- ! initialize the MPI message builder
+ ! initialize the MPI message builder and cache
call this%message_builder%init()
+ call this%msg_cache%init()
! get mpi world for our process
this%mpi_world => get_mpi_world()
@@ -102,6 +108,7 @@ subroutine mr_initialize(this)
call MPI_Allreduce(MPI_IN_PLACE, this%model_proc_ids, nr_models, &
MPI_INTEGER, MPI_SUM, this%mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
! set the process id to the models and exchanges
do i = 1, nr_models
@@ -181,7 +188,7 @@ subroutine mr_route_all(this, stage)
! route all
call this%activate(this%all_models, this%all_exchanges)
- call this%mr_route_active(stage)
+ call this%route_active(0, stage)
call this%deactivate()
if (this%enable_monitor) then
@@ -206,7 +213,7 @@ subroutine mr_route_sln(this, virtual_sol, stage)
! route for this solution
call this%activate(virtual_sol%models, virtual_sol%exchanges)
- call this%mr_route_active(stage)
+ call this%route_active(virtual_sol%solution_id, stage)
call this%deactivate()
if (this%enable_monitor) then
@@ -215,39 +222,50 @@ subroutine mr_route_sln(this, virtual_sol, stage)
end subroutine mr_route_sln
- !> @brief Routes the models and exchanges. This is the
- !< workhorse routine
- subroutine mr_route_active(this, stage)
- class(MpiRouterType) :: this
- integer(I4B) :: stage
+ !> @brief Routes the models and exchanges over MPI,
+ !! either constructing the message bodies in a sequence
+ !! of communication steps, or by loading from cache
+ !< for efficiency.
+ subroutine route_active(this, unit, stage)
+ use SimModule, only: store_error
+ class(MpiRouterType) :: this !< this mpi router
+ integer(I4B) :: unit !< the solution id, or equal to 0 when global
+ integer(I4B) :: stage !< the stage to route
! local
- integer(I4B) :: i, j, k
+ integer(I4B) :: i
integer(I4B) :: rnk
integer :: ierr, msg_size
+ logical(LGP) :: from_cache
! mpi handles
integer, dimension(:), allocatable :: rcv_req
integer, dimension(:), allocatable :: snd_req
integer, dimension(:, :), allocatable :: rcv_stat
- integer, dimension(:, :), allocatable :: snd_stat
- ! message header
- integer(I4B) :: max_headers
- type(VdcHeaderType), dimension(:, :), allocatable :: headers
- integer, dimension(:), allocatable :: hdr_rcv_t
- integer, dimension(:), allocatable :: hdr_snd_t
- integer, dimension(:), allocatable :: hdr_rcv_cnt
-
- ! maps
- type(VdcReceiverMapsType), dimension(:, :), allocatable :: rcv_maps
- integer, dimension(:), allocatable :: map_rcv_t
- integer, dimension(:), allocatable :: map_snd_t
! message body
integer, dimension(:), allocatable :: body_rcv_t
integer, dimension(:), allocatable :: body_snd_t
! update adress list
- call this%mr_update_senders()
- call this%mr_update_receivers()
+ call this%update_senders()
+ call this%update_receivers()
+
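+    ! the receivers list is built from the senders list (see
+    ! update_receivers), so the communication pattern is symmetric
+    ! and both lists must be equal in size: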
+ if (this%senders%size /= this%receivers%size) then
+ call store_error("Internal error: receivers should equal senders", &
+ terminate=.true.)
+ end if
+
+ ! allocate body data
+ allocate (body_rcv_t(this%senders%size))
+ allocate (body_snd_t(this%receivers%size))
+
+ ! allocate handles
+ allocate (rcv_req(this%receivers%size))
+ allocate (snd_req(this%senders%size))
+ allocate (rcv_stat(MPI_STATUS_SIZE, this%receivers%size))
+
+ ! always initialize request handles
+ rcv_req = MPI_REQUEST_NULL
+ snd_req = MPI_REQUEST_NULL
if (this%enable_monitor) then
write (this%imon, '(2x,a,*(i3))') "process ids sending data: ", &
@@ -256,11 +274,132 @@ subroutine mr_route_active(this, stage)
this%receivers%get_values()
end if
+ ! from cache or build
+ from_cache = this%is_cached(unit, stage)
+ if (.not. from_cache) then
+ call this%compose_messages(unit, stage, body_snd_t, body_rcv_t)
+ else
+ call this%load_messages(unit, stage, body_snd_t, body_rcv_t)
+ end if
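+    ! at this point body_snd_t and body_rcv_t hold the mpi datatypes
+    ! for the message bodies, either freshly composed or taken from cache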
+
+ if (this%enable_monitor) then
+ write (this%imon, '(2x,a)') "== communicating bodies =="
+ end if
+
+ ! recv bodies
+ do i = 1, this%senders%size
+ rnk = this%senders%at(i)
+ if (this%enable_monitor) then
+ write (this%imon, '(4x,a,i0)') "receiving from process: ", rnk
+ end if
+
+ call MPI_Type_size(body_rcv_t(i), msg_size, ierr)
+ if (msg_size > 0) then
+ call MPI_Irecv(MPI_BOTTOM, 1, body_rcv_t(i), rnk, stage, &
+ this%mpi_world%comm, rcv_req(i), ierr)
+ call CHECK_MPI(ierr)
+ end if
+
+ if (this%enable_monitor) then
+ write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
+ end if
+ end do
+
+ ! send bodies
+ do i = 1, this%receivers%size
+ rnk = this%receivers%at(i)
+ if (this%enable_monitor) then
+ write (this%imon, '(4x,a,i0)') "sending to process: ", rnk
+ end if
+
+ call MPI_Type_size(body_snd_t(i), msg_size, ierr)
+ if (msg_size > 0) then
+ call MPI_Isend(MPI_Bottom, 1, body_snd_t(i), rnk, stage, &
+ this%mpi_world%comm, snd_req(i), ierr)
+ call CHECK_MPI(ierr)
+ end if
+
+ if (this%enable_monitor) then
+ write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
+ end if
+ call flush (this%imon)
+ end do
+
+ ! wait for exchange of all messages
+ call MPI_WaitAll(this%senders%size, rcv_req, rcv_stat, ierr)
+ call CHECK_MPI(ierr)
+
+ deallocate (rcv_req, snd_req, rcv_stat)
+ deallocate (body_rcv_t, body_snd_t)
+
+ end subroutine route_active
+
+ !> @brief Constructs the message bodies' MPI datatypes.
+ !!
+ !! Constructs the message bodies' MPI datatypes for a
+ !! unit (a solution) and a given stage. This is done in
+ !! a sequence of 6 steps (distributed over 3 phases):
+ !!
+ !! == synchronizing headers (VdcHeaderType) ==
+ !!
+ !! step 1:
+ !! Receive headers from remote addresses requesting data
+ !! from virtual data containers (models, exchanges, ...) local to this process
+ !! step 2:
+ !! Send headers to remote addresses to indicate for which
+ !! virtual data containers (models, exchanges, ...) data is requested
+ !!
+ !! == synchronizing maps (VdcReceiverMapsType) ==
+ !!
+ !! step 3:
+ !! Based on the received headers, receive element maps (which elements are
+ !! to be sent from a contiguous array) for outgoing data
+ !! step 4:
+ !! Send element maps to remote addresses to specify incoming data
+ !!
+ !! == construct message body data types (VirtualDataContainerType) ==
+ !!
+ !! step 5:
+ !! Construct the message receive body based on the virtual
+ !! data items in the virtual data containers, and cache
+ !!
+ !! step 6:
+  !! Construct the message send body, based on received header data
+  !! and maps, from the virtual data containers, and cache
+ !<
+ subroutine compose_messages(this, unit, stage, body_snd_t, body_rcv_t)
+ class(MpiRouterType) :: this
+ integer(I4B) :: unit
+ integer(I4B) :: stage
+ integer, dimension(:) :: body_snd_t
+ integer, dimension(:) :: body_rcv_t
+ ! local
+ integer(I4B) :: i, j, k
+ integer(I4B) :: rnk
+ integer :: ierr
+ ! mpi handles
+ integer, dimension(:), allocatable :: rcv_req
+ integer, dimension(:), allocatable :: snd_req
+ integer, dimension(:, :), allocatable :: rcv_stat
+ ! message header
+ integer(I4B) :: max_headers
+ type(VdcHeaderType), dimension(:, :), allocatable :: headers
+ integer, dimension(:), allocatable :: hdr_rcv_t
+ integer, dimension(:), allocatable :: hdr_snd_t
+ integer, dimension(:), allocatable :: hdr_rcv_cnt
+ ! maps
+ type(VdcReceiverMapsType), dimension(:, :), allocatable :: rcv_maps
+ integer, dimension(:), allocatable :: map_rcv_t
+ integer, dimension(:), allocatable :: map_snd_t
+
! allocate handles
allocate (rcv_req(this%receivers%size))
allocate (snd_req(this%senders%size))
allocate (rcv_stat(MPI_STATUS_SIZE, this%receivers%size))
- allocate (snd_stat(MPI_STATUS_SIZE, this%senders%size))
+
+ ! init handles
+ rcv_req = MPI_REQUEST_NULL
+ snd_req = MPI_REQUEST_NULL
! allocate header data
max_headers = size(this%rte_models) + size(this%rte_exchanges)
@@ -274,10 +413,6 @@ subroutine mr_route_active(this, stage)
allocate (map_rcv_t(this%receivers%size))
allocate (rcv_maps(max_headers, this%receivers%size)) ! for every header, we potentially need the maps
- ! allocate body data
- allocate (body_rcv_t(this%senders%size))
- allocate (body_snd_t(this%receivers%size))
-
if (this%enable_monitor) then
write (this%imon, '(2x,a)') "== communicating headers =="
end if
@@ -291,7 +426,7 @@ subroutine mr_route_active(this, stage)
call this%message_builder%create_header_rcv(hdr_rcv_t(i))
call MPI_Irecv(headers(:, i), max_headers, hdr_rcv_t(i), rnk, stage, &
this%mpi_world%comm, rcv_req(i), ierr)
- ! don't free mpi datatype, we need the count below
+ call CHECK_MPI(ierr)
end do
! send header for incoming data
@@ -303,11 +438,16 @@ subroutine mr_route_active(this, stage)
call this%message_builder%create_header_snd(rnk, stage, hdr_snd_t(i))
call MPI_Isend(MPI_BOTTOM, 1, hdr_snd_t(i), rnk, stage, &
this%mpi_world%comm, snd_req(i), ierr)
- call MPI_Type_free(hdr_snd_t(i), ierr)
+ call CHECK_MPI(ierr)
end do
! wait for exchange of all headers
call MPI_WaitAll(this%receivers%size, rcv_req, rcv_stat, ierr)
+ call CHECK_MPI(ierr)
+
+ ! reinit handles
+ rcv_req = MPI_REQUEST_NULL
+ snd_req = MPI_REQUEST_NULL
! after WaitAll we can count incoming headers from statuses
do i = 1, this%receivers%size
@@ -323,9 +463,15 @@ subroutine mr_route_active(this, stage)
write (this%imon, '(6x,a,99i6)') "map sizes: ", headers(j, i)%map_sizes
end do
end if
+ end do
+ ! clean up types
+ do i = 1, this%receivers%size
call MPI_Type_free(hdr_rcv_t(i), ierr)
end do
+ do i = 1, this%senders%size
+ call MPI_Type_free(hdr_snd_t(i), ierr)
+ end do
if (this%enable_monitor) then
write (this%imon, '(2x,a)') "== communicating maps =="
@@ -347,9 +493,9 @@ subroutine mr_route_active(this, stage)
call this%message_builder%create_map_rcv(rcv_maps(:, i), hdr_rcv_cnt(i), &
map_rcv_t(i))
-
call MPI_Irecv(MPI_BOTTOM, 1, map_rcv_t(i), rnk, stage, &
this%mpi_world%comm, rcv_req(i), ierr)
+ call CHECK_MPI(ierr)
end do
! send maps
@@ -358,13 +504,16 @@ subroutine mr_route_active(this, stage)
if (this%enable_monitor) then
write (this%imon, '(4x,a,i0)') "send map to process: ", rnk
end if
+
call this%message_builder%create_map_snd(rnk, stage, map_snd_t(i))
call MPI_Isend(MPI_BOTTOM, 1, map_snd_t(i), rnk, stage, &
this%mpi_world%comm, snd_req(i), ierr)
+ call CHECK_MPI(ierr)
end do
! wait on receiving maps
call MPI_WaitAll(this%receivers%size, rcv_req, rcv_stat, ierr)
+ call CHECK_MPI(ierr)
! print maps
if (this%enable_monitor) then
@@ -395,77 +544,75 @@ subroutine mr_route_active(this, stage)
end do
if (this%enable_monitor) then
- write (this%imon, '(2x,a)') "== communicating bodies =="
+ write (this%imon, '(2x,a)') "== composing message bodies =="
end if
- ! recv bodies
+ ! construct recv bodies and cache
do i = 1, this%senders%size
rnk = this%senders%at(i)
if (this%enable_monitor) then
- write (this%imon, '(4x,a,i0)') "receiving from process: ", rnk
+ write (this%imon, '(4x,a,i0)') "build recv body for process: ", rnk
end if
call this%message_builder%create_body_rcv(rnk, stage, body_rcv_t(i))
- call MPI_Type_size(body_rcv_t(i), msg_size, ierr)
- if (msg_size > 0) then
- call MPI_Irecv(MPI_BOTTOM, 1, body_rcv_t(i), rnk, stage, &
- this%mpi_world%comm, rcv_req(i), ierr)
- end if
-
- if (this%enable_monitor) then
- write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
- end if
+ call this%msg_cache%put(unit, rnk, stage, MPI_BDY_RCV, body_rcv_t(i))
end do
- ! send bodies
+ ! construct send bodies and cache
do i = 1, this%receivers%size
rnk = this%receivers%at(i)
if (this%enable_monitor) then
- write (this%imon, '(4x,a,i0)') "sending to process: ", rnk
+ write (this%imon, '(4x,a,i0)') "build send body for process: ", rnk
end if
+
call this%message_builder%create_body_snd( &
rnk, stage, headers(1:hdr_rcv_cnt(i), i), &
rcv_maps(:, i), body_snd_t(i))
- call MPI_Type_size(body_snd_t(i), msg_size, ierr)
- if (msg_size > 0) then
- call MPI_Isend(MPI_Bottom, 1, body_snd_t(i), rnk, stage, &
- this%mpi_world%comm, snd_req(i), ierr)
- end if
-
- if (this%enable_monitor) then
- write (this%imon, '(6x,a,i0)') "message body size: ", msg_size
- end if
- call flush (this%imon)
- end do
-
- ! wait for exchange of all messages
- call MPI_WaitAll(this%senders%size, rcv_req, snd_stat, ierr)
-
- ! clean up types
- do i = 1, this%senders%size
- call MPI_Type_free(body_rcv_t(i), ierr)
- end do
- do i = 1, this%receivers%size
- call MPI_Type_free(body_snd_t(i), ierr)
+ call this%msg_cache%put(unit, rnk, stage, MPI_BDY_SND, body_snd_t(i))
end do
- ! done sending, clean up element maps
+ ! clean up element maps
do i = 1, this%receivers%size
do j = 1, hdr_rcv_cnt(i)
call rcv_maps(j, i)%destroy()
end do
end do
- deallocate (rcv_req, snd_req, rcv_stat, snd_stat)
+ deallocate (rcv_req, snd_req, rcv_stat)
deallocate (hdr_rcv_t, hdr_snd_t, hdr_rcv_cnt)
deallocate (headers)
deallocate (map_rcv_t, map_snd_t)
deallocate (rcv_maps)
- deallocate (body_rcv_t, body_snd_t)
- end subroutine mr_route_active
+ end subroutine compose_messages
- subroutine mr_update_senders(this)
+ !> @brief Load the message body MPI datatypes from cache
+ !<
+ subroutine load_messages(this, unit, stage, body_snd_t, body_rcv_t)
+ class(MpiRouterType) :: this
+ integer(I4B) :: unit
+ integer(I4B) :: stage
+ integer, dimension(:), allocatable :: body_snd_t
+ integer, dimension(:), allocatable :: body_rcv_t
+ ! local
+ integer(I4B) :: i, rnk
+
+ if (this%enable_monitor) then
+ write (this%imon, '(2x,a)') "... running from cache ..."
+ end if
+
+ do i = 1, this%receivers%size
+ rnk = this%receivers%at(i)
+ body_snd_t(i) = this%msg_cache%get(unit, rnk, stage, MPI_BDY_SND)
+ end do
+ do i = 1, this%senders%size
+ rnk = this%senders%at(i)
+ body_rcv_t(i) = this%msg_cache%get(unit, rnk, stage, MPI_BDY_RCV)
+ end do
+
+ end subroutine load_messages
+
+ subroutine update_senders(this)
class(MpiRouterType) :: this
! local
integer(I4B) :: i
@@ -486,9 +633,9 @@ subroutine mr_update_senders(this)
end if
end do
- end subroutine mr_update_senders
+ end subroutine update_senders
- subroutine mr_update_senders_sln(this, virtual_sol)
+ subroutine update_senders_sln(this, virtual_sol)
class(MpiRouterType) :: this
type(VirtualSolutionType) :: virtual_sol
! local
@@ -510,9 +657,9 @@ subroutine mr_update_senders_sln(this, virtual_sol)
end if
end do
- end subroutine mr_update_senders_sln
+ end subroutine update_senders_sln
- subroutine mr_update_receivers(this)
+ subroutine update_receivers(this)
class(MpiRouterType) :: this
! local
integer(I4B) :: i
@@ -524,9 +671,9 @@ subroutine mr_update_receivers(this)
call this%receivers%push_back(this%senders%at(i))
end do
- end subroutine mr_update_receivers
+ end subroutine update_receivers
- subroutine mr_update_receivers_sln(this, virtual_sol)
+ subroutine update_receivers_sln(this, virtual_sol)
class(MpiRouterType) :: this
type(VirtualSolutionType) :: virtual_sol
! local
@@ -539,11 +686,51 @@ subroutine mr_update_receivers_sln(this, virtual_sol)
call this%receivers%push_back(this%senders%at(i))
end do
- end subroutine mr_update_receivers_sln
+ end subroutine update_receivers_sln
+
+ !> @brief Check if this stage is cached
+ !<
+ function is_cached(this, unit, stage) result(in_cache)
+ use SimModule, only: ustop
+ class(MpiRouterType) :: this
+ integer(I4B) :: unit
+ integer(I4B) :: stage
+ logical(LGP) :: in_cache
+ ! local
+ integer(I4B) :: i, rnk
+ integer(I4B) :: no_cache_cnt
+ integer :: cached_type
+
+ in_cache = .false.
+ no_cache_cnt = 0
+
+ do i = 1, this%receivers%size
+ rnk = this%receivers%at(i)
+ cached_type = this%msg_cache%get(unit, rnk, stage, MPI_BDY_SND)
+ if (cached_type == NO_CACHED_VALUE) no_cache_cnt = no_cache_cnt + 1
+ end do
+ do i = 1, this%senders%size
+ rnk = this%senders%at(i)
+ cached_type = this%msg_cache%get(unit, rnk, stage, MPI_BDY_RCV)
+ if (cached_type == NO_CACHED_VALUE) no_cache_cnt = no_cache_cnt + 1
+ end do
+
+ ! it should be all or nothing
+ if (no_cache_cnt == this%receivers%size + this%senders%size) then
+ in_cache = .false.
+ else if (no_cache_cnt == 0) then
+ in_cache = .true.
+ else
+ call ustop("Internal error: MPI message cache corrupt...")
+ end if
+
+ end function is_cached
subroutine mr_destroy(this)
class(MpiRouterType) :: this
+ call this%msg_cache%destroy()
+
call this%senders%destroy()
call this%receivers%destroy()
diff --git a/src/Distributed/MpiRunControl.F90 b/src/Distributed/MpiRunControl.F90
index e37f50c174a..796b59bf9b6 100644
--- a/src/Distributed/MpiRunControl.F90
+++ b/src/Distributed/MpiRunControl.F90
@@ -79,6 +79,7 @@ subroutine mpi_ctrl_start(this)
#else
if (.not. mpi_world%has_comm()) then
call MPI_Init(ierr)
+ call CHECK_MPI(ierr)
call mpi_world%set_comm(MPI_COMM_WORLD)
end if
#endif
@@ -130,6 +131,7 @@ subroutine mpi_ctrl_finish(this)
CHKERRQ(ierr)
#else
call MPI_Finalize(ierr)
+ call CHECK_MPI(ierr)
#endif
! finish everything else by calling parent
diff --git a/src/Distributed/MpiUnitCache.f90 b/src/Distributed/MpiUnitCache.f90
new file mode 100644
index 00000000000..51bf4a546f1
--- /dev/null
+++ b/src/Distributed/MpiUnitCache.f90
@@ -0,0 +1,166 @@
+module MpiUnitCacheModule
+ use KindModule, only: I4B, LGP
+ use ListModule
+ use STLVecIntModule
+ use SimStagesModule, only: NR_SIM_STAGES
+ use mpi
+ implicit none
+ private
+
+ integer(I4B), public, parameter :: NO_CACHED_VALUE = -1
+
+ type, public :: MpiUnitCacheType
+ ! private
+    type(STLVecInt), private :: cached_ranks !< the ranks for which messages are cached
+    type(STLVecInt), private :: cached_messages !< flat vector with cached mpi datatypes (see get_msg_index)
+ integer(I4B), private :: nr_stages
+ integer(I4B), private :: nr_msg_types
+ contains
+ procedure :: init => cc_init
+ procedure :: get_cached => cc_get_cached
+ procedure :: cache => mc_cache
+ procedure :: destroy => cc_destroy
+ ! private
+ procedure, private :: is_rank_cached
+ procedure, private :: add_rank_cache
+ procedure, private :: get_rank_index
+ procedure, private :: get_msg_index
+ end type MpiUnitCacheType
+
+contains
+
+ !> @brief Initialize the unit cache.
+ !<
+ subroutine cc_init(this, nr_stages, nr_msg_types)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: nr_stages !< number of (simulation) stages
+ integer(I4B) :: nr_msg_types !< number of message types to be cached during a stage
+
+ this%nr_stages = nr_stages
+ this%nr_msg_types = nr_msg_types
+ call this%cached_ranks%init()
+ call this%cached_messages%init()
+
+ end subroutine cc_init
+
+ !> @brief Get the cached mpi type for this rank and
+ !< stage. Equal to NO_CACHED_VALUE when not present.
+ function cc_get_cached(this, rank, stage, msg_id) result(mpi_type)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: rank
+ integer(I4B) :: stage
+ integer(I4B) :: msg_id
+ integer :: mpi_type
+ ! local
+ integer(I4B) :: msg_idx
+
+ mpi_type = NO_CACHED_VALUE
+ msg_idx = this%get_msg_index(rank, stage, msg_id)
+ if (msg_idx > 0) then
+ mpi_type = this%cached_messages%at(msg_idx)
+ end if
+
+ end function cc_get_cached
+
+ !> @brief Cache the mpi datatype for this particular
+ !! rank and stage. The datatype should be committed
+ !< to the type database externally.
+ subroutine mc_cache(this, rank, stage, msg_id, mpi_type)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: rank
+ integer(I4B) :: stage
+ integer(I4B) :: msg_id
+ integer :: mpi_type
+ ! local
+ integer(I4B) :: msg_idx
+
+ ! add if rank not present in cache yet
+ if (.not. this%is_rank_cached(rank)) then
+ call this%add_rank_cache(rank)
+ end if
+
+ ! rank has been added to cache, now set
+ ! mpi datatype for this stage's message:
+ msg_idx = this%get_msg_index(rank, stage, msg_id)
+ call this%cached_messages%set(msg_idx, mpi_type)
+
+ end subroutine mc_cache
+
+ function is_rank_cached(this, rank) result(in_cache)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: rank
+ logical(LGP) :: in_cache
+
+ in_cache = this%cached_ranks%contains(rank)
+
+ end function is_rank_cached
+
+ subroutine add_rank_cache(this, rank)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: rank
+ ! local
+ integer(I4B) :: i, j
+
+ call this%cached_ranks%push_back(rank)
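+    ! reserve nr_stages * nr_msg_types empty slots for the new rank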
+ do i = 1, this%nr_stages
+ do j = 1, this%nr_msg_types
+ call this%cached_messages%push_back(NO_CACHED_VALUE)
+ end do
+ end do
+
+ end subroutine add_rank_cache
+
+  !> @brief Returns -1 when not present
+ !<
+ function get_rank_index(this, rank) result(rank_index)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: rank
+ integer(I4B) :: rank_index
+
+ rank_index = this%cached_ranks%get_index(rank)
+
+ end function get_rank_index
+
+  !> @brief Returns -1 when not present
+ !<
+ function get_msg_index(this, rank, stage, msg_id) result(msg_index)
+ class(MpiUnitCacheType) :: this
+ integer(I4B) :: rank
+ integer(I4B) :: stage
+ integer(I4B) :: msg_id
+ integer(I4B) :: msg_index
+ ! local
+ integer(I4B) :: rank_idx
+ integer(I4B) :: rank_offset, stage_offset
+
+ msg_index = -1
+ rank_idx = this%get_rank_index(rank)
+ if (rank_idx < 1) return
+
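+    ! the cache is a flat vector: each cached rank owns a block of
+    ! nr_stages * nr_msg_types slots, ordered by stage first and then
+    ! by message type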
+ rank_offset = (rank_idx - 1) * (this%nr_stages * this%nr_msg_types)
+ stage_offset = (stage - 1) * this%nr_msg_types
+ msg_index = rank_offset + stage_offset + msg_id
+
+ end function get_msg_index
+
+ !> @brief Clean up the unit cache.
+ !<
+ subroutine cc_destroy(this)
+ class(MpiUnitCacheType) :: this
+ ! local
+ integer(I4B) :: i
+ integer :: mpi_type, ierr
+
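+    ! free all mpi types that were cached for this unit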
+ do i = 1, this%cached_messages%size
+ mpi_type = this%cached_messages%at(i)
+ if (mpi_type /= NO_CACHED_VALUE) then
+ call MPI_Type_free(mpi_type, ierr)
+ end if
+ end do
+
+ call this%cached_ranks%destroy()
+ call this%cached_messages%destroy()
+
+ end subroutine cc_destroy
+
+end module
diff --git a/src/Distributed/MpiWorld.f90 b/src/Distributed/MpiWorld.f90
index 8962c99d757..7ee3498467e 100644
--- a/src/Distributed/MpiWorld.f90
+++ b/src/Distributed/MpiWorld.f90
@@ -6,6 +6,7 @@ module MpiWorldModule
private
public :: get_mpi_world
+ public :: CHECK_MPI
type, public :: MpiWorldType
integer(I4B) :: mpi_rank !< the id for this process
@@ -108,4 +109,21 @@ subroutine mpiw_destroy(this)
end subroutine mpiw_destroy
+ !> @brief Check the MPI error code, report, and
+ !< terminate when not MPI_SUCCESS
+ subroutine CHECK_MPI(mpi_error_code)
+ use SimModule, only: store_error
+ integer :: mpi_error_code
+ ! local
+ character(len=1024) :: mpi_err_msg
+ integer :: err_len
+ integer :: ierr
+
+ if (mpi_error_code /= MPI_SUCCESS) then
+ call MPI_Error_string(mpi_error_code, mpi_err_msg, err_len, ierr)
+ call store_error("Internal error: "//trim(mpi_err_msg), terminate=.true.)
+ end if
+
+ end subroutine CHECK_MPI
+
end module MpiWorldModule
diff --git a/src/Distributed/VirtualDataContainer.f90 b/src/Distributed/VirtualDataContainer.f90
index 36b74f95f1a..3350bd64e5b 100644
--- a/src/Distributed/VirtualDataContainer.f90
+++ b/src/Distributed/VirtualDataContainer.f90
@@ -329,6 +329,9 @@ subroutine print_items(this, imon, items)
vdi => get_virtual_data_from_list(this%virtual_data_list, items%at(i))
write (imon, *) vdi%var_name, ":", vdi%mem_path
end do
+ if (items%size == 0) then
+ write (imon, *) "... empty ...", this%name
+ end if
write (imon, *) "<===== items"
end subroutine print_items
diff --git a/src/Model/Connection/GwfGwfConnection.f90 b/src/Model/Connection/GwfGwfConnection.f90
index 625c41d8525..bbfdb6961dc 100644
--- a/src/Model/Connection/GwfGwfConnection.f90
+++ b/src/Model/Connection/GwfGwfConnection.f90
@@ -13,7 +13,7 @@ module GwfGwfConnectionModule
use DisConnExchangeModule
use GwfGwfExchangeModule, only: GwfExchangeType, GetGwfExchangeFromList, &
CastAsGwfExchange
- use GwfNpfModule, only: GwfNpfType, hcond, vcond
+ use GwfNpfModule, only: GwfNpfType
use GwfBuyModule, only: GwfBuyType
use BaseDisModule, only: DisBaseType
use ConnectionsModule, only: ConnectionsType
diff --git a/src/Solution/PETSc/PetscConvergence.F90 b/src/Solution/PETSc/PetscConvergence.F90
index 7c4dcd2cf81..0928d7aa017 100644
--- a/src/Solution/PETSc/PetscConvergence.F90
+++ b/src/Solution/PETSc/PetscConvergence.F90
@@ -92,10 +92,11 @@ subroutine petsc_check_convergence(ksp, n, rnorm, flag, context, ierr)
CHKERRQ(ierr)
call VecCopy(res, context%res_old, ierr)
CHKERRQ(ierr)
- call VecDestroy(res, ierr)
- CHKERRQ(ierr)
flag = KSP_CONVERGED_ITERATING
end if
+
+ call VecDestroy(res, ierr)
+ CHKERRQ(ierr)
return
end if
@@ -131,10 +132,10 @@ subroutine petsc_check_convergence(ksp, n, rnorm, flag, context, ierr)
call VecDestroy(res, ierr)
CHKERRQ(ierr)
- ! get dv and dr per local model
- call VecGetArrayF90(context%delta_x, local_dx, ierr)
+ ! get dv and dr per local model (readonly!)
+ call VecGetArrayReadF90(context%delta_x, local_dx, ierr)
CHKERRQ(ierr)
- call VecGetArrayF90(context%delta_res, local_dr, ierr)
+ call VecGetArrayReadF90(context%delta_res, local_dr, ierr)
CHKERRQ(ierr)
do i = 1, summary%convnmod
! reset
@@ -162,9 +163,9 @@ subroutine petsc_check_convergence(ksp, n, rnorm, flag, context, ierr)
summary%convlocdr(i, iter_cnt) = idx_dr
end if
end do
- call VecRestoreArrayF90(x, local_dx, ierr)
+    call VecRestoreArrayReadF90(context%delta_x, local_dx, ierr)
CHKERRQ(ierr)
- call VecRestoreArrayF90(x, local_dr, ierr)
+    call VecRestoreArrayReadF90(context%delta_res, local_dr, ierr)
CHKERRQ(ierr)
if (norm < context%dvclose) then
diff --git a/src/Solution/ParallelSolution.f90 b/src/Solution/ParallelSolution.f90
index 687755e71ae..09c00392ef9 100644
--- a/src/Solution/ParallelSolution.f90
+++ b/src/Solution/ParallelSolution.f90
@@ -43,6 +43,7 @@ function par_has_converged(this, max_dvc) result(has_converged)
abs_max_dvc = abs(max_dvc)
call MPI_Allreduce(abs_max_dvc, global_max_dvc, 1, MPI_DOUBLE_PRECISION, &
MPI_MAX, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
if (global_max_dvc <= this%dvclose) then
has_converged = .true.
end if
@@ -68,6 +69,7 @@ function par_package_convergence(this, dpak, cpakout, iend) &
call MPI_Allreduce(icnvg_local, icnvg_global, 1, MPI_INTEGER, &
MPI_MIN, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
end function par_package_convergence
@@ -82,6 +84,7 @@ function par_sync_newtonur_flag(this, inewtonur) result(ivalue)
mpi_world => get_mpi_world()
call MPI_Allreduce(inewtonur, ivalue, 1, MPI_INTEGER, &
MPI_MAX, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
end function par_sync_newtonur_flag
@@ -108,6 +111,7 @@ function par_nur_has_converged(this, dxold_max, hncg) &
call MPI_Allreduce(icnvg_local, icnvg_global, 1, MPI_INTEGER, &
MPI_MIN, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
if (icnvg_global == 1) has_converged = .true.
end function par_nur_has_converged
@@ -131,6 +135,7 @@ subroutine par_calc_ptc(this, iptc, ptcf)
! now reduce
call MPI_Allreduce(ptcf_loc, ptcf_glo_max, 1, MPI_DOUBLE_PRECISION, &
MPI_MAX, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
iptc = 0
ptcf = DZERO
@@ -161,8 +166,10 @@ subroutine par_underrelax(this, kiter, bigch, neq, active, x, xtemp)
! first reduce largest change over all processes
call MPI_Allreduce(bigch, dvc_global_max, 1, MPI_DOUBLE_PRECISION, &
MPI_MAX, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
call MPI_Allreduce(bigch, dvc_global_min, 1, MPI_DOUBLE_PRECISION, &
MPI_MIN, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
if (abs(dvc_global_min) > abs(dvc_global_max)) then
dvc_global_max = dvc_global_min
@@ -192,6 +199,7 @@ subroutine par_backtracking_xupdate(this, btflag)
call MPI_Allreduce(btflag_local, btflag, 1, MPI_INTEGER, &
MPI_MAX, mpi_world%comm, ierr)
+ call CHECK_MPI(ierr)
end subroutine par_backtracking_xupdate
diff --git a/src/Utilities/STLVecInt.f90 b/src/Utilities/STLVecInt.f90
index 6551e697705..7431da49026 100644
--- a/src/Utilities/STLVecInt.f90
+++ b/src/Utilities/STLVecInt.f90
@@ -21,10 +21,12 @@ module STLVecIntModule
procedure, pass(this) :: add_array_unique !< adds elements of array at the end of the vector, if not present yet
procedure, pass(this) :: at !< random access, unsafe, no bounds checking
procedure, pass(this) :: at_safe !< random access with bounds checking
+ procedure, pass(this) :: set !< set value at index, no bounds checking
procedure, pass(this) :: clear !< empties the vector, leaves memory unchanged
procedure, pass(this) :: shrink_to_fit !< reduces the allocated memory to fit the actual vector size
procedure, pass(this) :: destroy !< deletes the memory
procedure, pass(this) :: contains !< true when element already present
+ procedure, pass(this) :: get_index !< return index of first occurrence of value in array, -1 when not present
procedure, pass(this) :: get_values !< returns a copy of the values
! private
procedure, private, pass(this) :: expand
@@ -119,6 +121,15 @@ function at_safe(this, idx) result(value)
end function at_safe
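+
+  !> @brief Set the value at the given index. No bounds check is done.
+  !<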
+ subroutine set(this, idx, value)
+ class(STLVecInt), intent(inout) :: this
+ integer(I4B), intent(in) :: idx
+ integer(I4B) :: value
+
+ this%values(idx) = value
+
+ end subroutine set
+
subroutine clear(this)
class(STLVecInt), intent(inout) :: this
@@ -200,6 +211,25 @@ function contains(this, val) result(res)
end function contains
+ !> @brief Return index of first occurrence,
+ !< returns -1 when not present
+ function get_index(this, val) result(idx)
+ class(STLVecInt), intent(inout) :: this
+ integer(I4B) :: val
+ integer(I4B) :: idx
+ ! local
+ integer(I4B) :: i
+
+ idx = -1
+ do i = 1, this%size
+ if (this%at(i) == val) then
+ idx = i
+ return
+ end if
+ end do
+
+ end function get_index
+
function get_values(this) result(values)
class(STLVecInt), intent(in) :: this
integer(I4B), dimension(:), allocatable :: values
diff --git a/src/Utilities/SimStages.f90 b/src/Utilities/SimStages.f90
index 9168f05b9a8..01bd7d1e312 100644
--- a/src/Utilities/SimStages.f90
+++ b/src/Utilities/SimStages.f90
@@ -21,6 +21,7 @@ module SimStagesModule
integer(I4B), public, parameter :: STG_BFR_EXG_AD = 12 !< before exchange advance (per solution)
integer(I4B), public, parameter :: STG_BFR_EXG_CF = 13 !< before exchange calculate (per solution)
integer(I4B), public, parameter :: STG_BFR_EXG_FC = 14 !< before exchange formulate (per solution)
+  integer(I4B), public, parameter :: NR_SIM_STAGES = 14 !< the total number of simulation stages
contains
diff --git a/src/meson.build b/src/meson.build
index c4a8bcc395d..69ebe64dab6 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -302,6 +302,8 @@ modflow_petsc_sources = files(
modflow_mpi_sources = files(
'Distributed' / 'MpiMessageBuilder.f90',
+ 'Distributed' / 'MpiMessageCache.f90',
+ 'Distributed' / 'MpiUnitCache.f90',
'Distributed' / 'MpiRouter.f90',
'Distributed' / 'MpiRunControl.F90',
'Distributed' / 'MpiWorld.f90',
diff --git a/utils/mf5to6/make/makefile b/utils/mf5to6/make/makefile
index ccf5741d638..0e3b414c298 100644
--- a/utils/mf5to6/make/makefile
+++ b/utils/mf5to6/make/makefile
@@ -5,10 +5,10 @@ include ./makedefaults
# Define the source file directories
SOURCEDIR1=../src
-SOURCEDIR2=../src/LGR
-SOURCEDIR3=../src/Preproc
-SOURCEDIR4=../src/MF2005
-SOURCEDIR5=../src/NWT
+SOURCEDIR2=../src/MF2005
+SOURCEDIR3=../src/NWT
+SOURCEDIR4=../src/LGR
+SOURCEDIR5=../src/Preproc
SOURCEDIR6=../../../src/Utilities/Memory
SOURCEDIR7=../../../src/Utilities/TimeSeries
SOURCEDIR8=../../../src/Utilities