FV3 Bundle
fv_mp_nlm_mod.F90
!***********************************************************************
!*                   GNU General Public License                        *
!* This file is a part of fvGFS.                                       *
!*                                                                     *
!* fvGFS is free software; you can redistribute it and/or modify it    *
!* and are expected to follow the terms of the GNU General Public      *
!* License as published by the Free Software Foundation; either        *
!* version 2 of the License, or (at your option) any later version.    *
!*                                                                     *
!* fvGFS is distributed in the hope that it will be useful, but        *
!* WITHOUT ANY WARRANTY; without even the implied warranty of          *
!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU    *
!* General Public License for more details.                            *
!*                                                                     *
!* For the full text of the GNU General Public License,                *
!* write to: Free Software Foundation, Inc.,                           *
!*           675 Mass Ave, Cambridge, MA 02139, USA.                   *
!* or see:   http://www.gnu.org/licenses/gpl.html                      *
!***********************************************************************
!------------------------------------------------------------------------------
!BOP
!
! !MODULE: fv_mp_nlm_mod --- SPMD parallel decomposition/communication module
 module fv_mp_nlm_mod

#if defined(SPMD)
! !USES:
 use fms_mod, only : fms_init, fms_end
 use mpp_mod, only : fatal, mpp_debug, note, mpp_clock_sync, mpp_clock_detailed, warning
 use mpp_mod, only : mpp_pe, mpp_npes, mpp_node, mpp_root_pe, mpp_error, mpp_set_warn_level
 use mpp_mod, only : mpp_declare_pelist, mpp_set_current_pelist, mpp_sync
 use mpp_mod, only : mpp_clock_begin, mpp_clock_end, mpp_clock_id
 use mpp_mod, only : mpp_chksum, stdout, stderr, mpp_broadcast
 use mpp_mod, only : mpp_send, mpp_recv, mpp_sync_self, event_recv, mpp_gather
 use mpp_domains_mod, only : global_data_domain, bitwise_exact_sum, bgrid_ne, fold_north_edge, cgrid_ne
 use mpp_domains_mod, only : mpp_domain_time, cyclic_global_domain, nupdate, eupdate, xupdate, yupdate, scalar_pair
 use mpp_domains_mod, only : domain1d, domain2d, domaincommunicator2d, mpp_get_ntile_count
 use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain, mpp_domains_set_stack_size
 use mpp_domains_mod, only : mpp_define_layout
 use mpp_domains_mod, only : mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain
 use mpp_domains_mod, only : mpp_get_neighbor_pe, mpp_define_mosaic, mpp_define_io_domain
 use mpp_domains_mod, only : north, north_east, east, south_east
 use mpp_domains_mod, only : south, south_west, west, north_west
 use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update
 use mpp_domains_mod, only : mpp_group_update_initialized, mpp_do_group_update
 use mpp_domains_mod, only : mpp_create_group_update, mpp_reset_group_update_field
 use mpp_domains_mod, only : group_halo_update_type => mpp_group_update_type
 use fms_io_mod, only: set_domain
 use mpp_mod, only : mpp_get_current_pelist, mpp_set_current_pelist
 use mpp_domains_mod, only : mpp_define_nest_domains, nest_domain_type
 use mpp_domains_mod, only : mpp_get_c2f_index, mpp_update_nest_fine
 use mpp_domains_mod, only : mpp_get_f2c_index, mpp_update_nest_coarse
 use mpp_domains_mod, only : mpp_get_domain_shift
 use ensemble_manager_mod, only : get_ensemble_id
 use fv_arrays_nlm_mod, only : fv_atmos_type ! assumed provider of fv_atmos_type, which is referenced below

 implicit none
 private

 integer, parameter:: ng = 3 ! Number of ghost zones required

#include "mpif.h"
 integer, parameter :: XDir=1
 integer, parameter :: YDir=2
 integer :: commglobal, ierror, npes

 !need tile as a module variable so that some of the mp_ routines below will work
 integer::tile

 integer, allocatable, dimension(:) :: npes_tile, tile1, tile2
 integer, allocatable, dimension(:) :: istart1, iend1, jstart1, jend1
 integer, allocatable, dimension(:) :: istart2, iend2, jstart2, jend2
 integer, allocatable, dimension(:,:) :: layout2D, global_indices
 integer :: numthreads, gid, masterproc

 logical :: master

 type(nest_domain_type), allocatable, dimension(:) :: nest_domain
 integer :: this_pe_grid = 0
 integer, EXTERNAL :: omp_get_thread_num, omp_get_num_threads

 integer :: npes_this_grid

 !! CLEANUP: these are currently here for convenience
 !! Right now calling switch_current_atm sets these to the value on the "current" grid
 !! (as well as changing the "current" domain)
 integer :: is, ie, js, je
 integer :: isd, ied, jsd, jed
 integer :: isc, iec, jsc, jec

 public mp_start, mp_assign_gid, mp_barrier, mp_stop!, npes
 public domain_decomp, mp_bcst, mp_reduce_max, mp_reduce_sum, mp_gather
 public mp_reduce_min
 public fill_corners, xdir, ydir
 public switch_current_domain, switch_current_atm, broadcast_domains
 public is_master, setup_master
 !The following variables are declared public by this module for convenience;
 !they will need to be switched when domains are switched
!!! CLEANUP: ng is a PARAMETER and is OK to be shared by a use statement
 public is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec, ng, tile
 public start_group_halo_update, complete_group_halo_update
 public group_halo_update_type

 interface start_group_halo_update
   module procedure start_var_group_update_2d
   module procedure start_var_group_update_3d
   module procedure start_var_group_update_4d
   module procedure start_vector_group_update_2d
   module procedure start_vector_group_update_3d
 end interface start_group_halo_update

 INTERFACE fill_corners
   MODULE PROCEDURE fill_corners_2d_r4
   MODULE PROCEDURE fill_corners_2d_r8
   MODULE PROCEDURE fill_corners_xy_2d_r4
   MODULE PROCEDURE fill_corners_xy_2d_r8
   MODULE PROCEDURE fill_corners_xy_3d_r4
   MODULE PROCEDURE fill_corners_xy_3d_r8
 END INTERFACE

 INTERFACE fill_corners_agrid
   MODULE PROCEDURE fill_corners_agrid_r4
   MODULE PROCEDURE fill_corners_agrid_r8
 END INTERFACE

 INTERFACE fill_corners_cgrid
   MODULE PROCEDURE fill_corners_cgrid_r4
   MODULE PROCEDURE fill_corners_cgrid_r8
 END INTERFACE

 INTERFACE fill_corners_dgrid
   MODULE PROCEDURE fill_corners_dgrid_r4
   MODULE PROCEDURE fill_corners_dgrid_r8
 END INTERFACE

 INTERFACE mp_bcst
   MODULE PROCEDURE mp_bcst_i4
   MODULE PROCEDURE mp_bcst_r4
   MODULE PROCEDURE mp_bcst_r8
   MODULE PROCEDURE mp_bcst_3d_r4
   MODULE PROCEDURE mp_bcst_3d_r8
   MODULE PROCEDURE mp_bcst_4d_r4
   MODULE PROCEDURE mp_bcst_4d_r8
   MODULE PROCEDURE mp_bcst_3d_i8
   MODULE PROCEDURE mp_bcst_4d_i8
 END INTERFACE

 INTERFACE mp_reduce_min
   MODULE PROCEDURE mp_reduce_min_r4
   MODULE PROCEDURE mp_reduce_min_r8
 END INTERFACE

 INTERFACE mp_reduce_max
   MODULE PROCEDURE mp_reduce_max_r4_1d
   MODULE PROCEDURE mp_reduce_max_r4
   MODULE PROCEDURE mp_reduce_max_r8_1d
   MODULE PROCEDURE mp_reduce_max_r8
   MODULE PROCEDURE mp_reduce_max_i4
 END INTERFACE

 INTERFACE mp_reduce_sum
   MODULE PROCEDURE mp_reduce_sum_r4
   MODULE PROCEDURE mp_reduce_sum_r4_1d
   MODULE PROCEDURE mp_reduce_sum_r8
   MODULE PROCEDURE mp_reduce_sum_r8_1d
 END INTERFACE

 INTERFACE mp_gather !WARNING only works with one level (ldim == 1)
   MODULE PROCEDURE mp_gather_4d_r4
   MODULE PROCEDURE mp_gather_3d_r4
   MODULE PROCEDURE mp_gather_3d_r8
 END INTERFACE
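
! Usage sketch (illustrative, not part of the original file): the generic
! interfaces above dispatch on rank and kind, so a reduction or broadcast is
! a single in-place call issued on every PE of commglobal, e.g.
!   call mp_reduce_max(dt_local)  ! dt_local now holds the global maximum
!   call mp_bcst(cfl_scalar)      ! value broadcast from masterproc to all PEs
! (dt_local and cfl_scalar are hypothetical variables used for illustration.)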

 integer :: halo_update_type = 1 ! 1 selects nonblocking group halo updates (start/complete);
                                 ! any other value falls back to blocking updates at completion

contains

 subroutine mp_assign_gid

 gid = mpp_pe()
 npes = mpp_npes()

 end subroutine mp_assign_gid

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
! mp_start :: Start SPMD processes
!
 subroutine mp_start(commID, halo_update_type_in)
 integer, intent(in), optional :: commID
 integer, intent(in), optional :: halo_update_type_in

 integer :: ios
 integer :: unit

 masterproc = mpp_root_pe()
 commglobal = mpi_comm_world
 if( PRESENT(commid) )then
 commglobal = commid
 end if
 if( PRESENT(halo_update_type_in) )then ! an optional argument must be tested before it is referenced
 halo_update_type = halo_update_type_in
 end if

 numthreads = 1
!$OMP PARALLEL
!$OMP MASTER
!$ numthreads = omp_get_num_threads()
!$OMP END MASTER
!$OMP END PARALLEL

 if ( mpp_pe()==mpp_root_pe() ) then
 master = .true.
 unit = stdout()
 write(unit,*) 'Starting PEs : ', mpp_npes() !Should be for current pelist
 write(unit,*) 'Starting Threads : ', numthreads
 else
 master = .false.
 endif

 if (mpp_npes() > 1) call mpi_barrier(commglobal, ierror)

 end subroutine mp_start
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

 logical function is_master()

 is_master = master

 end function is_master

 subroutine setup_master(pelist_local)

 integer, intent(IN) :: pelist_local(:)

 if (any(gid == pelist_local)) then

 masterproc = pelist_local(1)
 master = (gid == masterproc)

 endif

 end subroutine setup_master

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
! mp_barrier :: Wait for all SPMD processes
!
 subroutine mp_barrier()

 call mpi_barrier(commglobal, ierror)

 end subroutine mp_barrier
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
! mp_stop :: Stop SPMD processes
!
 subroutine mp_stop()

 call mpi_barrier(commglobal, ierror)
 if (gid==masterproc) print*, 'Stopping PEs : ', npes
 call fms_end()
 ! call MPI_FINALIZE (ierror)

 end subroutine mp_stop
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
! domain_decomp :: Setup domain decomposition
!
 subroutine domain_decomp(npx,npy,nregions,grid_type,nested,Atm,layout,io_layout)

 integer, intent(IN) :: npx,npy,grid_type
 integer, intent(INOUT) :: nregions
 logical, intent(IN):: nested
 type(fv_atmos_type), intent(INOUT), target :: Atm
 integer, intent(INOUT) :: layout(2), io_layout(2)

 integer, allocatable :: pe_start(:), pe_end(:)

 integer :: nx,ny,n,num_alloc
 character(len=32) :: type = "unknown"
 logical :: is_symmetry
 logical :: debug=.false.
 integer, allocatable :: tile_id(:)

 integer i
 integer :: npes_x, npes_y

 integer, pointer :: pelist(:), grid_number, num_contact, npes_per_tile
 logical, pointer :: square_domain
 type(domain2D), pointer :: domain, domain_for_coupler

 nx = npx-1
 ny = npy-1

 !! Init pointers
 pelist => atm%pelist
 grid_number => atm%grid_number
 num_contact => atm%num_contact
 domain => atm%domain
 domain_for_coupler => atm%domain_for_coupler
 npes_per_tile => atm%npes_per_tile

 npes_x = layout(1)
 npes_y = layout(2)


 call mpp_domains_init(mpp_domain_time)

 ! call mpp_domains_set_stack_size(10000)
 ! call mpp_domains_set_stack_size(900000)
 ! call mpp_domains_set_stack_size(1500000)
#ifdef SMALL_PE
 call mpp_domains_set_stack_size(6000000)
#else
 call mpp_domains_set_stack_size(3000000)
#endif

 select case(nregions)
 case ( 1 ) ! single-tile domains: one cubed-sphere face, lat-lon, or Cartesian

 select case (grid_type)
 case (0,1,2) ! gnomonic cubed-sphere grid (possibly nested)
 if (nested) then
 type = "Cubed-sphere nested grid"
 else
 type = "Cubed-sphere, single face"
 end if
 nregions = 1
 num_contact = 0
 npes_per_tile = npes_x*npes_y !/nregions !Set up for concurrency
 if (npes_per_tile==0) then
 npes_per_tile = npes/nregions
 endif
 is_symmetry = .true.
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )

 if ( npes_x == 0 ) then
 npes_x = layout(1)
 endif
 if ( npes_y == 0 ) then
 npes_y = layout(2)
 endif

 if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) atm%gridstruct%square_domain = .true.

 if ( (npx/npes_x < ng) .or. (npy/npes_y < ng) ) then
 write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y
 call mp_stop
 call exit(1)
 endif

 layout = (/npes_x,npes_y/)
 case (3) ! Lat-Lon "cyclic"
 type="Lat-Lon: cyclic"
 nregions = 4
 num_contact = 8
 if( mod(npes,nregions) .NE. 0 ) then
 call mpp_error(note,'TEST_MPP_DOMAINS: for the cyclic mosaic, npes should be a multiple of nregions. ' // &
 'No test is done for the cyclic mosaic. ' )
 return
 end if
 npes_per_tile = npes/nregions
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
 layout = (/1,npes_per_tile/) ! force decomposition in the latitude direction only
 case (4) ! Cartesian, double periodic
 type="Cartesian: double periodic"
 nregions = 1
 num_contact = 2
 npes_per_tile = npes/nregions
 if(npes_x*npes_y == npes_per_tile) then
 layout = (/npes_x,npes_y/)
 else
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
 endif
 case (5) ! latlon patch
 type="Lat-Lon: patch"
 nregions = 1
 num_contact = 0
 npes_per_tile = npes/nregions
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
 case (6) ! latlon strip
 type="Lat-Lon: strip"
 nregions = 1
 num_contact = 1
 npes_per_tile = npes/nregions
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
 case (7) ! Cartesian, channel
 type="Cartesian: channel"
 nregions = 1
 num_contact = 1
 npes_per_tile = npes/nregions
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )
 end select

 case ( 6 ) ! Cubed-Sphere
 type="Cubic: cubed-sphere"
 if (nested) then
 call mpp_error(fatal, 'For a nested grid with grid_type < 3, nregions_domain must equal 1.')
 endif
 nregions = 6
 num_contact = 12
 !--- a cubed-sphere grid always has six tiles, so npes should be a multiple of 6
 npes_per_tile = npes_x*npes_y
 if (npes_per_tile==0) then
 npes_per_tile = npes/nregions
 endif
 call mpp_define_layout( (/1,npx-1,1,npy-1/), npes_per_tile, layout )

 if ( npes_x == 0 ) then
 npes_x = layout(1)
 endif
 if ( npes_y == 0 ) then
 npes_y = layout(2)
 endif

 if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) atm%gridstruct%square_domain = .true.

 if ( (npx/npes_x < ng) .or. (npy/npes_y < ng) ) then
 write(*,310) npes_x, npes_y, npx/npes_x, npy/npes_y
310 format('Invalid layout, NPES_X:',i4.4,' NPES_Y:',i4.4,' ncells_X:',i4.4,' ncells_Y:',i4.4)
 call mp_stop
 call exit(1)
 endif

 layout = (/npes_x,npes_y/)
 case default
 call mpp_error(fatal, 'domain_decomp: no such test: '//type)
 end select

 allocate(layout2d(2,nregions), global_indices(4,nregions), npes_tile(nregions) )
 allocate(pe_start(nregions),pe_end(nregions))
 npes_tile = npes_per_tile
 do n = 1, nregions
 global_indices(:,n) = (/1,npx-1,1,npy-1/)
 layout2d(:,n) = layout
 pe_start(n) = pelist(1) + (n-1)*layout(1)*layout(2)
 pe_end(n) = pe_start(n) + layout(1)*layout(2) -1
 end do
 num_alloc=max(1,num_contact)
 allocate(tile1(num_alloc), tile2(num_alloc) )
 allocate(istart1(num_alloc), iend1(num_alloc), jstart1(num_alloc), jend1(num_alloc) )
 allocate(istart2(num_alloc), iend2(num_alloc), jstart2(num_alloc), jend2(num_alloc) )

 is_symmetry = .true.
 select case(nregions)
 case ( 1 )

 select case (grid_type)
 case (0,1,2) !Gnomonic nested grid
 !No contacts, don't need to do anything
 case (3) ! Lat-Lon "cyclic"
 !--- Contact line 1, between tile 1 (EAST) and tile 2 (WEST)
 tile1(1) = 1; tile2(1) = 2
 istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1;  jend1(1) = ny
 istart2(1) = 1;  iend2(1) = 1;  jstart2(1) = 1;  jend2(1) = ny
 !--- Contact line 2, between tile 1 (SOUTH) and tile 3 (NORTH) --- cyclic
 tile1(2) = 1; tile2(2) = 3
 istart1(2) = 1;  iend1(2) = nx; jstart1(2) = 1;  jend1(2) = 1
 istart2(2) = 1;  iend2(2) = nx; jstart2(2) = ny; jend2(2) = ny
 !--- Contact line 3, between tile 1 (WEST) and tile 2 (EAST) --- cyclic
 tile1(3) = 1; tile2(3) = 2
 istart1(3) = 1;  iend1(3) = 1;  jstart1(3) = 1;  jend1(3) = ny
 istart2(3) = nx; iend2(3) = nx; jstart2(3) = 1;  jend2(3) = ny
 !--- Contact line 4, between tile 1 (NORTH) and tile 3 (SOUTH)
 tile1(4) = 1; tile2(4) = 3
 istart1(4) = 1;  iend1(4) = nx; jstart1(4) = ny; jend1(4) = ny
 istart2(4) = 1;  iend2(4) = nx; jstart2(4) = 1;  jend2(4) = 1
 !--- Contact line 5, between tile 2 (SOUTH) and tile 4 (NORTH) --- cyclic
 tile1(5) = 2; tile2(5) = 4
 istart1(5) = 1;  iend1(5) = nx; jstart1(5) = 1;  jend1(5) = 1
 istart2(5) = 1;  iend2(5) = nx; jstart2(5) = ny; jend2(5) = ny
 !--- Contact line 6, between tile 2 (NORTH) and tile 4 (SOUTH)
 tile1(6) = 2; tile2(6) = 4
 istart1(6) = 1;  iend1(6) = nx; jstart1(6) = ny; jend1(6) = ny
 istart2(6) = 1;  iend2(6) = nx; jstart2(6) = 1;  jend2(6) = 1
 !--- Contact line 7, between tile 3 (EAST) and tile 4 (WEST)
 tile1(7) = 3; tile2(7) = 4
 istart1(7) = nx; iend1(7) = nx; jstart1(7) = 1;  jend1(7) = ny
 istart2(7) = 1;  iend2(7) = 1;  jstart2(7) = 1;  jend2(7) = ny
 !--- Contact line 8, between tile 3 (WEST) and tile 4 (EAST) --- cyclic
 tile1(8) = 3; tile2(8) = 4
 istart1(8) = 1;  iend1(8) = 1;  jstart1(8) = 1;  jend1(8) = ny
 istart2(8) = nx; iend2(8) = nx; jstart2(8) = 1;  jend2(8) = ny
 is_symmetry = .false.
 case (4) ! Cartesian, double periodic
 !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST)
 tile1(1) = 1; tile2(1) = 1
 istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1;  jend1(1) = ny
 istart2(1) = 1;  iend2(1) = 1;  jstart2(1) = 1;  jend2(1) = ny
 !--- Contact line 2, between tile 1 (SOUTH) and tile 1 (NORTH) --- cyclic
 tile1(2) = 1; tile2(2) = 1
 istart1(2) = 1;  iend1(2) = nx; jstart1(2) = 1;  jend1(2) = 1
 istart2(2) = 1;  iend2(2) = nx; jstart2(2) = ny; jend2(2) = ny
 case (5) ! latlon patch

 case (6) !latlon strip
 !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST)
 tile1(1) = 1; tile2(1) = 1
 istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1;  jend1(1) = ny
 istart2(1) = 1;  iend2(1) = 1;  jstart2(1) = 1;  jend2(1) = ny
 case (7) ! Cartesian, channel
 !--- Contact line 1, between tile 1 (EAST) and tile 1 (WEST)
 tile1(1) = 1; tile2(1) = 1
 istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1;  jend1(1) = ny
 istart2(1) = 1;  iend2(1) = 1;  jstart2(1) = 1;  jend2(1) = ny
 end select

 case ( 6 ) ! Cubed-Sphere
 !--- Contact line 1, between tile 1 (EAST) and tile 2 (WEST)
 tile1(1) = 1; tile2(1) = 2
 istart1(1) = nx; iend1(1) = nx; jstart1(1) = 1;  jend1(1) = ny
 istart2(1) = 1;  iend2(1) = 1;  jstart2(1) = 1;  jend2(1) = ny
 !--- Contact line 2, between tile 1 (NORTH) and tile 3 (WEST)
 tile1(2) = 1; tile2(2) = 3
 istart1(2) = 1;  iend1(2) = nx; jstart1(2) = ny; jend1(2) = ny
 istart2(2) = 1;  iend2(2) = 1;  jstart2(2) = ny; jend2(2) = 1
 !--- Contact line 3, between tile 1 (WEST) and tile 5 (NORTH)
 tile1(3) = 1; tile2(3) = 5
 istart1(3) = 1;  iend1(3) = 1;  jstart1(3) = 1;  jend1(3) = ny
 istart2(3) = nx; iend2(3) = 1;  jstart2(3) = ny; jend2(3) = ny
 !--- Contact line 4, between tile 1 (SOUTH) and tile 6 (NORTH)
 tile1(4) = 1; tile2(4) = 6
 istart1(4) = 1;  iend1(4) = nx; jstart1(4) = 1;  jend1(4) = 1
 istart2(4) = 1;  iend2(4) = nx; jstart2(4) = ny; jend2(4) = ny
 !--- Contact line 5, between tile 2 (NORTH) and tile 3 (SOUTH)
 tile1(5) = 2; tile2(5) = 3
 istart1(5) = 1;  iend1(5) = nx; jstart1(5) = ny; jend1(5) = ny
 istart2(5) = 1;  iend2(5) = nx; jstart2(5) = 1;  jend2(5) = 1
 !--- Contact line 6, between tile 2 (EAST) and tile 4 (SOUTH)
 tile1(6) = 2; tile2(6) = 4
 istart1(6) = nx; iend1(6) = nx; jstart1(6) = 1;  jend1(6) = ny
 istart2(6) = nx; iend2(6) = 1;  jstart2(6) = 1;  jend2(6) = 1
 !--- Contact line 7, between tile 2 (SOUTH) and tile 6 (EAST)
 tile1(7) = 2; tile2(7) = 6
 istart1(7) = 1;  iend1(7) = nx; jstart1(7) = 1;  jend1(7) = 1
 istart2(7) = nx; iend2(7) = nx; jstart2(7) = ny; jend2(7) = 1
 !--- Contact line 8, between tile 3 (EAST) and tile 4 (WEST)
 tile1(8) = 3; tile2(8) = 4
 istart1(8) = nx; iend1(8) = nx; jstart1(8) = 1;  jend1(8) = ny
 istart2(8) = 1;  iend2(8) = 1;  jstart2(8) = 1;  jend2(8) = ny
 !--- Contact line 9, between tile 3 (NORTH) and tile 5 (WEST)
 tile1(9) = 3; tile2(9) = 5
 istart1(9) = 1;  iend1(9) = nx; jstart1(9) = ny; jend1(9) = ny
 istart2(9) = 1;  iend2(9) = 1;  jstart2(9) = ny; jend2(9) = 1
 !--- Contact line 10, between tile 4 (NORTH) and tile 5 (SOUTH)
 tile1(10) = 4; tile2(10) = 5
 istart1(10) = 1;  iend1(10) = nx; jstart1(10) = ny; jend1(10) = ny
 istart2(10) = 1;  iend2(10) = nx; jstart2(10) = 1;  jend2(10) = 1
 !--- Contact line 11, between tile 4 (EAST) and tile 6 (SOUTH)
 tile1(11) = 4; tile2(11) = 6
 istart1(11) = nx; iend1(11) = nx; jstart1(11) = 1;  jend1(11) = ny
 istart2(11) = nx; iend2(11) = 1;  jstart2(11) = 1;  jend2(11) = 1
 !--- Contact line 12, between tile 5 (EAST) and tile 6 (WEST)
 tile1(12) = 5; tile2(12) = 6
 istart1(12) = nx; iend1(12) = nx; jstart1(12) = 1;  jend1(12) = ny
 istart2(12) = 1;  iend2(12) = 1;  jstart2(12) = 1;  jend2(12) = ny
 end select

 if ( any(pelist == gid) ) then
 allocate(tile_id(nregions))
 if( nested ) then
 if( nregions .NE. 1 ) then
 call mpp_error(fatal, 'domain_decomp: nregions should be 1 for a nested region; contact the developers')
 endif
 tile_id(1) = 7 ! currently we assume the nested tile is nested in one face of the cubed-sphere grid;
                ! a more general way to assign nested-grid tile ids is still needed
 else
 do n = 1, nregions
 tile_id(n) = n
 enddo
 endif
 call mpp_define_mosaic(global_indices, layout2d, domain, nregions, num_contact, tile1, tile2, &
                        istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, &
                        pe_start=pe_start, pe_end=pe_end, symmetry=is_symmetry, &
                        shalo = ng, nhalo = ng, whalo = ng, ehalo = ng, tile_id=tile_id, name = type)
 call mpp_define_mosaic(global_indices, layout2d, domain_for_coupler, nregions, num_contact, tile1, tile2, &
                        istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, &
                        pe_start=pe_start, pe_end=pe_end, symmetry=is_symmetry, &
                        shalo = 1, nhalo = 1, whalo = 1, ehalo = 1, tile_id=tile_id, name = type)
 deallocate(tile_id)
 call mpp_define_io_domain(domain, io_layout)
 call mpp_define_io_domain(domain_for_coupler, io_layout)

 endif

 deallocate(pe_start,pe_end)
 deallocate(layout2d, global_indices, npes_tile)
 deallocate(tile1, tile2)
 deallocate(istart1, iend1, jstart1, jend1)
 deallocate(istart2, iend2, jstart2, jend2)

 !--- find the tile number
 atm%tile = (gid-pelist(1))/npes_per_tile+1
 if (any(pelist == gid)) then
 npes_this_grid = npes_per_tile*nregions
 tile = atm%tile
 call mpp_get_compute_domain( domain, is, ie, js, je )
 call mpp_get_data_domain ( domain, isd, ied, jsd, jed )

 atm%bd%is = is
 atm%bd%js = js
 atm%bd%ie = ie
 atm%bd%je = je

 atm%bd%isd = isd
 atm%bd%jsd = jsd
 atm%bd%ied = ied
 atm%bd%jed = jed

 atm%bd%isc = is
 atm%bd%jsc = js
 atm%bd%iec = ie
 atm%bd%jec = je

 if (debug .and. nregions==1) then
 tile=1
 write(*,200) tile, is, ie, js, je
 ! call mp_stop
 ! stop
 endif
200 format(i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ')
 else

 atm%bd%is = 0
 atm%bd%js = 0
 atm%bd%ie = -1
 atm%bd%je = -1

 atm%bd%isd = 0
 atm%bd%jsd = 0
 atm%bd%ied = -1
 atm%bd%jed = -1

 atm%bd%isc = 0
 atm%bd%jsc = 0
 atm%bd%iec = -1
 atm%bd%jec = -1

 endif

 end subroutine domain_decomp
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
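
! Worked example (illustrative, not part of the original file): for a C48
! cubed-sphere (npx = npy = 49, nregions = 6) on 24 PEs with layout = (0,0),
! npes_per_tile = 24/6 = 4 and mpp_define_layout returns layout = (2,2), so
! each PE owns a 24x24 compute domain plus an ng = 3 deep halo on each side.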

subroutine start_var_group_update_2d(group, array, domain, flags, position, whalo, ehalo, shalo, nhalo, complete)
 type(group_halo_update_type), intent(inout) :: group
 real, dimension(:,:), intent(inout) :: array
 type(domain2D), intent(inout) :: domain
 integer, optional, intent(in) :: flags
 integer, optional, intent(in) :: position
 integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
 logical, optional, intent(in) :: complete
 real :: d_type ! dummy variable, used by mpp only to resolve the data type
 logical :: is_complete
! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) array - The array which is having its halo points exchanged.
! (in) domain - Contains domain information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) position - An optional argument indicating the position. This
!                 may be CORNER, but is CENTER by default.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

 if (mpp_group_update_initialized(group)) then
 call mpp_reset_group_update_field(group,array)
 else
 call mpp_create_group_update(group, array, domain, flags=flags, position=position, &
                              whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
 endif

 is_complete = .true.
 if(present(complete)) is_complete = complete
 if(is_complete .and. halo_update_type == 1) then
 call mpp_start_group_update(group, domain, d_type)
 endif

end subroutine start_var_group_update_2d


subroutine start_var_group_update_3d(group, array, domain, flags, position, whalo, ehalo, shalo, nhalo, complete)
 type(group_halo_update_type), intent(inout) :: group
 real, dimension(:,:,:), intent(inout) :: array
 type(domain2D), intent(inout) :: domain
 integer, optional, intent(in) :: flags
 integer, optional, intent(in) :: position
 integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
 logical, optional, intent(in) :: complete
 real :: d_type ! dummy variable, used by mpp only to resolve the data type
 logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) array - The array which is having its halo points exchanged.
! (in) domain - Contains domain information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) position - An optional argument indicating the position. This
!                 may be CORNER, but is CENTER by default.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

 if (mpp_group_update_initialized(group)) then
 call mpp_reset_group_update_field(group,array)
 else
 call mpp_create_group_update(group, array, domain, flags=flags, position=position, &
                              whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
 endif

 is_complete = .true.
 if(present(complete)) is_complete = complete
 if(is_complete .and. halo_update_type == 1 ) then
 call mpp_start_group_update(group, domain, d_type)
 endif

end subroutine start_var_group_update_3d

subroutine start_var_group_update_4d(group, array, domain, flags, position, whalo, ehalo, shalo, nhalo, complete)
 type(group_halo_update_type), intent(inout) :: group
 real, dimension(:,:,:,:), intent(inout) :: array
 type(domain2D), intent(inout) :: domain
 integer, optional, intent(in) :: flags
 integer, optional, intent(in) :: position
 integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
 logical, optional, intent(in) :: complete
 real :: d_type ! dummy variable, used by mpp only to resolve the data type
 logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) array - The array which is having its halo points exchanged.
! (in) domain - Contains domain information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) position - An optional argument indicating the position. This
!                 may be CORNER, but is CENTER by default.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

 if (mpp_group_update_initialized(group)) then
 call mpp_reset_group_update_field(group,array)
 else
 call mpp_create_group_update(group, array, domain, flags=flags, position=position, &
                              whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
 endif

 is_complete = .true.
 if(present(complete)) is_complete = complete
 if(is_complete .and. halo_update_type == 1 ) then
 call mpp_start_group_update(group, domain, d_type)
 endif

end subroutine start_var_group_update_4d



subroutine start_vector_group_update_2d(group, u_cmpt, v_cmpt, domain, flags, gridtype, whalo, ehalo, shalo, nhalo, complete)
 type(group_halo_update_type), intent(inout) :: group
 real, dimension(:,:), intent(inout) :: u_cmpt, v_cmpt
 type(domain2d), intent(inout) :: domain
 integer, optional, intent(in) :: flags
 integer, optional, intent(in) :: gridtype
 integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
 logical, optional, intent(in) :: complete
 real :: d_type ! dummy variable, used by mpp only to resolve the data type
 logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) u_cmpt - The nominal zonal (u) component of the vector pair which
!                  is having its halo points exchanged.
! (inout) v_cmpt - The nominal meridional (v) component of the vector pair
!                  which is having its halo points exchanged.
! (in) domain - Contains domain decomposition information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE,
!                 CGRID_NE or DGRID_NE, indicating where the two components
!                 of the vector are discretized.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

 if (mpp_group_update_initialized(group)) then
 call mpp_reset_group_update_field(group,u_cmpt, v_cmpt)
 else
 call mpp_create_group_update(group, u_cmpt, v_cmpt, domain, &
                              flags=flags, gridtype=gridtype, &
                              whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
 endif

 is_complete = .true.
 if(present(complete)) is_complete = complete
 if(is_complete .and. halo_update_type == 1 ) then
 call mpp_start_group_update(group, domain, d_type)
 endif

end subroutine start_vector_group_update_2d

subroutine start_vector_group_update_3d(group, u_cmpt, v_cmpt, domain, flags, gridtype, whalo, ehalo, shalo, nhalo, complete)
 type(group_halo_update_type), intent(inout) :: group
 real, dimension(:,:,:), intent(inout) :: u_cmpt, v_cmpt
 type(domain2d), intent(inout) :: domain
 integer, optional, intent(in) :: flags
 integer, optional, intent(in) :: gridtype
 integer, optional, intent(in) :: whalo, ehalo, shalo, nhalo
 logical, optional, intent(in) :: complete
 real :: d_type ! dummy variable, used by mpp only to resolve the data type
 logical :: is_complete

! Arguments:
! (inout) group - The data type that stores information for the group update.
!                 This data will be used in do_group_pass.
! (inout) u_cmpt - The nominal zonal (u) component of the vector pair which
!                  is having its halo points exchanged.
! (inout) v_cmpt - The nominal meridional (v) component of the vector pair
!                  which is having its halo points exchanged.
! (in) domain - Contains domain decomposition information.
! (in) flags - An optional integer indicating which directions the
!              data should be sent.
! (in) gridtype - An optional flag, which may be one of A_GRID, BGRID_NE,
!                 CGRID_NE or DGRID_NE, indicating where the two components
!                 of the vector are discretized.
! (in) complete - An optional argument indicating whether the halo updates
!                 should be initiated immediately or wait for a second
!                 pass_..._start call. Omitting complete is the same as
!                 setting complete to .true.

 if (mpp_group_update_initialized(group)) then
 call mpp_reset_group_update_field(group,u_cmpt, v_cmpt)
 else
 call mpp_create_group_update(group, u_cmpt, v_cmpt, domain, &
                              flags=flags, gridtype=gridtype, &
                              whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo)
 endif

 is_complete = .true.
 if(present(complete)) is_complete = complete
 if(is_complete .and. halo_update_type == 1) then
 call mpp_start_group_update(group, domain, d_type)
 endif

end subroutine start_vector_group_update_3d


subroutine complete_group_halo_update(group, domain)
 type(group_halo_update_type), intent(inout) :: group
 type(domain2d), intent(inout) :: domain
 real :: d_type ! dummy variable, used by mpp only to resolve the data type

! Arguments:
! (inout) group - The data type that stores information for the group update.
! (in) domain - Contains domain decomposition information.

 if( halo_update_type == 1 ) then
 call mpp_complete_group_update(group, domain, d_type)
 else
 call mpp_do_group_update(group, domain, d_type)
 endif

end subroutine complete_group_halo_update
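
! Usage sketch (illustrative, not part of the original file): start a grouped
! halo exchange, overlap interior computation, then complete it. Here i_pack
! and pt are hypothetical names:
!   type(group_halo_update_type), save :: i_pack
!   call start_group_halo_update(i_pack, pt, domain)  ! post the exchange
!   ! ... compute on the interior (is:ie, js:je) while halos are in flight ...
!   call complete_group_halo_update(i_pack, domain)   ! wait and unpack halos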




subroutine broadcast_domains(Atm)

 type(fv_atmos_type), intent(INOUT) :: Atm(:)

 integer :: n, i1, i2, j1, j2, i
 integer :: ens_root_pe, ensemble_id

 !I think the idea is that each process needs to properly be part of a pelist;
 !the pelist on which the domain is currently defined is ONLY for the PEs which have the domain.

 ! This is needed to set the proper pelist for the ensemble. The pelist
 ! needs to include the non-nested+nested tiles for the ensemble.
 ensemble_id = get_ensemble_id()
 ens_root_pe = (ensemble_id-1)*npes

 !The pelist needs to be set to ALL ensemble PEs for broadcast_domain to work
 call mpp_set_current_pelist((/ (i,i=ens_root_pe,npes-1+ens_root_pe) /))
 do n=1,size(atm)
 call mpp_broadcast_domain(atm(n)%domain)
 call mpp_broadcast_domain(atm(n)%domain_for_coupler)
 end do

end subroutine broadcast_domains
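
! For example (illustrative): with npes = 96 PEs per ensemble member, member 2
! (ensemble_id = 2) has ens_root_pe = 96, so the pelist constructed above
! spans PEs 96..191 before the domains are broadcast.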

subroutine switch_current_domain(new_domain,new_domain_for_coupler)

 type(domain2D), intent(in), target :: new_domain, new_domain_for_coupler
 logical, parameter :: debug = .false.

 !--- find the tile number
 !tile = mpp_pe()/npes_per_tile+1
 !ntiles = mpp_get_ntile_count(new_domain)
 call mpp_get_compute_domain( new_domain, is, ie, js, je )
 isc = is ; jsc = js
 iec = ie ; jec = je
 call mpp_get_data_domain ( new_domain, isd, ied, jsd, jed )
! if ( npes_x==npes_y .and. (npx-1)==((npx-1)/npes_x)*npes_x ) square_domain = .true.

! if (debug .AND. (gid==masterproc)) write(*,200) tile, is, ie, js, je
!200 format('New domain: ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ', i4.4, ' ')

 call set_domain(new_domain)


end subroutine switch_current_domain

subroutine switch_current_atm(new_Atm, switch_domain)

 type(fv_atmos_type), intent(IN), target :: new_Atm
 logical, intent(IN), optional :: switch_domain
 logical, parameter :: debug = .false.
 logical :: swD

 if (debug .AND. (gid==masterproc)) print*, 'SWITCHING ATM STRUCTURES', new_atm%grid_number
 if (present(switch_domain)) then
 swd = switch_domain
 else
 swd = .true.
 end if
 if (swd) call switch_current_domain(new_atm%domain, new_atm%domain_for_coupler)

!!$ if (debug .AND. (gid==masterproc)) WRITE(*,'(A, 6I5)') 'NEW GRID DIMENSIONS: ', &
!!$ isd, ied, jsd, jed, new_Atm%npx, new_Atm%npy

end subroutine switch_current_atm

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
!
 subroutine fill_corners_2d_r4(q, npx, npy, FILL, AGRID, BGRID)
 real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: q
 integer, intent(IN):: npx,npy
 integer, intent(IN):: FILL ! X-Dir or Y-Dir
 logical, OPTIONAL, intent(IN) :: AGRID, BGRID
 integer :: i,j

 if (present(bgrid)) then
 if (bgrid) then
 select case (fill)
 case (xdir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
 enddo
 enddo
 case (ydir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner
 enddo
 enddo
 case default
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
 enddo
 enddo
 end select
 endif
 elseif (present(agrid)) then
 if (agrid) then
 select case (fill)
 case (xdir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner
 enddo
 enddo
 case (ydir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
 enddo
 enddo
 case default
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
 enddo
 enddo
 end select
 endif
 endif

 end subroutine fill_corners_2d_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
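! Index check (illustrative, not part of the original file): on a B-grid tile
! with is = js = 1, the XDir SW-corner fill at i = j = 1 reads
!   q(0,0) = q(0,2)
! i.e. each corner ghost point is copied from the adjacent edge halo, rotated
! 90 degrees about the tile corner.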
!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
!
 subroutine fill_corners_2d_r8(q, npx, npy, FILL, AGRID, BGRID)
 real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: q
 integer, intent(IN):: npx,npy
 integer, intent(IN):: FILL ! X-Dir or Y-Dir
 logical, OPTIONAL, intent(IN) :: AGRID, BGRID
 integer :: i,j

 if (present(bgrid)) then
 if (bgrid) then
 select case (fill)
 case (xdir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
 enddo
 enddo
 case (ydir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i+1 ,1-j ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-j ,npy+i) = q(i+1 ,npy+j ) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx+j,1-i ) = q(npx-i,1-j ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx+j,npy+i) = q(npx-i,npy+j ) !NE Corner
 enddo
 enddo
 case default
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i+1 ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-i ,npy+j) = q(1-j ,npy-i ) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx+i,1-j ) = q(npx+j,i+1 ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx+i,npy+j) = q(npx+j,npy-i ) !NE Corner
 enddo
 enddo
 end select
 endif
 elseif (present(agrid)) then
 if (agrid) then
 select case (fill)
 case (xdir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-i ,1-j ) = q(1-j ,i ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-i ,npy-1+j) = q(1-j ,npy-1-i+1) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx-1+i,1-j ) = q(npx-1+j,i ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+i,npy-1+j) = q(npx-1+j,npy-1-i+1) !NE Corner
 enddo
 enddo
 case (ydir)
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
 enddo
 enddo
 case default
 do j=1,ng
 do i=1,ng
 if ((is== 1) .and. (js== 1)) q(1-j ,1-i ) = q(i ,1-j ) !SW Corner
 if ((is== 1) .and. (je==npy-1)) q(1-j ,npy-1+i) = q(i ,npy-1+j) !NW Corner
 if ((ie==npx-1) .and. (js== 1)) q(npx-1+j,1-i ) = q(npx-1-i+1,1-j ) !SE Corner
 if ((ie==npx-1) .and. (je==npy-1)) q(npx-1+j,npy-1+i) = q(npx-1-i+1,npy-1+j) !NE Corner
 enddo
 enddo
 end select
 endif
 endif

 end subroutine fill_corners_2d_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_2d_r8
 subroutine fill_corners_xy_2d_r8(x, y, npx, npy, DGRID, AGRID, CGRID, VECTOR)
 real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x !(isd:ied ,jsd:jed+1)
 real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y !(isd:ied+1,jsd:jed )
 integer, intent(IN):: npx,npy
 logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
 integer :: i,j

 real(kind=8) :: mySign

 mysign = 1.0
 if (present(vector)) then
 if (vector) mysign = -1.0
 endif

 if (present(dgrid)) then
 call fill_corners_dgrid(x, y, npx, npy, mysign)
 elseif (present(cgrid)) then
 call fill_corners_cgrid(x, y, npx, npy, mysign)
 elseif (present(agrid)) then
 call fill_corners_agrid(x, y, npx, npy, mysign)
 else
 call fill_corners_agrid(x, y, npx, npy, mysign)
 endif

 end subroutine fill_corners_xy_2d_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
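
! Usage sketch (illustrative, not part of the original file): for wind
! components the corner rotation swaps the u and v roles across a cube edge
! and must flip the sign, which VECTOR=.true. requests (mysign = -1):
!   call fill_corners(u, v, npx, npy, DGRID=.true., VECTOR=.true.)
! Scalar pairs keep mysign = +1, e.g.
!   call fill_corners(qx, qy, npx, npy, AGRID=.true.)
! (u, v, qx and qy are hypothetical arrays used for illustration.)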

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_2d_r4
 subroutine fill_corners_xy_2d_r4(x, y, npx, npy, DGRID, AGRID, CGRID, VECTOR)
 real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x !(isd:ied ,jsd:jed+1)
 real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y !(isd:ied+1,jsd:jed )
 integer, intent(IN):: npx,npy
 logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
 integer :: i,j

 real(kind=4) :: mySign

 mysign = 1.0
 if (present(vector)) then
 if (vector) mysign = -1.0
 endif

 if (present(dgrid)) then
 call fill_corners_dgrid(x, y, npx, npy, mysign)
 elseif (present(cgrid)) then
 call fill_corners_cgrid(x, y, npx, npy, mysign)
 elseif (present(agrid)) then
 call fill_corners_agrid(x, y, npx, npy, mysign)
 else
 call fill_corners_agrid(x, y, npx, npy, mysign)
 endif

 end subroutine fill_corners_xy_2d_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_3d_r8
 subroutine fill_corners_xy_3d_r8(x, y, npx, npy, npz, DGRID, AGRID, CGRID, VECTOR)
 real(kind=8), DIMENSION(isd:,jsd:,:), intent(INOUT):: x !(isd:ied ,jsd:jed+1)
 real(kind=8), DIMENSION(isd:,jsd:,:), intent(INOUT):: y !(isd:ied+1,jsd:jed )
 integer, intent(IN):: npx,npy,npz
 logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
 integer :: i,j,k

 real(kind=8) :: mySign

 mysign = 1.0
 if (present(vector)) then
 if (vector) mysign = -1.0
 endif

 if (present(dgrid)) then
 do k=1,npz
 call fill_corners_dgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 elseif (present(cgrid)) then
 do k=1,npz
 call fill_corners_cgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 elseif (present(agrid)) then
 do k=1,npz
 call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 else
 do k=1,npz
 call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 endif

 end subroutine fill_corners_xy_3d_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !!
! fill_corners_xy_3d_r4
 subroutine fill_corners_xy_3d_r4(x, y, npx, npy, npz, DGRID, AGRID, CGRID, VECTOR)
 real(kind=4), DIMENSION(isd:,jsd:,:), intent(INOUT):: x !(isd:ied ,jsd:jed+1)
 real(kind=4), DIMENSION(isd:,jsd:,:), intent(INOUT):: y !(isd:ied+1,jsd:jed )
 integer, intent(IN):: npx,npy,npz
 logical, OPTIONAL, intent(IN) :: DGRID, AGRID, CGRID, VECTOR
 integer :: i,j,k

 real(kind=4) :: mySign

 mysign = 1.0
 if (present(vector)) then
 if (vector) mysign = -1.0
 endif

 if (present(dgrid)) then
 do k=1,npz
 call fill_corners_dgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 elseif (present(cgrid)) then
 do k=1,npz
 call fill_corners_cgrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 elseif (present(agrid)) then
 do k=1,npz
 call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 else
 do k=1,npz
 call fill_corners_agrid(x(:,:,k), y(:,:,k), npx, npy, mysign)
 enddo
 endif

 end subroutine fill_corners_xy_3d_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_dgrid_r8(x, y, npx, npy, mySign)
 real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
 real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
 integer, intent(IN):: npx,npy
 real(kind=8), intent(IN) :: mySign
 integer :: i,j

 do j=1,ng
 do i=1,ng
 ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner
 ! if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = mySign*y(j+1 ,npy-1+i) !NW Corner
 ! if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = mySign*y(npx-j,1-i ) !SE Corner
 ! if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = y(npx-j,npy-1+i) !NE Corner
 if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
 if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = y(1-j ,npy-i) !NW Corner
 if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = y(npx+j,i ) !SE Corner
 if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = mysign*y(npx+j,npy-i) !NE Corner
 enddo
 enddo
 do j=1,ng
 do i=1,ng
 ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner
 ! if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = mySign*x(1-j ,npy-i) !NW Corner
 ! if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = mySign*x(npx-1+j,i+1 ) !SE Corner
 ! if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = x(npx-1+j,npy-i) !NE Corner
 if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mysign*x(j ,1-i ) !SW Corner
 if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = x(j ,npy+i) !NW Corner
 if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = x(npx-j ,1-i ) !SE Corner
 if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = mysign*x(npx-j ,npy+i) !NE Corner
 enddo
 enddo

 end subroutine fill_corners_dgrid_r8
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------

!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
 subroutine fill_corners_dgrid_r4(x, y, npx, npy, mySign)
 real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
 real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
 integer, intent(IN):: npx,npy
 real(kind=4), intent(IN) :: mySign
 integer :: i,j

 do j=1,ng
 do i=1,ng
 ! if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j+1 ,1-i ) !SW Corner
 ! if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = mySign*y(j+1 ,npy-1+i) !NW Corner
 ! if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = mySign*y(npx-j,1-i ) !SE Corner
 ! if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = y(npx-j,npy-1+i) !NE Corner
 if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
 if ((is == 1) .and. (je+1==npy)) x(1-i ,npy+j) = y(1-j ,npy-i) !NW Corner
 if ((ie+1==npx) .and. (js == 1)) x(npx-1+i,1-j ) = y(npx+j,i ) !SE Corner
 if ((ie+1==npx) .and. (je+1==npy)) x(npx-1+i,npy+j) = mysign*y(npx+j,npy-i) !NE Corner
 enddo
 enddo
 do j=1,ng
 do i=1,ng
 ! if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i+1 ) !SW Corner
 ! if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = mySign*x(1-j ,npy-i) !NW Corner
 ! if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = mySign*x(npx-1+j,i+1 ) !SE Corner
 ! if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = x(npx-1+j,npy-i) !NE Corner
 if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = mysign*x(j ,1-i ) !SW Corner
 if ((is == 1) .and. (je+1==npy)) y(1-i ,npy-1+j) = x(j ,npy+i) !NW Corner
 if ((ie+1==npx) .and. (js == 1)) y(npx+i ,1-j ) = x(npx-j ,1-i ) !SE Corner
 if ((ie+1==npx) .and. (je+1==npy)) y(npx+i ,npy-1+j) = mysign*x(npx-j ,npy+i) !NE Corner
 enddo
 enddo

 end subroutine fill_corners_dgrid_r4
!
! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
!-------------------------------------------------------------------------------
1347 
1348 !-------------------------------------------------------------------------------
1349 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1350 !
1351  subroutine fill_corners_cgrid_r4(x, y, npx, npy, mySign)
1352  real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
1353  real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
1354  integer, intent(IN):: npx,npy
1355  real(kind=4), intent(IN) :: mySign
1356  integer :: i,j
1357 
1358  do j=1,ng
1359  do i=1,ng
1360  if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner
1361  if ((is == 1) .and. (je+1==npy)) x(1-i ,npy-1+j) = mysign*y(j ,npy+i) !NW Corner
1362  if ((ie+1==npx) .and. (js == 1)) x(npx+i ,1-j ) = mysign*y(npx-j ,1-i ) !SE Corner
1363  if ((ie+1==npx) .and. (je+1==npy)) x(npx+i ,npy-1+j) = y(npx-j ,npy+i) !NE Corner
1364  enddo
1365  enddo
1366  do j=1,ng
1367  do i=1,ng
1368  if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner
1369  if ((is == 1) .and. (je+1==npy)) y(1-i ,npy+j) = mysign*x(1-j ,npy-i) !NW Corner
1370  if ((ie+1==npx) .and. (js == 1)) y(npx-1+i,1-j ) = mysign*x(npx+j,i ) !SE Corner
1371  if ((ie+1==npx) .and. (je+1==npy)) y(npx-1+i,npy+j) = x(npx+j,npy-i) !NE Corner
1372  enddo
1373  enddo
1374 
1375  end subroutine fill_corners_cgrid_r4
1376 !
1377 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1378 !-------------------------------------------------------------------------------
1379 
1380 !-------------------------------------------------------------------------------
1381 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1382 !
1383  subroutine fill_corners_cgrid_r8(x, y, npx, npy, mySign)
1384  real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
1385  real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
1386  integer, intent(IN):: npx,npy
1387  real(kind=8), intent(IN) :: mySign
1388  integer :: i,j
1389 
1390  do j=1,ng
1391  do i=1,ng
1392  if ((is == 1) .and. (js == 1)) x(1-i ,1-j ) = y(j ,1-i ) !SW Corner
1393  if ((is == 1) .and. (je+1==npy)) x(1-i ,npy-1+j) = mysign*y(j ,npy+i) !NW Corner
1394  if ((ie+1==npx) .and. (js == 1)) x(npx+i ,1-j ) = mysign*y(npx-j ,1-i ) !SE Corner
1395  if ((ie+1==npx) .and. (je+1==npy)) x(npx+i ,npy-1+j) = y(npx-j ,npy+i) !NE Corner
1396  enddo
1397  enddo
1398  do j=1,ng
1399  do i=1,ng
1400  if ((is == 1) .and. (js == 1)) y(1-i ,1-j ) = x(1-j ,i ) !SW Corner
1401  if ((is == 1) .and. (je+1==npy)) y(1-i ,npy+j) = mysign*x(1-j ,npy-i) !NW Corner
1402  if ((ie+1==npx) .and. (js == 1)) y(npx-1+i,1-j ) = mysign*x(npx+j,i ) !SE Corner
1403  if ((ie+1==npx) .and. (je+1==npy)) y(npx-1+i,npy+j) = x(npx+j,npy-i) !NE Corner
1404  enddo
1405  enddo
1406 
1407  end subroutine fill_corners_cgrid_r8
1408 !
1409 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1410 !-------------------------------------------------------------------------------
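! Editorial usage sketch (names are illustrative, not from the original
! source): the C-grid variants mirror the D-grid ones with the roles of
! x and y exchanged at each corner, matching the transposed staggering.
! For double-precision C-grid winds uc and vc on the data domain:
!
!   real(kind=8) :: msign = -1.d0   ! vector parity across the corner
!   call fill_corners_cgrid_r8(uc, vc, npx, npy, msign)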
1411 
1412 !-------------------------------------------------------------------------------
1413 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1414 !
1415  subroutine fill_corners_agrid_r4(x, y, npx, npy, mySign)
1416  real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: x
1417  real(kind=4), DIMENSION(isd:,jsd:), intent(INOUT):: y
1418  integer, intent(IN):: npx,npy
1419  real(kind=4), intent(IN) :: mySign
1420  integer :: i,j
1421 
1422  do j=1,ng
1423  do i=1,ng
1424  if ((is== 1) .and. (js== 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
1425  if ((is== 1) .and. (je==npy-1)) x(1-i ,npy-1+j) = y(1-j ,npy-1-i+1) !NW Corner
1426  if ((ie==npx-1) .and. (js== 1)) x(npx-1+i,1-j ) = y(npx-1+j,i ) !SE Corner
1427  if ((ie==npx-1) .and. (je==npy-1)) x(npx-1+i,npy-1+j) = mysign*y(npx-1+j,npy-1-i+1) !NE Corner
1428  enddo
1429  enddo
1430  do j=1,ng
1431  do i=1,ng
1432  if ((is== 1) .and. (js== 1)) y(1-j ,1-i ) = mysign*x(i ,1-j ) !SW Corner
1433  if ((is== 1) .and. (je==npy-1)) y(1-j ,npy-1+i) = x(i ,npy-1+j) !NW Corner
1434  if ((ie==npx-1) .and. (js== 1)) y(npx-1+j,1-i ) = x(npx-1-i+1,1-j ) !SE Corner
1435  if ((ie==npx-1) .and. (je==npy-1)) y(npx-1+j,npy-1+i) = mysign*x(npx-1-i+1,npy-1+j) !NE Corner
1436  enddo
1437  enddo
1438 
1439  end subroutine fill_corners_agrid_r4
1440 !
1441 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1442 !-------------------------------------------------------------------------------
1443 
1444 !-------------------------------------------------------------------------------
1445 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1446 !
1447  subroutine fill_corners_agrid_r8(x, y, npx, npy, mySign)
1448  real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: x
1449  real(kind=8), DIMENSION(isd:,jsd:), intent(INOUT):: y
1450  integer, intent(IN):: npx,npy
1451  real(kind=8), intent(IN) :: mySign
1452  integer :: i,j
1453 
1454  do j=1,ng
1455  do i=1,ng
1456  if ((is== 1) .and. (js== 1)) x(1-i ,1-j ) = mysign*y(1-j ,i ) !SW Corner
1457  if ((is== 1) .and. (je==npy-1)) x(1-i ,npy-1+j) = y(1-j ,npy-1-i+1) !NW Corner
1458  if ((ie==npx-1) .and. (js== 1)) x(npx-1+i,1-j ) = y(npx-1+j,i ) !SE Corner
1459  if ((ie==npx-1) .and. (je==npy-1)) x(npx-1+i,npy-1+j) = mysign*y(npx-1+j,npy-1-i+1) !NE Corner
1460  enddo
1461  enddo
1462  do j=1,ng
1463  do i=1,ng
1464  if ((is== 1) .and. (js== 1)) y(1-j ,1-i ) = mysign*x(i ,1-j ) !SW Corner
1465  if ((is== 1) .and. (je==npy-1)) y(1-j ,npy-1+i) = x(i ,npy-1+j) !NW Corner
1466  if ((ie==npx-1) .and. (js== 1)) y(npx-1+j,1-i ) = x(npx-1-i+1,1-j ) !SE Corner
1467  if ((ie==npx-1) .and. (je==npy-1)) y(npx-1+j,npy-1+i) = mysign*x(npx-1-i+1,npy-1+j) !NE Corner
1468  enddo
1469  enddo
1470 
1471  end subroutine fill_corners_agrid_r8
1472 !
1473 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1474 !-------------------------------------------------------------------------------
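! Editorial usage sketch: the A-grid variants operate on cell-centered
! pairs, which is why the edge tests compare je/ie against npy-1/npx-1
! (cell counts) rather than the npy/npx corner counts used by the C- and
! D-grid routines. Assuming hypothetical cell-centered components ua, va
! on the data domain:
!
!   real(kind=8) :: msign = -1.d0
!   call fill_corners_agrid_r8(ua, va, npx, npy, msign)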
1475 
1476 !!$!-------------------------------------------------------------------------------
1477 !!$! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1478 !!$!
1479 !!$! mp_corner_comm :: Point-based MPI communication routine for Cubed-Sphere
1480 !!$! ghosted corner point on B-Grid
1481 !!$! this routine sends 24 16-byte messages
1482 !!$!
1483 !!$ subroutine mp_corner_comm(q, npx, npy, tile)
1484 !!$ integer, intent(IN) :: npx,npy, tile
1485 !!$ real , intent(INOUT):: q(isd:ied+1,jsd:jed+1)
1486 !!$
1487 !!$ integer, parameter :: ntiles = 6
1488 !!$
1489 !!$ real :: qsend(24)
1490 !!$ integer :: send_tag, recv_tag
1491 !!$ integer :: sqest(24), rqest(24)
1492 !!$ integer :: Stats(24*MPI_STATUS_SIZE)
1493 !!$ integer :: nsend, nrecv, nread
1494 !!$ integer :: dest_gid, src_gid
1495 !!$ integer :: n
1496 !!$
1497 !!$ qsend = 1.e25
1498 !!$ nsend=0
1499 !!$ nrecv=0
1500 !!$
1501 !!$ if ( mod(tile,2) == 0 ) then
1502 !!$! Even Face LL and UR pairs 6 2-way
1503 !!$ if ( (is==1) .and. (js==1) ) then
1504 !!$ nsend=nsend+1
1505 !!$ qsend(nsend) = q(is,js+1)
1506 !!$ send_tag = 300+tile
1507 !!$ dest_gid = (tile-2)*npes_x*npes_y - 1
1508 !!$ if (dest_gid < 0) dest_gid=npes+dest_gid
1509 !!$ recv_tag = 100+(tile-2)
1510 !!$ if (tile==2) recv_tag = 100+(ntiles)
1511 !!$ src_gid = (tile-3)*npes_x*npes_y
1512 !!$ src_gid = src_gid + npes_x*(npes_y-1) + npes_x - 1
1513 !!$ if (src_gid < 0) src_gid=npes+src_gid
1514 !!$ if (npes>6) then
1515 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1516 !!$ dest_gid, send_tag, &
1517 !!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, &
1518 !!$ src_gid, recv_tag, &
1519 !!$ commglobal, Stats, ierror )
1520 !!$ nsend=nsend-1
1521 !!$ else
1522 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1523 !!$ send_tag, commglobal, sqest(nsend), ierror )
1524 !!$ nrecv=nrecv+1
1525 !!$ call MPI_IRECV( q(is-1,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1526 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1527 !!$ endif
1528 !!$ endif
1529 !!$ if ( (ie==npx-1) .and. (je==npy-1) ) then
1530 !!$ nsend=nsend+1
1531 !!$ qsend(nsend) = q(ie,je+1)
1532 !!$ send_tag = 100+tile
1533 !!$ dest_gid = (tile+1)*npes_x*npes_y
1534 !!$ if (dest_gid+1 > npes) dest_gid=dest_gid-npes
1535 !!$ recv_tag = 300+(tile+2)
1536 !!$ if (tile==6) recv_tag = 300+2
1537 !!$ src_gid = (tile+1)*npes_x*npes_y
1538 !!$ if (src_gid+1 > npes) src_gid=src_gid-npes
1539 !!$ if (npes>6) then
1540 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1541 !!$ dest_gid, send_tag, &
1542 !!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, &
1543 !!$ src_gid, recv_tag, &
1544 !!$ commglobal, Stats, ierror )
1545 !!$ nsend=nsend-1
1546 !!$ else
1547 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1548 !!$ send_tag, commglobal, sqest(nsend), ierror )
1549 !!$ nrecv=nrecv+1
1550 !!$ call MPI_IRECV( q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1551 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1552 !!$ endif
1553 !!$ endif
1554 !!$! wait for comm to complete
1555 !!$ if (npes==6) then
1556 !!$ if (nsend>0) then
1557 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1558 !!$
1559 !!$
1560 !!$
1561 !!$ endif
1562 !!$ if (nrecv>0) then
1563 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1564 !!$
1565 !!$
1566 !!$
1567 !!$ endif
1568 !!$ nsend=0 ; nrecv=0
1569 !!$ endif
1570 !!$
1571 !!$! Even Face LR 1 pair ; 1 1-way
1572 !!$ if ( (tile==2) .and. (ie==npx-1) .and. (js==1) ) then
1573 !!$ nsend=nsend+1
1574 !!$ qsend(nsend) = q(ie,js)
1575 !!$ send_tag = 200+tile
1576 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x-1
1577 !!$ recv_tag = 200+(tile+2)
1578 !!$ src_gid = dest_gid
1579 !!$ if (npes>6) then
1580 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1581 !!$ dest_gid, send_tag, &
1582 !!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, &
1583 !!$ src_gid, recv_tag, &
1584 !!$ commglobal, Stats, ierror )
1585 !!$ nsend=nsend-1
1586 !!$ else
1587 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1588 !!$ send_tag, commglobal, sqest(nsend), ierror )
1589 !!$ nrecv=nrecv+1
1590 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1591 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1592 !!$ endif
1593 !!$ endif
1594 !!$ if ( (tile==4) .and. (ie==npx-1) .and. (js==1) ) then
1595 !!$ nsend=nsend+1
1596 !!$ qsend(nsend) = q(ie+1,js+1)
1597 !!$ send_tag = 200+tile
1598 !!$ dest_gid = (tile-3)*npes_x*npes_y + npes_x-1
1599 !!$ recv_tag = 200+(tile-2)
1600 !!$ src_gid = dest_gid
1601 !!$ if (npes>6) then
1602 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1603 !!$ dest_gid, send_tag, &
1604 !!$ q(ie+2,js), 1, MPI_DOUBLE_PRECISION, &
1605 !!$ src_gid, recv_tag, &
1606 !!$ commglobal, Stats, ierror )
1607 !!$ nsend=nsend-1
1608 !!$ else
1609 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1610 !!$ send_tag, commglobal, sqest(nsend), ierror )
1611 !!$ nrecv=nrecv+1
1612 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1613 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1614 !!$ endif
1615 !!$ nsend=nsend+1
1616 !!$ qsend(nsend) = q(ie,js)
1617 !!$ send_tag = 200+tile
1618 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x-1
1619 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1620 !!$ send_tag, commglobal, sqest(nsend), ierror )
1621 !!$ endif
1622 !!$ if ( (tile==6) .and. (ie==npx-1) .and. (js==1) ) then
1623 !!$ recv_tag = 200+(tile-2)
1624 !!$ src_gid = (tile-3)*npes_x*npes_y + npes_x-1
1625 !!$ nrecv=nrecv+1
1626 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1627 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1628 !!$ endif
1629 !!$
1630 !!$! wait for comm to complete
1631 !!$ if (npes==6) then
1632 !!$ if (nsend>0) then
1633 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1634 !!$
1635 !!$
1636 !!$
1637 !!$ endif
1638 !!$ if (nrecv>0) then
1639 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1640 !!$
1641 !!$
1642 !!$
1643 !!$ endif
1644 !!$ nsend=0 ; nrecv=0
1645 !!$ endif
1646 !!$
1647 !!$! Send to Odd face LR 3 1-way
1648 !!$ if ( (is==1) .and. (js==1) ) then
1649 !!$ nsend=nsend+1
1650 !!$ qsend(nsend) = q(is+1,js)
1651 !!$ send_tag = 200+tile
1652 !!$ dest_gid = (tile-2)*npes_x*npes_y + npes_x-1
1653 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1654 !!$ send_tag, commglobal, sqest(nsend), ierror )
1655 !!$ endif
1656 !!$
1657 !!$! Receive Even Face UL 3 1-way
1658 !!$ if ( (is==1) .and. (je==npy-1) ) then
1659 !!$ recv_tag = 400+(tile-1)
1660 !!$ src_gid = (tile-2)*npes_x*npes_y + npes_x*(npes_y-1) + npes_x-1
1661 !!$ nrecv=nrecv+1
1662 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1663 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1664 !!$ endif
1665 !!$
1666 !!$ else
1667 !!$
1668 !!$! Odd Face LL and UR pairs 6 2-way
1669 !!$ if ( (is==1) .and. (js==1) ) then
1670 !!$ nsend=nsend+1
1671 !!$ qsend(nsend) = q(is+1,js)
1672 !!$ send_tag = 300+tile
1673 !!$ dest_gid = (tile-2)*npes_x*npes_y - 1
1674 !!$ if (dest_gid < 0) dest_gid=npes+dest_gid
1675 !!$ recv_tag = 100+(tile-2)
1676 !!$ if (tile==1) recv_tag = 100+(ntiles-tile)
1677 !!$ src_gid = (tile-3)*npes_x*npes_y
1678 !!$ src_gid = src_gid + npes_x*(npes_y-1) + npes_x - 1
1679 !!$ if (src_gid < 0) src_gid=npes+src_gid
1680 !!$ if (npes>6) then
1681 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1682 !!$ dest_gid, send_tag, &
1683 !!$ q(is-1,js), 1, MPI_DOUBLE_PRECISION, &
1684 !!$ src_gid, recv_tag, &
1685 !!$ commglobal, Stats, ierror )
1686 !!$ nsend=nsend-1
1687 !!$ else
1688 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1689 !!$ send_tag, commglobal, sqest(nsend), ierror )
1690 !!$ nrecv=nrecv+1
1691 !!$ call MPI_IRECV( q(is-1,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1692 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1693 !!$ endif
1694 !!$ endif
1695 !!$ if ( (ie==npx-1) .and. (je==npy-1) ) then
1696 !!$ nsend=nsend+1
1697 !!$ qsend(nsend) = q(ie+1,je)
1698 !!$ send_tag = 100+tile
1699 !!$ dest_gid = (tile+1)*npes_x*npes_y
1700 !!$ if (dest_gid+1 > npes) dest_gid=dest_gid-npes
1701 !!$ recv_tag = 300+(tile+2)
1702 !!$ if (tile==5) recv_tag = 300+1
1703 !!$ src_gid = (tile+1)*npes_x*npes_y
1704 !!$ if (src_gid+1 > npes) src_gid=src_gid-npes
1705 !!$ if (npes>6) then
1706 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1707 !!$ dest_gid, send_tag, &
1708 !!$ q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, &
1709 !!$ src_gid, recv_tag, &
1710 !!$ commglobal, Stats, ierror )
1711 !!$ nsend=nsend-1
1712 !!$ else
1713 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1714 !!$ send_tag, commglobal, sqest(nsend), ierror )
1715 !!$ nrecv=nrecv+1
1716 !!$ call MPI_IRECV( q(ie+2,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1717 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1718 !!$ endif
1719 !!$ endif
1720 !!$! wait for comm to complete
1721 !!$ if (npes==6) then
1722 !!$ if (nsend>0) then
1723 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1724 !!$
1725 !!$
1726 !!$
1727 !!$ endif
1728 !!$ if (nrecv>0) then
1729 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1730 !!$
1731 !!$
1732 !!$
1733 !!$ endif
1734 !!$ nsend=0 ; nrecv=0
1735 !!$ endif
1736 !!$
1737 !!$! Odd Face UL 1 pair ; 1 1-way
1738 !!$ if ( (tile==1) .and. (is==1) .and. (je==npy-1) ) then
1739 !!$ nsend=nsend+1
1740 !!$ qsend(nsend) = q(is,je)
1741 !!$ send_tag = 400+tile
1742 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x*(npes_y-1)
1743 !!$ recv_tag = 400+(tile+2)
1744 !!$ src_gid = dest_gid
1745 !!$ if (npes>6) then
1746 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1747 !!$ dest_gid, send_tag, &
1748 !!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, &
1749 !!$ src_gid, recv_tag, &
1750 !!$ commglobal, Stats, ierror )
1751 !!$ nsend=nsend-1
1752 !!$ else
1753 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1754 !!$ send_tag, commglobal, sqest(nsend), ierror )
1755 !!$ nrecv=nrecv+1
1756 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1757 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1758 !!$ endif
1759 !!$ endif
1760 !!$ if ( (tile==3) .and. (is==1) .and. (je==npy-1) ) then
1761 !!$ nsend=nsend+1
1762 !!$ qsend(nsend) = q(is+1,je+1)
1763 !!$ send_tag = 400+tile
1764 !!$ dest_gid = npes_x*(npes_y-1)
1765 !!$ recv_tag = 400+(tile-2)
1766 !!$ src_gid = dest_gid
1767 !!$ if (npes>6) then
1768 !!$ call MPI_SENDRECV( qsend(nsend), 1, MPI_DOUBLE_PRECISION, &
1769 !!$ dest_gid, send_tag, &
1770 !!$ q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, &
1771 !!$ src_gid, recv_tag, &
1772 !!$ commglobal, Stats, ierror )
1773 !!$ nsend=nsend-1
1774 !!$ else
1775 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1776 !!$ send_tag, commglobal, sqest(nsend), ierror )
1777 !!$ nrecv=nrecv+1
1778 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1779 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1780 !!$ endif
1781 !!$ nsend=nsend+1
1782 !!$ qsend(nsend) = q(is,je)
1783 !!$ send_tag = 400+tile
1784 !!$ dest_gid = (tile+1)*npes_x*npes_y + npes_x*(npes_y-1)
1785 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1786 !!$ send_tag, commglobal, sqest(nsend), ierror )
1787 !!$ endif
1788 !!$ if ( (tile==5) .and. (is==1) .and. (je==npy-1) ) then
1789 !!$ recv_tag = 400+(tile-2)
1790 !!$ src_gid = (tile-3)*npes_x*npes_y + npes_x*(npes_y-1)
1791 !!$ nrecv=nrecv+1
1792 !!$ call MPI_IRECV( q(is-1,je+1), 1, MPI_DOUBLE_PRECISION, src_gid, &
1793 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1794 !!$ endif
1795 !!$
1796 !!$! wait for comm to complete
1797 !!$ if (npes==6) then
1798 !!$ if (nsend>0) then
1799 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1800 !!$
1801 !!$
1802 !!$
1803 !!$ endif
1804 !!$ if (nrecv>0) then
1805 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1806 !!$
1807 !!$
1808 !!$
1809 !!$ endif
1810 !!$ nsend=0 ; nrecv=0
1811 !!$ endif
1812 !!$
1813 !!$! Send to Even face UL 3 1-way
1814 !!$ if ( (ie==npx-1) .and. (je==npy-1) ) then
1815 !!$ nsend=nsend+1
1816 !!$ qsend(nsend) = q(ie,je+1)
1817 !!$ send_tag = 400+tile
1818 !!$ dest_gid = tile*npes_x*npes_y + npes_x*(npes_y-1)
1819 !!$ call MPI_ISEND( qsend(nsend), 1, MPI_DOUBLE_PRECISION, dest_gid, &
1820 !!$ send_tag, commglobal, sqest(nsend), ierror )
1821 !!$ endif
1822 !!$
1823 !!$! Receive Odd Face LR 3 1-way
1824 !!$ if ( (ie==npx-1) .and. (js==1) ) then
1825 !!$ recv_tag = 200+(tile+1)
1826 !!$ src_gid = (tile-1)*npes_x*npes_y + npes_x*npes_y
1827 !!$ nrecv=nrecv+1
1828 !!$ call MPI_IRECV( q(ie+2,js), 1, MPI_DOUBLE_PRECISION, src_gid, &
1829 !!$ recv_tag, commglobal, rqest(nrecv), ierror )
1830 !!$ endif
1831 !!$
1832 !!$ endif
1833 !!$
1834 !!$! wait for comm to complete
1835 !!$ if (nsend>0) then
1836 !!$ call MPI_WAITALL(nsend, sqest, Stats, ierror)
1837 !!$
1838 !!$
1839 !!$
1840 !!$ endif
1841 !!$ if (nrecv>0) then
1842 !!$ call MPI_WAITALL(nrecv, rqest, Stats, ierror)
1843 !!$
1844 !!$
1845 !!$
1846 !!$ endif
1847 !!$
1848 !!$ end subroutine mp_corner_comm
1849 !!$!
1850 !!$! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1851 !!$!-------------------------------------------------------------------------------
1852 
1853 !-------------------------------------------------------------------------------
1854 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1855 !
1856 ! mp_gather_4d_r4 :: Call SPMD Gather
1857 !
1858  subroutine mp_gather_4d_r4(q, i1,i2, j1,j2, idim, jdim, kdim, ldim)
1859  integer, intent(IN) :: i1,i2, j1,j2
1860  integer, intent(IN) :: idim, jdim, kdim, ldim
1861  real(kind=4), intent(INOUT):: q(idim,jdim,kdim,ldim)
1862  integer :: i,j,k,l,n,icnt
1863  integer :: Lsize, Lsize_buf(1)
1864  integer :: Gsize
1865  integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid)
1866  integer :: Ldims(5), Gdims(5*npes_this_grid)
1867  real(kind=4), allocatable, dimension(:) :: larr, garr
1868 
1869  ldims(1) = i1
1870  ldims(2) = i2
1871  ldims(3) = j1
1872  ldims(4) = j2
1873  ldims(5) = tile
1874  do l=1,npes_this_grid
1875  cnts(l) = 5
1876  ldispl(l) = 5*(l-1)
1877  enddo
1878  call mpp_gather(ldims, gdims)
1879 ! call MPI_GATHERV(Ldims, 5, MPI_INTEGER, Gdims, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
1880 
1881  lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) ) * kdim
1882  do l=1,npes_this_grid
1883  cnts(l) = 1
1884  ldispl(l) = l-1
1885  enddo
1886  lsizes(:)=1
1887  lsize_buf(1) = lsize
1888  call mpp_gather(lsize_buf, lsizes)
1889 ! call MPI_GATHERV(Lsize, 1, MPI_INTEGER, LsizeS, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
1890 
1891  allocate ( larr(lsize) )
1892  icnt = 1
1893  do k=1,kdim
1894  do j=j1,j2
1895  do i=i1,i2
1896  larr(icnt) = q(i,j,k,tile)
1897  icnt=icnt+1
1898  enddo
1899  enddo
1900  enddo
1901  ldispl(1) = 0
1902 ! call mp_bcst(LsizeS(1))
1903  call mpp_broadcast(lsizes, npes_this_grid, masterproc)
1904  gsize = lsizes(1)
1905  do l=2,npes_this_grid
1906 ! call mp_bcst(LsizeS(l))
1907  ldispl(l) = ldispl(l-1) + lsizes(l-1)
1908  gsize = gsize + lsizes(l)
1909  enddo
1910  allocate ( garr(gsize) )
1911 
1912  call mpp_gather(larr, lsize, garr, lsizes)
1913 ! call MPI_GATHERV(larr, Lsize, MPI_REAL, garr, LsizeS, Ldispl, MPI_REAL, masterproc, commglobal, ierror)
1914 
1915  if (gid==masterproc) then
1916  do n=2,npes_this_grid
1917  icnt=1
1918  do l=gdims( (n-1)*5 + 5 ), gdims( (n-1)*5 + 5 )
1919  do k=1,kdim
1920  do j=gdims( (n-1)*5 + 3 ), gdims( (n-1)*5 + 4 )
1921  do i=gdims( (n-1)*5 + 1 ), gdims( (n-1)*5 + 2 )
1922  q(i,j,k,l) = garr(ldispl(n)+icnt)
1923  icnt=icnt+1
1924  enddo
1925  enddo
1926  enddo
1927  enddo
1928  enddo
1929  endif
1930  deallocate( larr )
1931  deallocate( garr )
1932 
1933  end subroutine mp_gather_4d_r4
1934 !
1935 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
1936 !-------------------------------------------------------------------------------
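! Editorial note (not part of the original source): the gather runs in
! three passes -- index bounds (ldims -> gdims), local element counts
! (lsize -> lsizes), then the packed field (larr -> garr) -- after which
! only masterproc unpacks garr into the global array using the gathered
! bounds. A hypothetical caller owning the assembled field might do:
!
!   real(kind=4) :: qg(npx-1, npy-1, npz, ntiles)   ! sizes are illustrative
!   call mp_gather_4d_r4(qg, is,ie, js,je, npx-1, npy-1, npz, ntiles)
!   ! qg is fully populated on masterproc only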
1937 
1938 
1939 !-------------------------------------------------------------------------------
1940 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
1941 !
1942 ! mp_gather_3d_r4 :: Call SPMD Gather
1943 !
1944  subroutine mp_gather_3d_r4(q, i1,i2, j1,j2, idim, jdim, ldim)
1945  integer, intent(IN) :: i1,i2, j1,j2
1946  integer, intent(IN) :: idim, jdim, ldim
1947  real(kind=4), intent(INOUT):: q(idim,jdim,ldim)
1948  integer :: i,j,l,n,icnt
1949  integer :: Lsize, Lsize_buf(1)
1950  integer :: Gsize
1951  integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid)
1952  integer :: Ldims(5), Gdims(5*npes_this_grid)
1953  real(kind=4), allocatable, dimension(:) :: larr, garr
1954 
1955  ldims(1) = i1
1956  ldims(2) = i2
1957  ldims(3) = j1
1958  ldims(4) = j2
1959  ldims(5) = tile
1960  do l=1,npes_this_grid
1961  cnts(l) = 5
1962  ldispl(l) = 5*(l-1)
1963  enddo
1964 ! call MPI_GATHERV(Ldims, 5, MPI_INTEGER, Gdims, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
1965  call mpp_gather(ldims, gdims)
1966 
1967  lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) )
1968  do l=1,npes_this_grid
1969  cnts(l) = 1
1970  ldispl(l) = l-1
1971  enddo
1972  lsizes(:)=1
1973  lsize_buf(1) = lsize
1974  call mpp_gather(lsize_buf, lsizes)
1975 ! call MPI_GATHERV(Lsize, 1, MPI_INTEGER, LsizeS, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
1976 
1977  allocate ( larr(lsize) )
1978  icnt = 1
1979  do j=j1,j2
1980  do i=i1,i2
1981  larr(icnt) = q(i,j,tile)
1982  icnt=icnt+1
1983  enddo
1984  enddo
1985  ldispl(1) = 0
1986 ! call mp_bcst(LsizeS(1))
1987  call mpp_broadcast(lsizes, npes_this_grid, masterproc)
1988  gsize = lsizes(1)
1989  do l=2,npes_this_grid
1990 ! call mp_bcst(LsizeS(l))
1991  ldispl(l) = ldispl(l-1) + lsizes(l-1)
1992  gsize = gsize + lsizes(l)
1993  enddo
1994  allocate ( garr(gsize) )
1995  call mpp_gather(larr, lsize, garr, lsizes)
1996 ! call MPI_GATHERV(larr, Lsize, MPI_REAL, garr, LsizeS, Ldispl, MPI_REAL, masterproc, commglobal, ierror)
1997  if (gid==masterproc) then
1998  do n=2,npes_this_grid
1999  icnt=1
2000  do l=gdims( (n-1)*5 + 5 ), gdims( (n-1)*5 + 5 )
2001  do j=gdims( (n-1)*5 + 3 ), gdims( (n-1)*5 + 4 )
2002  do i=gdims( (n-1)*5 + 1 ), gdims( (n-1)*5 + 2 )
2003  q(i,j,l) = garr(ldispl(n)+icnt)
2004  icnt=icnt+1
2005  enddo
2006  enddo
2007  enddo
2008  enddo
2009  endif
2010  deallocate( larr )
2011  deallocate( garr )
2012 
2013  end subroutine mp_gather_3d_r4
2014 !
2015 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2016 !-------------------------------------------------------------------------------
2017 
2018 !-------------------------------------------------------------------------------
2019 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2020 !
2021 ! mp_gather_3d_r8 :: Call SPMD Gather
2022 !
2023  subroutine mp_gather_3d_r8(q, i1,i2, j1,j2, idim, jdim, ldim)
2024  integer, intent(IN) :: i1,i2, j1,j2
2025  integer, intent(IN) :: idim, jdim, ldim
2026  real(kind=8), intent(INOUT):: q(idim,jdim,ldim)
2027  integer :: i,j,l,n,icnt
2028  integer :: Lsize, Lsize_buf(1)
2029  integer :: Gsize
2030  integer :: LsizeS(npes_this_grid), Ldispl(npes_this_grid), cnts(npes_this_grid)
2031  integer :: Ldims(5), Gdims(5*npes_this_grid)
2032  real(kind=8), allocatable, dimension(:) :: larr, garr
2033 
2034  ldims(1) = i1
2035  ldims(2) = i2
2036  ldims(3) = j1
2037  ldims(4) = j2
2038  ldims(5) = tile
2039  do l=1,npes_this_grid
2040  cnts(l) = 5
2041  ldispl(l) = 5*(l-1)
2042  enddo
2043 ! call MPI_GATHER(Ldims, 5, MPI_INTEGER, Gdims, cnts, MPI_INTEGER, masterproc, commglobal, ierror)
2044  call mpp_gather(ldims, gdims)
2045  lsize = ( (i2 - i1 + 1) * (j2 - j1 + 1) )
2046  do l=1,npes_this_grid
2047  cnts(l) = 1
2048  ldispl(l) = l-1
2049  enddo
2050  lsizes(:) = 0
2051 
2052 ! call MPI_GATHERV(Lsize, 1, MPI_INTEGER, LsizeS, cnts, Ldispl, MPI_INTEGER, masterproc, commglobal, ierror)
2053  lsize_buf(1) = lsize
2054  call mpp_gather(lsize_buf, lsizes)
2055 
2056  allocate ( larr(lsize) )
2057  icnt = 1
2058  do j=j1,j2
2059  do i=i1,i2
2060  larr(icnt) = q(i,j,tile)
2061  icnt=icnt+1
2062  enddo
2063  enddo
2064  ldispl(1) = 0
2065  call mpp_broadcast(lsizes, npes_this_grid, masterproc)
2066 ! call mp_bcst(LsizeS(1))
2067  gsize = lsizes(1)
2068  do l=2,npes_this_grid
2069 ! call mp_bcst(LsizeS(l))
2070  ldispl(l) = ldispl(l-1) + lsizes(l-1)
2071  gsize = gsize + lsizes(l)
2072  enddo
2073 
2074  allocate ( garr(gsize) )
2075  call mpp_gather(larr, lsize, garr, lsizes)
2076 ! call MPI_GATHERV(larr, Lsize, MPI_DOUBLE_PRECISION, garr, LsizeS, Ldispl, MPI_DOUBLE_PRECISION, masterproc, commglobal, ierror)
2077  if (gid==masterproc) then
2078  do n=2,npes_this_grid
2079  icnt=1
2080  do l=gdims( (n-1)*5 + 5 ), gdims( (n-1)*5 + 5 )
2081  do j=gdims( (n-1)*5 + 3 ), gdims( (n-1)*5 + 4 )
2082  do i=gdims( (n-1)*5 + 1 ), gdims( (n-1)*5 + 2 )
2083  q(i,j,l) = garr(ldispl(n)+icnt)
2084  icnt=icnt+1
2085  enddo
2086  enddo
2087  enddo
2088  enddo
2089  endif
2090  deallocate( larr )
2091  deallocate( garr )
2092 
2093  end subroutine mp_gather_3d_r8
2094 !
2095 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2096 !-------------------------------------------------------------------------------
2097 
2098 !-------------------------------------------------------------------------------
2099 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2100 !
2101 ! mp_bcst_i4 :: Call SPMD broadcast
2102 !
2103  subroutine mp_bcst_i4(q)
2104  integer, intent(INOUT) :: q
2105 
2106  call mpi_bcast(q, 1, mpi_integer, masterproc, commglobal, ierror)
2107 
2108  end subroutine mp_bcst_i4
2109 !
2110 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2111 !-------------------------------------------------------------------------------
2112 
2113 !-------------------------------------------------------------------------------
2114 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2115 !
2116 ! mp_bcst_r4 :: Call SPMD broadcast
2117 !
2118  subroutine mp_bcst_r4(q)
2119  real(kind=4), intent(INOUT) :: q
2120 
2121  call mpi_bcast(q, 1, mpi_real, masterproc, commglobal, ierror)
2122 
2123  end subroutine mp_bcst_r4
2124 !
2125 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2126 !-------------------------------------------------------------------------------
2127 
2128 !-------------------------------------------------------------------------------
2129 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2130 !
2131 ! mp_bcst_r8 :: Call SPMD broadcast
2132 !
2133  subroutine mp_bcst_r8(q)
2134  real(kind=8), intent(INOUT) :: q
2135 
2136  call mpi_bcast(q, 1, mpi_double_precision, masterproc, commglobal, ierror)
2137 
2138  end subroutine mp_bcst_r8
2139 !
2140 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2141 !-------------------------------------------------------------------------------
2142 
2143 !-------------------------------------------------------------------------------
2144 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2145 !
2146 ! mp_bcst_3d_r4 :: Call SPMD broadcast
2147 !
2148  subroutine mp_bcst_3d_r4(q, idim, jdim, kdim)
2149  integer, intent(IN) :: idim, jdim, kdim
2150  real(kind=4), intent(INOUT) :: q(idim,jdim,kdim)
2151 
2152  call mpi_bcast(q, idim*jdim*kdim, mpi_real, masterproc, commglobal, ierror)
2153 
2154  end subroutine mp_bcst_3d_r4
2155 !
2156 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2157 !-------------------------------------------------------------------------------
2158 
2159 !-------------------------------------------------------------------------------
2160 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2161 !
2162 ! mp_bcst_3d_r8 :: Call SPMD broadcast
2163 !
2164  subroutine mp_bcst_3d_r8(q, idim, jdim, kdim)
2165  integer, intent(IN) :: idim, jdim, kdim
2166  real(kind=8), intent(INOUT) :: q(idim,jdim,kdim)
2167 
2168  call mpi_bcast(q, idim*jdim*kdim, mpi_double_precision, masterproc, commglobal, ierror)
2169 
2170  end subroutine mp_bcst_3d_r8
2171 !
2172 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2173 !-------------------------------------------------------------------------------
2174 
2175 !-------------------------------------------------------------------------------
2176 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2177 !
2178 ! mp_bcst_4d_r4 :: Call SPMD broadcast
2179 !
2180  subroutine mp_bcst_4d_r4(q, idim, jdim, kdim, ldim)
2181  integer, intent(IN) :: idim, jdim, kdim, ldim
2182  real(kind=4), intent(INOUT) :: q(idim,jdim,kdim,ldim)
2183 
2184  call mpi_bcast(q, idim*jdim*kdim*ldim, mpi_real, masterproc, commglobal, ierror)
2185 
2186  end subroutine mp_bcst_4d_r4
2187 !
2188 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2189 !-------------------------------------------------------------------------------
2190 
2191 !-------------------------------------------------------------------------------
2192 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2193 !
2194 ! mp_bcst_4d_r8 :: Call SPMD broadcast
2195 !
2196  subroutine mp_bcst_4d_r8(q, idim, jdim, kdim, ldim)
2197  integer, intent(IN) :: idim, jdim, kdim, ldim
2198  real(kind=8), intent(INOUT) :: q(idim,jdim,kdim,ldim)
2199 
2200  call mpi_bcast(q, idim*jdim*kdim*ldim, mpi_double_precision, masterproc, commglobal, ierror)
2201 
2202  end subroutine mp_bcst_4d_r8
2203 !
2204 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2205 !-------------------------------------------------------------------------------
2206 
2207 !-------------------------------------------------------------------------------
2208 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2209 !
2210 ! mp_bcst_3d_i8 :: Call SPMD broadcast
2211 !
2212  subroutine mp_bcst_3d_i8(q, idim, jdim, kdim)
2213  integer, intent(IN) :: idim, jdim, kdim
2214  integer, intent(INOUT) :: q(idim,jdim,kdim)
2215 
2216  call mpi_bcast(q, idim*jdim*kdim, mpi_integer, masterproc, commglobal, ierror)
2217 
2218  end subroutine mp_bcst_3d_i8
2219 !
2220 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2221 !-------------------------------------------------------------------------------
2222 
2223 !-------------------------------------------------------------------------------
2224 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2225 !
2226 ! mp_bcst_4d_i8 :: Call SPMD broadcast
2227 !
2228  subroutine mp_bcst_4d_i8(q, idim, jdim, kdim, ldim)
2229  integer, intent(IN) :: idim, jdim, kdim, ldim
2230  integer, intent(INOUT) :: q(idim,jdim,kdim,ldim)
2231 
2232  call mpi_bcast(q, idim*jdim*kdim*ldim, mpi_integer, masterproc, commglobal, ierror)
2233 
2234  end subroutine mp_bcst_4d_i8
2235 !
2236 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2237 !-------------------------------------------------------------------------------
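! Editorial usage sketch: every mp_bcst_* variant broadcasts from
! masterproc over commglobal, with the count formed from the full
! declared dimensions, so all ranks must pass identical shapes. For a
! hypothetical coefficient table read on the master only:
!
!   if (gid == masterproc) call read_coeffs(cf)   ! read_coeffs is illustrative
!   call mp_bcst_3d_r8(cf, idim, jdim, kdim)      ! now valid on every rank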
2238 
2239 
2240 !-------------------------------------------------------------------------------
2241 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2242 !
2243 ! mp_reduce_max_r4_1d :: Call SPMD REDUCE_MAX
2244 !
2245  subroutine mp_reduce_max_r4_1d(mymax,npts)
2246  integer, intent(IN) :: npts
2247  real(kind=4), intent(INOUT) :: mymax(npts)
2248 
2249  real(kind=4) :: gmax(npts)
2250 
2251  call mpi_allreduce( mymax, gmax, npts, mpi_real, mpi_max, &
2252  commglobal, ierror )
2253 
2254  mymax = gmax
2255 
2256  end subroutine mp_reduce_max_r4_1d
2257 !
2258 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2259 !-------------------------------------------------------------------------------
2260 
2261 
2262 !-------------------------------------------------------------------------------
2263 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2264 !
2265 ! mp_reduce_max_r8_1d :: Call SPMD REDUCE_MAX
2266 !
2267  subroutine mp_reduce_max_r8_1d(mymax,npts)
2268  integer, intent(IN) :: npts
2269  real(kind=8), intent(INOUT) :: mymax(npts)
2270 
2271  real(kind=8) :: gmax(npts)
2272 
2273  call mpi_allreduce( mymax, gmax, npts, mpi_double_precision, mpi_max, &
2274  commglobal, ierror )
2275 
2276  mymax = gmax
2277 
2278  end subroutine mp_reduce_max_r8_1d
2279 !
2280 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2281 !-------------------------------------------------------------------------------
2282 
2283 
2284 !-------------------------------------------------------------------------------
2285 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2286 !
2287 ! mp_reduce_max_r4 :: Call SPMD REDUCE_MAX
2288 !
2289  subroutine mp_reduce_max_r4(mymax)
2290  real(kind=4), intent(INOUT) :: mymax
2291 
2292  real(kind=4) :: gmax
2293 
2294  call mpi_allreduce( mymax, gmax, 1, mpi_real, mpi_max, &
2295  commglobal, ierror )
2296 
2297  mymax = gmax
2298 
2299  end subroutine mp_reduce_max_r4
2300 
2301 !-------------------------------------------------------------------------------
2302 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2303 !
2304 ! mp_reduce_max_r8 :: Call SPMD REDUCE_MAX
2305 !
2306  subroutine mp_reduce_max_r8(mymax)
2307  real(kind=8), intent(INOUT) :: mymax
2308 
2309  real(kind=8) :: gmax
2310 
2311  call mpi_allreduce( mymax, gmax, 1, mpi_double_precision, mpi_max, &
2312  commglobal, ierror )
2313 
2314  mymax = gmax
2315 
2316  end subroutine mp_reduce_max_r8
2317 
!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
! mp_reduce_min_r4 :: Call SPMD REDUCE_MIN
!
2318  subroutine mp_reduce_min_r4(mymin)
2319  real(kind=4), intent(INOUT) :: mymin
2320 
2321  real(kind=4) :: gmin
2322 
2323  call mpi_allreduce( mymin, gmin, 1, mpi_real, mpi_min, &
2324  commglobal, ierror )
2325 
2326  mymin = gmin
2327 
2328  end subroutine mp_reduce_min_r4
2329 
!-------------------------------------------------------------------------------
! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
!
! mp_reduce_min_r8 :: Call SPMD REDUCE_MIN
!
2330  subroutine mp_reduce_min_r8(mymin)
2331  real(kind=8), intent(INOUT) :: mymin
2332 
2333  real(kind=8) :: gmin
2334 
2335  call mpi_allreduce( mymin, gmin, 1, mpi_double_precision, mpi_min, &
2336  commglobal, ierror )
2337 
2338  mymin = gmin
2339 
2340  end subroutine mp_reduce_min_r8
2341 !
2342 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2343 !-------------------------------------------------------------------------------
2344 
2345 !-------------------------------------------------------------------------------
2346 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2347 !
2348 ! mp_reduce_max_i4 :: Call SPMD REDUCE_MAX
2349 !
2350  subroutine mp_reduce_max_i4(mymax)
2351  integer, intent(INOUT) :: mymax
2352 
2353  integer :: gmax
2354 
2355  call mpi_allreduce( mymax, gmax, 1, mpi_integer, mpi_max, &
2356  commglobal, ierror )
2357 
2358  mymax = gmax
2359 
2360  end subroutine mp_reduce_max_i4
2361 !
2362 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2363 !-------------------------------------------------------------------------------
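! Editorial usage sketch: the reduce_max/reduce_min wrappers are
! allreduce operations, so every rank gets the global extremum back in
! place. A typical stability check might look like (courant is a
! hypothetical local array):
!
!   real(kind=8) :: cmax
!   cmax = maxval(abs(courant(is:ie,js:je)))
!   call mp_reduce_max_r8(cmax)
!   if (cmax > 1.d0 .and. gid == masterproc) write(*,*) 'CFL exceeded'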
2364 
2365 !-------------------------------------------------------------------------------
2366 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2367 !
2368 ! mp_reduce_sum_r4 :: Call SPMD REDUCE_SUM
2369 !
2370  subroutine mp_reduce_sum_r4(mysum)
2371  real(kind=4), intent(INOUT) :: mysum
2372 
2373  real(kind=4) :: gsum
2374 
2375  call mpi_allreduce( mysum, gsum, 1, mpi_real, mpi_sum, &
2376  commglobal, ierror )
2377 
2378  mysum = gsum
2379 
2380  end subroutine mp_reduce_sum_r4
2381 !
2382 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2383 !-------------------------------------------------------------------------------
2384 
2385 !-------------------------------------------------------------------------------
2386 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2387 !
2388 ! mp_reduce_sum_r8 :: Call SPMD REDUCE_SUM
2389 !
2390  subroutine mp_reduce_sum_r8(mysum)
2391  real(kind=8), intent(INOUT) :: mysum
2392 
2393  real(kind=8) :: gsum
2394 
2395  call mpi_allreduce( mysum, gsum, 1, mpi_double_precision, mpi_sum, &
2396  commglobal, ierror )
2397 
2398  mysum = gsum
2399 
2400  end subroutine mp_reduce_sum_r8
2401 !
2402 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2403 !-------------------------------------------------------------------------------
2404 
2405 !-------------------------------------------------------------------------------
2406 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2407 !
2408 ! mp_reduce_sum_r4_1d :: Call SPMD REDUCE_SUM
2409 !
2410  subroutine mp_reduce_sum_r4_1d(mysum, sum1d, npts)
2411  integer, intent(in) :: npts
2412  real(kind=4), intent(in) :: sum1d(npts)
2413  real(kind=4), intent(INOUT) :: mysum
2414 
2415  real(kind=4) :: gsum
2416  integer :: i
2417 
2418  mysum = 0.0
2419  do i=1,npts
2420  mysum = mysum + sum1d(i)
2421  enddo
2422 
2423  call mpi_allreduce( mysum, gsum, 1, mpi_real, mpi_sum, &
2424  commglobal, ierror )
2425 
2426  mysum = gsum
2427 
2428  end subroutine mp_reduce_sum_r4_1d
2429 !
2430 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2431 !-------------------------------------------------------------------------------
2432 
2433 !-------------------------------------------------------------------------------
2434 ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv !
2435 !
2436 ! mp_reduce_sum_r8_1d :: Call SPMD REDUCE_SUM
2437 !
2438  subroutine mp_reduce_sum_r8_1d(mysum, sum1d, npts)
2439  integer, intent(in) :: npts
2440  real(kind=8), intent(in) :: sum1d(npts)
2441  real(kind=8), intent(INOUT) :: mysum
2442 
2443  real(kind=8) :: gsum
2444  integer :: i
2445 
2446  mysum = 0.0
2447  do i=1,npts
2448  mysum = mysum + sum1d(i)
2449  enddo
2450 
2451  call mpi_allreduce( mysum, gsum, 1, mpi_double_precision, mpi_sum, &
2452  commglobal, ierror )
2453 
2454  mysum = gsum
2455 
2456  end subroutine mp_reduce_sum_r8_1d
2457 !
2458 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ !
2459 !-------------------------------------------------------------------------------
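! Editorial usage sketch: the 1d sum variants first collapse the local
! contribution array to a scalar, then allreduce that scalar, so mysum
! returns the global total on every rank (local_sums is hypothetical):
!
!   real(kind=8) :: psum
!   call mp_reduce_sum_r8_1d(psum, local_sums, npts)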
2460 #else
2461  implicit none
2462  private
2463  integer :: masterproc = 0
2464  integer :: gid = 0
2465  integer, parameter:: ng = 3 ! Number of ghost zones required
2466  public gid, masterproc, ng
2467 #endif
2468 
2469  end module fv_mp_nlm_mod
2470 !-------------------------------------------------------------------------------
2471 
2472 