mpp_domains_util.inc
1 ! -*-f90-*-
2 
3 !***********************************************************************
4 !* GNU Lesser General Public License
5 !*
6 !* This file is part of the GFDL Flexible Modeling System (FMS).
7 !*
8 !* FMS is free software: you can redistribute it and/or modify it under
9 !* the terms of the GNU Lesser General Public License as published by
10 !* the Free Software Foundation, either version 3 of the License, or (at
11 !* your option) any later version.
12 !*
13 !* FMS is distributed in the hope that it will be useful, but WITHOUT
14 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 !* for more details.
17 !*
18 !* You should have received a copy of the GNU Lesser General Public
19 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
20 !***********************************************************************
21 
22  ! <SUBROUTINE NAME="mpp_domains_set_stack_size">
23  ! <OVERVIEW>
24  ! Set user stack size.
25  ! </OVERVIEW>
26  ! <DESCRIPTION>
27  ! This sets the size of an array that is used for internal storage by
28  ! <TT>mpp_domains</TT>. This array is used, for instance, to buffer the
29  ! data sent and received in halo updates.
30  !
31  ! This call has implied global synchronization. It should be
32  ! placed somewhere where all PEs can call it.
33  ! </DESCRIPTION>
34  ! <TEMPLATE>
35  ! call mpp_domains_set_stack_size(n)
36  ! </TEMPLATE>
37  ! <IN NAME="n" TYPE="integer"></IN>
38  ! </SUBROUTINE>
39  subroutine mpp_domains_set_stack_size(n)
40  !set the mpp_domains_stack variable to be at least n LONG words long
41  integer, intent(in) :: n
42  character(len=8) :: text
43 
44  if( n.LE.mpp_domains_stack_size )return
45 #ifdef use_libSMA
46  call mpp_malloc( ptr_domains_stack, n, mpp_domains_stack_size )
47  call mpp_malloc( ptr_domains_stack_nonblock, n, mpp_domains_stack_size )
48 #else
49  if( allocated(mpp_domains_stack) )deallocate(mpp_domains_stack)
50  allocate( mpp_domains_stack(n) )
51  if( allocated(mpp_domains_stack_nonblock) )deallocate(mpp_domains_stack_nonblock)
52  allocate( mpp_domains_stack_nonblock(n) )
53 
54  mpp_domains_stack_size = n
55 #endif
56  write( text,'(i8)' )n
57  if( mpp_pe().EQ.mpp_root_pe() )call mpp_error( NOTE, 'MPP_DOMAINS_SET_STACK_SIZE: stack size set to '//text//'.' )
58 
59  return
60  end subroutine mpp_domains_set_stack_size
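 ! Example usage (an illustrative sketch, not part of the original source): the
 ! stack is typically enlarged once, after mpp_domains_init and before the first
 ! halo update; the value 4000000 below is an arbitrary assumption.
 !
 !   call mpp_domains_init()
 !   call mpp_domains_set_stack_size(4000000)   ! every PE must make this call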
61 
62 
63 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
64  ! !
65  ! MPP_DOMAINS: overloaded operators (==, /=) !
66  ! !
67 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
68 
69  function mpp_domain1D_eq( a, b )
70  logical :: mpp_domain1D_eq
71  type(domain1D), intent(in) :: a, b
72 
73  mpp_domain1D_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
74  a%compute%end .EQ.b%compute%end .AND. &
75  a%data%begin .EQ.b%data%begin .AND. &
76  a%data%end .EQ.b%data%end .AND. &
77  a%global%begin .EQ.b%global%begin .AND. &
78  a%global%end .EQ.b%global%end )
79  !compare pelists
80  ! if( mpp_domain1D_eq )mpp_domain1D_eq = ASSOCIATED(a%list) .AND. ASSOCIATED(b%list)
81  ! if( mpp_domain1D_eq )mpp_domain1D_eq = size(a%list(:)).EQ.size(b%list(:))
82  ! if( mpp_domain1D_eq )mpp_domain1D_eq = ALL(a%list%pe.EQ.b%list%pe)
83 
84  return
85  end function mpp_domain1D_eq
86 
87  function mpp_domain1D_ne( a, b )
88  logical :: mpp_domain1D_ne
89  type(domain1D), intent(in) :: a, b
90 
91  mpp_domain1D_ne = .NOT. ( a.EQ.b )
92  return
93  end function mpp_domain1D_ne
94 
95  function mpp_domain2D_eq( a, b )
96  logical :: mpp_domain2D_eq
97  type(domain2D), intent(in) :: a, b
98  integer :: nt, n
99 
100  mpp_domain2d_eq = size(a%x(:)) .EQ. size(b%x(:))
101  nt = size(a%x(:))
102  do n = 1, nt
103  if(mpp_domain2d_eq) mpp_domain2D_eq = a%x(n).EQ.b%x(n) .AND. a%y(n).EQ.b%y(n)
104  end do
105 
106  if( mpp_domain2D_eq .AND. ((a%pe.EQ.NULL_PE).OR.(b%pe.EQ.NULL_PE)) )return !NULL_DOMAIN2D
107  !compare pelists
108  if( mpp_domain2D_eq )mpp_domain2D_eq = ASSOCIATED(a%list) .AND. ASSOCIATED(b%list)
109  if( mpp_domain2D_eq )mpp_domain2D_eq = size(a%list(:)).EQ.size(b%list(:))
110  if( mpp_domain2D_eq )mpp_domain2D_eq = ALL(a%list%pe.EQ.b%list%pe)
111  if( mpp_domain2D_eq )mpp_domain2D_eq = ALL(a%io_layout .EQ. b%io_layout)
112  if( mpp_domain2D_eq )mpp_domain2D_eq = a%symmetry .eqv. b%symmetry
113 
114  return
115  end function mpp_domain2D_eq
116 
117  !#####################################################################
118 
119  function mpp_domain2D_ne( a, b )
120  logical :: mpp_domain2D_ne
121  type(domain2D), intent(in) :: a, b
122 
123  mpp_domain2D_ne = .NOT. ( a.EQ.b )
124  return
125  end function mpp_domain2D_ne
126 
127 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
128  ! !
129  ! MPP_GET and SET routines: retrieve various components of domains !
130  ! !
131 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
132 
133  subroutine mpp_get_compute_domain1D( domain, begin, end, size, max_size, is_global )
134  type(domain1D), intent(in) :: domain
135  integer, intent(out), optional :: begin, end, size, max_size
136  logical, intent(out), optional :: is_global
137 
138  if( PRESENT(begin) )begin = domain%compute%begin
139  if( PRESENT(end) )end = domain%compute%end
140  if( PRESENT(size) )size = domain%compute%size
141  if( PRESENT(max_size) )max_size = domain%compute%max_size
142  if( PRESENT(is_global) )is_global = domain%compute%is_global
143  return
144  end subroutine mpp_get_compute_domain1D
145 
146  !#####################################################################
147  subroutine mpp_get_data_domain1D( domain, begin, end, size, max_size, is_global )
148  type(domain1D), intent(in) :: domain
149  integer, intent(out), optional :: begin, end, size, max_size
150  logical, intent(out), optional :: is_global
151 
152  if( PRESENT(begin) )begin = domain%data%begin
153  if( PRESENT(end) )end = domain%data%end
154  if( PRESENT(size) )size = domain%data%size
155  if( PRESENT(max_size) )max_size = domain%data%max_size
156  if( PRESENT(is_global) )is_global = domain%data%is_global
157  return
158  end subroutine mpp_get_data_domain1D
159 
160  !#####################################################################
161  subroutine mpp_get_global_domain1D( domain, begin, end, size, max_size )
162  type(domain1D), intent(in) :: domain
163  integer, intent(out), optional :: begin, end, size, max_size
164 
165  if( PRESENT(begin) )begin = domain%global%begin
166  if( PRESENT(end) )end = domain%global%end
167  if( PRESENT(size) )size = domain%global%size
168  if( PRESENT(max_size) )max_size = domain%global%max_size
169  return
170  end subroutine mpp_get_global_domain1D
171 
172  !#####################################################################
173  subroutine mpp_get_memory_domain1D( domain, begin, end, size, max_size, is_global )
174  type(domain1D), intent(in) :: domain
175  integer, intent(out), optional :: begin, end, size, max_size
176  logical, intent(out), optional :: is_global
177 
178  if( PRESENT(begin) )begin = domain%memory%begin
179  if( PRESENT(end) )end = domain%memory%end
180  if( PRESENT(size) )size = domain%memory%size
181  if( PRESENT(max_size) )max_size = domain%memory%max_size
182  if( PRESENT(is_global) )is_global = domain%memory%is_global
183  return
184  end subroutine mpp_get_memory_domain1D
185 
186  !#####################################################################
187  subroutine mpp_get_compute_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
188  x_is_global, y_is_global, tile_count, position )
189  type(domain2D), intent(in) :: domain
190  integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
191  logical, intent(out), optional :: x_is_global, y_is_global
192  integer, intent(in), optional :: tile_count, position
193  integer :: tile, ishift, jshift
194 
195  tile = 1
196  if(present(tile_count)) tile = tile_count
197 
198  call mpp_get_compute_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
199  call mpp_get_compute_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
200  call mpp_get_domain_shift( domain, ishift, jshift, position )
201  if( PRESENT(xend) ) xend = xend + ishift
202  if( PRESENT(yend) ) yend = yend + jshift
203  if( PRESENT(xsize)) xsize = xsize + ishift
204  if( PRESENT(ysize)) ysize = ysize + jshift
205  if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
206  if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
207 
208  return
209  end subroutine mpp_get_compute_domain2D
210 
211  !#####################################################################
212  subroutine mpp_get_data_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
213  x_is_global, y_is_global, tile_count, position )
214  type(domain2D), intent(in) :: domain
215  integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
216  logical, intent(out), optional :: x_is_global, y_is_global
217  integer, intent(in), optional :: tile_count, position
218  integer :: tile, ishift, jshift
219 
220  tile = 1
221  if(present(tile_count)) tile = tile_count
222 
223  call mpp_get_data_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
224  call mpp_get_data_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
225  call mpp_get_domain_shift( domain, ishift, jshift, position )
226  if( PRESENT(xend) ) xend = xend + ishift
227  if( PRESENT(yend) ) yend = yend + jshift
228  if( PRESENT(xsize)) xsize = xsize + ishift
229  if( PRESENT(ysize)) ysize = ysize + jshift
230  if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
231  if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
232 
233  return
234  end subroutine mpp_get_data_domain2D
235 
236  !#####################################################################
237  subroutine mpp_get_global_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
238  tile_count, position )
239  type(domain2D), intent(in) :: domain
240  integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
241  integer, intent(in), optional :: tile_count, position
242  integer :: tile, ishift, jshift
243 
244  tile = 1
245  if(present(tile_count)) tile = tile_count
246 
247  call mpp_get_global_domain( domain%x(tile), xbegin, xend, xsize, xmax_size )
248  call mpp_get_global_domain( domain%y(tile), ybegin, yend, ysize, ymax_size )
249  call mpp_get_domain_shift( domain, ishift, jshift, position )
250  if( PRESENT(xend) ) xend = xend + ishift
251  if( PRESENT(yend) ) yend = yend + jshift
252  if( PRESENT(xsize)) xsize = xsize + ishift
253  if( PRESENT(ysize)) ysize = ysize + jshift
254  if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
255  if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
256 
257  return
258  end subroutine mpp_get_global_domain2D
259 
260  !#####################################################################
261  subroutine mpp_get_memory_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
262  x_is_global, y_is_global, position)
263  type(domain2D), intent(in) :: domain
264  integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
265  logical, intent(out), optional :: x_is_global, y_is_global
266  integer, intent(in), optional :: position
267  integer :: tile, ishift, jshift
268 
269  tile = 1
270 
271  call mpp_get_memory_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
272  call mpp_get_memory_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
273  call mpp_get_domain_shift( domain, ishift, jshift, position )
274  if( PRESENT(xend) ) xend = xend + ishift
275  if( PRESENT(yend) ) yend = yend + jshift
276  if( PRESENT(xsize)) xsize = xsize + ishift
277  if( PRESENT(ysize)) ysize = ysize + jshift
278  if(PRESENT(xmax_size))xmax_size = xmax_size + ishift
279  if(PRESENT(ymax_size))ymax_size = ymax_size + jshift
280 
281  return
282  end subroutine mpp_get_memory_domain2D
283 
284  !#####################################################################
285  subroutine mpp_set_compute_domain1D( domain, begin, end, size, is_global )
286  type(domain1D), intent(inout) :: domain
287  integer, intent(in), optional :: begin, end, size
288  logical, intent(in), optional :: is_global
289 
290  if(present(begin)) domain%compute%begin = begin
291  if(present(end)) domain%compute%end = end
292  if(present(size)) domain%compute%size = size
293  if(present(is_global)) domain%compute%is_global = is_global
294 
295  end subroutine mpp_set_compute_domain1D
296 
297  !#####################################################################
298  subroutine mpp_set_compute_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, &
299  x_is_global, y_is_global, tile_count )
300  type(domain2D), intent(inout) :: domain
301  integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
302  logical, intent(in), optional :: x_is_global, y_is_global
303  integer, intent(in), optional :: tile_count
304  integer :: tile
305 
306  tile = 1
307  if(present(tile_count)) tile = tile_count
308 
309  call mpp_set_compute_domain(domain%x(tile), xbegin, xend, xsize, x_is_global)
310  call mpp_set_compute_domain(domain%y(tile), ybegin, yend, ysize, y_is_global)
311 
312  end subroutine mpp_set_compute_domain2D
313 
314  !#####################################################################
315  subroutine mpp_set_data_domain1D( domain, begin, end, size, is_global )
316  type(domain1D), intent(inout) :: domain
317  integer, intent(in), optional :: begin, end, size
318  logical, intent(in), optional :: is_global
319 
320  if(present(begin)) domain%data%begin = begin
321  if(present(end)) domain%data%end = end
322  if(present(size)) domain%data%size = size
323  if(present(is_global)) domain%data%is_global = is_global
324 
325  end subroutine mpp_set_data_domain1D
326 
327  !#####################################################################
328  subroutine mpp_set_data_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, &
329  x_is_global, y_is_global, tile_count )
330  type(domain2D), intent(inout) :: domain
331  integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
332  logical, intent(in), optional :: x_is_global, y_is_global
333  integer, intent(in), optional :: tile_count
334  integer :: tile
335 
336  tile = 1
337  if(present(tile_count)) tile = tile_count
338 
339  call mpp_set_data_domain(domain%x(tile), xbegin, xend, xsize, x_is_global)
340  call mpp_set_data_domain(domain%y(tile), ybegin, yend, ysize, y_is_global)
341 
342  end subroutine mpp_set_data_domain2D
343 
344  !#####################################################################
345  subroutine mpp_set_global_domain1D( domain, begin, end, size)
346  type(domain1D), intent(inout) :: domain
347  integer, intent(in), optional :: begin, end, size
348 
349  if(present(begin)) domain%global%begin = begin
350  if(present(end)) domain%global%end = end
351  if(present(size)) domain%global%size = size
352 
353  end subroutine mpp_set_global_domain1D
354 
355  !#####################################################################
356  subroutine mpp_set_global_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, tile_count )
357  type(domain2D), intent(inout) :: domain
358  integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
359  integer, intent(in), optional :: tile_count
360  integer :: tile
361 
362  tile = 1
363  if(present(tile_count)) tile = tile_count
364  call mpp_set_global_domain(domain%x(tile), xbegin, xend, xsize)
365  call mpp_set_global_domain(domain%y(tile), ybegin, yend, ysize)
366 
367  end subroutine mpp_set_global_domain2D
368 
369  !#####################################################################
370  ! <SUBROUTINE NAME="mpp_get_domain_components">
371  ! <OVERVIEW>
372  ! Retrieve 1D components of 2D decomposition.
373  ! </OVERVIEW>
374  ! <DESCRIPTION>
375  ! It is sometimes necessary to have direct recourse to the domain1D types
376  ! that compose a domain2D object. This call retrieves them.
377  ! </DESCRIPTION>
378  ! <TEMPLATE>
379  ! call mpp_get_domain_components( domain, x, y )
380  ! </TEMPLATE>
381  ! <IN NAME="domain" TYPE="type(domain2D)"></IN>
382  ! <OUT NAME="x,y" TYPE="type(domain1D)"></OUT>
383  ! </SUBROUTINE>
384  subroutine mpp_get_domain_components( domain, x, y, tile_count )
385  type(domain2D), intent(in) :: domain
386  type(domain1D), intent(inout), optional :: x, y
387  integer, intent(in), optional :: tile_count
388  integer :: tile
389 
390  tile = 1
391  if(present(tile_count)) tile = tile_count
392  if( PRESENT(x) )x = domain%x(tile)
393  if( PRESENT(y) )y = domain%y(tile)
394  return
395  end subroutine mpp_get_domain_components
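 ! Example usage (an illustrative sketch): retrieve the 1D components of an
 ! existing domain2D `domain` and query their compute extents. The names
 ! xdom, ydom, isc, iec are assumptions.
 !
 !   type(domain1D) :: xdom, ydom
 !   integer        :: isc, iec
 !   call mpp_get_domain_components(domain, xdom, ydom)
 !   call mpp_get_compute_domain(xdom, isc, iec)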
396 
397  !#####################################################################
398  subroutine mpp_get_compute_domains1D( domain, begin, end, size )
399  type(domain1D), intent(in) :: domain
400  integer, intent(out), optional, dimension(:) :: begin, end, size
401 
402  if( .NOT.module_is_initialized ) &
403  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: must first call mpp_domains_init.' )
404  !we use shape instead of size for error checks because size is used as an argument
405  if( PRESENT(begin) )then
406  if( any(shape(begin).NE.shape(domain%list)) ) &
407  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: begin array size does not match domain.' )
408  begin(:) = domain%list(:)%compute%begin
409  end if
410  if( PRESENT(end) )then
411  if( any(shape(end).NE.shape(domain%list)) ) &
412  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: end array size does not match domain.' )
413  end(:) = domain%list(:)%compute%end
414  end if
415  if( PRESENT(size) )then
416  if( any(shape(size).NE.shape(domain%list)) ) &
417  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: size array size does not match domain.' )
418  size(:) = domain%list(:)%compute%size
419  end if
420  return
421 end subroutine mpp_get_compute_domains1D
422 
423 !#####################################################################
424 subroutine mpp_get_compute_domains2D( domain, xbegin, xend, xsize, ybegin, yend, ysize, position )
425  type(domain2D), intent(in) :: domain
426  integer, intent(out), optional, dimension(:) :: xbegin, xend, xsize, ybegin, yend, ysize
427  integer, intent(in ), optional :: position
428 
429  integer :: i, ishift, jshift
430 
431  call mpp_get_domain_shift( domain, ishift, jshift, position )
432 
433 
434  if( .NOT.module_is_initialized ) &
435  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: must first call mpp_domains_init.' )
436 
437  if( PRESENT(xbegin) )then
438  if( size(xbegin(:)).NE.size(domain%list(:)) ) &
439  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xbegin array size does not match domain.' )
440  do i = 1, size(xbegin(:))
441  xbegin(i) = domain%list(i-1)%x(1)%compute%begin
442  end do
443  end if
444  if( PRESENT(xend) )then
445  if( size(xend(:)).NE.size(domain%list(:)) ) &
446  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xend array size does not match domain.' )
447  do i = 1, size(xend(:))
448  xend(i) = domain%list(i-1)%x(1)%compute%end + ishift
449  end do
450  end if
451  if( PRESENT(xsize) )then
452  if( size(xsize(:)).NE.size(domain%list(:)) ) &
453  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xsize array size does not match domain.' )
454  do i = 1, size(xsize(:))
455  xsize(i) = domain%list(i-1)%x(1)%compute%size + ishift
456  end do
457  end if
458  if( PRESENT(ybegin) )then
459  if( size(ybegin(:)).NE.size(domain%list(:)) ) &
460  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: ybegin array size does not match domain.' )
461  do i = 1, size(ybegin(:))
462  ybegin(i) = domain%list(i-1)%y(1)%compute%begin
463  end do
464  end if
465  if( PRESENT(yend) )then
466  if( size(yend(:)).NE.size(domain%list(:)) ) &
467  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: yend array size does not match domain.' )
468  do i = 1, size(yend(:))
469  yend(i) = domain%list(i-1)%y(1)%compute%end + jshift
470  end do
471  end if
472  if( PRESENT(ysize) )then
473  if( size(ysize(:)).NE.size(domain%list(:)) ) &
474  call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: ysize array size does not match domain.' )
475  do i = 1, size(ysize(:))
476  ysize(i) = domain%list(i-1)%y(1)%compute%size + jshift
477  end do
478  end if
479  return
480 end subroutine mpp_get_compute_domains2D
481 
482 !#####################################################################
483 subroutine mpp_get_domain_extents1D(domain, xextent, yextent)
484  type(domain2d), intent(in) :: domain
485  integer, dimension(0:), intent(inout) :: xextent, yextent
486  integer :: n
487 
488  if(domain%ntiles .NE. 1) call mpp_error(FATAL,"mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
489  "ntiles is more than 1, please use mpp_get_domain_extents2D")
490  if(size(xextent) .NE. size(domain%x(1)%list(:))) call mpp_error(FATAL,"mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
491  "size(xextent) does not equal to size(domain%x(1)%list(:)))")
492  if(size(yextent) .NE. size(domain%y(1)%list(:))) call mpp_error(FATAL,"mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
493  "size(yextent) does not equal to size(domain%y(1)%list(:)))")
494  do n = 0, size(domain%x(1)%list(:))-1
495  xextent(n) = domain%x(1)%list(n)%compute%size
496  enddo
497  do n = 0, size(domain%y(1)%list(:))-1
498  yextent(n) = domain%y(1)%list(n)%compute%size
499  enddo
500 
501 end subroutine mpp_get_domain_extents1D
502 
503 !#####################################################################
504 ! This will return xextent and yextent for each tile
505 subroutine mpp_get_domain_extents2D(domain, xextent, yextent)
506  type(domain2d), intent(in) :: domain
507  integer, dimension(:,:), intent(inout) :: xextent, yextent
508  integer :: ntile, nlist, n, m, ndivx, ndivy, tile, pos
509 
510  ntile = domain%ntiles
511  nlist = size(domain%list(:))
512  if(size(xextent,2) .ne. ntile .or. size(yextent,2) .ne. ntile) call mpp_error(FATAL, &
513  "mpp_domains_utile.inc: the second dimension size of xextent/yextent is not correct")
514  ndivx = size(xextent,1); ndivy = size(yextent,1)
515  do n = 0, nlist-1
516  if(ANY(domain%list(n)%x(:)%pos>ndivx-1) ) call mpp_error(FATAL, &
517  "mpp_domains_utile.inc: first dimension size of xextent is less than the x-layout in some tile")
518  if(ANY(domain%list(n)%y(:)%pos>ndivy-1) ) call mpp_error(FATAL, &
519  "mpp_domains_utile.inc: first dimension size of yextent is less than the y-layout in some tile")
520  end do
521 
522  xextent = 0; yextent=0
523 
524  do n = 0, nlist-1
525  do m = 1, size(domain%list(n)%tile_id(:))
526  tile = domain%list(n)%tile_id(m)
527  pos = domain%list(n)%x(m)%pos+1
528  if(xextent(pos, tile) == 0) xextent(pos,tile) = domain%list(n)%x(m)%compute%size
529  pos = domain%list(n)%y(m)%pos+1
530  if(yextent(pos, tile) == 0) yextent(pos,tile) = domain%list(n)%y(m)%compute%size
531  end do
532  end do
533 
534 
535 end subroutine mpp_get_domain_extents2D
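 ! Example usage (an illustrative sketch): the extent arrays are dimensioned
 ! (number of divisions per tile, number of tiles); the layout and tile count
 ! can be queried from the domain. The variable names here are assumptions.
 !
 !   integer :: layout(2), ntiles
 !   integer, allocatable :: xext(:,:), yext(:,:)
 !   call mpp_get_layout(domain, layout)
 !   ntiles = mpp_get_ntile_count(domain)
 !   allocate( xext(layout(1),ntiles), yext(layout(2),ntiles) )
 !   call mpp_get_domain_extents(domain, xext, yext)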
536 
537 !#####################################################################
538 function mpp_get_domain_pe(domain)
539  type(domain2d), intent(in) :: domain
540  integer :: mpp_get_domain_pe
541 
542  mpp_get_domain_pe = domain%pe
543 
544 
545 end function mpp_get_domain_pe
546 
547 
548 function mpp_get_domain_tile_root_pe(domain)
549  type(domain2d), intent(in) :: domain
550  integer :: mpp_get_domain_tile_root_pe
551 
552  mpp_get_domain_tile_root_pe = domain%tile_root_pe
553 
554 end function mpp_get_domain_tile_root_pe
555 
556 function mpp_get_io_domain(domain)
557  type(domain2d), intent(in) :: domain
558  type(domain2d), pointer :: mpp_get_io_domain
559 
560  if(ASSOCIATED(domain%io_domain)) then
561  mpp_get_io_domain => domain%io_domain
562  else
563  mpp_get_io_domain => NULL()
564  endif
565 
566 end function mpp_get_io_domain
567 
568 !#####################################################################
569 ! <SUBROUTINE NAME="mpp_get_pelist1D" INTERFACE="mpp_get_pelist">
570 ! <IN NAME="domain" TYPE="type(domain1D)"></IN>
571 ! <OUT NAME="pelist" TYPE="integer" DIM="(:)"></OUT>
572 ! <OUT NAME="pos" TYPE="integer"></OUT>
573 ! </SUBROUTINE>
574 subroutine mpp_get_pelist1D( domain, pelist, pos )
575  type(domain1D), intent(in) :: domain
576  integer, intent(out) :: pelist(:)
577  integer, intent(out), optional :: pos
578  integer :: ndivs
579 
580  if( .NOT.module_is_initialized ) &
581  call mpp_error( FATAL, 'MPP_GET_PELIST: must first call mpp_domains_init.' )
582  ndivs = size(domain%list(:))
583 
584  if( size(pelist(:)).NE.ndivs ) &
585  call mpp_error( FATAL, 'MPP_GET_PELIST: pelist array size does not match domain.' )
586 
587  pelist(:) = domain%list(0:ndivs-1)%pe
588  if( PRESENT(pos) )pos = domain%pos
589  return
590 end subroutine mpp_get_pelist1D
591 
592 !#####################################################################
593 ! <SUBROUTINE NAME="mpp_get_pelist2D" INTERFACE="mpp_get_pelist">
594 ! <IN NAME="domain" TYPE="type(domain2D)"></IN>
595 ! <OUT NAME="pelist" TYPE="integer" DIM="(:)"></OUT>
596 ! <OUT NAME="pos" TYPE="integer"></OUT>
597 ! </SUBROUTINE>
598 subroutine mpp_get_pelist2D( domain, pelist, pos )
599  type(domain2D), intent(in) :: domain
600  integer, intent(out) :: pelist(:)
601  integer, intent(out), optional :: pos
602 
603  if( .NOT.module_is_initialized ) &
604  call mpp_error( FATAL, 'MPP_GET_PELIST: must first call mpp_domains_init.' )
605  if( size(pelist(:)).NE.size(domain%list(:)) ) &
606  call mpp_error( FATAL, 'MPP_GET_PELIST: pelist array size does not match domain.' )
607 
608  pelist(:) = domain%list(:)%pe
609  if( PRESENT(pos) )pos = domain%pos
610  return
611 end subroutine mpp_get_pelist2D
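 ! Example usage (an illustrative sketch): the pelist array must match the
 ! number of PEs in the domain, which mpp_get_domain_npes provides.
 !
 !   integer, allocatable :: pes(:)
 !   allocate( pes(mpp_get_domain_npes(domain)) )
 !   call mpp_get_pelist(domain, pes)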
612 
613 !#####################################################################
614 ! <SUBROUTINE NAME="mpp_get_layout1D" INTERFACE="mpp_get_layout">
615 ! <IN NAME="domain" TYPE="type(domain1D)"></IN>
616 ! <OUT NAME="layout" TYPE="integer"></OUT>
617 ! </SUBROUTINE>
618 subroutine mpp_get_layout1D( domain, layout )
619  type(domain1D), intent(in) :: domain
620  integer, intent(out) :: layout
621 
622  if( .NOT.module_is_initialized ) &
623  call mpp_error( FATAL, 'MPP_GET_LAYOUT: must first call mpp_domains_init.' )
624 
625  layout = size(domain%list(:))
626  return
627 end subroutine mpp_get_layout1D
628 
629 !#####################################################################
630 ! <SUBROUTINE NAME="mpp_get_layout2D" INTERFACE="mpp_get_layout">
631 ! <IN NAME="domain" TYPE="type(domain2D)"></IN>
632 ! <OUT NAME="layout" TYPE="integer" DIM="(2)"></OUT>
633 ! </SUBROUTINE>
634 subroutine mpp_get_layout2D( domain, layout )
635  type(domain2D), intent(in) :: domain
636  integer, intent(out) :: layout(2)
637 
638  if( .NOT.module_is_initialized ) &
639  call mpp_error( FATAL, 'MPP_GET_LAYOUT: must first call mpp_domains_init.' )
640 
641  layout(1) = size(domain%x(1)%list(:))
642  layout(2) = size(domain%y(1)%list(:))
643  return
644 end subroutine mpp_get_layout2D
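 ! Example usage (an illustrative sketch): for a 2D domain the layout is the
 ! number of divisions in x and y.
 !
 !   integer :: layout(2)
 !   call mpp_get_layout(domain, layout)   ! layout(1) = x divisions, layout(2) = y divisions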
645 
646 !#####################################################################
647  ! <SUBROUTINE NAME="mpp_get_domain_shift">
648  ! <OVERVIEW>
649  ! Returns the shift value in the x- and y-directions according to the domain position.
650  ! </OVERVIEW>
651  ! <DESCRIPTION>
652  ! When the domain is symmetric, one extra point may be needed in the
653  ! x- and/or y-direction. This routine returns the shift value based
654  ! on the position.
655  ! </DESCRIPTION>
656  ! <TEMPLATE>
657  ! call mpp_get_domain_shift( domain, ishift, jshift, position )
658  ! </TEMPLATE>
659  ! <IN NAME="domain" TYPE="type(domain2D)">
660  ! predefined data contains 2-d domain decomposition.
661  ! </IN>
662  ! <OUT NAME="ishift, jshift" TYPE="integer">
663  ! return value will be 0 or 1.
664  ! </OUT>
665  ! <IN NAME="position" TYPE="integer">
666  ! position of data. Its value can be CENTER, EAST, NORTH or CORNER.
667  ! </IN>
668  ! </SUBROUTINE>
669 subroutine mpp_get_domain_shift(domain, ishift, jshift, position)
670  type(domain2D), intent(in) :: domain
671  integer, intent(out) :: ishift, jshift
672  integer, optional, intent(in) :: position
673  integer :: pos
674 
675  ishift = 0 ; jshift = 0
676  pos = CENTER
677  if(present(position)) pos = position
678 
679  if(domain%symmetry) then ! shift is non-zero only when the domain is symmetry.
680  select case(pos)
681  case(CORNER)
682  ishift = 1; jshift = 1
683  case(EAST)
684  ishift = 1
685  case(NORTH)
686  jshift = 1
687  end select
688  end if
689 
690 end subroutine mpp_get_domain_shift
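 ! Example usage (an illustrative sketch): size a field at a staggered position
 ! (here EAST, e.g. a C-grid u-point) on a possibly symmetric domain. The array
 ! name u and the index names are assumptions.
 !
 !   integer :: isd, ied, jsd, jed, ishift, jshift
 !   real, allocatable :: u(:,:)
 !   call mpp_get_data_domain(domain, isd, ied, jsd, jed)
 !   call mpp_get_domain_shift(domain, ishift, jshift, position=EAST)
 !   allocate( u(isd:ied+ishift, jsd:jed+jshift) )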
691 
692 !#####################################################################
693 
694  subroutine mpp_get_neighbor_pe_1d(domain, direction, pe)
695 
696  ! Return the PE to the right/left of this PE-domain.
697 
698  type(domain1D), intent(inout) :: domain
699  integer, intent(in) :: direction
700  integer, intent(out) :: pe
701 
702  integer ipos, ipos2, npx
703 
704  pe = NULL_PE
705  npx = size(domain%list(:)) ! 0..npx-1
706  ipos = domain%pos
707 
708  select case (direction)
709 
710  case (:-1)
711  ! neighbor on the left
712  ipos2 = ipos - 1
713  if(ipos2 < 0) then
714  if(domain%cyclic) then
715  ipos2 = npx-1
716  else
717  ipos2 = -999
718  endif
719  endif
720 
721  case (0)
722  ! identity
723  ipos2 = ipos
724 
725  case (1:)
726  ! neighbor on the right
727  ipos2 = ipos + 1
728  if(ipos2 > npx-1) then
729  if(domain%cyclic) then
730  ipos2 = 0
731  else
732  ipos2 = -999
733  endif
734  endif
735 
736  end select
737 
738  if(ipos2 >= 0) pe = domain%list(ipos2)%pe
739 
740  end subroutine mpp_get_neighbor_pe_1d
741 !#####################################################################
742 
743  subroutine mpp_get_neighbor_pe_2d(domain, direction, pe)
744 
745  ! Return the PE to the north/south/east/west (or diagonal) of this PE-domain.
746  ! direction must be NORTH, SOUTH, EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST or NORTH_WEST.
747 
748  type(domain2D), intent(inout) :: domain
749  integer, intent(in) :: direction
750  integer, intent(out) :: pe
751 
752  integer ipos, jpos, npx, npy, ix, iy, ipos0, jpos0
753 
754  pe = NULL_PE
755  npx = size(domain%x(1)%list(:)) ! 0..npx-1
756  npy = size(domain%y(1)%list(:)) ! 0..npy-1
757  ipos0 = domain%x(1)%pos
758  jpos0 = domain%y(1)%pos
759 
760  select case (direction)
761  case (NORTH)
762  ix = 0
763  iy = 1
764  case (NORTH_EAST)
765  ix = 1
766  iy = 1
767  case (EAST)
768  ix = 1
769  iy = 0
770  case (SOUTH_EAST)
771  ix = 1
772  iy =-1
773  case (SOUTH)
774  ix = 0
775  iy =-1
776  case (SOUTH_WEST)
777  ix =-1
778  iy =-1
779  case (WEST)
780  ix =-1
781  iy = 0
782  case (NORTH_WEST)
783  ix =-1
784  iy = 1
785 
786  case default
787  call mpp_error( FATAL, &
788  & 'MPP_GET_NEIGHBOR_PE_2D: direction must be either NORTH, ' &
789  & // 'SOUTH, EAST, WEST, NORTH_EAST, SOUTH_EAST, SOUTH_WEST or NORTH_WEST')
790  end select
791 
792  ipos = ipos0 + ix
793  jpos = jpos0 + iy
794 
795 
796  if( (ipos < 0 .or. ipos > npx-1) .and. domain%x(1)%cyclic ) then
797  ! E/W cyclic domain
798  ipos = modulo(ipos, npx)
799  endif
800 
801  if( (ipos < 0 .and. btest(domain%fold,WEST)) .or. &
802  & (ipos > npx-1 .and. btest(domain%fold,EAST)) ) then
803  ! E or W folded domain
804  ipos = ipos0
805  jpos = npy-jpos-1
806  endif
807 
808  if( (jpos < 0 .or. jpos > npy-1) .and. domain%y(1)%cyclic ) then
809  ! N/S cyclic
810  jpos = modulo(jpos, npy)
811  endif
812 
813  if( (jpos < 0 .and. btest(domain%fold,SOUTH)) .or. &
814  & (jpos > npy-1 .and. btest(domain%fold,NORTH)) ) then
815  ! N or S folded
816  ipos = npx-ipos-1
817  jpos = jpos0
818  endif
819 
820  ! get the PE number
821  pe = NULL_PE
822  if(ipos >= 0 .and. ipos <= npx-1 .and. jpos >= 0 .and. jpos <= npy-1) then
823  pe = domain%pearray(ipos, jpos)
824  endif
825 
826 
827  end subroutine mpp_get_neighbor_pe_2d
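 ! Example usage (an illustrative sketch): find the PE holding the domain to the
 ! east of the current PE; NULL_PE is returned at a non-periodic boundary.
 !
 !   integer :: pe_east
 !   call mpp_get_neighbor_pe(domain, EAST, pe_east)
 !   if( pe_east.NE.NULL_PE ) then
 !      ! ...exchange data with pe_east...
 !   endif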
828 
829 
830 !#######################################################################
831 
832  subroutine nullify_domain2d_list(domain)
833  type(domain2d), intent(inout) :: domain
834 
835  domain%list =>NULL()
836 
837  end subroutine nullify_domain2d_list
838 
839 !#######################################################################
840  function mpp_domain_is_symmetry(domain)
841  type(domain2d), intent(in) :: domain
842  logical :: mpp_domain_is_symmetry
843 
844  mpp_domain_is_symmetry = domain%symmetry
845  return
846 
847  end function mpp_domain_is_symmetry
848 
849 !#######################################################################
850  function mpp_domain_is_initialized(domain)
851  type(domain2d), intent(in) :: domain
852  logical :: mpp_domain_is_initialized
853 
854  mpp_domain_is_initialized = domain%initialized
855 
856  return
857 
858  end function mpp_domain_is_initialized
859 
860 !#######################################################################
861  !--- private routine used only for mpp_update_domains. This routine compares
862  !--- whalo, ehalo, shalo, nhalo with the halo sizes used when defining "domain"
863  !--- to decide if an update is needed. It also checks the signs of whalo, ehalo, shalo and nhalo.
864  function domain_update_is_needed(domain, whalo, ehalo, shalo, nhalo)
865  type(domain2d), intent(in) :: domain
866  integer, intent(in) :: whalo, ehalo, shalo, nhalo
867  logical :: domain_update_is_needed
868 
869  domain_update_is_needed = .true.
870 
871  if(whalo == 0 .AND. ehalo==0 .AND. shalo == 0 .AND. nhalo==0 ) then
872  domain_update_is_needed = .false.
873  if( debug )call mpp_error(NOTE, &
874  'mpp_domains_util.inc: halo size to be updated are all zero, no update will be done')
875  return
876  end if
877  if( (whalo == -domain%whalo .AND. domain%whalo .NE. 0) .or. &
878  (ehalo == -domain%ehalo .AND. domain%ehalo .NE. 0) .or. &
879  (shalo == -domain%shalo .AND. domain%shalo .NE. 0) .or. &
880  (nhalo == -domain%nhalo .AND. domain%nhalo .NE. 0) ) then
881  domain_update_is_needed = .false.
882  call mpp_error(NOTE, 'mpp_domains_util.inc: at least one of w/e/s/n halo size to be updated '// &
883  'is the inverse of the original halo when defining domain, no update will be done')
884  return
885  end if
886 
887  end function domain_update_is_needed
888 !#######################################################################
889  ! this routine finds (or creates) the overlap spec whose halo sizes match the input
890  ! whalo, ehalo, shalo and nhalo
891  function search_update_overlap(domain, whalo, ehalo, shalo, nhalo, position)
892  type(domain2d), intent(inout) :: domain
893  integer, intent(in) :: whalo, ehalo, shalo, nhalo
894  integer, intent(in) :: position
895  type(overlapSpec), pointer :: search_update_overlap
896  type(overlapSpec), pointer :: update_ref
897  type(overlapSpec), pointer :: check => NULL()
898  integer :: ishift, jshift, shift
899 
900  shift = 0; if(domain%symmetry) shift = 1
901  select case(position)
902  case (CENTER)
903  update_ref => domain%update_T
904  ishift = 0; jshift = 0
905  case (CORNER)
906  update_ref => domain%update_C
907  ishift = shift; jshift = shift
908  case (NORTH)
909  update_ref => domain%update_N
910  ishift = 0; jshift = shift
911  case (EAST)
912  update_ref => domain%update_E
913  ishift = shift; jshift = 0
914  case default
915  call mpp_error(FATAL,"mpp_domains_util.inc(search_update_overlap): position should be CENTER|CORNER|EAST|NORTH")
916  end select
917 
918  search_update_overlap => update_ref
919 
920  do
921  if(whalo == search_update_overlap%whalo .AND. ehalo == search_update_overlap%ehalo .AND. &
922  shalo == search_update_overlap%shalo .AND. nhalo == search_update_overlap%nhalo ) then
923  exit ! found domain
924  endif
925  !--- if not found, switch to next
926  if(.NOT. ASSOCIATED(search_update_overlap%next)) then
927  allocate(search_update_overlap%next)
928  search_update_overlap => search_update_overlap%next
929  if(domain%fold .NE. 0) then
930  call compute_overlaps(domain, position, search_update_overlap, check, &
931  ishift, jshift, 0, 0, whalo, ehalo, shalo, nhalo)
932  else
933  call set_overlaps(domain, update_ref, search_update_overlap, whalo, ehalo, shalo, nhalo )
934  endif
935  exit
936  else
937  search_update_overlap => search_update_overlap%next
938  end if
939 
940  end do
941 
942  update_ref => NULL()
943 
944  end function search_update_overlap
945 
946 !#######################################################################
947  ! this routine returns the check overlap at the given position
948  function search_check_overlap(domain, position)
949  type(domain2d), intent(in) :: domain
950  integer, intent(in) :: position
951  type(overlapSpec), pointer :: search_check_overlap
952 
953  select case(position)
954  case (CENTER)
955  search_check_overlap => NULL()
956  case (CORNER)
957  search_check_overlap => domain%check_C
958  case (NORTH)
959  search_check_overlap => domain%check_N
960  case (EAST)
961  search_check_overlap => domain%check_E
962  case default
963  call mpp_error(FATAL,"mpp_domains_util.inc(search_check_overlap): position should be CENTER|CORNER|EAST|NORTH")
964  end select
965 
966  end function search_check_overlap
967 
968 !#######################################################################
969  ! this routine returns the bound overlap at the given position
970  function search_bound_overlap(domain, position)
971  type(domain2d), intent(in) :: domain
972  integer, intent(in) :: position
973  type(overlapSpec), pointer :: search_bound_overlap
974 
975  select case(position)
976  case (CENTER)
977  search_bound_overlap => NULL()
978  case (CORNER)
979  search_bound_overlap => domain%bound_C
980  case (NORTH)
981  search_bound_overlap => domain%bound_N
982  case (EAST)
983  search_bound_overlap => domain%bound_E
984  case default
985  call mpp_error(FATAL,"mpp_domains_util.inc(search_bound_overlap): position should be CENTER|CORNER|EAST|NORTH")
986  end select
987 
988  end function search_bound_overlap
989 
990  !########################################################################
991  ! return the tile_id on current pe
992  function mpp_get_tile_id(domain)
993  type(domain2d), intent(in) :: domain
994  integer, dimension(size(domain%tile_id(:))) :: mpp_get_tile_id
995 
996  mpp_get_tile_id = domain%tile_id
997  return
998 
999  end function mpp_get_tile_id
1000 
1001  !#######################################################################
1002  ! return the tile_id on current pelist. one-tile-per-pe is assumed.
1003  subroutine mpp_get_tile_list(domain, tiles)
1004  type(domain2d), intent(in) :: domain
1005  integer, intent(inout) :: tiles(:)
1006  integer :: i
1007 
1008  if( size(tiles(:)).NE.size(domain%list(:)) ) &
1009  call mpp_error( FATAL, 'mpp_get_tile_list: tiles array size does not match domain.' )
1010  do i = 1, size(tiles(:))
1011  if(size(domain%list(i-1)%tile_id(:)) > 1) call mpp_error( FATAL, &
1012  'mpp_get_tile_list: only support one-tile-per-pe now, contact developer');
1013  tiles(i) = domain%list(i-1)%tile_id(1)
1014  end do
1015 
1016  end subroutine mpp_get_tile_list
1017 
1018  !########################################################################
1019  ! return number of tiles in mosaic
1020  function mpp_get_ntile_count(domain)
1021  type(domain2d), intent(in) :: domain
1022  integer :: mpp_get_ntile_count
1023 
1024  mpp_get_ntile_count = domain%ntiles
1025  return
1026 
1027  end function mpp_get_ntile_count
1028 
1029  !########################################################################
1030  ! return number of tiles on current pe
1031  function mpp_get_current_ntile(domain)
1032  type(domain2d), intent(in) :: domain
1033  integer :: mpp_get_current_ntile
1034 
1035  mpp_get_current_ntile = size(domain%tile_id(:))
1036  return
1037 
1038  end function mpp_get_current_ntile
1039 
1040  !#######################################################################
1041  ! return whether the current pe is the root pe of its tile. It returns true when the
1042  ! number of tiles on the current pe is greater than 1, or when isc==isg and jsc==jsg;
1043  ! otherwise it returns false.
1044  function mpp_domain_is_tile_root_pe(domain)
1045  type(domain2d), intent(in) :: domain
1046  logical :: mpp_domain_is_tile_root_pe
1047 
1048  mpp_domain_is_tile_root_pe = domain%pe == domain%tile_root_pe;
1049 
1050  end function mpp_domain_is_tile_root_pe
1051 
1052  !#########################################################################
1053  ! return number of processors used on current tile.
1054  function mpp_get_tile_npes(domain)
1055  type(domain2d), intent(in) :: domain
1056  integer :: mpp_get_tile_npes
1057  integer :: i, tile
1058 
1059  !--- When there is more than one tile on this pe, we assume each tile will be
1060  !--- limited to this pe.
1061  if(size(domain%tile_id(:)) > 1) then
1062  mpp_get_tile_npes = 1
1063  else
1064  mpp_get_tile_npes = 0
1065  tile = domain%tile_id(1)
1066  do i = 0, size(domain%list(:))-1
1067  if(tile == domain%list(i)%tile_id(1) ) mpp_get_tile_npes = mpp_get_tile_npes + 1
1068  end do
1069  endif
1070 
1071  end function mpp_get_tile_npes
1072 
1073  !########################################################################
1074  ! get the list of processors used on the current tile.
1075  subroutine mpp_get_tile_pelist(domain, pelist)
1076  type(domain2d), intent(in) :: domain
1077  integer, intent(inout) :: pelist(:)
1078  integer :: npes_on_tile
1079  integer :: i, tile, pos
1080 
1081  npes_on_tile = mpp_get_tile_npes(domain)
1082  if(size(pelist(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
1083  "mpp_domains_util.inc(mpp_get_tile_pelist): size(pelist) does not equal npes on current tile")
1084  tile = domain%tile_id(1)
1085  pos = 0
1086  do i = 0, size(domain%list(:))-1
1087  if(tile == domain%list(i)%tile_id(1)) then
1088  pos = pos+1
1089  pelist(pos) = domain%list(i)%pe
1090  endif
1091  enddo
1092 
1093  return
1094 
1095  end subroutine mpp_get_tile_pelist
1096 
1097 !#####################################################################
1098 subroutine mpp_get_tile_compute_domains( domain, xbegin, xend, ybegin, yend, position )
1099  type(domain2D), intent(in) :: domain
1100  integer, intent(out), dimension(:) :: xbegin, xend, ybegin, yend
1101  integer, intent(in ), optional :: position
1102 
1103  integer :: i, ishift, jshift
1104  integer :: npes_on_tile, pos, tile
1105 
1106  call mpp_get_domain_shift( domain, ishift, jshift, position )
1107 
1108 
1109  if( .NOT.module_is_initialized ) &
1110  call mpp_error( FATAL, 'mpp_get_tile_compute_domains: must first call mpp_domains_init.' )
1111 
1112  npes_on_tile = mpp_get_tile_npes(domain)
1113  if(size(xbegin(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
1114  "mpp_domains_util.inc(mpp_get_tile_compute_domains): size(xbegin) does not equal npes on current tile")
1115  if(size(xend(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
1116  "mpp_domains_util.inc(mpp_get_tile_compute_domains): size(xend) does not equal npes on current tile")
1117  if(size(ybegin(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
1118  "mpp_domains_util.inc(mpp_get_tile_compute_domains): size(ybegin) does not equal npes on current tile")
1119  if(size(yend(:)) .NE. npes_on_tile) call mpp_error(FATAL, &
1120  "mpp_domains_util.inc(mpp_get_tile_compute_domains): size(yend) does not equal npes on current tile")
1121 
1122  tile = domain%tile_id(1)
1123  pos = 0
1124  do i = 0, size(domain%list(:))-1
1125  if(tile == domain%list(i)%tile_id(1)) then
1126  pos = pos+1
1127  xbegin(pos) = domain%list(i)%x(1)%compute%begin
1128  xend (pos) = domain%list(i)%x(1)%compute%end + ishift
1129  ybegin(pos) = domain%list(i)%y(1)%compute%begin
1130  yend (pos) = domain%list(i)%y(1)%compute%end + jshift
1131  endif
1132  enddo
1133 
1134  return
1135 
1136 end subroutine mpp_get_tile_compute_domains
1137 
1138 
1139 
1140  !#############################################################################
1141  function mpp_get_num_overlap(domain, action, p, position)
1142  type(domain2d), intent(in) :: domain
1143  integer, intent(in) :: action
1144  integer, intent(in) :: p
1145  integer, optional, intent(in) :: position
1146  integer :: mpp_get_num_overlap
1147  type(overlapSpec), pointer :: update => NULL()
1148  integer :: pos
1149 
1150  pos = CENTER
1151  if(present(position)) pos = position
1152  select case(pos)
1153  case (CENTER)
1154  update => domain%update_T
1155  case (CORNER)
1156  update => domain%update_C
1157  case (EAST)
1158  update => domain%update_E
1159  case (NORTH)
1160  update => domain%update_N
1161  case default
1162  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_num_overlap): invalid option of position")
1163  end select
1164 
1165  if(action == EVENT_SEND) then
1166  if(p< 1 .OR. p > update%nsend) call mpp_error( FATAL, &
1167  "mpp_domains_mod(mpp_get_num_overlap): p should be between 1 and update%nsend")
1168  mpp_get_num_overlap = update%send(p)%count
1169  else if(action == EVENT_RECV) then
1170  if(p< 1 .OR. p > update%nrecv) call mpp_error( FATAL, &
1171  "mpp_domains_mod(mpp_get_num_overlap): p should be between 1 and update%nrecv")
1172  mpp_get_num_overlap = update%recv(p)%count
1173  else
1174  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_num_overlap): invalid option of action")
1175  end if
1176 
1177  end function mpp_get_num_overlap
1178 
1179  !#############################################################################
1180  subroutine mpp_get_update_size(domain, nsend, nrecv, position)
1181  type(domain2d), intent(in) :: domain
1182  integer, intent(out) :: nsend, nrecv
1183  integer, optional, intent(in) :: position
1184  integer :: pos
1185 
1186  pos = CENTER
1187  if(present(position)) pos = position
1188  select case(pos)
1189  case (CENTER)
1190  nsend = domain%update_T%nsend
1191  nrecv = domain%update_T%nrecv
1192  case (CORNER)
1193  nsend = domain%update_C%nsend
1194  nrecv = domain%update_C%nrecv
1195  case (EAST)
1196  nsend = domain%update_E%nsend
1197  nrecv = domain%update_E%nrecv
1198  case (NORTH)
1199  nsend = domain%update_N%nsend
1200  nrecv = domain%update_N%nrecv
1201  case default
1202  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_size): invalid option of position")
1203  end select
1204 
1205  end subroutine mpp_get_update_size
1206 
1207  !#############################################################################
1208  subroutine mpp_get_update_pelist(domain, action, pelist, position)
1209  type(domain2d), intent(in) :: domain
1210  integer, intent(in) :: action
1211  integer, intent(inout) :: pelist(:)
1212  integer, optional, intent(in) :: position
1213  type(overlapSpec), pointer :: update => NULL()
1214  integer :: pos, p
1215 
1216  pos = CENTER
1217  if(present(position)) pos = position
1218  select case(pos)
1219  case (CENTER)
1220  update => domain%update_T
1221  case (CORNER)
1222  update => domain%update_C
1223  case (EAST)
1224  update => domain%update_E
1225  case (NORTH)
1226  update => domain%update_N
1227  case default
1228  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_pelist): invalid option of position")
1229  end select
1230 
1231  if(action == EVENT_SEND) then
1232  if(size(pelist) .NE. update%nsend) call mpp_error( FATAL, &
1233  "mpp_domains_mod(mpp_get_update_pelist): size of pelist does not match update%nsend")
1234  do p = 1, update%nsend
1235  pelist(p) = update%send(p)%pe
1236  enddo
1237  else if(action == EVENT_RECV) then
1238  if(size(pelist) .NE. update%nrecv) call mpp_error( FATAL, &
1239  "mpp_domains_mod(mpp_get_update_pelist): size of pelist does not match update%nrecv")
1240  do p = 1, update%nrecv
1241  pelist(p) = update%recv(p)%pe
1242  enddo
1243  else
1244  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_pelist): invalid option of action")
1245  end if
1246 
1247  end subroutine mpp_get_update_pelist
1248 
1249  !#############################################################################
1250  subroutine mpp_get_overlap(domain, action, p, is, ie, js, je, dir, rot, position)
1251  type(domain2d), intent(in) :: domain
1252  integer, intent(in) :: action
1253  integer, intent(in) :: p
1254  integer, dimension(:), intent(out) :: is, ie, js, je
1255  integer, dimension(:), intent(out) :: dir, rot
1256  integer, optional, intent(in) :: position
1257  type(overlapSpec), pointer :: update => NULL()
1258  type(overlap_type), pointer :: overlap => NULL()
1259  integer :: count, pos
1260 
1261  pos = CENTER
1262  if(present(position)) pos = position
1263  select case(pos)
1264  case (CENTER)
1265  update => domain%update_T
1266  case (CORNER)
1267  update => domain%update_C
1268  case (EAST)
1269  update => domain%update_E
1270  case (NORTH)
1271  update => domain%update_N
1272  case default
1273  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): invalid option of position")
1274  end select
1275 
1276  if(action == EVENT_SEND) then
1277  overlap => update%send(p)
1278  else if(action == EVENT_RECV) then
1279  overlap => update%recv(p)
1280  else
1281  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): invalid option of action")
1282  end if
1283 
1284  count = overlap%count
1285  if(size(is(:)) .NE. count .OR. size(ie (:)) .NE. count .OR. size(js (:)) .NE. count .OR. &
1286  size(je(:)) .NE. count .OR. size(dir(:)) .NE. count .OR. size(rot(:)) .NE. count ) &
1287  call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): size mismatch between number of overlap and array size")
1288 
1289  is = overlap%is (1:count)
1290  ie = overlap%ie (1:count)
1291  js = overlap%js (1:count)
1292  je = overlap%je (1:count)
1293  dir = overlap%dir (1:count)
1294  rot = overlap%rotation(1:count)
1295 
1296  update => NULL()
1297  overlap => NULL()
1298 
1299  end subroutine mpp_get_overlap
1300 
1301  !##################################################################
1302  function mpp_get_domain_name(domain)
1303  type(domain2d), intent(in) :: domain
1304  character(len=NAME_LENGTH) :: mpp_get_domain_name
1305 
1306  mpp_get_domain_name = domain%name
1307 
1308  end function mpp_get_domain_name
1309 
1310  !#################################################################
1311  function mpp_get_domain_root_pe(domain)
1312  type(domain2d), intent(in) :: domain
1313  integer :: mpp_get_domain_root_pe
1314 
1315  mpp_get_domain_root_pe = domain%list(0)%pe
1316 
1317  end function mpp_get_domain_root_pe
1318  !#################################################################
1319  function mpp_get_domain_npes(domain)
1320  type(domain2d), intent(in) :: domain
1321  integer :: mpp_get_domain_npes
1322 
1323  mpp_get_domain_npes = size(domain%list(:))
1324 
1325  return
1326 
1327  end function mpp_get_domain_npes
1328 
1329  !################################################################
1330  subroutine mpp_get_domain_pelist(domain, pelist)
1331  type(domain2d), intent(in) :: domain
1332  integer, intent(out) :: pelist(:)
1333  integer :: p
1334 
1335  if(size(pelist(:)) .NE. size(domain%list(:)) ) then
1336  call mpp_error(FATAL, "mpp_get_domain_pelist: size(pelist(:)) .NE. size(domain%list(:)) ")
1337  endif
1338 
1339  do p = 0, size(domain%list(:))-1
1340  pelist(p+1) = domain%list(p)%pe
1341  enddo
1342 
1343  return
1344 
1345  end subroutine mpp_get_domain_pelist
1346 
1347  !#################################################################
1348  function mpp_get_io_domain_layout(domain)
1349  type(domain2d), intent(in) :: domain
1350  integer, dimension(2) :: mpp_get_io_domain_layout
1351 
1352  mpp_get_io_domain_layout = domain%io_layout
1353 
1354  end function mpp_get_io_domain_layout
1355 
1356  !################################################################
1357  function get_rank_send(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
1358  type(domain2D), intent(in) :: domain
1359  type(overlapSpec), intent(in) :: overlap_x, overlap_y
1360  integer, intent(out) :: rank_x, rank_y, ind_x, ind_y
1361  integer :: get_rank_send
1362  integer :: nlist, nsend_x, nsend_y
1363 
1364  nlist = size(domain%list(:))
1365  nsend_x = overlap_x%nsend
1366  nsend_y = overlap_y%nsend
1367  rank_x = nlist+1
1368  rank_y = nlist+1
1369  if(nsend_x>0) rank_x = overlap_x%send(1)%pe - domain%pe
1370  if(nsend_y>0) rank_y = overlap_y%send(1)%pe - domain%pe
1371  if(rank_x .LT. 0) rank_x = rank_x + nlist
1372  if(rank_y .LT. 0) rank_y = rank_y + nlist
1373  get_rank_send = min(rank_x, rank_y)
1374  ind_x = nsend_x + 1
1375  ind_y = nsend_y + 1
1376  if(get_rank_send < nlist+1) then
1377  if(nsend_x>0) ind_x = 1
1378  if(nsend_y>0) ind_y = 1
1379  endif
1380 
1381  end function get_rank_send
1382 
1383  !############################################################################
1384  function get_rank_recv(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
1385  type(domain2D), intent(in) :: domain
1386  type(overlapSpec), intent(in) :: overlap_x, overlap_y
1387  integer, intent(out) :: rank_x, rank_y, ind_x, ind_y
1388  integer :: get_rank_recv
1389  integer :: nlist, nrecv_x, nrecv_y
1390 
1391  nlist = size(domain%list(:))
1392  nrecv_x = overlap_x%nrecv
1393  nrecv_y = overlap_y%nrecv
1394  rank_x = -1
1395  rank_y = -1
1396  if(nrecv_x>0) then
1397  rank_x = overlap_x%recv(1)%pe - domain%pe
1398  if(rank_x .LE. 0) rank_x = rank_x + nlist
1399  endif
1400  if(nrecv_y>0) then
1401  rank_y = overlap_y%recv(1)%pe - domain%pe
1402  if(rank_y .LE. 0) rank_y = rank_y + nlist
1403  endif
1404  get_rank_recv = max(rank_x, rank_y)
1405  ind_x = nrecv_x + 1
1406  ind_y = nrecv_y + 1
1407  if(get_rank_recv < nlist+1) then
1408  if(nrecv_x>0) ind_x = 1
1409  if(nrecv_y>0) ind_y = 1
1410  endif
1411 
1412  end function get_rank_recv
1413 
1414  function get_vector_recv(domain, update_x, update_y, ind_x, ind_y, start_pos, pelist)
1415  type(domain2D), intent(in) :: domain
1416  type(overlapSpec), intent(in) :: update_x, update_y
1417  integer, intent(out) :: ind_x(:), ind_y(:)
1418  integer, intent(out) :: start_pos(:)
1419  integer, intent(out) :: pelist(:)
1420  integer :: nlist, nrecv_x, nrecv_y, ntot, n
1421  integer :: ix, iy, rank_x, rank_y, cur_pos
1422  integer :: get_vector_recv
1423 
1424  nlist = size(domain%list(:))
1425  nrecv_x = update_x%nrecv
1426  nrecv_y = update_y%nrecv
1427 
1428  ntot = nrecv_x + nrecv_y
1429 
1430  n = 1
1431  ix = 1
1432  iy = 1
1433  ind_x = -1
1434  ind_y = -1
1435  get_vector_recv = 0
1436  cur_pos = 0
1437  do while (n<=ntot)
1438  if(ix <= nrecv_x ) then
1439  rank_x = update_x%recv(ix)%pe-domain%pe
1440  if(rank_x .LE. 0) rank_x = rank_x + nlist
1441  else
1442  rank_x = -1
1443  endif
1444  if(iy <= nrecv_y ) then
1445  rank_y = update_y%recv(iy)%pe-domain%pe
1446  if(rank_y .LE. 0) rank_y = rank_y + nlist
1447  else
1448  rank_y = -1
1449  endif
1450  get_vector_recv = get_vector_recv + 1
1451  start_pos(get_vector_recv) = cur_pos
1452  if( rank_x == rank_y ) then
1453  n = n+2
1454  ind_x (get_vector_recv) = ix
1455  ind_y (get_vector_recv) = iy
1456  cur_pos = cur_pos + update_x%recv(ix)%totsize + update_y%recv(iy)%totsize
1457  pelist(get_vector_recv) = update_x%recv(ix)%pe
1458  ix = ix + 1
1459  iy = iy + 1
1460  else if ( rank_x > rank_y ) then
1461  n = n+1
1462  ind_x (get_vector_recv) = ix
1463  ind_y (get_vector_recv) = -1
1464  cur_pos = cur_pos + update_x%recv(ix)%totsize
1465  pelist(get_vector_recv) = update_x%recv(ix)%pe
1466  ix = ix + 1
1467  else if ( rank_y > rank_x ) then
1468  n = n+1
1469  ind_x (get_vector_recv) = -1
1470  ind_y (get_vector_recv) = iy
1471  cur_pos = cur_pos + update_y%recv(iy)%totsize
1472  pelist(get_vector_recv) = update_y%recv(iy)%pe
1473  iy = iy+1
1474  endif
1475  end do
1476 
1477 
1478  end function get_vector_recv
1479 
1480  function get_vector_send(domain, update_x, update_y, ind_x, ind_y, start_pos, pelist)
1481  type(domain2D), intent(in) :: domain
1482  type(overlapSpec), intent(in) :: update_x, update_y
1483  integer, intent(out) :: ind_x(:), ind_y(:)
1484  integer, intent(out) :: start_pos(:)
1485  integer, intent(out) :: pelist(:)
1486  integer :: nlist, nsend_x, nsend_y, ntot, n
1487  integer :: ix, iy, rank_x, rank_y, cur_pos
1488  integer :: get_vector_send
1489 
1490  nlist = size(domain%list(:))
1491  nsend_x = update_x%nsend
1492  nsend_y = update_y%nsend
1493 
1494  ntot = nsend_x + nsend_y
1495  n = 1
1496  ix = 1
1497  iy = 1
1498  ind_x = -1
1499  ind_y = -1
1500  get_vector_send = 0
1501  cur_pos = 0
1502  do while (n<=ntot)
1503  if(ix <= nsend_x ) then
1504  rank_x = update_x%send(ix)%pe-domain%pe
1505  if(rank_x .LT. 0) rank_x = rank_x + nlist
1506  else
1507  rank_x = nlist+1
1508  endif
1509  if(iy <= nsend_y ) then
1510  rank_y = update_y%send(iy)%pe-domain%pe
1511  if(rank_y .LT. 0) rank_y = rank_y + nlist
1512  else
1513  rank_y = nlist+1
1514  endif
1515  get_vector_send = get_vector_send + 1
1516  start_pos(get_vector_send) = cur_pos
1517 
1518  if( rank_x == rank_y ) then
1519  n = n+2
1520  ind_x (get_vector_send) = ix
1521  ind_y (get_vector_send) = iy
1522  cur_pos = cur_pos + update_x%send(ix)%totsize + update_y%send(iy)%totsize
1523  pelist (get_vector_send) = update_x%send(ix)%pe
1524  ix = ix + 1
1525  iy = iy + 1
1526  else if ( rank_x < rank_y ) then
1527  n = n+1
1528  ind_x (get_vector_send) = ix
1529  ind_y (get_vector_send) = -1
1530  cur_pos = cur_pos + update_x%send(ix)%totsize
1531  pelist (get_vector_send) = update_x%send(ix)%pe
1532  ix = ix + 1
1533  else if ( rank_y < rank_x ) then
1534  n = n+1
1535  ind_x (get_vector_send) = -1
1536  ind_y (get_vector_send) = iy
1537  cur_pos = cur_pos + update_y%send(iy)%totsize
1538  pelist (get_vector_send) = update_y%send(iy)%pe
1539  iy = iy+1
1540  endif
1541  end do
1542 
1543 
1544  end function get_vector_send
1545 
1546 
1547  !############################################################################
1548  function get_rank_unpack(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
1549  type(domain2D), intent(in) :: domain
1550  type(overlapSpec), intent(in) :: overlap_x, overlap_y
1551  integer, intent(out) :: rank_x, rank_y, ind_x, ind_y
1552  integer :: get_rank_unpack
1553  integer :: nlist, nrecv_x, nrecv_y
1554 
1555  nlist = size(domain%list(:))
1556  nrecv_x = overlap_x%nrecv
1557  nrecv_y = overlap_y%nrecv
1558 
1559  rank_x = nlist+1
1560  rank_y = nlist+1
1561  if(nrecv_x>0) rank_x = overlap_x%recv(nrecv_x)%pe - domain%pe
1562  if(nrecv_y>0) rank_y = overlap_y%recv(nrecv_y)%pe - domain%pe
1563  if(rank_x .LE.0) rank_x = rank_x + nlist
1564  if(rank_y .LE.0) rank_y = rank_y + nlist
1565 
1566  get_rank_unpack = min(rank_x, rank_y)
1567  ind_x = 0
1568  ind_y = 0
1569  if(get_rank_unpack < nlist+1) then
1570  if(nrecv_x >0) ind_x = nrecv_x
1571  if(nrecv_y >0) ind_y = nrecv_y
1572  endif
1573 
1574  end function get_rank_unpack
1575 
1576  function get_mesgsize(overlap, do_dir)
1577  type(overlap_type), intent(in) :: overlap
1578  logical, intent(in) :: do_dir(:)
1579  integer :: get_mesgsize
1580  integer :: n, dir
1581 
1582  get_mesgsize = 0
1583  do n = 1, overlap%count
1584  dir = overlap%dir(n)
1585  if(do_dir(dir)) then
1586  get_mesgsize = get_mesgsize + overlap%msgsize(n)
1587  end if
1588  end do
1589 
1590  end function get_mesgsize
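! --- Editor's note (illustrative, not part of the original file): get_mesgsize counts
! only the overlap segments whose direction flag is enabled in do_dir; callers turn
! that point count into a word count by scaling with the vertical extent and the
! number of fields, exactly as set_group_update does further down, e.g.
!
!    msgsize_s = get_mesgsize(update_s%recv(m), recv_s) * ksize * nscalar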
1591 
1592  !#############################################################################
1593  subroutine mpp_set_domain_symmetry(domain, symmetry)
1594  type(domain2D), intent(inout) :: domain
1595  logical, intent(in ) :: symmetry
1596 
1597  domain%symmetry = symmetry
1598 
1599  end subroutine mpp_set_domain_symmetry
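! --- Editor's sketch (illustrative; assumes mpp_set_domain_symmetry is exported by
! mpp_domains_mod and "domain" is an already-defined decomposition): the routine
! simply toggles the symmetry flag on an existing domain2D, e.g.
!
!    use mpp_domains_mod, only: domain2D, mpp_set_domain_symmetry
!    type(domain2D) :: domain
!    ...
!    call mpp_set_domain_symmetry(domain, .TRUE.)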
1600 
1601 
1602  subroutine mpp_copy_domain1D(domain_in, domain_out)
1603  type(domain1D), intent(in) :: domain_in
1604  type(domain1D), intent(inout) :: domain_out
1605 
1606  domain_out%compute = domain_in%compute
1607  domain_out%data = domain_in%data
1609  domain_out%memory = domain_in%memory
1613 
1614  end subroutine mpp_copy_domain1D
1615 
1616  !#################################################################
1617  !z1l: This is not fully implemented. The current purpose is to make
1618  ! it work in read_data.
1619  subroutine mpp_copy_domain2D(domain_in, domain_out)
1620  type(domain2D), intent(in) :: domain_in
1621  type(domain2D), intent(inout) :: domain_out
1622 
1623  integer :: n, ntiles
1624 
1627  domain_out%fold = domain_in%fold
1629  domain_out%symmetry = domain_in%symmetry
1630  domain_out%whalo = domain_in%whalo
1631  domain_out%ehalo = domain_in%ehalo
1632  domain_out%shalo = domain_in%shalo
1633  domain_out%nhalo = domain_in%nhalo
1635  domain_out%max_ntile_pe = domain_in%max_ntile_pe
1636  domain_out%ncontacts = domain_in%ncontacts
1637  domain_out%rotated_ninety = domain_in%rotated_ninety
1638  domain_out%initialized = domain_in%initialized
1639  domain_out%tile_root_pe = domain_in%tile_root_pe
1642 
1643  ntiles = size(domain_in%x(:))
1644  allocate(domain_out%x(ntiles), domain_out%y(ntiles), domain_out%tile_id(ntiles) )
1645  do n = 1, ntiles
1646  call mpp_copy_domain1D(domain_in%x(n), domain_out%x(n))
1647  call mpp_copy_domain1D(domain_in%y(n), domain_out%y(n))
1648  enddo
1649  domain_out%tile_id = domain_in%tile_id
1650 
1651  return
1652 
1653  end subroutine mpp_copy_domain2D
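! --- Editor's sketch (illustrative; assumes the generic mpp_copy_domain interface of
! mpp_domains_mod resolves to the 1D/2D routines above). Per the z1l note, only the
! pieces needed by read_data are copied, so treat the result as a lightweight clone
! of the decomposition, not a full deep copy:
!
!    use mpp_domains_mod, only: domain2D, mpp_copy_domain
!    type(domain2D) :: domain_in, domain_io
!    ...
!    call mpp_copy_domain(domain_in, domain_io)  ! halos, tiles and x/y axes now mirror domain_in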
1654 
1655  !######################################################################
1656  subroutine set_group_update(group, domain)
1657  type(mpp_group_update_type), intent(inout) :: group
1658  type(domain2D), intent(inout) :: domain
1659  integer :: nscalar, nvector, nlist
1660  integer :: nsend, nrecv, nsend_old, nrecv_old
1661  integer :: nsend_s, nsend_x, nsend_y
1662  integer :: nrecv_s, nrecv_x, nrecv_y
1663  integer :: update_buffer_pos, tot_recv_size, tot_send_size
1664  integer :: msgsize_s, msgsize_x, msgsize_y, msgsize
1665  logical :: recv_s(8), send_s(8)
1666  logical :: recv_x(8), send_x(8), recv_y(8), send_y(8)
1667  integer :: ntot, n, l, m, ksize
1668  integer :: i_s, i_x, i_y, rank_s, rank_x, rank_y, rank
1669  integer :: ind_s(3*MAXOVERLAP)
1670  integer :: ind_x(3*MAXOVERLAP)
1671  integer :: ind_y(3*MAXOVERLAP)
1672  integer :: pelist(3*MAXOVERLAP), to_pe_list(3*MAXOVERLAP)
1673  integer :: buffer_pos_recv(3*MAXOVERLAP), buffer_pos_send(3*MAXOVERLAP)
1674  integer :: recv_size(3*MAXOVERLAP), send_size(3*MAXOVERLAP)
1675  integer :: position_x, position_y, npack, nunpack, dir
1676  integer :: pack_buffer_pos, unpack_buffer_pos
1677  integer :: omp_get_num_threads, nthreads
1678  character(len=8) :: text
1679  type(overlap_type), pointer :: overPtr => NULL()
1680  type(overlapSpec), pointer :: update_s => NULL()
1681  type(overlapSpec), pointer :: update_x => NULL()
1682  type(overlapSpec), pointer :: update_y => NULL()
1683 
1684  nscalar = group%nscalar
1685  nvector = group%nvector
1686 
1687  !--- get the overlap data type
1688  select case(group%gridtype)
1689  case (AGRID)
1690  position_x = CENTER
1691  position_y = CENTER
1692  case (BGRID_NE, BGRID_SW)
1693  position_x = CORNER
1694  position_y = CORNER
1695  case (CGRID_NE, CGRID_SW)
1696  position_x = EAST
1697  position_y = NORTH
1698  case (DGRID_NE, DGRID_SW)
1699  position_x = NORTH
1700  position_y = EAST
1701  case default
1702  call mpp_error(FATAL, "set_group_update: invalid value of gridtype")
1703  end select
1704  if(nscalar>0) then
1705  update_s => search_update_overlap(domain, group%whalo_s, group%ehalo_s, &
1706  group%shalo_s, group%nhalo_s, group%position)
1707  endif
1708  if(nvector>0) then
1709  update_x => search_update_overlap(domain, group%whalo_v, group%ehalo_v, &
1710  group%shalo_v, group%nhalo_v, position_x)
1711  update_y => search_update_overlap(domain, group%whalo_v, group%ehalo_v, &
1712  group%shalo_v, group%nhalo_v, position_y)
1713  endif
1714 
1715  if(nscalar > 0) then
1716  recv_s = group%recv_s
1717  send_s = recv_s
1718  endif
1719  if(nvector > 0) then
1720  recv_x = group%recv_x
1721  send_x = recv_x
1722  recv_y = group%recv_y
1723  send_y = recv_y
1724  end if
1725  nlist = size(domain%list(:))
1726  group%initialized = .true.
1727  nsend_s = 0; nsend_x = 0; nsend_y = 0
1728  nrecv_s = 0; nrecv_x = 0; nrecv_y = 0
1729 
1730  if(nscalar > 0) then
1731  !--- This check could not be done because of memory domain
1732 ! if( group%isize_s .NE. (group%ie_s-group%is_s+1) .OR. group%jsize_s .NE. (group%je_s-group%js_s+1)) &
1733 ! call mpp_error(FATAL, "set_group_update: mismatch of size of the field and domain memory domain")
1734  nsend_s = update_s%nsend
1735  nrecv_s = update_s%nrecv
1736  endif
1737 
1738  !--- ksize_s must equal ksize_v
1739  if(nvector > 0 .AND. nscalar > 0) then
1740  if(group%ksize_s .NE. group%ksize_v) then
1741  call mpp_error(FATAL, "set_group_update: ksize_s and ksize_v are not equal")
1742  endif
1743  ksize = group%ksize_s
1744  else if (nscalar > 0) then
1745  ksize = group%ksize_s
1746  else if (nvector > 0) then
1747  ksize = group%ksize_v
1748  else
1749  call mpp_error(FATAL, "set_group_update: nscalar and nvector are all 0")
1750  endif
1751 
1752  nthreads = 1
1753 !$OMP PARALLEL
1754 !$ nthreads = omp_get_num_threads()
1755 !$OMP END PARALLEL
1756  if( nthreads > nthread_control_loop ) then
1757  group%k_loop_inside = .FALSE.
1758  else
1759  group%k_loop_inside = .TRUE.
1760  endif
1761 
1762  if(nvector > 0) then
1763  !--- This check could not be done because of memory domain
1764 ! if( group%isize_x .NE. (group%ie_x-group%is_x+1) .OR. group%jsize_x .NE. (group%je_x-group%js_x+1)) &
1765 ! call mpp_error(FATAL, "set_group_update: mismatch of size of the fieldx and domain memory domain")
1766 ! if( group%isize_y .NE. (group%ie_y-group%is_y+1) .OR. group%jsize_y .NE. (group%je_y-group%js_y+1)) &
1767 ! call mpp_error(FATAL, "set_group_update: mismatch of size of the fieldy and domain memory domain")
1768  nsend_x = update_x%nsend
1769  nrecv_x = update_x%nrecv
1770  nsend_y = update_y%nsend
1771  nrecv_y = update_y%nrecv
1772  endif
1773 
1774  !figure out message size for each processor.
1775  ntot = nrecv_s + nrecv_x + nrecv_y
1776  if(ntot > 3*MAXOVERLAP) call mpp_error(FATAL, "set_group_update: ntot is greater than 3*MAXOVERLAP")
1777  n = 1
1778  i_s = 1
1779  i_x = 1
1780  i_y = 1
1781  ind_s = -1
1782  ind_x = -1
1783  ind_y = -1
1784  nrecv = 0
1785  do while(n<=ntot)
1786  if( i_s <= nrecv_s ) then
1787  rank_s = update_s%recv(i_s)%pe-domain%pe
1788  if(rank_s .LE. 0) rank_s = rank_s + nlist
1789  else
1790  rank_s = -1
1791  endif
1792  if( i_x <= nrecv_x ) then
1793  rank_x = update_x%recv(i_x)%pe-domain%pe
1794  if(rank_x .LE. 0) rank_x = rank_x + nlist
1795  else
1796  rank_x = -1
1797  endif
1798  if( i_y <= nrecv_y ) then
1799  rank_y = update_y%recv(i_y)%pe-domain%pe
1800  if(rank_y .LE. 0) rank_y = rank_y + nlist
1801  else
1802  rank_y = -1
1803  endif
1804  nrecv = nrecv + 1
1805  rank = maxval((/rank_s, rank_x, rank_y/))
1806  if(rank == rank_s) then
1807  n = n + 1
1808  ind_s(nrecv) = i_s
1809  pelist(nrecv) = update_s%recv(i_s)%pe
1810  i_s = i_s + 1
1811  endif
1812  if(rank == rank_x) then
1813  n = n + 1
1814  ind_x(nrecv) = i_x
1815  pelist(nrecv) = update_x%recv(i_x)%pe
1816  i_x = i_x + 1
1817  endif
1818  if(rank == rank_y) then
1819  n = n + 1
1820  ind_y(nrecv) = i_y
1821  pelist(nrecv) = update_y%recv(i_y)%pe
1822  i_y = i_y + 1
1823  endif
1824  enddo
1825 
1826  nrecv_old = nrecv
1827  nrecv = 0
1828  update_buffer_pos = 0
1829  tot_recv_size = 0
1830 
1831  !--- setup for recv
1832  do l = 1, nrecv_old
1833  msgsize_s = 0
1834  msgsize_x = 0
1835  msgsize_y = 0
1836  m = ind_s(l)
1837  if(m>0) msgsize_s = get_mesgsize(update_s%recv(m), recv_s)*ksize*nscalar
1838  m = ind_x(l)
1839  if(m>0) msgsize_x = get_mesgsize(update_x%recv(m), recv_x)*ksize*nvector
1840  m = ind_y(l)
1841  if(m>0) msgsize_y = get_mesgsize(update_y%recv(m), recv_y)*ksize*nvector
1842  msgsize = msgsize_s + msgsize_x + msgsize_y
1843  if( msgsize.GT.0 )then
1844  tot_recv_size = tot_recv_size + msgsize
1845  nrecv = nrecv + 1
1846  if(nrecv > MAXOVERLAP) then
1847  call mpp_error(FATAL, "set_group_update: nrecv is greater than MAXOVERLAP, increase MAXOVERLAP")
1848  endif
1849  group%from_pe(nrecv) = pelist(l)
1850  group%recv_size(nrecv) = msgsize
1851  group%buffer_pos_recv(nrecv) = update_buffer_pos
1852  update_buffer_pos = update_buffer_pos + msgsize
1853  end if
1854  end do
1855  group%nrecv = nrecv
1856 
1857  !--- setup for unpack
1858  nunpack = 0
1859  unpack_buffer_pos = 0
1860  do l = 1, nrecv_old
1861  m = ind_s(l)
1862  if(m>0) then
1863  overptr => update_s%recv(m)
1864  do n = 1, overptr%count
1865  dir = overptr%dir(n)
1866  if(recv_s(dir)) then
1867  nunpack = nunpack + 1
1868  if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
1869  "set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 1")
1870  group%unpack_type(nunpack) = FIELD_S
1871  group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
1872  group%unpack_rotation(nunpack) = overptr%rotation(n)
1873  group%unpack_is(nunpack) = overptr%is(n)
1874  group%unpack_ie(nunpack) = overptr%ie(n)
1875  group%unpack_js(nunpack) = overptr%js(n)
1876  group%unpack_je(nunpack) = overptr%je(n)
1877  group%unpack_size(nunpack) = overptr%msgsize(n)*nscalar
1878  unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
1879  end if
1880  end do
1881  end if
1882 
1883  m = ind_x(l)
1884  if(m>0) then
1885  overptr => update_x%recv(m)
1886  do n = 1, overptr%count
1887  dir = overptr%dir(n)
1888  if(recv_x(dir)) then
1889  nunpack = nunpack + 1
1890  if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
1891  "set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 2")
1892  group%unpack_type(nunpack) = FIELD_X
1893  group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
1894  group%unpack_rotation(nunpack) = overptr%rotation(n)
1895  group%unpack_is(nunpack) = overptr%is(n)
1896  group%unpack_ie(nunpack) = overptr%ie(n)
1897  group%unpack_js(nunpack) = overptr%js(n)
1898  group%unpack_je(nunpack) = overptr%je(n)
1899  group%unpack_size(nunpack) = overptr%msgsize(n)*nvector
1900  unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
1901  end if
1902  end do
1903  end if
1904 
1905  m = ind_y(l)
1906  if(m>0) then
1907  overptr => update_y%recv(m)
1908  do n = 1, overptr%count
1909  dir = overptr%dir(n)
1910  if(recv_y(dir)) then
1911  nunpack = nunpack + 1
1912  if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
1913  "set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 3")
1914  group%unpack_type(nunpack) = FIELD_Y
1915  group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
1916  group%unpack_rotation(nunpack) = overptr%rotation(n)
1917  group%unpack_is(nunpack) = overptr%is(n)
1918  group%unpack_ie(nunpack) = overptr%ie(n)
1919  group%unpack_js(nunpack) = overptr%js(n)
1920  group%unpack_je(nunpack) = overptr%je(n)
1921  group%unpack_size(nunpack) = overptr%msgsize(n)*nvector
1922  unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
1923  end if
1924  end do
1925  end if
1926  end do
1927  group%nunpack = nunpack
1928 
1929  if(update_buffer_pos .NE. unpack_buffer_pos ) call mpp_error(FATAL, &
1930  "set_group_update: update_buffer_pos .NE. unpack_buffer_pos")
1931 
1932  !figure out message size for each processor.
1933  ntot = nsend_s + nsend_x + nsend_y
1934  n = 1
1935  i_s = 1
1936  i_x = 1
1937  i_y = 1
1938  ind_s = -1
1939  ind_x = -1
1940  ind_y = -1
1941  nsend = 0
1942  do while(n<=ntot)
1943  if( i_s <= nsend_s ) then
1944  rank_s = update_s%send(i_s)%pe-domain%pe
1945  if(rank_s .LT. 0) rank_s = rank_s + nlist
1946  else
1947  rank_s = nlist+1
1948  endif
1949  if( i_x <= nsend_x ) then
1950  rank_x = update_x%send(i_x)%pe-domain%pe
1951  if(rank_x .LT. 0) rank_x = rank_x + nlist
1952  else
1953  rank_x = nlist+1
1954  endif
1955  if( i_y <= nsend_y ) then
1956  rank_y = update_y%send(i_y)%pe-domain%pe
1957  if(rank_y .LT. 0) rank_y = rank_y + nlist
1958  else
1959  rank_y = nlist+1
1960  endif
1961  nsend = nsend + 1
1962  rank = minval((/rank_s, rank_x, rank_y/))
1963  if(rank == rank_s) then
1964  n = n + 1
1965  ind_s(nsend) = i_s
1966  pelist(nsend) = update_s%send(i_s)%pe
1967  i_s = i_s + 1
1968  endif
1969  if(rank == rank_x) then
1970  n = n + 1
1971  ind_x(nsend) = i_x
1972  pelist(nsend) = update_x%send(i_x)%pe
1973  i_x = i_x + 1
1974  endif
1975  if(rank == rank_y) then
1976  n = n + 1
1977  ind_y(nsend) = i_y
1978  pelist(nsend) = update_y%send(i_y)%pe
1979  i_y = i_y + 1
1980  endif
1981  enddo
1982 
1983  nsend_old = nsend
1984  nsend = 0
1985  tot_send_size = 0
1986  do l = 1, nsend_old
1987  msgsize_s = 0
1988  msgsize_x = 0
1989  msgsize_y = 0
1990  m = ind_s(l)
1991  if(m>0) msgsize_s = get_mesgsize(update_s%send(m), send_s)*ksize*nscalar
1992  m = ind_x(l)
1993  if(m>0) msgsize_x = get_mesgsize(update_x%send(m), send_x)*ksize*nvector
1994  m = ind_y(l)
1995  if(m>0) msgsize_y = get_mesgsize(update_y%send(m), send_y)*ksize*nvector
1996  msgsize = msgsize_s + msgsize_x + msgsize_y
1997  if( msgsize.GT.0 )then
1998  tot_send_size = tot_send_size + msgsize
1999  nsend = nsend + 1
2000  if(nsend > MAXOVERLAP) then
2001  call mpp_error(FATAL, "set_group_update: nsend is greater than MAXOVERLAP, increase MAXOVERLAP")
2002  endif
2003  send_size(nsend) = msgsize
2004  group%to_pe(nsend) = pelist(l)
2005  group%buffer_pos_send(nsend) = update_buffer_pos
2006  group%send_size(nsend) = msgsize
2007  update_buffer_pos = update_buffer_pos + msgsize
2008  end if
2009  end do
2010  group%nsend = nsend
2011 
2012  !--- setup for pack
2013  npack = 0
2014  pack_buffer_pos = unpack_buffer_pos
2015  do l = 1, nsend_old
2016  m = ind_s(l)
2017  if(m>0) then
2018  overptr => update_s%send(m)
2019  do n = 1, overptr%count
2020  dir = overptr%dir(n)
2021  if(send_s(dir)) then
2022  npack = npack + 1
2023  if(npack > MAXOVERLAP) call mpp_error(FATAL, &
2024  "set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 1")
2025  group%pack_type(npack) = FIELD_S
2026  group%pack_buffer_pos(npack) = pack_buffer_pos
2027  group%pack_rotation(npack) = overptr%rotation(n)
2028  group%pack_is(npack) = overptr%is(n)
2029  group%pack_ie(npack) = overptr%ie(n)
2030  group%pack_js(npack) = overptr%js(n)
2031  group%pack_je(npack) = overptr%je(n)
2032  group%pack_size(npack) = overptr%msgsize(n)*nscalar
2033  pack_buffer_pos = pack_buffer_pos + group%pack_size(npack)*ksize
2034  end if
2035  end do
2036  end if
2037 
2038  m = ind_x(l)
2039  if(m>0) then
2040  overptr => update_x%send(m)
2041  do n = 1, overptr%count
2042  dir = overptr%dir(n)
2043  !--- nonsym_edge update is not for rotation of 90 or -90 degree ( cubic sphere grid )
2044  if( group%nonsym_edge .and. (overptr%rotation(n)==NINETY .or. &
2045  overptr%rotation(n)==MINUS_NINETY) ) then
2046  call mpp_error(FATAL, 'set_group_update: flags=NONSYMEDGEUPDATE is not compatible '// &
2047  'with 90 or -90 degree rotation (normally cubic sphere grid)' )
2048  endif
2049  if(send_x(dir)) then
2050  npack = npack + 1
2051  if(npack > MAXOVERLAP) call mpp_error(FATAL, &
2052  "set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 2")
2053  group%pack_type(npack) = FIELD_X
2054  group%pack_buffer_pos(npack) = pack_buffer_pos
2055  group%pack_rotation(npack) = overptr%rotation(n)
2056  group%pack_is(npack) = overptr%is(n)
2057  group%pack_ie(npack) = overptr%ie(n)
2058  group%pack_js(npack) = overptr%js(n)
2059  group%pack_je(npack) = overptr%je(n)
2060  group%pack_size(npack) = overptr%msgsize(n)*nvector
2061  pack_buffer_pos = pack_buffer_pos + group%pack_size(npack)*ksize
2062  end if
2063  end do
2064  end if
2065 
2066  m = ind_y(l)
2067  if(m>0) then
2068  overptr => update_y%send(m)
2069  do n = 1, overptr%count
2070  dir = overptr%dir(n)
2071  if( group%nonsym_edge .and. (overptr%rotation(n)==NINETY .or. &
2072  overptr%rotation(n)==MINUS_NINETY) ) then
2073  call mpp_error(FATAL, 'set_group_update: flags=NONSYMEDGEUPDATE is not compatible '// &
2074  'with 90 or -90 degree rotation (normally cubic sphere grid)' )
2075  endif
2076  if(send_y(dir)) then
2077  npack = npack + 1
2078  if(npack > MAXOVERLAP) call mpp_error(FATAL, &
2079  "set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 3")
2080  group%pack_type(npack) = FIELD_Y
2081  group%pack_buffer_pos(npack) = pack_buffer_pos
2082  group%pack_rotation(npack) = overptr%rotation(n)
2083  group%pack_is(npack) = overptr%is(n)
2084  group%pack_ie(npack) = overptr%ie(n)
2085  group%pack_js(npack) = overptr%js(n)
2086  group%pack_je(npack) = overptr%je(n)
2087  group%pack_size(npack) = overptr%msgsize(n)*nvector
2088  pack_buffer_pos = pack_buffer_pos + group%pack_size(npack)*ksize
2089  end if
2090  end do
2091  end if
2092  end do
2093  group%npack = npack
2094  if(update_buffer_pos .NE. pack_buffer_pos ) call mpp_error(FATAL, &
2095  "set_group_update: update_buffer_pos .NE. pack_buffer_pos")
2096 
2097  !--- make sure the buffer is large enough
2098  mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, tot_recv_size+tot_send_size )
2099 
2100  if( mpp_domains_stack_hwm .GT. mpp_domains_stack_size ) then
2101  write( text,'(i8)' )mpp_domains_stack_hwm
2102  call mpp_error( FATAL, 'set_group_update: mpp_domains_stack overflow, '// &
2103  'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
2104  end if
2105 
2106  group%tot_msgsize = tot_recv_size+tot_send_size
2107 
2108 end subroutine set_group_update
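! --- Editor's note (illustrative, not part of the original file): set_group_update
! merges the scalar, x and y overlap lists into one message per remote PE. Receives
! are matched by taking the largest wrap-around distance among the three list heads
! (absent lists contribute -1), sends by taking the smallest (absent lists contribute
! nlist+1), presumably so paired sends and receives are staggered around the PE list.
! The resulting buffer layout, with R = tot_recv_size and S = tot_send_size words, is:
!
!    receive segments at buffer_pos_recv(1..group%nrecv), occupying words 1..R
!    send segments at buffer_pos_send(1..group%nsend), occupying words R+1..R+S
!    group%tot_msgsize = R + S, checked against the mpp_domains stack size above.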
2109 
2110 
2111 !######################################################################
2112  subroutine mpp_clear_group_update(group)
2113  type(mpp_group_update_type), intent(inout) :: group
2114 
2115  group%nscalar = 0
2116  group%nvector = 0
2117  group%nsend = 0
2118  group%nrecv = 0
2119  group%npack = 0
2120  group%nunpack = 0
2121  group%initialized = .false.
2122 
2123  end subroutine mpp_clear_group_update
2124 
2125 !#####################################################################
2126  function mpp_group_update_initialized(group)
2127  type(mpp_group_update_type), intent(in) :: group
2128  logical :: mpp_group_update_initialized
2129 
2130  mpp_group_update_initialized = group%initialized
2131 
2132  end function mpp_group_update_initialized
2133 
2134 !#####################################################################
2135  function mpp_group_update_is_set(group)
2136  type(mpp_group_update_type), intent(in) :: group
2137  logical :: mpp_group_update_is_set
2138 
2139  mpp_group_update_is_set = (group%nscalar > 0 .OR. group%nvector > 0)
2140 
2141  end function mpp_group_update_is_set
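! --- Editor's sketch (illustrative; "group" is any mpp_group_update_type): the two
! queries above separate "fields have been registered" from "communication tables
! have been built", e.g.
!
!    if( mpp_group_update_is_set(group) .AND. .NOT. mpp_group_update_initialized(group) ) then
!       ! fields were added but set_group_update has not yet run for this group
!    endif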
2142 
2143 
2144 