!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
! <SUBROUTINE NAME="mpp_domains_set_stack_size">
!   Set user stack size.
!
!   This sets the size of an array that is used for internal storage by
!   <TT>mpp_domains</TT>. This array is used, for instance, to buffer the
!   data sent and received in halo updates.
!
!   This call has implied global synchronization. It should be
!   placed somewhere where all PEs can call it.
!
!   call mpp_domains_set_stack_size(n)
!
!   <IN NAME="n" TYPE="integer"></IN>
! </SUBROUTINE>
  subroutine mpp_domains_set_stack_size(n)
    !set the mpp_domains_stack variable to be at least n LONG words long
    integer, intent(in) :: n
    character(len=8)    :: text

    if( allocated(mpp_domains_stack) )deallocate(mpp_domains_stack)
    allocate( mpp_domains_stack(n) )
    if( allocated(mpp_domains_stack_nonblock) )deallocate(mpp_domains_stack_nonblock)
    allocate( mpp_domains_stack_nonblock(n) )

    write( text,'(i8)' )n
    if( mpp_pe().EQ.mpp_root_pe() )call mpp_error( NOTE, &
         'MPP_DOMAINS_SET_STACK_SIZE: stack size set to '//trim(text)//'.' )

  end subroutine mpp_domains_set_stack_size
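! Illustrative usage sketch (not part of the original source): a component
! that performs large halo updates can raise the stack size once during
! initialization; the value 32768 below is an arbitrary example.
!
!   call mpp_domains_init()
!   call mpp_domains_set_stack_size(32768)   ! implied global sync: call on all PEs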
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!              MPP_DOMAINS: overloaded operators (==, /=)                     !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

  function mpp_domain1D_eq( a, b )
    logical                    :: mpp_domain1D_eq
    type(domain1D), intent(in) :: a, b

    mpp_domain1D_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
                        a%compute%end  .EQ.b%compute%end )
!   if( mpp_domain1D_eq )mpp_domain1D_eq = ASSOCIATED(a%list) .AND. ASSOCIATED(b%list)
!   if( mpp_domain1D_eq )mpp_domain1D_eq = size(a%list(:)).EQ.size(b%list(:))
!   if( mpp_domain1D_eq )mpp_domain1D_eq = ALL(a%list%pe.EQ.b%list%pe)

  end function mpp_domain1D_eq

  function mpp_domain1D_ne( a, b )
    logical                    :: mpp_domain1D_ne
    type(domain1D), intent(in) :: a, b

    mpp_domain1D_ne = .NOT. ( a.EQ.b )
  end function mpp_domain1D_ne
  function mpp_domain2D_eq( a, b )
    logical                    :: mpp_domain2D_eq
    type(domain2D), intent(in) :: a, b
    integer                    :: n

    mpp_domain2D_eq = size(a%x(:)) .EQ. size(b%x(:))
    do n = 1, size(a%x(:))
       if(mpp_domain2D_eq) mpp_domain2D_eq = a%x(n).EQ.b%x(n) .AND. a%y(n).EQ.b%y(n)
    end do
    if( mpp_domain2D_eq .AND. ((a%pe.EQ.NULL_PE).OR.(b%pe.EQ.NULL_PE)) )return !NULL_DOMAIN2D
    if( mpp_domain2D_eq )mpp_domain2D_eq = ASSOCIATED(a%list) .AND. ASSOCIATED(b%list)
    if( mpp_domain2D_eq )mpp_domain2D_eq = size(a%list(:)).EQ.size(b%list(:))
    if( mpp_domain2D_eq )mpp_domain2D_eq = ALL(a%list%pe.EQ.b%list%pe)
    if( mpp_domain2D_eq )mpp_domain2D_eq = a%symmetry .eqv. b%symmetry

  end function mpp_domain2D_eq
!#####################################################################

  function mpp_domain2D_ne( a, b )
    logical                    :: mpp_domain2D_ne
    type(domain2D), intent(in) :: a, b

    mpp_domain2D_ne = .NOT. ( a.EQ.b )
  end function mpp_domain2D_ne
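! Illustrative usage sketch (not part of the original source): the overloaded
! operators defined above allow decompositions to be compared directly.
! atm_domain and lnd_domain are hypothetical domain2D variables.
!
!   if( atm_domain .NE. lnd_domain ) &
!        call mpp_error( FATAL, 'atm and land decompositions must match' )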
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!     MPP_GET and SET routines: retrieve various components of domains       !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

  subroutine mpp_get_compute_domain1D( domain, begin, end, size, max_size, is_global )
    type(domain1D),        intent(in) :: domain
    integer, intent(out), optional :: begin, end, size, max_size
    logical, intent(out), optional :: is_global

    if( PRESENT(begin)     )begin     = domain%compute%begin
    if( PRESENT(end)       )end       = domain%compute%end
    if( PRESENT(size)      )size      = domain%compute%size
    if( PRESENT(max_size)  )max_size  = domain%compute%max_size
    if( PRESENT(is_global) )is_global = domain%compute%is_global

  end subroutine mpp_get_compute_domain1D

!#####################################################################
  subroutine mpp_get_data_domain1D( domain, begin, end, size, max_size, is_global )
    type(domain1D),        intent(in) :: domain
    integer, intent(out), optional :: begin, end, size, max_size
    logical, intent(out), optional :: is_global

    if( PRESENT(begin)     )begin     = domain%data%begin
    if( PRESENT(end)       )end       = domain%data%end
    if( PRESENT(size)      )size      = domain%data%size
    if( PRESENT(max_size)  )max_size  = domain%data%max_size
    if( PRESENT(is_global) )is_global = domain%data%is_global

  end subroutine mpp_get_data_domain1D

!#####################################################################
  subroutine mpp_get_global_domain1D( domain, begin, end, size, max_size )
    type(domain1D),        intent(in) :: domain
    integer, intent(out), optional :: begin, end, size, max_size

    if( PRESENT(begin)    )begin    = domain%global%begin
    if( PRESENT(end)      )end      = domain%global%end
    if( PRESENT(size)     )size     = domain%global%size
    if( PRESENT(max_size) )max_size = domain%global%max_size

  end subroutine mpp_get_global_domain1D

!#####################################################################
  subroutine mpp_get_memory_domain1D( domain, begin, end, size, max_size, is_global )
    type(domain1D),        intent(in) :: domain
    integer, intent(out), optional :: begin, end, size, max_size
    logical, intent(out), optional :: is_global

    if( PRESENT(begin)     )begin     = domain%memory%begin
    if( PRESENT(end)       )end       = domain%memory%end
    if( PRESENT(size)      )size      = domain%memory%size
    if( PRESENT(max_size)  )max_size  = domain%memory%max_size
    if( PRESENT(is_global) )is_global = domain%memory%is_global

  end subroutine mpp_get_memory_domain1D
!#####################################################################
  subroutine mpp_get_compute_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
                                       x_is_global, y_is_global, tile_count, position )
    type(domain2D),     intent(in) :: domain
    integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
    logical, intent(out), optional :: x_is_global, y_is_global
    integer, intent(in),  optional :: tile_count, position
    integer :: tile, ishift, jshift

    tile = 1
    if(present(tile_count)) tile = tile_count

    call mpp_get_compute_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
    call mpp_get_compute_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
    call mpp_get_domain_shift( domain, ishift, jshift, position )
    if( PRESENT(xend)     ) xend      = xend + ishift
    if( PRESENT(yend)     ) yend      = yend + jshift
    if( PRESENT(xsize)    ) xsize     = xsize + ishift
    if( PRESENT(ysize)    ) ysize     = ysize + jshift
    if( PRESENT(xmax_size)) xmax_size = xmax_size + ishift
    if( PRESENT(ymax_size)) ymax_size = ymax_size + jshift

  end subroutine mpp_get_compute_domain2D
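! Illustrative usage sketch (not part of the original source): query the
! compute-domain bounds of the current PE and allocate a field on them.
! The names isc, iec, jsc, jec and field are hypothetical.
!
!   integer :: isc, iec, jsc, jec
!   real, allocatable :: field(:,:)
!   call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
!   allocate( field(isc:iec, jsc:jec) )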
!#####################################################################
  subroutine mpp_get_data_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
                                    x_is_global, y_is_global, tile_count, position )
    type(domain2D),     intent(in) :: domain
    integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
    logical, intent(out), optional :: x_is_global, y_is_global
    integer, intent(in),  optional :: tile_count, position
    integer :: tile, ishift, jshift

    tile = 1
    if(present(tile_count)) tile = tile_count

    call mpp_get_data_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
    call mpp_get_data_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
    call mpp_get_domain_shift( domain, ishift, jshift, position )
    if( PRESENT(xend)     ) xend      = xend + ishift
    if( PRESENT(yend)     ) yend      = yend + jshift
    if( PRESENT(xsize)    ) xsize     = xsize + ishift
    if( PRESENT(ysize)    ) ysize     = ysize + jshift
    if( PRESENT(xmax_size)) xmax_size = xmax_size + ishift
    if( PRESENT(ymax_size)) ymax_size = ymax_size + jshift

  end subroutine mpp_get_data_domain2D

!#####################################################################
  subroutine mpp_get_global_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
                                      tile_count, position )
    type(domain2D),     intent(in) :: domain
    integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
    integer, intent(in),  optional :: tile_count, position
    integer :: tile, ishift, jshift

    tile = 1
    if(present(tile_count)) tile = tile_count

    call mpp_get_global_domain( domain%x(tile), xbegin, xend, xsize, xmax_size )
    call mpp_get_global_domain( domain%y(tile), ybegin, yend, ysize, ymax_size )
    call mpp_get_domain_shift( domain, ishift, jshift, position )
    if( PRESENT(xend)     ) xend      = xend + ishift
    if( PRESENT(yend)     ) yend      = yend + jshift
    if( PRESENT(xsize)    ) xsize     = xsize + ishift
    if( PRESENT(ysize)    ) ysize     = ysize + jshift
    if( PRESENT(xmax_size)) xmax_size = xmax_size + ishift
    if( PRESENT(ymax_size)) ymax_size = ymax_size + jshift

  end subroutine mpp_get_global_domain2D

!#####################################################################
  subroutine mpp_get_memory_domain2D( domain, xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size, &
                                      x_is_global, y_is_global, position)
    type(domain2D),     intent(in) :: domain
    integer, intent(out), optional :: xbegin, xend, ybegin, yend, xsize, xmax_size, ysize, ymax_size
    logical, intent(out), optional :: x_is_global, y_is_global
    integer, intent(in),  optional :: position
    integer :: tile, ishift, jshift

    tile = 1
    call mpp_get_memory_domain( domain%x(tile), xbegin, xend, xsize, xmax_size, x_is_global )
    call mpp_get_memory_domain( domain%y(tile), ybegin, yend, ysize, ymax_size, y_is_global )
    call mpp_get_domain_shift( domain, ishift, jshift, position )
    if( PRESENT(xend)     ) xend      = xend + ishift
    if( PRESENT(yend)     ) yend      = yend + jshift
    if( PRESENT(xsize)    ) xsize     = xsize + ishift
    if( PRESENT(ysize)    ) ysize     = ysize + jshift
    if( PRESENT(xmax_size)) xmax_size = xmax_size + ishift
    if( PRESENT(ymax_size)) ymax_size = ymax_size + jshift

  end subroutine mpp_get_memory_domain2D
!#####################################################################
  subroutine mpp_set_compute_domain1D( domain, begin, end, size, is_global )
    type(domain1D), intent(inout) :: domain
    integer, intent(in), optional :: begin, end, size
    logical, intent(in), optional :: is_global

    if(present(begin)) domain%compute%begin = begin
    if(present(end))   domain%compute%end   = end
    if(present(size))  domain%compute%size  = size
    if(present(is_global)) domain%compute%is_global = is_global

  end subroutine mpp_set_compute_domain1D

!#####################################################################
  subroutine mpp_set_compute_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, &
                                       x_is_global, y_is_global, tile_count )
    type(domain2D), intent(inout) :: domain
    integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
    logical, intent(in), optional :: x_is_global, y_is_global
    integer, intent(in), optional :: tile_count
    integer :: tile

    tile = 1
    if(present(tile_count)) tile = tile_count

    call mpp_set_compute_domain(domain%x(tile), xbegin, xend, xsize, x_is_global)
    call mpp_set_compute_domain(domain%y(tile), ybegin, yend, ysize, y_is_global)

  end subroutine mpp_set_compute_domain2D

!#####################################################################
  subroutine mpp_set_data_domain1D( domain, begin, end, size, is_global )
    type(domain1D), intent(inout) :: domain
    integer, intent(in), optional :: begin, end, size
    logical, intent(in), optional :: is_global

    if(present(begin)) domain%data%begin = begin
    if(present(end))   domain%data%end   = end
    if(present(size))  domain%data%size  = size
    if(present(is_global)) domain%data%is_global = is_global

  end subroutine mpp_set_data_domain1D

!#####################################################################
  subroutine mpp_set_data_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, &
                                    x_is_global, y_is_global, tile_count )
    type(domain2D), intent(inout) :: domain
    integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
    logical, intent(in), optional :: x_is_global, y_is_global
    integer, intent(in), optional :: tile_count
    integer :: tile

    tile = 1
    if(present(tile_count)) tile = tile_count

    call mpp_set_data_domain(domain%x(tile), xbegin, xend, xsize, x_is_global)
    call mpp_set_data_domain(domain%y(tile), ybegin, yend, ysize, y_is_global)

  end subroutine mpp_set_data_domain2D

!#####################################################################
  subroutine mpp_set_global_domain1D( domain, begin, end, size)
    type(domain1D), intent(inout) :: domain
    integer, intent(in), optional :: begin, end, size

    if(present(begin)) domain%global%begin = begin
    if(present(end))   domain%global%end   = end
    if(present(size))  domain%global%size  = size

  end subroutine mpp_set_global_domain1D

!#####################################################################
  subroutine mpp_set_global_domain2D( domain, xbegin, xend, ybegin, yend, xsize, ysize, tile_count )
    type(domain2D), intent(inout) :: domain
    integer, intent(in), optional :: xbegin, xend, ybegin, yend, xsize, ysize
    integer, intent(in), optional :: tile_count
    integer :: tile

    tile = 1
    if(present(tile_count)) tile = tile_count
    call mpp_set_global_domain(domain%x(tile), xbegin, xend, xsize)
    call mpp_set_global_domain(domain%y(tile), ybegin, yend, ysize)

  end subroutine mpp_set_global_domain2D
!#####################################################################
! <SUBROUTINE NAME="mpp_get_domain_components">
!   Retrieve 1D components of 2D decomposition.
!
!   It is sometimes necessary to have direct recourse to the domain1D types
!   that compose a domain2D object. This call retrieves them.
!
!   call mpp_get_domain_components( domain, x, y )
!
!   <IN  NAME="domain" TYPE="type(domain2D)"></IN>
!   <OUT NAME="x,y"    TYPE="type(domain1D)"></OUT>
! </SUBROUTINE>
  subroutine mpp_get_domain_components( domain, x, y, tile_count )
    type(domain2D),           intent(in)    :: domain
    type(domain1D), optional, intent(inout) :: x, y
    integer,        optional, intent(in)    :: tile_count
    integer :: tile

    tile = 1
    if(present(tile_count)) tile = tile_count
    if( PRESENT(x) )x = domain%x(tile)
    if( PRESENT(y) )y = domain%y(tile)
  end subroutine mpp_get_domain_components
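! Illustrative usage sketch (not part of the original source): retrieve the
! 1D components and then query the x-axis decomposition on its own.
! xdom, ydom, isc and iec are hypothetical names.
!
!   type(domain1D) :: xdom, ydom
!   call mpp_get_domain_components( domain, xdom, ydom )
!   call mpp_get_compute_domain( xdom, isc, iec )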
!#####################################################################
  subroutine mpp_get_compute_domains1D( domain, begin, end, size )
    type(domain1D),                              intent(in) :: domain
    integer, intent(out), optional, dimension(:)            :: begin, end, size

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: must first call mpp_domains_init.' )
    !we use shape instead of size for error checks because size is used as an argument
    if( PRESENT(begin) )then
        if( any(shape(begin).NE.shape(domain%list)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: begin array size does not match domain.' )
        begin(:) = domain%list(:)%compute%begin
    end if
    if( PRESENT(end) )then
        if( any(shape(end).NE.shape(domain%list)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: end array size does not match domain.' )
        end(:) = domain%list(:)%compute%end
    end if
    if( PRESENT(size) )then
        if( any(shape(size).NE.shape(domain%list)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: size array size does not match domain.' )
        size(:) = domain%list(:)%compute%size
    end if

  end subroutine mpp_get_compute_domains1D
!#####################################################################
  subroutine mpp_get_compute_domains2D( domain, xbegin, xend, xsize, ybegin, yend, ysize, position )
    type(domain2D),                              intent(in) :: domain
    integer, intent(out), optional, dimension(:)            :: xbegin, xend, xsize, ybegin, yend, ysize
    integer, intent(in ), optional                          :: position
    integer :: i, ishift, jshift

    call mpp_get_domain_shift( domain, ishift, jshift, position )

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: must first call mpp_domains_init.' )

    if( PRESENT(xbegin) )then
        if( size(xbegin(:)).NE.size(domain%list(:)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xbegin array size does not match domain.' )
        do i = 1, size(xbegin(:))
           xbegin(i) = domain%list(i-1)%x(1)%compute%begin
        end do
    end if
    if( PRESENT(xend) )then
        if( size(xend(:)).NE.size(domain%list(:)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xend array size does not match domain.' )
        do i = 1, size(xend(:))
           xend(i) = domain%list(i-1)%x(1)%compute%end + ishift
        end do
    end if
    if( PRESENT(xsize) )then
        if( size(xsize(:)).NE.size(domain%list(:)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: xsize array size does not match domain.' )
        do i = 1, size(xsize(:))
           xsize(i) = domain%list(i-1)%x(1)%compute%size + ishift
        end do
    end if
    if( PRESENT(ybegin) )then
        if( size(ybegin(:)).NE.size(domain%list(:)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: ybegin array size does not match domain.' )
        do i = 1, size(ybegin(:))
           ybegin(i) = domain%list(i-1)%y(1)%compute%begin
        end do
    end if
    if( PRESENT(yend) )then
        if( size(yend(:)).NE.size(domain%list(:)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: yend array size does not match domain.' )
        do i = 1, size(yend(:))
           yend(i) = domain%list(i-1)%y(1)%compute%end + jshift
        end do
    end if
    if( PRESENT(ysize) )then
        if( size(ysize(:)).NE.size(domain%list(:)) ) &
             call mpp_error( FATAL, 'MPP_GET_COMPUTE_DOMAINS: ysize array size does not match domain.' )
        do i = 1, size(ysize(:))
           ysize(i) = domain%list(i-1)%y(1)%compute%size + jshift
        end do
    end if

  end subroutine mpp_get_compute_domains2D
!#####################################################################
  subroutine mpp_get_domain_extents1D(domain, xextent, yextent)
    type(domain2d),         intent(in)    :: domain
    integer, dimension(0:), intent(inout) :: xextent, yextent
    integer :: n

    if(domain%ntiles .NE. 1) call mpp_error(FATAL, "mpp_domains_util.inc(mpp_get_domain_extents1D): "// &
         "ntiles is more than 1, please use mpp_get_domain_extents2D")
    if(size(xextent) .NE. size(domain%x(1)%list(:))) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_domain_extents1D): size(xextent) does not equal to size(domain%x(1)%list(:)))")
    if(size(yextent) .NE. size(domain%y(1)%list(:))) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_domain_extents1D): size(yextent) does not equal to size(domain%y(1)%list(:)))")
    do n = 0, size(domain%x(1)%list(:))-1
       xextent(n) = domain%x(1)%list(n)%compute%size
    end do
    do n = 0, size(domain%y(1)%list(:))-1
       yextent(n) = domain%y(1)%list(n)%compute%size
    end do

  end subroutine mpp_get_domain_extents1D
!#####################################################################
! This will return xextent and yextent for each tile
  subroutine mpp_get_domain_extents2D(domain, xextent, yextent)
    type(domain2d),          intent(in)    :: domain
    integer, dimension(:,:), intent(inout) :: xextent, yextent
    integer :: nlist, ndivx, ndivy, n, m, tile

    nlist = size(domain%list(:))
    if(size(xextent,2) .NE. domain%ntiles .OR. size(yextent,2) .NE. domain%ntiles) call mpp_error(FATAL, &
         "mpp_domains_utile.inc: the second dimension size of xextent/yextent is not correct")
    ndivx = size(xextent,1); ndivy = size(yextent,1)

    xextent = 0; yextent = 0
    do n = 0, nlist-1
       do m = 1, size(domain%list(n)%tile_id(:))
          tile = domain%list(n)%tile_id(m)
       end do
    end do

  end subroutine mpp_get_domain_extents2D
!#####################################################################
  function mpp_get_domain_pe(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_domain_pe

    mpp_get_domain_pe = domain%pe
  end function mpp_get_domain_pe

  function mpp_get_domain_tile_root_pe(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_domain_tile_root_pe

    mpp_get_domain_tile_root_pe = domain%tile_root_pe
  end function mpp_get_domain_tile_root_pe
  function mpp_get_io_domain(domain)
    type(domain2d), intent(in) :: domain
    type(domain2d), pointer    :: mpp_get_io_domain

    if(ASSOCIATED(domain%io_domain)) then
       mpp_get_io_domain => domain%io_domain
    else
       mpp_get_io_domain => NULL()
    endif

  end function mpp_get_io_domain
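! Illustrative usage sketch (not part of the original source): the returned
! pointer is null when no I/O domain has been defined, so test it before use.
!
!   type(domain2d), pointer :: io_domain => NULL()
!   io_domain => mpp_get_io_domain(domain)
!   if( ASSOCIATED(io_domain) ) then
!      ! perform I/O through the smaller I/O decomposition
!   endif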
!#####################################################################
! <SUBROUTINE NAME="mpp_get_pelist1D" INTERFACE="mpp_get_pelist">
!   <IN  NAME="domain" TYPE="type(domain1D)"></IN>
!   <OUT NAME="pelist" TYPE="integer" DIM="(:)"></OUT>
!   <OUT NAME="pos"    TYPE="integer"></OUT>
! </SUBROUTINE>
  subroutine mpp_get_pelist1D( domain, pelist, pos )
    type(domain1D),     intent(in) :: domain
    integer,           intent(out) :: pelist(:)
    integer, optional, intent(out) :: pos
    integer :: ndivs

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'MPP_GET_PELIST: must first call mpp_domains_init.' )
    ndivs = size(domain%list(:))
    if( size(pelist(:)).NE.ndivs ) &
         call mpp_error( FATAL, 'MPP_GET_PELIST: pelist array size does not match domain.' )

    pelist(:) = domain%list(0:ndivs-1)%pe
    if( PRESENT(pos) )pos = domain%pos

  end subroutine mpp_get_pelist1D
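! Illustrative usage sketch (not part of the original source): size the pelist
! from the 1D layout before retrieving it; xdom, ndivs and pes are hypothetical.
!
!   integer :: ndivs
!   integer, allocatable :: pes(:)
!   call mpp_get_layout( xdom, ndivs )
!   allocate( pes(ndivs) )
!   call mpp_get_pelist( xdom, pes )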
!#####################################################################
! <SUBROUTINE NAME="mpp_get_pelist2D" INTERFACE="mpp_get_pelist">
!   <IN  NAME="domain" TYPE="type(domain2D)"></IN>
!   <OUT NAME="pelist" TYPE="integer" DIM="(:)"></OUT>
!   <OUT NAME="pos"    TYPE="integer"></OUT>
! </SUBROUTINE>
  subroutine mpp_get_pelist2D( domain, pelist, pos )
    type(domain2D),     intent(in) :: domain
    integer,           intent(out) :: pelist(:)
    integer, optional, intent(out) :: pos

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'MPP_GET_PELIST: must first call mpp_domains_init.' )
    if( size(pelist(:)).NE.size(domain%list(:)) ) &
         call mpp_error( FATAL, 'MPP_GET_PELIST: pelist array size does not match domain.' )

    pelist(:) = domain%list(:)%pe
    if( PRESENT(pos) )pos = domain%pos

  end subroutine mpp_get_pelist2D
!#####################################################################
! <SUBROUTINE NAME="mpp_get_layout1D" INTERFACE="mpp_get_layout">
!   <IN  NAME="domain" TYPE="type(domain1D)"></IN>
!   <OUT NAME="layout" TYPE="integer"></OUT>
! </SUBROUTINE>
  subroutine mpp_get_layout1D( domain, layout )
    type(domain1D), intent(in) :: domain
    integer,       intent(out) :: layout

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'MPP_GET_LAYOUT: must first call mpp_domains_init.' )

    layout = size(domain%list(:))

  end subroutine mpp_get_layout1D

!#####################################################################
! <SUBROUTINE NAME="mpp_get_layout2D" INTERFACE="mpp_get_layout">
!   <IN  NAME="domain" TYPE="type(domain2D)"></IN>
!   <OUT NAME="layout" TYPE="integer" DIM="(2)"></OUT>
! </SUBROUTINE>
  subroutine mpp_get_layout2D( domain, layout )
    type(domain2D), intent(in) :: domain
    integer,       intent(out) :: layout(2)

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'MPP_GET_LAYOUT: must first call mpp_domains_init.' )

    layout(1) = size(domain%x(1)%list(:))
    layout(2) = size(domain%y(1)%list(:))

  end subroutine mpp_get_layout2D
!#####################################################################
! <SUBROUTINE NAME="mpp_get_domain_shift">
!   Returns the shift value in the x- and y-direction according to the domain position.
!
!   When the domain is symmetric, one extra point may be needed in the
!   x- and/or y-direction. This routine returns the shift value based
!   on the position of the data.
!
!   call mpp_get_domain_shift( domain, ishift, jshift, position )
!
!   <IN NAME="domain" TYPE="type(domain2D)">
!     Predefined domain2D type containing the 2-d domain decomposition.
!   </IN>
!   <OUT NAME="ishift, jshift" TYPE="integer">
!     Return value will be 0 or 1.
!   </OUT>
!   <IN NAME="position" TYPE="integer">
!     Position of data. Its value can be CENTER, EAST, NORTH or CORNER.
!   </IN>
! </SUBROUTINE>
  subroutine mpp_get_domain_shift(domain, ishift, jshift, position)
    type(domain2D),     intent(in)  :: domain
    integer,            intent(out) :: ishift, jshift
    integer, optional,  intent(in)  :: position
    integer :: pos

    ishift = 0 ; jshift = 0
    pos = CENTER
    if(present(position)) pos = position

    if(domain%symmetry) then ! shift is non-zero only when the domain is symmetric.
       select case(pos)
       case(CORNER)
          ishift = 1; jshift = 1
       case(EAST)
          ishift = 1
       case(NORTH)
          jshift = 1
       end select
    end if

  end subroutine mpp_get_domain_shift
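! Illustrative usage sketch (not part of the original source): on a symmetric
! domain an EAST-positioned (C-grid u) field carries one extra point in x,
! which the shift values account for; u, isc, iec, jsc, jec are hypothetical.
!
!   integer :: ishift, jshift
!   call mpp_get_domain_shift( domain, ishift, jshift, position=EAST )
!   allocate( u(isc:iec+ishift, jsc:jec+jshift) )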
!#####################################################################

  subroutine mpp_get_neighbor_pe_1d(domain, direction, pe)
    ! Return PE to the right/left of this PE-domain.
    type(domain1D), intent(inout) :: domain
    integer,        intent(in)    :: direction
    integer,        intent(out)   :: pe

    select case (direction)
    ! neighbor on the left
    ! neighbor on the right
    if(ipos2 > npx-1) then
    if(ipos2 >= 0) pe = domain%list(ipos2)%pe

  end subroutine mpp_get_neighbor_pe_1d
!#####################################################################

  subroutine mpp_get_neighbor_pe_2d(domain, direction, pe)
    ! Return PE North/South/East/West of this PE-domain.
    ! direction must be NORTH, SOUTH, EAST or WEST.
    type(domain2D), intent(inout) :: domain
    integer,        intent(in)    :: direction
    integer,        intent(out)   :: pe
    integer :: ipos, jpos, ipos0, jpos0, npx, npy

    ipos0 = domain%x(1)%pos
    jpos0 = domain%y(1)%pos

    select case (direction)
    case default
       call mpp_error( FATAL, &
            & 'MPP_GET_NEIGHBOR_PE_2D: direction must be either NORTH, SOUTH, EAST or WEST.' )
    end select

    if( (ipos < 0 .or. ipos > npx-1) .and. domain%x(1)%cyclic ) then
       ipos = modulo(ipos, npx)
    endif
    if( (ipos < 0 .and. btest(domain%fold,WEST)) .or. &
      & (ipos > npx-1 .and. btest(domain%fold,EAST)) ) then
       ! E or W folded domain
    endif
    if( (jpos < 0 .or. jpos > npy-1) .and. domain%y(1)%cyclic ) then
       jpos = modulo(jpos, npy)
    endif
    if( (jpos < 0 .and. btest(domain%fold,SOUTH)) .or. &
      & (jpos > npy-1 .and. btest(domain%fold,NORTH)) ) then
    endif
    if(ipos >= 0 .and. ipos <= npx-1 .and. jpos >= 0 .and. jpos <= npy-1) then
       pe = domain%pearray(ipos, jpos)
    endif

  end subroutine mpp_get_neighbor_pe_2d
!#######################################################################

  subroutine nullify_domain2d_list(domain)
    type(domain2d), intent(inout) :: domain

    domain%list => NULL()

  end subroutine nullify_domain2d_list

!#######################################################################
  function mpp_domain_is_symmetry(domain)
    type(domain2d), intent(in) :: domain
    logical :: mpp_domain_is_symmetry

    mpp_domain_is_symmetry = domain%symmetry
  end function mpp_domain_is_symmetry

!#######################################################################
  function mpp_domain_is_initialized(domain)
    type(domain2d), intent(in) :: domain
    logical :: mpp_domain_is_initialized

    mpp_domain_is_initialized = domain%initialized
  end function mpp_domain_is_initialized
!#######################################################################
!--- private routine used only for mpp_update_domains. This routine
!--- compares whalo, ehalo, shalo, nhalo with the halo sizes used when defining "domain"
!--- to decide if an update is needed. It also checks the sign of whalo, ehalo, shalo and nhalo.
  function domain_update_is_needed(domain, whalo, ehalo, shalo, nhalo)
    type(domain2d), intent(in) :: domain
    integer,        intent(in) :: whalo, ehalo, shalo, nhalo
    logical :: domain_update_is_needed

    domain_update_is_needed = .true.

    if(whalo == 0 .AND. ehalo==0 .AND. shalo == 0 .AND. nhalo==0 ) then
       domain_update_is_needed = .false.
       call mpp_error(NOTE, &
            'mpp_domains_util.inc: halo size to be updated are all zero, no update will be done')
    end if
    if( (whalo == -domain%whalo .AND. domain%whalo .NE. 0) .or. &
        (ehalo == -domain%ehalo .AND. domain%ehalo .NE. 0) .or. &
        (shalo == -domain%shalo .AND. domain%shalo .NE. 0) .or. &
        (nhalo == -domain%nhalo .AND. domain%nhalo .NE. 0) ) then
       domain_update_is_needed = .false.
       call mpp_error(NOTE, 'mpp_domains_util.inc: at least one of w/e/s/n halo size to be updated '// &
            'is the inverse of the original halo when defining domain, no update will be done')
    end if

  end function domain_update_is_needed
!#######################################################################
! this routine finds the update overlap that matches the input halo sizes
! at the given position.
  function search_update_overlap(domain, whalo, ehalo, shalo, nhalo, position)
    type(domain2d), intent(inout) :: domain
    integer,        intent(in)    :: whalo, ehalo, shalo, nhalo
    integer,        intent(in)    :: position
    type(overlapSpec), pointer :: search_update_overlap
    type(overlapSpec), pointer :: update_ref
    integer :: ishift, jshift, shift

    shift = 0; if(domain%symmetry) shift = 1
    select case(position)
    case (CENTER)
       update_ref => domain%update_T
       ishift = 0; jshift = 0
    case (CORNER)
       update_ref => domain%update_C
       ishift = shift; jshift = shift
    case (NORTH)
       update_ref => domain%update_N
       ishift = 0; jshift = shift
    case (EAST)
       update_ref => domain%update_E
       ishift = shift; jshift = 0
    case default
       call mpp_error(FATAL,"mpp_domains_util.inc(search_update_overlap): position should be CENTER|CORNER|EAST|NORTH")
    end select

    search_update_overlap => update_ref

    do
       if(whalo == search_update_overlap%whalo .AND. ehalo == search_update_overlap%ehalo .AND. &
          shalo == search_update_overlap%shalo .AND. nhalo == search_update_overlap%nhalo ) then
          exit   ! found an overlap with matching halo sizes
       end if
       !--- if not found, switch to next
       if(.NOT. ASSOCIATED(search_update_overlap%next)) then
          allocate(search_update_overlap%next)
          search_update_overlap => search_update_overlap%next
          if(domain%fold .NE. 0) then
             call compute_overlaps(domain, position, search_update_overlap, check, &
                                   ishift, jshift, 0, 0, whalo, ehalo, shalo, nhalo)
          else
             call set_overlaps(domain, update_ref, search_update_overlap, whalo, ehalo, shalo, nhalo )
          end if
          exit
       end if
       search_update_overlap => search_update_overlap%next
    end do

  end function search_update_overlap
!#######################################################################
! this routine returns the check overlap at a given position
  function search_check_overlap(domain, position)
    type(domain2d), intent(in) :: domain
    integer,        intent(in) :: position
    type(overlapSpec), pointer :: search_check_overlap

    select case(position)
    case (CENTER)
       search_check_overlap => NULL()
    case (CORNER)
       search_check_overlap => domain%check_C
    case (NORTH)
       search_check_overlap => domain%check_N
    case (EAST)
       search_check_overlap => domain%check_E
    case default
       call mpp_error(FATAL,"mpp_domains_util.inc(search_check_overlap): position should be CENTER|CORNER|EAST|NORTH")
    end select

  end function search_check_overlap

!#######################################################################
! this routine returns the bound overlap at a given position
  function search_bound_overlap(domain, position)
    type(domain2d), intent(in) :: domain
    integer,        intent(in) :: position
    type(overlapSpec), pointer :: search_bound_overlap

    select case(position)
    case (CENTER)
       search_bound_overlap => NULL()
    case (CORNER)
       search_bound_overlap => domain%bound_C
    case (NORTH)
       search_bound_overlap => domain%bound_N
    case (EAST)
       search_bound_overlap => domain%bound_E
    case default
       call mpp_error(FATAL,"mpp_domains_util.inc(search_bound_overlap): position should be CENTER|CORNER|EAST|NORTH")
    end select

  end function search_bound_overlap
!########################################################################
! return the tile_id on current pe
  function mpp_get_tile_id(domain)
    type(domain2d), intent(in) :: domain
    integer, dimension(size(domain%tile_id(:))) :: mpp_get_tile_id

    mpp_get_tile_id = domain%tile_id
  end function mpp_get_tile_id
!#######################################################################

  subroutine mpp_get_tile_list(domain, tiles)
    type(domain2d), intent(in)    :: domain
    integer,        intent(inout) :: tiles(:)
    integer :: i

    if( size(tiles(:)).NE.size(domain%list(:)) ) &
         call mpp_error( FATAL, 'mpp_get_tile_list: tiles array size does not match domain.' )
    do i = 1, size(tiles(:))
       if( size(domain%list(i-1)%tile_id(:)) > 1 ) call mpp_error( FATAL, &
            'mpp_get_tile_list: only support one-tile-per-pe now, contact developer')
       tiles(i) = domain%list(i-1)%tile_id(1)
    end do

  end subroutine mpp_get_tile_list
!########################################################################
! return number of tiles in mosaic
  function mpp_get_ntile_count(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_ntile_count

    mpp_get_ntile_count = domain%ntiles
  end function mpp_get_ntile_count

!########################################################################
! return number of tiles on current pe
  function mpp_get_current_ntile(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_current_ntile

    mpp_get_current_ntile = size(domain%tile_id(:))
  end function mpp_get_current_ntile

!#######################################################################
  function mpp_domain_is_tile_root_pe(domain)
    type(domain2d), intent(in) :: domain
    logical :: mpp_domain_is_tile_root_pe

    mpp_domain_is_tile_root_pe = domain%pe == domain%tile_root_pe
  end function mpp_domain_is_tile_root_pe
!#########################################################################
! return number of processors used on current tile.
  function mpp_get_tile_npes(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_tile_npes
    integer :: i, tile

    !--- when more than one tile resides on this pe, the count is limited to this pe.
    if( size(domain%tile_id(:)) > 1) then
       mpp_get_tile_npes = 1
    else
       mpp_get_tile_npes = 0
       tile = domain%tile_id(1)
       do i = 0, size(domain%list(:))-1
          if( tile == domain%list(i)%tile_id(1) ) mpp_get_tile_npes = mpp_get_tile_npes + 1
       end do
    end if

  end function mpp_get_tile_npes
!########################################################################
! get the processor list used on the current tile.
  subroutine mpp_get_tile_pelist(domain, pelist)
    type(domain2d), intent(in)    :: domain
    integer,        intent(inout) :: pelist(:)
    integer :: npes_on_tile, i, pos, tile

    npes_on_tile = mpp_get_tile_npes(domain)
    if( size(pelist(:)) .NE. npes_on_tile ) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_tile_pelist): size(pelist) does not equal npes on current tile")
    tile = domain%tile_id(1)
    pos = 0
    do i = 0, size(domain%list(:))-1
       if( tile == domain%list(i)%tile_id(1)) then
          pos = pos + 1
          pelist(pos) = domain%list(i)%pe
       end if
    end do

  end subroutine mpp_get_tile_pelist
!#####################################################################
  subroutine mpp_get_tile_compute_domains( domain, xbegin, xend, ybegin, yend, position )
    type(domain2D),  intent(in) :: domain
    integer,        intent(out) :: xbegin(:), xend(:), ybegin(:), yend(:)
    integer, intent(in ), optional :: position
    integer :: i, pos, tile, npes_on_tile, ishift, jshift

    call mpp_get_domain_shift( domain, ishift, jshift, position )

    if( .NOT.module_is_initialized ) &
         call mpp_error( FATAL, 'mpp_get_compute_domains2D: must first call mpp_domains_init.' )

    npes_on_tile = mpp_get_tile_npes(domain)
    if( size(xbegin(:)) .NE. npes_on_tile ) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_compute_domains2D): size(xbegin) does not equal npes on current tile")
    if( size(xend(:)) .NE. npes_on_tile ) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_compute_domains2D): size(xend) does not equal npes on current tile")
    if( size(ybegin(:)) .NE. npes_on_tile ) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_compute_domains2D): size(ybegin) does not equal npes on current tile")
    if( size(yend(:)) .NE. npes_on_tile ) call mpp_error(FATAL, &
         "mpp_domains_util.inc(mpp_get_compute_domains2D): size(yend) does not equal npes on current tile")

    tile = domain%tile_id(1)
    pos = 0
    do i = 0, size(domain%list(:))-1
       if( tile == domain%list(i)%tile_id(1)) then
          pos = pos + 1
          xbegin(pos) = domain%list(i)%x(1)%compute%begin
          xend  (pos) = domain%list(i)%x(1)%compute%end + ishift
          ybegin(pos) = domain%list(i)%y(1)%compute%begin
          yend  (pos) = domain%list(i)%y(1)%compute%end + jshift
       end if
    end do

  end subroutine mpp_get_tile_compute_domains
!#############################################################################
  function mpp_get_num_overlap(domain, action, p, position)
    type(domain2d), intent(in) :: domain
    integer,        intent(in) :: action
    integer,        intent(in) :: p
    integer, optional, intent(in) :: position
    integer :: mpp_get_num_overlap
    type(overlapSpec), pointer :: update => NULL()
    integer :: pos

    pos = CENTER
    if(present(position)) pos = position
    select case(pos)
    case (CENTER)
       update => domain%update_T
    case (CORNER)
       update => domain%update_C
    case (EAST)
       update => domain%update_E
    case (NORTH)
       update => domain%update_N
    case default
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_num_overlap): invalid option of position")
    end select

    if(action == EVENT_SEND) then
       if(p < 1 .OR. p > update%nsend) call mpp_error( FATAL, &
            "mpp_domains_mod(mpp_get_num_overlap): p should be between 1 and update%nsend")
       mpp_get_num_overlap = update%send(p)%count
    else if(action == EVENT_RECV) then
       if(p < 1 .OR. p > update%nrecv) call mpp_error( FATAL, &
            "mpp_domains_mod(mpp_get_num_overlap): p should be between 1 and update%nrecv")
       mpp_get_num_overlap = update%recv(p)%count
    else
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_num_overlap): invalid option of action")
    end if

  end function mpp_get_num_overlap
!#############################################################################
  subroutine mpp_get_update_size(domain, nsend, nrecv, position)
    type(domain2d), intent(in)  :: domain
    integer,        intent(out) :: nsend, nrecv
    integer, optional, intent(in) :: position
    integer :: pos

    pos = CENTER
    if(present(position)) pos = position
    select case(pos)
    case (CENTER)
       nsend = domain%update_T%nsend
       nrecv = domain%update_T%nrecv
    case (CORNER)
       nsend = domain%update_C%nsend
       nrecv = domain%update_C%nrecv
    case (EAST)
       nsend = domain%update_E%nsend
       nrecv = domain%update_E%nrecv
    case (NORTH)
       nsend = domain%update_N%nsend
       nrecv = domain%update_N%nrecv
    case default
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_size): invalid option of position")
    end select

  end subroutine mpp_get_update_size
!#############################################################################
  subroutine mpp_get_update_pelist(domain, action, pelist, position)
    type(domain2d), intent(in)    :: domain
    integer,        intent(in)    :: action
    integer,        intent(inout) :: pelist(:)
    integer, optional, intent(in) :: position
    type(overlapSpec), pointer :: update => NULL()
    integer :: pos, p

    pos = CENTER
    if(present(position)) pos = position
    select case(pos)
    case (CENTER)
       update => domain%update_T
    case (CORNER)
       update => domain%update_C
    case (EAST)
       update => domain%update_E
    case (NORTH)
       update => domain%update_N
    case default
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_pelist): invalid option of position")
    end select

    if(action == EVENT_SEND) then
       if( size(pelist(:)) .NE. update%nsend ) call mpp_error( FATAL, &
            "mpp_domains_mod(mpp_get_update_pelist): size of pelist does not match update%nsend")
       do p = 1, update%nsend
          pelist(p) = update%send(p)%pe
       end do
    else if(action == EVENT_RECV) then
       if( size(pelist(:)) .NE. update%nrecv ) call mpp_error( FATAL, &
            "mpp_domains_mod(mpp_get_update_pelist): size of pelist does not match update%nrecv")
       do p = 1, update%nrecv
          pelist(p) = update%recv(p)%pe
       end do
    else
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_update_pelist): invalid option of action")
    end if

  end subroutine mpp_get_update_pelist
!#############################################################################
  subroutine mpp_get_overlap(domain, action, p, is, ie, js, je, dir, rot, position)
    type(domain2d), intent(in)  :: domain
    integer,        intent(in)  :: action
    integer,        intent(in)  :: p
    integer, dimension(:), intent(out) :: is, ie, js, je
    integer, dimension(:), intent(out) :: dir, rot
    integer, optional, intent(in) :: position
    type(overlapSpec),  pointer :: update  => NULL()
    type(overlap_type), pointer :: overlap => NULL()
    integer :: count, pos

    pos = CENTER
    if(present(position)) pos = position
    select case(pos)
    case (CENTER)
       update => domain%update_T
    case (CORNER)
       update => domain%update_C
    case (EAST)
       update => domain%update_E
    case (NORTH)
       update => domain%update_N
    case default
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): invalid option of position")
    end select

    if(action == EVENT_SEND) then
       overlap => update%send(p)
    else if(action == EVENT_RECV) then
       overlap => update%recv(p)
    else
       call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): invalid option of action")
    end if

    count = overlap%count
    if( size(is(:)) .NE. count .OR. size(ie(:)) .NE. count .OR. size(js(:)) .NE. count .OR. &
        size(je(:)) .NE. count .OR. size(dir(:)) .NE. count .OR. size(rot(:)) .NE. count ) &
         call mpp_error( FATAL, "mpp_domains_mod(mpp_get_overlap): size mismatch between number of overlap and array size")

    is  = overlap%is      (1:count)
    ie  = overlap%ie      (1:count)
    js  = overlap%js      (1:count)
    je  = overlap%je      (1:count)
    dir = overlap%dir     (1:count)
    rot = overlap%rotation(1:count)

  end subroutine mpp_get_overlap
!##################################################################
  function mpp_get_domain_name(domain)
    type(domain2d), intent(in) :: domain
    character(len=NAME_LENGTH) :: mpp_get_domain_name

    mpp_get_domain_name = domain%name
  end function mpp_get_domain_name

!#################################################################
  function mpp_get_domain_root_pe(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_domain_root_pe

    mpp_get_domain_root_pe = domain%list(0)%pe
  end function mpp_get_domain_root_pe

!#################################################################
  function mpp_get_domain_npes(domain)
    type(domain2d), intent(in) :: domain
    integer :: mpp_get_domain_npes

    mpp_get_domain_npes = size(domain%list(:))
  end function mpp_get_domain_npes
!################################################################
  subroutine mpp_get_domain_pelist(domain, pelist)
    type(domain2d), intent(in)  :: domain
    integer,        intent(out) :: pelist(:)
    integer :: p

    if( size(pelist(:)) .NE. size(domain%list(:)) ) &
         call mpp_error(FATAL, "mpp_get_domain_pelist: size(pelist(:)) .NE. size(domain%list(:)) ")
    do p = 0, size(domain%list(:))-1
       pelist(p+1) = domain%list(p)%pe
    end do

  end subroutine mpp_get_domain_pelist
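! Illustrative usage sketch (not part of the original source): allocate the
! pelist using mpp_get_domain_npes before filling it; pes is hypothetical.
!
!   integer, allocatable :: pes(:)
!   allocate( pes(mpp_get_domain_npes(domain)) )
!   call mpp_get_domain_pelist( domain, pes )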
!#################################################################
  function mpp_get_io_domain_layout(domain)
    type(domain2d), intent(in) :: domain
    integer, dimension(2) :: mpp_get_io_domain_layout

    mpp_get_io_domain_layout = domain%io_layout
  end function mpp_get_io_domain_layout
!################################################################
  function get_rank_send(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
    type(domain2D),    intent(in)  :: domain
    type(overlapSpec), intent(in)  :: overlap_x, overlap_y
    integer,           intent(out) :: rank_x, rank_y, ind_x, ind_y
    integer :: get_rank_send
    integer :: nlist, nsend_x, nsend_y

    nlist   = size(domain%list(:))
    nsend_x = overlap_x%nsend
    nsend_y = overlap_y%nsend

    if(nsend_x>0) rank_x = overlap_x%send(1)%pe - domain%pe
    if(nsend_y>0) rank_y = overlap_y%send(1)%pe - domain%pe
    if(rank_x .LT. 0) rank_x = rank_x + nlist
    if(rank_y .LT. 0) rank_y = rank_y + nlist
    get_rank_send = min(rank_x, rank_y)

    if(get_rank_send < nlist+1) then
       if(nsend_x>0) ind_x = 1
       if(nsend_y>0) ind_y = 1
    end if

  end function get_rank_send
!############################################################################
  function get_rank_recv(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
    type(domain2D),    intent(in)  :: domain
    type(overlapSpec), intent(in)  :: overlap_x, overlap_y
    integer,           intent(out) :: rank_x, rank_y, ind_x, ind_y
    integer :: get_rank_recv
    integer :: nlist, nrecv_x, nrecv_y

    nlist   = size(domain%list(:))
    nrecv_x = overlap_x%nrecv
    nrecv_y = overlap_y%nrecv

    if(nrecv_x>0) then
       rank_x = overlap_x%recv(1)%pe - domain%pe
       if(rank_x .LE. 0) rank_x = rank_x + nlist
    end if
    if(nrecv_y>0) then
       rank_y = overlap_y%recv(1)%pe - domain%pe
       if(rank_y .LE. 0) rank_y = rank_y + nlist
    end if
    get_rank_recv = max(rank_x, rank_y)

    if(get_rank_recv < nlist+1) then
       if(nrecv_x>0) ind_x = 1
       if(nrecv_y>0) ind_y = 1
    end if

  end function get_rank_recv
  function get_vector_recv(domain, update_x, update_y, ind_x, ind_y, start_pos, pelist)
    type(domain2D),    intent(in)  :: domain
    type(overlapSpec), intent(in)  :: update_x, update_y
    integer,           intent(out) :: ind_x(:), ind_y(:)
    integer,           intent(out) :: start_pos(:)
    integer, optional, intent(out) :: pelist(:)
    integer :: get_vector_recv
    integer :: nlist, nrecv_x, nrecv_y, ntot, n
    integer :: ix, iy, rank_x, rank_y, cur_pos

    nlist   = size(domain%list(:))
    nrecv_x = update_x%nrecv
    nrecv_y = update_y%nrecv

    ntot = nrecv_x + nrecv_y

    if(ix <= nrecv_x ) then
       rank_x = update_x%recv(ix)%pe-domain%pe
       if(rank_x .LE. 0) rank_x = rank_x + nlist
    end if
    if(iy <= nrecv_y ) then
       rank_y = update_y%recv(iy)%pe-domain%pe
       if(rank_y .LE. 0) rank_y = rank_y + nlist
    end if
    get_vector_recv = get_vector_recv + 1
    start_pos(get_vector_recv) = cur_pos
    if( rank_x == rank_y ) then
       ind_x (get_vector_recv) = ix
       ind_y (get_vector_recv) = iy
       cur_pos = cur_pos + update_x%recv(ix)%totsize + update_y%recv(iy)%totsize
    else if ( rank_x > rank_y ) then
       ind_x (get_vector_recv) = ix
       ind_y (get_vector_recv) = -1
       cur_pos = cur_pos + update_x%recv(ix)%totsize
    else if ( rank_y > rank_x ) then
       ind_x (get_vector_recv) = -1
       ind_y (get_vector_recv) = iy
       cur_pos = cur_pos + update_y%recv(iy)%totsize
    end if

  end function get_vector_recv
  function get_vector_send(domain, update_x, update_y, ind_x, ind_y, start_pos, pelist)
    type(domain2D),    intent(in)  :: domain
    type(overlapSpec), intent(in)  :: update_x, update_y
    integer,           intent(out) :: ind_x(:), ind_y(:)
    integer,           intent(out) :: start_pos(:)
    integer, optional, intent(out) :: pelist(:)
    integer :: get_vector_send
    integer :: nlist, nsend_x, nsend_y, ntot, n
    integer :: ix, iy, rank_x, rank_y, cur_pos

    nlist   = size(domain%list(:))
    nsend_x = update_x%nsend
    nsend_y = update_y%nsend

    ntot = nsend_x + nsend_y

    if(ix <= nsend_x ) then
       rank_x = update_x%send(ix)%pe-domain%pe
       if(rank_x .LT. 0) rank_x = rank_x + nlist
    end if
    if(iy <= nsend_y ) then
       rank_y = update_y%send(iy)%pe-domain%pe
       if(rank_y .LT. 0) rank_y = rank_y + nlist
    end if
    get_vector_send = get_vector_send + 1
    start_pos(get_vector_send) = cur_pos

    if( rank_x == rank_y ) then
       ind_x (get_vector_send) = ix
       ind_y (get_vector_send) = iy
       cur_pos = cur_pos + update_x%send(ix)%totsize + update_y%send(iy)%totsize
    else if ( rank_x < rank_y ) then
       ind_x (get_vector_send) = ix
       ind_y (get_vector_send) = -1
       cur_pos = cur_pos + update_x%send(ix)%totsize
    else if ( rank_y < rank_x ) then
       ind_x (get_vector_send) = -1
       ind_y (get_vector_send) = iy
       cur_pos = cur_pos + update_y%send(iy)%totsize
    end if

  end function get_vector_send
!############################################################################
  function get_rank_unpack(domain, overlap_x, overlap_y, rank_x, rank_y, ind_x, ind_y)
    type(domain2D),    intent(in)  :: domain
    type(overlapSpec), intent(in)  :: overlap_x, overlap_y
    integer,           intent(out) :: rank_x, rank_y, ind_x, ind_y
    integer :: get_rank_unpack
    integer :: nlist, nrecv_x, nrecv_y

    nlist   = size(domain%list(:))
    nrecv_x = overlap_x%nrecv
    nrecv_y = overlap_y%nrecv

    if(nrecv_x>0) rank_x = overlap_x%recv(nrecv_x)%pe - domain%pe
    if(nrecv_y>0) rank_y = overlap_y%recv(nrecv_y)%pe - domain%pe
    if(rank_x .LE.0) rank_x = rank_x + nlist
    if(rank_y .LE.0) rank_y = rank_y + nlist
    get_rank_unpack = min(rank_x, rank_y)

    if(get_rank_unpack < nlist+1) then
       if(nrecv_x >0) ind_x = nrecv_x
       if(nrecv_y >0) ind_y = nrecv_y
    end if

  end function get_rank_unpack
  function get_mesgsize(overlap, do_dir)
    type(overlap_type), intent(in) :: overlap
    logical,            intent(in) :: do_dir(:)
    integer :: get_mesgsize
    integer :: n, dir

    get_mesgsize = 0
    do n = 1, overlap%count
       dir = overlap%dir(n)
       if(do_dir(dir)) then
          get_mesgsize = get_mesgsize + overlap%msgsize(n)
       end if
    end do

  end function get_mesgsize
!#############################################################################
  subroutine mpp_set_domain_symmetry(domain, symmetry)
    type(domain2D), intent(inout) :: domain
    logical,        intent(in   ) :: symmetry

    domain%symmetry = symmetry
  end subroutine mpp_set_domain_symmetry

  end subroutine mpp_copy_domain1D

!#################################################################
!z1l: This is not fully implemented. The current purpose is to make
  end subroutine mpp_copy_domain2D
!######################################################################
  subroutine set_group_update(group, domain)
    type(mpp_group_update_type), intent(inout) :: group
    type(domain2D),              intent(inout) :: domain
    integer :: nscalar, nvector, nlist
    integer :: nsend, nrecv, nsend_old, nrecv_old
    integer :: nsend_s, nsend_x, nsend_y
    integer :: nrecv_s, nrecv_x, nrecv_y
    integer :: update_buffer_pos, tot_recv_size, tot_send_size
    logical :: recv_s(8), send_s(8)
    logical :: recv_x(8), send_x(8), recv_y(8), send_y(8)
    integer :: i_s, i_x, i_y, rank_s, rank_x, rank_y, rank
    integer :: ind_s(3*MAXOVERLAP)
    integer :: ind_x(3*MAXOVERLAP)
    integer :: ind_y(3*MAXOVERLAP)
    integer :: buffer_pos_recv(3*MAXOVERLAP), buffer_pos_send(3*MAXOVERLAP)
    integer :: recv_size(3*MAXOVERLAP), send_size(3*MAXOVERLAP)
    integer :: position_x, position_y, npack, nunpack, dir
    integer :: pack_buffer_pos, unpack_buffer_pos
    integer :: omp_get_num_threads, nthreads
    type(overlapSpec), pointer :: update_s => NULL()
    type(overlapSpec), pointer :: update_x => NULL()
    type(overlapSpec), pointer :: update_y => NULL()
    nscalar = group%nscalar
    nvector = group%nvector

    !--- get the overlap data type
    select case(group%gridtype)
    case (BGRID_NE, BGRID_SW)
       position_x = CORNER
       position_y = CORNER
    case (CGRID_NE, CGRID_SW)
       position_x = EAST
       position_y = NORTH
    case (DGRID_NE, DGRID_SW)
       position_x = NORTH
       position_y = EAST
    case default
       call mpp_error(FATAL, "set_group_update: invalid value of gridtype")
    end select

    update_s => search_update_overlap(domain, group%whalo_s, group%ehalo_s, &
                                      group%shalo_s, group%nhalo_s, group%position)
    update_x => search_update_overlap(domain, group%whalo_v, group%ehalo_v, &
                                      group%shalo_v, group%nhalo_v, position_x)
    update_y => search_update_overlap(domain, group%whalo_v, group%ehalo_v, &
                                      group%shalo_v, group%nhalo_v, position_y)
    if(nscalar > 0) then
       recv_s = group%recv_s
    end if
    if(nvector > 0) then
       recv_x = group%recv_x
       recv_y = group%recv_y
    end if

    nlist = size(domain%list(:))
    group%initialized = .true.
    nsend_s = 0; nsend_x = 0; nsend_y = 0
    nrecv_s = 0; nrecv_x = 0; nrecv_y = 0
    if(nscalar > 0) then
       !--- This check could not be done because of memory domain
       ! if( group%isize_s .NE. (group%ie_s-group%is_s+1) .OR. group%jsize_s .NE. (group%je_s-group%js_s+1)) &
       !      call mpp_error(FATAL, "set_group_update: mismatch of size of the field and domain memory domain")
       nsend_s = update_s%nsend
       nrecv_s = update_s%nrecv
    end if

    !--- ksize_s must equal ksize_v
    if(nvector > 0 .AND. nscalar > 0) then
       if(group%ksize_s .NE. group%ksize_v) then
          call mpp_error(FATAL, "set_group_update: ksize_s and ksize_v are not equal")
       end if
       ksize = group%ksize_s
    else if (nscalar > 0) then
       ksize = group%ksize_s
    else if (nvector > 0) then
       ksize = group%ksize_v
    else
       call mpp_error(FATAL, "set_group_update: nscalar and nvector are all 0")
    end if
    nthreads = 1
    !$ nthreads = omp_get_num_threads()
    if( nthreads > nscalar+nvector ) then
       group%k_loop_inside = .FALSE.
    else
       group%k_loop_inside = .TRUE.
    endif
    if(nvector > 0) then
       !--- This check could not be done because of memory domain
       ! if( group%isize_x .NE. (group%ie_x-group%is_x+1) .OR. group%jsize_x .NE. (group%je_x-group%js_x+1)) &
       !      call mpp_error(FATAL, "set_group_update: mismatch of size of the fieldx and domain memory domain")
       ! if( group%isize_y .NE. (group%ie_y-group%is_y+1) .OR. group%jsize_y .NE. (group%je_y-group%js_y+1)) &
       !      call mpp_error(FATAL, "set_group_update: mismatch of size of the fieldy and domain memory domain")
       nsend_x = update_x%nsend
       nrecv_x = update_x%nrecv
       nsend_y = update_y%nsend
       nrecv_y = update_y%nrecv
    end if
    !figure out message size for each processor.
    ntot = nrecv_s + nrecv_x + nrecv_y
    if(ntot > 3*MAXOVERLAP) call mpp_error(FATAL, "set_group_update: ntot is greater than 3*MAXOVERLAP")

    if( i_s <= nrecv_s ) then
       rank_s = update_s%recv(i_s)%pe-domain%pe
       if(rank_s .LE. 0) rank_s = rank_s + nlist
    end if
    if( i_x <= nrecv_x ) then
       rank_x = update_x%recv(i_x)%pe-domain%pe
       if(rank_x .LE. 0) rank_x = rank_x + nlist
    end if
    if( i_y <= nrecv_y ) then
       rank_y = update_y%recv(i_y)%pe-domain%pe
       if(rank_y .LE. 0) rank_y = rank_y + nlist
    end if

    rank = maxval((/rank_s, rank_x, rank_y/))
    if(rank == rank_s) then
    if(rank == rank_x) then
    if(rank == rank_y) then

    update_buffer_pos = 0
    if(m>0) msgsize_s = get_mesgsize(update_s%recv(m), recv_s)*ksize*nscalar
    if(m>0) msgsize_x = get_mesgsize(update_x%recv(m), recv_x)*ksize*nvector
    if(m>0) msgsize_y = get_mesgsize(update_y%recv(m), recv_y)*ksize*nvector
    msgsize = msgsize_s + msgsize_x + msgsize_y
    tot_recv_size = tot_recv_size + msgsize
    if(nrecv > MAXOVERLAP) then
       call mpp_error(FATAL, "set_group_update: nrecv is greater than MAXOVERLAP, increase MAXOVERLAP")
    end if
    group%recv_size(nrecv)       = msgsize
    group%buffer_pos_recv(nrecv) = update_buffer_pos
    update_buffer_pos            = update_buffer_pos + msgsize

    !--- setup for unpack
    unpack_buffer_pos = 0
    overptr => update_s%recv(m)
    do n = 1, overptr%count
       dir = overptr%dir(n)
       if(recv_s(dir)) then
          nunpack = nunpack + 1
          if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
               "set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 1")
          group%unpack_type(nunpack)       = FIELD_S
          group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
          group%unpack_rotation(nunpack)   = overptr%rotation(n)
          group%unpack_is(nunpack)         = overptr%is(n)
          group%unpack_ie(nunpack)         = overptr%ie(n)
          group%unpack_js(nunpack)         = overptr%js(n)
          group%unpack_je(nunpack)         = overptr%je(n)
          group%unpack_size(nunpack)       = overptr%msgsize(n)*nscalar
          unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
       end if
    end do

    overptr => update_x%recv(m)
    do n = 1, overptr%count
       dir = overptr%dir(n)
       if(recv_x(dir)) then
          nunpack = nunpack + 1
          if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
               "set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 2")
          group%unpack_type(nunpack)       = FIELD_X
          group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
          group%unpack_rotation(nunpack)   = overptr%rotation(n)
          group%unpack_is(nunpack)         = overptr%is(n)
          group%unpack_ie(nunpack)         = overptr%ie(n)
          group%unpack_js(nunpack)         = overptr%js(n)
          group%unpack_je(nunpack)         = overptr%je(n)
          group%unpack_size(nunpack)       = overptr%msgsize(n)*nvector
          unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
       end if
    end do

    overptr => update_y%recv(m)
    do n = 1, overptr%count
       dir = overptr%dir(n)
       if(recv_y(dir)) then
          nunpack = nunpack + 1
          if(nunpack > MAXOVERLAP) call mpp_error(FATAL, &
               "set_group_update: nunpack is greater than MAXOVERLAP, increase MAXOVERLAP 3")
          group%unpack_type(nunpack)       = FIELD_Y
          group%unpack_buffer_pos(nunpack) = unpack_buffer_pos
          group%unpack_rotation(nunpack)   = overptr%rotation(n)
          group%unpack_is(nunpack)         = overptr%is(n)
          group%unpack_ie(nunpack)         = overptr%ie(n)
          group%unpack_js(nunpack)         = overptr%js(n)
          group%unpack_je(nunpack)         = overptr%je(n)
          group%unpack_size(nunpack)       = overptr%msgsize(n)*nvector
          unpack_buffer_pos = unpack_buffer_pos + group%unpack_size(nunpack)*ksize
       end if
    end do

    group%nunpack = nunpack
    if(update_buffer_pos .NE. unpack_buffer_pos ) call mpp_error(FATAL, &
         "set_group_update: update_buffer_pos .NE. unpack_buffer_pos")

    !figure out message size for each processor.
    ntot = nsend_s + nsend_x + nsend_y

    if( i_s <= nsend_s ) then
       rank_s = update_s%send(i_s)%pe-domain%pe
       if(rank_s .LT. 0) rank_s = rank_s + nlist
    end if
    if( i_x <= nsend_x ) then
       rank_x = update_x%send(i_x)%pe-domain%pe
       if(rank_x .LT. 0) rank_x = rank_x + nlist
    end if
    if( i_y <= nsend_y ) then
       rank_y = update_y%send(i_y)%pe-domain%pe
       if(rank_y .LT. 0) rank_y = rank_y + nlist
    end if

    rank = minval((/rank_s, rank_x, rank_y/))
    if(rank == rank_s) then
    if(rank == rank_x) then
    if(rank == rank_y) then
    if(m>0) msgsize_s = get_mesgsize(update_s%send(m), send_s)*ksize*nscalar
    if(m>0) msgsize_x = get_mesgsize(update_x%send(m), send_x)*ksize*nvector
    if(m>0) msgsize_y = get_mesgsize(update_y%send(m), send_y)*ksize*nvector
    msgsize = msgsize_s + msgsize_x + msgsize_y
    tot_send_size = tot_send_size + msgsize
    if(nsend > MAXOVERLAP) then
       call mpp_error(FATAL, "set_group_update: nsend is greater than MAXOVERLAP, increase MAXOVERLAP")
    end if
    group%buffer_pos_send(nsend) = update_buffer_pos
    group%send_size(nsend)       = msgsize
    update_buffer_pos            = update_buffer_pos + msgsize

    pack_buffer_pos = unpack_buffer_pos
    overptr => update_s%send(m)
    do n = 1, overptr%count
       dir = overptr%dir(n)
       if(send_s(dir)) then
          npack = npack + 1
          if(npack > MAXOVERLAP) call mpp_error(FATAL, &
               "set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 1")
          group%pack_type(npack)       = FIELD_S
          group%pack_buffer_pos(npack) = pack_buffer_pos
          group%pack_rotation(npack)   = overptr%rotation(n)
          group%pack_is(npack)         = overptr%is(n)
          group%pack_ie(npack)         = overptr%ie(n)
          group%pack_js(npack)         = overptr%js(n)
          group%pack_je(npack)         = overptr%je(n)
       end if
    end do
    overptr => update_x%send(m)
    do n = 1, overptr%count
       dir = overptr%dir(n)
       !--- nonsym_edge update is not for rotation of 90 or -90 degree ( cubic sphere grid )
       if( group%nonsym_edge .and. (overptr%rotation(n)==NINETY .or. &
           overptr%rotation(n)==MINUS_NINETY) ) then
          call mpp_error(FATAL, 'set_group_update: flags=NONSYMEDGEUPDATE is not compatible '// &
               'with 90 or -90 degree rotation (normally cubic sphere grid' )
       end if
       if(send_x(dir)) then
          npack = npack + 1
          if(npack > MAXOVERLAP) call mpp_error(FATAL, &
               "set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 2")
          group%pack_type(npack)       = FIELD_X
          group%pack_buffer_pos(npack) = pack_buffer_pos
          group%pack_rotation(npack)   = overptr%rotation(n)
          group%pack_is(npack)         = overptr%is(n)
          group%pack_ie(npack)         = overptr%ie(n)
          group%pack_js(npack)         = overptr%js(n)
          group%pack_je(npack)         = overptr%je(n)
       end if
    end do
    overptr => update_y%send(m)
    do n = 1, overptr%count
       dir = overptr%dir(n)
       if( group%nonsym_edge .and. (overptr%rotation(n)==NINETY .or. &
           overptr%rotation(n)==MINUS_NINETY) ) then
          call mpp_error(FATAL, 'set_group_update: flags=NONSYMEDGEUPDATE is not compatible '// &
               'with 90 or -90 degree rotation (normally cubic sphere grid' )
       end if
       if(send_y(dir)) then
          npack = npack + 1
          if(npack > MAXOVERLAP) call mpp_error(FATAL, &
               "set_group_update: npack is greater than MAXOVERLAP, increase MAXOVERLAP 3")
          group%pack_type(npack)       = FIELD_Y
          group%pack_buffer_pos(npack) = pack_buffer_pos
          group%pack_rotation(npack)   = overptr%rotation(n)
          group%pack_is(npack)         = overptr%is(n)
          group%pack_ie(npack)         = overptr%ie(n)
          group%pack_js(npack)         = overptr%js(n)
          group%pack_je(npack)         = overptr%je(n)
       end if
    end do
    if(update_buffer_pos .NE. pack_buffer_pos ) call mpp_error(FATAL, &
         "set_group_update: update_buffer_pos .NE. pack_buffer_pos")

    !--- make sure the buffer is large enough
    if( mpp_domains_stack_hwm > mpp_domains_stack_size ) &
         call mpp_error( FATAL, 'set_group_update: mpp_domains_stack overflow, '// &
              'call mpp_domains_set_stack_size() from all PEs.' )

    group%tot_msgsize = tot_recv_size+tot_send_size

  end subroutine set_group_update
!######################################################################
  subroutine mpp_clear_group_update(group)
    type(mpp_group_update_type), intent(inout) :: group

    group%initialized = .false.

  end subroutine mpp_clear_group_update

!#####################################################################
  function mpp_group_update_initialized(group)
    type(mpp_group_update_type), intent(in) :: group
    logical :: mpp_group_update_initialized

    mpp_group_update_initialized = group%initialized
  end function mpp_group_update_initialized

!#####################################################################
  function mpp_group_update_is_set(group)
    type(mpp_group_update_type), intent(in) :: group
    logical :: mpp_group_update_is_set

    mpp_group_update_is_set = (group%nscalar > 0 .OR. group%nvector > 0)
  end function mpp_group_update_is_set
subroutine, public copy(self, rhs)
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT end
subroutine, public read_data(clim_type, src_field, hdata, nt, i, Time)
read_data receives various climate data as inputs and returns a horizontally interpolated climatology...
integer(long), parameter true
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_DO_REDISTRIBUTE_3D_(f_in, f_out, d_comm, d_type) integer(LONG_KIND), intent(in) ::f_in(:), f_out(:) type(DomainCommunicator2D), intent(in) ::d_comm MPP_TYPE_, intent(in) ::d_type MPP_TYPE_ ::field_in(d_comm%domain_in%x(1)%data%begin:d_comm%domain_in%x(1)%data%end, &d_comm%domain_in%y(1)%data%begin:d_comm%domain_in%y(1)%data%end, d_comm%ke) pointer(ptr_field_in, field_in) MPP_TYPE_ ::field_out(d_comm%domain_out%x(1)%data%begin:d_comm%domain_out%x(1)%data%end, &d_comm%domain_out%y(1)%data%begin:d_comm%domain_out%y(1)%data%end, d_comm%ke) pointer(ptr_field_out, field_out) type(domain2D), pointer ::domain_in, domain_out integer ::i, j, k, l, n, l_size integer ::is, ie, js, je integer ::ke integer ::list, pos, msgsize integer ::to_pe, from_pe MPP_TYPE_ ::buffer(size(mpp_domains_stack(:))) pointer(ptr, buffer) integer ::buffer_pos, wordlen, errunit!fix ke errunit=stderr() l_size=size(f_out(:)) ! equal to size(f_in(:)) ke=d_comm%ke domain_in=> d_comm domain_in
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************! this routine is used to retrieve scalar boundary data for symmetric domain. subroutine MPP_GET_BOUNDARY_2D_(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &position, complete, tile_count) type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::field(:,:) MPP_TYPE_, intent(inout), optional ::ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:) integer, intent(in), optional ::flags, position, tile_count logical, intent(in), optional ::complete MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_, allocatable, dimension(:,:) ::ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D integer ::xcount, ycount integer ::ntile logical ::need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer integer(LONG_KIND), dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save ::f_addrs=-9999 integer(LONG_KIND), dimension(4, MAX_DOMAIN_FIELDS, MAX_TILES), save ::b_addrs=-9999 integer, save ::bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags integer ::buffer_size(4) integer ::max_ntile, tile, update_position, ishift, jshift logical ::do_update, is_complete, set_mismatch character(len=3) ::text MPP_TYPE_ ::d_type type(overlapSpec), pointer ::bound=> NULL() ntile
real(r8), dimension(cast_m, cast_n) p
integer(long), parameter false
l_size ! loop over number of fields ke do je do ie to je n if(.NOT. d_comm%R_do_buf(list)) cycle from_pe
logical module_is_initialized
l_size ! loop over number of fields ke do je do ie to to_pe
character(len=128) version
real(double), parameter zero
l_size ! loop over number of fields ke do je do ie to is
integer, parameter, public global
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call mpp_error(FATAL, 'MPP_TRANSMIT:you cannot transmit to ANY_PE using MPI.') else if(to_pe.NE.NULL_PE) then !no other valid cases except NULL_PE call mpp_error(FATAL
l_size ! loop over number of fields ke do je do ie to je msgsize
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this case
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> id
integer mpp_domains_stack_size
real(double), parameter one
integer, dimension(:), pointer io_layout
logical function received(this, seqno)
integer, save, private isc
type(field_def), target, save root
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this ! since that attempts to gather all data on PE size(field%axes(:)) axsiz(i)
integer, save, private jsc
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible intent(inout) a
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> dimension(MAX_DOMAIN_FIELDS)
real(kind_real), parameter bound
integer, parameter, public cyclic
integer, dimension(:), allocatable pelist
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If not
l_size ! loop over number of fields ke do je do ie pos
integer, dimension(:), pointer layout
integer nthread_control_loop
real per
Longitude of perihelion with respect to autumnal equinox in NH [degrees].
subroutine, public some(xmap, some_arr, grid_id)
l_size ! loop over number of fields ke do je do ie to js
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible intent(inout) MPP_BROADCAST begin