!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
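! MPP_DO_GET_BOUNDARY_AD_3D_ is the adjoint (reverse-mode) counterpart of
! the forward get_boundary operation: boundary-buffer contributions are
! accumulated back into the field, and each buffer entry is zeroed once
! its contribution has been transferred.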
subroutine MPP_DO_GET_BOUNDARY_AD_3D_( f_addrs, domain, bound, b_addrs, bsize, ke, d_type)
  type(domain2D),    intent(in) :: domain
  type(overlapSpec), intent(in) :: bound
  integer,           intent(in) :: bsize(:), ke
  MPP_TYPE_,         intent(in) :: d_type ! creates unique interface

  MPP_TYPE_ :: field(bound%xbegin:bound%xend, bound%ybegin:bound%yend, ke)
  MPP_TYPE_ :: ebuffer(bsize(1), ke), sbuffer(bsize(2), ke), wbuffer(bsize(3), ke), nbuffer(bsize(4), ke)
  pointer(ptr_field, field)
  pointer(ptr_ebuffer, ebuffer)
  pointer(ptr_sbuffer, sbuffer)
  pointer(ptr_wbuffer, wbuffer)
  pointer(ptr_nbuffer, nbuffer)
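  !--- Cray pointers alias the raw addresses carried in f_addrs/b_addrs onto
  !--- typed array views, so one routine instance can service a group of
  !--- fields without copying.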
  MPP_TYPE_ :: buffer(size(mpp_domains_stack(:)))
  pointer( ptr, buffer )
  ptr = LOC(mpp_domains_stack)
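  !--- the shared mpp_domains_stack provides the pack/unpack scratch buffer.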
  l_size = size(f_addrs,1)
  !---- determine recv(1) based on b_addrs ( east boundary )
  num = count(b_addrs(1,:,1) == 0)
  if( num == 0 ) then
     recv(1) = .true.
  else if( num == l_size ) then
     recv(1) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary: number of ebuffer with null address should be 0 or l_size")
  endif

  !---- determine recv(2) based on b_addrs ( south boundary )
  num = count(b_addrs(2,:,1) == 0)
  if( num == 0 ) then
     recv(2) = .true.
  else if( num == l_size ) then
     recv(2) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary: number of sbuffer with null address should be 0 or l_size")
  endif

  !---- determine recv(3) based on b_addrs ( west boundary )
  num = count(b_addrs(3,:,1) == 0)
  if( num == 0 ) then
     recv(3) = .true.
  else if( num == l_size ) then
     recv(3) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary: number of wbuffer with null address should be 0 or l_size")
  endif

  !---- determine recv(4) based on b_addrs ( north boundary )
  num = count(b_addrs(4,:,1) == 0)
  if( num == 0 ) then
     recv(4) = .true.
  else if( num == l_size ) then
     recv(4) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary: number of nbuffer with null address should be 0 or l_size")
  endif
  nlist = size(domain%list(:))
  allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
  call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
  call mpp_sync_self(check=EVENT_RECV)
  if(msg1(m) .NE. msg2(m)) then
     print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
  write(outunit,*)"NOTE from mpp_do_get_boundary: message sizes are matched between send and recv for domain " &
  deallocate(msg1, msg2)
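  !--- the msg1/msg2 exchange is a message-size consistency check: expected
  !--- and posted sizes are compared per partner PE and any mismatch is
  !--- reported before the real data exchange.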
  call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, '// &
       'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
  buffer_pos = buffer_pos + msgsize
  buffer_recv_size = buffer_pos
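  !--- buffer_recv_size records where the posted receives end in the shared
  !--- buffer; packing for the sends continues from this offset, and the
  !--- unpack pass later rewinds to it.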
  ptr_field = f_addrs(l, tMe)
  ptr_field = f_addrs(l, tMe)
  ptr_field = f_addrs(l, tMe)
  case (ONE_HUNDRED_EIGHTY)
     ptr_field = f_addrs(l, tMe)
  !--- maybe we do not need the following stack size check.
  call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_OLD: mpp_domains_stack overflow, '// &
       'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
  call mpp_sync_self(check=EVENT_RECV)
  buffer_pos = buffer_recv_size
  ptr_ebuffer = b_addrs(1, l, tMe)
  ebuffer(index,k) = buffer(pos)
  ptr_sbuffer = b_addrs(2, l, tMe)
  sbuffer(index,k) = buffer(pos)
  ptr_wbuffer = b_addrs(3, l, tMe)
  wbuffer(index,k) = buffer(pos)
  ptr_nbuffer = b_addrs(4, l, tMe)
  nbuffer(index,k) = buffer(pos)
  call mpp_sync_self( )

end subroutine MPP_DO_GET_BOUNDARY_AD_3D_
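! A minimal sketch of the address-aliasing idiom used in both routines.
! Names are hypothetical; Cray pointers are a vendor extension rather than
! standard Fortran, so this relies on compiler support:
!
!   real    :: a(10)          ! storage owned by the caller
!   real    :: view(10)       ! typed view with no storage of its own
!   pointer(p_view, view)     ! Cray pointer pairs an address with the view
!   p_view = LOC(a)           ! aliasing: view(i) now refers to a(i)
!   view(1) = 42.             ! writes through to a(1)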
subroutine MPP_DO_GET_BOUNDARY_AD_3D_V_(f_addrsx, f_addrsy, domain, boundx, boundy, b_addrsx, b_addrsy, &
                                        bsizex, bsizey, ke, d_type, flags, gridtype)
  type(domain2D),    intent(in) :: domain
  type(overlapSpec), intent(in) :: boundx, boundy
  integer,           intent(in) :: bsizex(:), bsizey(:), ke
  MPP_TYPE_,         intent(in) :: d_type ! creates unique interface
  integer,           intent(in) :: flags
  integer,           intent(in) :: gridtype

  MPP_TYPE_ :: fieldx(boundx%xbegin:boundx%xend, boundx%ybegin:boundx%yend,ke)
  MPP_TYPE_ :: fieldy(boundy%xbegin:boundy%xend, boundy%ybegin:boundy%yend,ke)
  MPP_TYPE_ :: ebufferx(bsizex(1), ke), sbufferx(bsizex(2), ke), wbufferx(bsizex(3), ke), nbufferx(bsizex(4), ke)
  MPP_TYPE_ :: ebuffery(bsizey(1), ke), sbuffery(bsizey(2), ke), wbuffery(bsizey(3), ke), nbuffery(bsizey(4), ke)
  pointer(ptr_fieldx, fieldx)
  pointer(ptr_fieldy, fieldy)
  pointer(ptr_ebufferx, ebufferx)
  pointer(ptr_sbufferx, sbufferx)
  pointer(ptr_wbufferx, wbufferx)
  pointer(ptr_nbufferx, nbufferx)
  pointer(ptr_ebuffery, ebuffery)
  pointer(ptr_sbuffery, sbuffery)
  pointer(ptr_wbuffery, wbuffery)
  pointer(ptr_nbuffery, nbuffery)
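  !--- as in the scalar routine above, Cray pointers alias the x- and
  !--- y-component field and buffer addresses onto typed views.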
  logical :: recvx(4), sendx(4)
  logical :: recvy(4), sendy(4)
  integer :: nlist, buffer_pos, buffer_pos_old, pos, pos_, tMe, m
  integer :: rank_x, rank_y, cur_rank, ind_x, ind_y
  integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, num
  MPP_TYPE_ :: buffer(size(mpp_domains_stack(:)))
  pointer( ptr, buffer )
  ptr = LOC(mpp_domains_stack)
  l_size = size(f_addrsx,1)
  !---- determine recvx(1) based on b_addrsx ( east boundary )
  num = count(b_addrsx(1,:,1) == 0)
  if( num == 0 ) then
     recvx(1) = .true.
  else if( num == l_size ) then
     recvx(1) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of ebufferx with null address should be 0 or l_size")
  endif

  !---- determine recvx(2) based on b_addrsx ( south boundary )
  num = count(b_addrsx(2,:,1) == 0)
  if( num == 0 ) then
     recvx(2) = .true.
  else if( num == l_size ) then
     recvx(2) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of sbufferx with null address should be 0 or l_size")
  endif

  !---- determine recvx(3) based on b_addrsx ( west boundary )
  num = count(b_addrsx(3,:,1) == 0)
  if( num == 0 ) then
     recvx(3) = .true.
  else if( num == l_size ) then
     recvx(3) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of wbufferx with null address should be 0 or l_size")
  endif

  !---- determine recvx(4) based on b_addrsx ( north boundary )
  num = count(b_addrsx(4,:,1) == 0)
  if( num == 0 ) then
     recvx(4) = .true.
  else if( num == l_size ) then
     recvx(4) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of nbufferx with null address should be 0 or l_size")
  endif

  !---- determine recvy(1) based on b_addrsy ( east boundary )
  num = count(b_addrsy(1,:,1) == 0)
  if( num == 0 ) then
     recvy(1) = .true.
  else if( num == l_size ) then
     recvy(1) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of ebuffery with null address should be 0 or l_size")
  endif

  !---- determine recvy(2) based on b_addrsy ( south boundary )
  num = count(b_addrsy(2,:,1) == 0)
  if( num == 0 ) then
     recvy(2) = .true.
  else if( num == l_size ) then
     recvy(2) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of sbuffery with null address should be 0 or l_size")
  endif

  !---- determine recvy(3) based on b_addrsy ( west boundary )
  num = count(b_addrsy(3,:,1) == 0)
  if( num == 0 ) then
     recvy(3) = .true.
  else if( num == l_size ) then
     recvy(3) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of wbuffery with null address should be 0 or l_size")
  endif

  !---- determine recvy(4) based on b_addrsy ( north boundary )
  num = count(b_addrsy(4,:,1) == 0)
  if( num == 0 ) then
     recvy(4) = .true.
  else if( num == l_size ) then
     recvy(4) = .false.
  else
     call mpp_error(FATAL, &
          "mpp_do_get_boundary_V: number of nbuffery with null address should be 0 or l_size")
  endif
  nlist = size(domain%list(:))
  nsend_x = boundx%nsend
  nsend_y = boundy%nsend
  nrecv_x = boundx%nrecv
  nrecv_y = boundy%nrecv
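  !--- the x- and y-boundary overlapSpecs are traversed together below,
  !--- pairing each partner rank's x and y messages into one exchange.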
  allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
  cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
     if(cur_rank == rank_x) then
        do n = 1, boundx%recv(ind_x)%count
           if(recvx(boundx%recv(ind_x)%dir(n))) then
        if(ind_x .LE. nrecv_x) then
           rank_x = boundx%recv(ind_x)%pe - domain%pe
           if(rank_x .LE. 0) rank_x = rank_x + nlist
     if(cur_rank == rank_y) then
        do n = 1, boundy%recv(ind_y)%count
           if(recvy(boundy%recv(ind_y)%dir(n))) then
        if(ind_y .LE. nrecv_y) then
           rank_y = boundy%recv(ind_y)%pe - domain%pe
           if(rank_y .LE. 0) rank_y = rank_y + nlist
     cur_rank = max(rank_x, rank_y)
     call mpp_recv( msg1(m), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_3)
  cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
     if(cur_rank == rank_x) then
        do n = 1, boundx%send(ind_x)%count
           if(sendx(boundx%send(ind_x)%dir(n))) then
        if(ind_x .LE. nsend_x) then
           rank_x = boundx%send(ind_x)%pe - domain%pe
           if(rank_x .LT. 0) rank_x = rank_x + nlist
     if(cur_rank == rank_y) then
        do n = 1, boundy%send(ind_y)%count
           if(sendy(boundy%send(ind_y)%dir(n))) then
        if(ind_y .LE. nsend_y) then
           rank_y = boundy%send(ind_y)%pe - domain%pe
           if(rank_y .LT. 0) rank_y = rank_y + nlist
     cur_rank = min(rank_x, rank_y)
  call mpp_sync_self(check=EVENT_RECV)
  if(msg1(m) .NE. msg2(m)) then
     print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
  write(outunit,*)"NOTE from mpp_do_get_boundary_V: message sizes are matched between send and recv for domain " &
  deallocate(msg1, msg2)
  !--- the domain is always symmetric here
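  !--- north-fold handling: on a folded grid carrying a true vector field
  !--- (SCALAR_BIT unset), points on the fold need special treatment; for
  !--- BGRID_NE staggering the pole points are set to zero.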
  if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
     if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
        !poles set to 0: BGRID only
        if( gridtype.EQ.BGRID_NE )then
           do i = is, ie, midpoint
              if( domain%x(1)%compute%begin == i )then
                 ptr_wbufferx = b_addrsx(3, l, tMe)
                 ptr_wbuffery = b_addrsy(3, l, tMe)
  call mpp_sync_self( )
  cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
     if(cur_rank == rank_x) then
        do n = 1, boundx%recv(ind_x)%count
           if(recvx(boundx%recv(ind_x)%dir(n))) then
        if(ind_x .LE. nrecv_x) then
           rank_x = boundx%recv(ind_x)%pe - domain%pe
           if(rank_x .LE. 0) rank_x = rank_x + nlist
     if(cur_rank == rank_y) then
        do n = 1, boundy%recv(ind_y)%count
           if(recvy(boundy%recv(ind_y)%dir(n))) then
        if(ind_y .LE. nrecv_y) then
           rank_y = boundy%recv(ind_y)%pe - domain%pe
           if(rank_y .LE. 0) rank_y = rank_y + nlist
     cur_rank = max(rank_x, rank_y)
  buffer_pos = buffer_pos + msgsize
  buffer_recv_size = buffer_pos
  cur_rank = get_rank_unpack(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  do while(ind_x > 0 .OR. ind_y > 0)
     if(cur_rank == rank_y) then
        do n = boundy%recv(ind_y)%count, 1, -1
           if(recvy(boundy%recv(ind_y)%dir(n))) then
              tMe = boundy%recv(ind_y)%tileMe(n)
              ptr_ebuffery = b_addrsy(1, l, tMe)
              index = boundy%recv(ind_y)%index(n)
              buffer(pos) = ebuffery(index,k)
              ebuffery(index,k) = 0.
              ptr_sbuffery = b_addrsy(2, l, tMe)
              index = boundy%recv(ind_y)%index(n)
              buffer(pos) = sbuffery(index,k)
              sbuffery(index,k) = 0.
              ptr_wbuffery = b_addrsy(3, l, tMe)
              index = boundy%recv(ind_y)%index(n)
              buffer(pos) = wbuffery(index,k)
              wbuffery(index,k) = 0.
              ptr_nbuffery = b_addrsy(4, l, tMe)
              index = boundy%recv(ind_y)%index(n)
              buffer(pos) = nbuffery(index,k)
              nbuffery(index,k) = 0.
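              !--- adjoint unpack: each boundary-buffer entry is copied into
              !--- the exchange buffer and then zeroed, so every adjoint
              !--- contribution is counted exactly once.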
     if(ind_y .GT. 0) then
        rank_y = boundy%recv(ind_y)%pe - domain%pe
        if(rank_y .LE. 0) rank_y = rank_y + nlist
     if(cur_rank == rank_x) then
        do n = boundx%recv(ind_x)%count, 1, -1
           if(recvx(boundx%recv(ind_x)%dir(n))) then
              tMe = boundx%recv(ind_x)%tileMe(n)
              ptr_ebufferx = b_addrsx(1, l, tMe)
              index = boundx%recv(ind_x)%index(n)
              buffer(pos) = ebufferx(index,k)
              ebufferx(index,k) = 0.
              ptr_sbufferx = b_addrsx(2, l, tMe)
              index = boundx%recv(ind_x)%index(n)
              buffer(pos) = sbufferx(index,k)
              sbufferx(index,k) = 0.
              ptr_wbufferx = b_addrsx(3, l, tMe)
              index = boundx%recv(ind_x)%index(n)
              buffer(pos) = wbufferx(index,k)
              wbufferx(index,k) = 0.
              ptr_nbufferx = b_addrsx(4, l, tMe)
              index = boundx%recv(ind_x)%index(n)
              buffer(pos) = nbufferx(index,k)
              nbufferx(index,k) = 0.
     if(ind_x .GT. 0) then
        rank_x = boundx%recv(ind_x)%pe - domain%pe
        if(rank_x .LE. 0) rank_x = rank_x + nlist
     cur_rank = min(rank_x, rank_y)
  cur_rank = get_rank_recv(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  do while ( ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y )
     if(cur_rank == rank_x) then
        do n = 1, boundx%recv(ind_x)%count
           if(recvx(boundx%recv(ind_x)%dir(n))) then
              msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
        if(ind_x .LE. nrecv_x) then
           rank_x = boundx%recv(ind_x)%pe - domain%pe
           if(rank_x .LE. 0) rank_x = rank_x + nlist
     if(cur_rank == rank_y) then
        do n = 1, boundy%recv(ind_y)%count
           if(recvy(boundy%recv(ind_y)%dir(n))) then
              msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
        if(ind_y .LE. nrecv_y) then
           rank_y = boundy%recv(ind_y)%pe - domain%pe
           if(rank_y .LE. 0) rank_y = rank_y + nlist
     cur_rank = max(rank_x, rank_y)
  buffer_pos = buffer_pos + msgsize
  buffer_recv_size = buffer_pos
  cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
     if(cur_rank == rank_x) then
        do n = 1, boundx%send(ind_x)%count
           if(sendx(boundx%send(ind_x)%dir(n))) then
              tMe = boundx%send(ind_x)%tileMe(n)
        end do !do n = 1, boundx%count
        if(ind_x .LE. nsend_x) then
           rank_x = boundx%send(ind_x)%pe - domain%pe
           if(rank_x .LT. 0) rank_x = rank_x + nlist
     if(cur_rank == rank_y) then
        do n = 1, boundy%send(ind_y)%count
           if(sendy(boundy%send(ind_y)%dir(n))) then
              tMe = boundy%send(ind_y)%tileMe(n)
        end do ! do n = 1, boundy%count
        if(ind_y .LE. nsend_y) then
           rank_y = boundy%send(ind_y)%pe - domain%pe
           if(rank_y .LT. 0) rank_y = rank_y + nlist
     cur_rank = min(rank_x, rank_y)
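  !--- note: receive loops advance cur_rank with max(rank_x, rank_y) and
  !--- send loops with min(rank_x, rank_y); this appears to merge the two
  !--- overlap lists into a single deterministic sweep over partner ranks.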
  !--- maybe we do not need the following stack size check.
  call mpp_error( FATAL, 'MPP_DO_GET_BOUNDARY_V_: mpp_domains_stack overflow, '// &
       'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
  call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=to_pe, block=.FALSE., tag=COMM_TAG_4 )
  call mpp_sync_self(check=EVENT_RECV)
  ! send second part---------------------------------------------------------------
  buffer_pos = buffer_recv_size
  cur_rank = get_rank_send(domain, boundx, boundy, rank_x, rank_y, ind_x, ind_y)
  buffer_pos_old = buffer_pos
  do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
     if(cur_rank == rank_x) then
        do n = boundx%send(ind_x)%count, 1, -1
           if(sendx(boundx%send(ind_x)%dir(n))) then
              tMe = boundx%send(ind_x)%tileMe(n)
              select case( boundx%send(ind_x)%rotation(n) )
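              !--- rotation cases: across rotated tile boundaries (NINETY,
              !--- MINUS_NINETY, ONE_HUNDRED_EIGHTY) x- and y-components swap
              !--- roles, and true vectors (SCALAR_BIT unset) pick up a sign
              !--- flip, hence the mixed +/- accumulations below.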
                 ptr_fieldx = f_addrsx(l, tMe)
                 fieldx(i,j,k) = fieldx(i,j,k) + buffer(pos)
                 if( BTEST(flags,SCALAR_BIT) ) then
                    ptr_fieldy = f_addrsy(l, tMe)
                    fieldy(i,j,k) = fieldy(i,j,k) + buffer(pos)
                 ptr_fieldy = f_addrsy(l, tMe)
                 fieldy(i,j,k) = fieldy(i,j,k) - buffer(pos)
                 ptr_fieldy = f_addrsy(l, tMe)
                 fieldy(i,j,k) = fieldy(i,j,k) + buffer(pos)
              case (ONE_HUNDRED_EIGHTY)
                 if( BTEST(flags,SCALAR_BIT) ) then
                    ptr_fieldx = f_addrsx(l, tMe)
                    fieldx(i,j,k) = fieldx(i,j,k) + buffer(pos)
                 ptr_fieldx = f_addrsx(l, tMe)
                 fieldx(i,j,k) = fieldx(i,j,k) - buffer(pos)
        end do !do n = 1, boundx%count
        if(ind_x .LE. nsend_x) then
           rank_x = boundx%send(ind_x)%pe - domain%pe
           if(rank_x .LT. 0) rank_x = rank_x + nlist
     if(cur_rank == rank_y) then
        do n = boundy%send(ind_y)%count, 1, -1
           if(sendy(boundy%send(ind_y)%dir(n))) then
              tMe = boundy%send(ind_y)%tileMe(n)
              select case( boundy%send(ind_y)%rotation(n) )
                 ptr_fieldy = f_addrsy(l, tMe)
                 fieldy(i,j,k) = fieldy(i,j,k) + buffer(pos)
              case( MINUS_NINETY )
                 ptr_fieldx = f_addrsx(l, tMe)
                 fieldx(i,j,k) = fieldx(i,j,k) + buffer(pos)
                 if( BTEST(flags,SCALAR_BIT) ) then
                    ptr_fieldx = f_addrsx(l, tMe)
                    fieldx(i,j,k) = fieldx(i,j,k) + buffer(pos)
                 ptr_fieldx = f_addrsx(l, tMe)
                 fieldx(i,j,k) = fieldx(i,j,k) - buffer(pos)
              case (ONE_HUNDRED_EIGHTY)
                 if( BTEST(flags,SCALAR_BIT) ) then
                    ptr_fieldy = f_addrsy(l, tMe)
                    fieldy(i,j,k) = fieldy(i,j,k) + buffer(pos)
                 ptr_fieldy = f_addrsy(l, tMe)
                 fieldy(i,j,k) = fieldy(i,j,k) - buffer(pos)
        end do ! do n = 1, boundy%count
        if(ind_y .LE. nsend_y) then
           rank_y = boundy%send(ind_y)%pe - domain%pe
           if(rank_y .LT. 0) rank_y = rank_y + nlist
     cur_rank = min(rank_x, rank_y)
  call mpp_sync_self( )

end subroutine MPP_DO_GET_BOUNDARY_AD_3D_V_