!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
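! Nonblocking vector (paired x/y component) halo updates for 2-D domains:
! MPP_START_DO_UPDATE_3D_V_ packs and posts the communication, and
! MPP_COMPLETE_DO_UPDATE_3D_V_ unpacks it and applies fold corrections.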
subroutine MPP_START_DO_UPDATE_3D_V_(id_update, f_addrsx, f_addrsy, domain, update_x, update_y, &
                                     d_type, ke_max, ke_list, gridtype, flags, reuse_id_update, name)
  integer,           intent(in) :: id_update
  type(domain2d),    intent(in) :: domain
  type(overlapSpec), intent(in) :: update_x, update_y
  integer,           intent(in) :: ke_list(:,:)
  MPP_TYPE_,         intent(in) :: d_type  ! creates unique interface
  logical,           intent(in) :: reuse_id_update
  character(len=*),  intent(in) :: name

  !---local variable ------------------------------------------
  logical :: send(8), recv(8), update_edge_only
  integer :: nsend, nrecv, sendsize, recvsize
  integer :: send_msgsize(update_x%nsend+update_y%nsend)
  integer :: ind_send_x(update_x%nsend+update_y%nsend), ind_send_y(update_x%nsend+update_y%nsend)
  integer :: ind_recv_x(update_x%nrecv+update_y%nrecv), ind_recv_y(update_x%nrecv+update_y%nrecv)
  integer :: from_pe_list(update_x%nrecv+update_y%nrecv), to_pe_list(update_x%nsend+update_y%nsend)
  integer :: start_pos_recv(update_x%nrecv+update_y%nrecv), start_pos_send(update_x%nsend+update_y%nsend)
  MPP_TYPE_ :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke_max)
  MPP_TYPE_ :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke_max)
  pointer(ptr_fieldx, fieldx)
  pointer(ptr_fieldy, fieldy)
  pointer( ptr, buffer )
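  ! This routine packs the halo sections of the x- and y-component fields
  ! (addressed through f_addrsx/f_addrsy via Cray pointers) into the
  ! nonblocking communication buffer and posts the corresponding receives
  ! and sends, tagged with id_update so the matching
  ! MPP_COMPLETE_DO_UPDATE_3D_V_ call can finish them later.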
  update_edge_only = BTEST(flags, EDGEONLY)
  recv(1) = BTEST(flags,EAST)
  recv(3) = BTEST(flags,SOUTH)
  recv(5) = BTEST(flags,WEST)
  recv(7) = BTEST(flags,NORTH)
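  ! recv(1:8) covers the eight halo sections; the odd entries set here are the
  ! east, south, west and north edges, while the even (corner) entries are
  ! derived from them unless only the edges are requested (EDGEONLY).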
  if( update_edge_only ) then
  l_size = size(f_addrsx,1)
  nlist  = size(domain%list(:))
  ptr = LOC(mpp_domains_stack_nonblock)
  nrecv = get_vector_recv(domain, update_x, update_y, ind_recv_x, ind_recv_y, start_pos_recv, from_pe_list)
  nsend = get_vector_send(domain, update_x, update_y, ind_send_x, ind_send_y, start_pos_send, to_pe_list)
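  ! get_vector_recv/get_vector_send merge the x- and y-component overlap lists
  ! into one ordered set of messages per remote PE, returning the indices into
  ! update_x/update_y, the buffer start positions, and the PE lists used below.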
  if( nrecv > MAX_REQUEST ) then
     write( text,'(a,i8,a,i8)' ) 'nrecv =', nrecv, ' greater than MAX_REQUEST =', MAX_REQUEST
     call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: '//trim(text))
  endif
  if( nsend > MAX_REQUEST ) then
     write( text,'(a,i8,a,i8)' ) 'nsend =', nsend, ' greater than MAX_REQUEST =', MAX_REQUEST
     call mpp_error(FATAL,'MPP_START_DO_UPDATE_V: '//trim(text))
  endif

  !--- make sure the domain stack size is big enough.
     ind_x = ind_recv_x(m)
     ind_y = ind_recv_y(m)
     do n = 1, update_x%recv(ind_x)%count
        dir = update_x%recv(ind_x)%dir(n)
     do n = 1, update_y%recv(ind_y)%count
        dir = update_y%recv(ind_y)%dir(n)
     buffer_pos = buffer_pos + msgsize
     ind_x = ind_send_x(m)
     ind_y = ind_send_y(m)
     do n = 1, update_x%send(ind_x)%count
        dir = update_x%send(ind_x)%dir(n)
     do n = 1, update_y%send(ind_y)%count
        dir = update_y%send(ind_y)%dir(n)
     buffer_pos = buffer_pos + msgsize
     call mpp_error( FATAL, 'MPP_START_DO_UPDATE_V: mpp_domains_stack overflow, '// &
                     'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
  if( reuse_id_update ) then
        call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of recv msgsize for field '//trim(name) )
        call mpp_error(FATAL,'MPP_START_DO_UPDATE: mismatch of send msgsize for field '//trim(name) )
                   tag=id_update, request=request)
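  ! Pack stage: each send block below copies the relevant halo sections of
  ! fieldx/fieldy into the shared buffer.  For vector fields, the rotation of
  ! the remote tile determines which component is stored and with which sign;
  ! SCALAR_BIT suppresses the sign change so paired scalars can reuse the
  ! vector interface.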
!$OMP parallel do schedule(dynamic) default(shared) private(ind_x,ind_y,buffer_pos,pos,dir,tMe, &
!$OMP                                                       is,ie,js,je,ptr_fieldx,ptr_fieldy)
     ind_x = ind_send_x(m)
     ind_y = ind_send_y(m)
     select case( gridtype )
     case(BGRID_NE, BGRID_SW, AGRID)
        do n = 1, update_x%send(ind_x)%count
           dir = update_x%send(ind_x)%dir(n)
           tMe = update_x%send(ind_x)%tileMe(n)
           select case( update_x%send(ind_x)%rotation(n) )
              do l=1,l_size  ! loop over number of fields
                 ptr_fieldx = f_addrsx(l,tMe)
                 ptr_fieldy = f_addrsy(l,tMe)
                 do k = 1,ke_list(l,tMe)
                       buffer(pos-1) = fieldx(i,j,k)
                       buffer(pos)   = fieldy(i,j,k)
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos-1) = fieldy(i,j,k)
                          buffer(pos)   = fieldx(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos-1) = -fieldy(i,j,k)
                          buffer(pos)   =  fieldx(i,j,k)
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos-1) = fieldy(i,j,k)
                          buffer(pos)   = fieldx(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos-1) =  fieldy(i,j,k)
                          buffer(pos)   = -fieldx(i,j,k)
           case( ONE_HUNDRED_EIGHTY )
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos-1) = fieldx(i,j,k)
                          buffer(pos)   = fieldy(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos-1) = -fieldx(i,j,k)
                          buffer(pos)   = -fieldy(i,j,k)
           end select  ! select case( rotation(n) )
        end do  ! do n = 1, update_x%send(ind_x)%count
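     ! For C-grid staggering the x- and y-component halos live at different
     ! grid locations, so they are packed in separate passes (update_x below,
     ! then update_y) rather than as interleaved pairs.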
     case(CGRID_NE, CGRID_SW)
        do n = 1, update_x%send(ind_x)%count
           dir = update_x%send(ind_x)%dir(n)
           tMe = update_x%send(ind_x)%tileMe(n)
           select case( update_x%send(ind_x)%rotation(n) )
              do l=1,l_size  ! loop over number of fields
                 ptr_fieldx = f_addrsx(l,tMe)
                 ptr_fieldy = f_addrsy(l,tMe)
                 do k = 1,ke_list(l,tMe)
                       buffer(pos) = fieldx(i,j,k)
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = fieldy(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = -fieldy(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1, ke_list(l,tMe)
                          buffer(pos) = fieldy(i,j,k)
           case(ONE_HUNDRED_EIGHTY)
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = fieldx(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = -fieldx(i,j,k)
        do n = 1, update_y%send(ind_y)%count
           dir = update_y%send(ind_y)%dir(n)
           tMe = update_y%send(ind_y)%tileMe(n)
           select case( update_y%send(ind_y)%rotation(n) )
              do l=1,l_size  ! loop over number of fields
                 ptr_fieldx = f_addrsx(l,tMe)
                 ptr_fieldy = f_addrsy(l,tMe)
                 do k = 1,ke_list(l,tMe)
                       buffer(pos) = fieldy(i,j,k)
              do l=1,l_size  ! loop over number of fields
                 ptr_fieldx = f_addrsx(l,tMe)
                 ptr_fieldy = f_addrsy(l,tMe)
                 do k = 1,ke_list(l,tMe)
                       buffer(pos) = fieldx(i,j,k)
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = fieldx(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = -fieldx(i,j,k)
           case(ONE_HUNDRED_EIGHTY)
              if( BTEST(flags,SCALAR_BIT) ) then
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = fieldy(i,j,k)
                 do l=1,l_size  ! loop over number of fields
                    ptr_fieldx = f_addrsx(l,tMe)
                    ptr_fieldy = f_addrsy(l,tMe)
                    do k = 1,ke_list(l,tMe)
                          buffer(pos) = -fieldy(i,j,k)
     send_msgsize(m) = pos - buffer_pos
!$OMP end parallel do
                   tag=id_update, request=request )
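  ! The packed buffers are handed to nonblocking sends tagged with id_update;
  ! completion of both sends and receives is deferred to
  ! MPP_COMPLETE_DO_UPDATE_3D_V_.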
end subroutine MPP_START_DO_UPDATE_3D_V_
!###############################################################################
subroutine MPP_COMPLETE_DO_UPDATE_3D_V_(id_update, f_addrsx, f_addrsy, domain, update_x, update_y, &
                                        d_type, ke_max, ke_list, gridtype, flags)
  integer,           intent(in) :: id_update
  type(domain2d),    intent(in) :: domain
  type(overlapSpec), intent(in) :: update_x, update_y
  integer,           intent(in) :: ke_list(:,:)
  MPP_TYPE_,         intent(in) :: d_type  ! creates unique interface
  integer,           intent(in) :: gridtype

  MPP_TYPE_ :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke_max)
  MPP_TYPE_ :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke_max)
  pointer(ptr_fieldx, fieldx)
  pointer(ptr_fieldy, fieldy)
  MPP_TYPE_ :: recv_buffer(size(mpp_domains_stack_nonblock(:)))
  pointer( ptr, recv_buffer )
  integer :: i, j, k, l, is, ie, js, je, n, ke_sum, l_size, m
  integer :: ind_x, ind_y, nrecv, nsend
  integer :: ind_recv_x(update_x%nrecv+update_y%nrecv), ind_recv_y(update_x%nrecv+update_y%nrecv)
  integer :: start_pos_recv(update_x%nrecv+update_y%nrecv)
  integer :: from_pe_list(update_x%nrecv+update_y%nrecv)
  logical :: recv(8), send(8), update_edge_only
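  ! This routine waits on the receives posted by MPP_START_DO_UPDATE_3D_V_,
  ! unpacks the buffer into the x- and y-component fields, and then applies
  ! the sign/fold corrections required at folded (e.g. tripolar) boundaries.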
  update_edge_only = BTEST(flags, EDGEONLY)
  recv(1) = BTEST(flags,EAST)
  recv(3) = BTEST(flags,SOUTH)
  recv(5) = BTEST(flags,WEST)
  recv(7) = BTEST(flags,NORTH)
  if( update_edge_only ) then

  ke_sum = sum(ke_list)
  l_size = size(f_addrsx,1)
  nlist  = size(domain%list(:))
  ptr = LOC(mpp_domains_stack_nonblock)

  nrecv = get_vector_recv(domain, update_x, update_y, ind_recv_x, ind_recv_y, start_pos_recv, from_pe_list)
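  ! Unpacking walks each receive block in the reverse of the packing order
  ! (note the count,1,-1 loops below), so buffer positions are recovered by
  ! stepping back from the end of each message.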
!$OMP parallel do schedule(dynamic) default(shared) private(ind_x,ind_y,buffer_pos,pos,dir,tMe,is,ie,js,je, &
!$OMP                                                       msgsize,ptr_fieldx,ptr_fieldy)
     ind_x = ind_recv_x(m)
     ind_y = ind_recv_y(m)
     select case ( gridtype )
     case(BGRID_NE, BGRID_SW, AGRID)
        do n = update_x%recv(ind_x)%count, 1, -1
           dir = update_x%recv(ind_x)%dir(n)
           tMe = update_x%recv(ind_x)%tileMe(n)
           do l=1, l_size  ! loop over number of fields
              ptr_fieldx = f_addrsx(l, tMe)
              ptr_fieldy = f_addrsy(l, tMe)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = recv_buffer(pos-1)
                    fieldy(i,j,k) = recv_buffer(pos)
        end do  ! do dir=8,1,-1
     case(CGRID_NE, CGRID_SW)
        do n = update_y%recv(ind_y)%count, 1, -1
           dir = update_y%recv(ind_y)%dir(n)
           tMe = update_y%recv(ind_y)%tileMe(n)
           do l=1, l_size  ! loop over number of fields
              ptr_fieldy = f_addrsy(l, tMe)
              do k = 1,ke_list(l,tMe)
                    fieldy(i,j,k) = recv_buffer(pos)
        do n = update_x%recv(ind_x)%count, 1, -1
           dir = update_x%recv(ind_x)%dir(n)
           tMe = update_x%recv(ind_x)%tileMe(n)
           do l=1, l_size  ! loop over number of fields
              ptr_fieldx = f_addrsx(l, tMe)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = recv_buffer(pos)
!$OMP end parallel do
  ! ---northern boundary fold
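  ! For a folded boundary (the northern edge of a tripolar grid, for example)
  ! the halo on the far side of the fold mirrors the interior, so vector
  ! components copied across the fold must have their signs flipped and, for
  ! BGRID staggering, the pole points themselves are set to zero.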
  if(domain%symmetry) shift = 1
  if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then
     if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
        !poles set to 0: BGRID only
        if( gridtype.EQ.BGRID_NE )then
           if( .NOT. domain%symmetry ) is = is - 1
           do i = is, ie, midpoint
              if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
                    ptr_fieldx = f_addrsx(l, 1)
                    ptr_fieldy = f_addrsy(l, 1)
                    do k = 1,ke_list(l,tMe)
        ! the following code block corrects an error where the data in your halo
        ! coming from the other half may have the wrong sign
        select case(gridtype)
           if(domain%symmetry) then
              if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
                    ptr_fieldx = f_addrsx(l, 1)
                    ptr_fieldy = f_addrsy(l, 1)
                    do k = 1,ke_list(l,tMe)
              if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
                    ptr_fieldy = f_addrsy(l, 1)
                    do k = 1,ke_list(l,tMe)
                          fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
        ie = domain%x(1)%data%end
        select case(gridtype)
              ptr_fieldx = f_addrsx(l, 1)
              ptr_fieldy = f_addrsy(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = -fieldx(i,j,k)
                    fieldy(i,j,k) = -fieldy(i,j,k)
              ptr_fieldy = f_addrsy(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldy(i,j,k) = -fieldy(i,j,k)
  else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---southern boundary fold
     ! NOTE: symmetry is assumed for fold-south boundary
     if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
        !poles set to 0: BGRID only
        if( gridtype.EQ.BGRID_NE )then
           do i = is, ie, midpoint
              if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
                    ptr_fieldx = f_addrsx(l, 1)
                    ptr_fieldy = f_addrsy(l, 1)
                    do k = 1,ke_list(l,tMe)
        ! the following code block corrects an error where the data in your halo
        ! coming from the other half may have the wrong sign
        select case(gridtype)
           if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
                 ptr_fieldx = f_addrsx(l, 1)
                 ptr_fieldy = f_addrsy(l, 1)
                 do k = 1,ke_list(l,tMe)
           if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
                 ptr_fieldy = f_addrsy(l, 1)
                 do k = 1,ke_list(l,tMe)
                       fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
        ie = domain%x(1)%data%end
        select case(gridtype)
              ptr_fieldx = f_addrsx(l, 1)
              ptr_fieldy = f_addrsy(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = -fieldx(i,j,k)
                    fieldy(i,j,k) = -fieldy(i,j,k)
              ptr_fieldy = f_addrsy(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldy(i,j,k) = -fieldy(i,j,k)
  else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---western boundary fold
     ! NOTE: symmetry is assumed for fold-west boundary
     if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
        !poles set to 0: BGRID only
        if( gridtype.EQ.BGRID_NE )then
           do j = js, je, midpoint
              if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
                    ptr_fieldx = f_addrsx(l, 1)
                    ptr_fieldy = f_addrsy(l, 1)
                    do k = 1,ke_list(l,tMe)
        ! the following code block corrects an error where the data in your halo
        ! coming from the other half may have the wrong sign
        select case(gridtype)
           if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
                 ptr_fieldx = f_addrsx(l, 1)
                 ptr_fieldy = f_addrsy(l, 1)
                 do k = 1,ke_list(l,tMe)
           if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
                 ptr_fieldx = f_addrsx(l, 1)
                 do k = 1,ke_list(l,tMe)
                       fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
        je = domain%y(1)%data%end
        select case(gridtype)
              ptr_fieldx = f_addrsx(l, 1)
              ptr_fieldy = f_addrsy(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = -fieldx(i,j,k)
                    fieldy(i,j,k) = -fieldy(i,j,k)
              ptr_fieldx = f_addrsx(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = -fieldx(i,j,k)
  else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(flags,SCALAR_BIT)) )then ! ---eastern boundary fold
     ! NOTE: symmetry is assumed for fold-east boundary
     if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
        !poles set to 0: BGRID only
        if( gridtype.EQ.BGRID_NE )then
           do j = js, je, midpoint
              if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
                    ptr_fieldx = f_addrsx(l, 1)
                    ptr_fieldy = f_addrsy(l, 1)
                    do k = 1,ke_list(l,tMe)
        ! the following code block corrects an error where the data in your halo
        ! coming from the other half may have the wrong sign
        select case(gridtype)
           if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
                 ptr_fieldx = f_addrsx(l, 1)
                 ptr_fieldy = f_addrsy(l, 1)
                 do k = 1,ke_list(l,tMe)
           if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
                 ptr_fieldx = f_addrsx(l, 1)
                 do k = 1,ke_list(l,tMe)
                       fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
        if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
           je = domain%y(1)%data%end
        select case(gridtype)
              ptr_fieldx = f_addrsx(l, 1)
              ptr_fieldy = f_addrsy(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = -fieldx(i,j,k)
                    fieldy(i,j,k) = -fieldy(i,j,k)
              ptr_fieldx = f_addrsx(l, 1)
              do k = 1,ke_list(l,tMe)
                    fieldx(i,j,k) = -fieldx(i,j,k)

  nsend = update_x%nsend+update_y%nsend
end subroutine MPP_COMPLETE_DO_UPDATE_3D_V_
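! Usage note: these include-file routines are not called directly; they are
! instantiated for each type/kind by mpp_domains_mod and driven through its
! public nonblocking interface.  A minimal sketch of the calling pattern,
! assuming the standard mpp_domains API (mpp_start_update_domains /
! mpp_complete_update_domains) and user-declared fields u, v on domain:
!
!   id = mpp_start_update_domains(u, v, domain, gridtype=CGRID_NE)
!   ! ... computation that does not touch the halos of u and v ...
!   call mpp_complete_update_domains(id, u, v, domain, gridtype=CGRID_NE)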