!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!#####################################################################
  subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, grid_index, name)
     type(domainUG),         intent(inout) :: UG_domain
     type(domain2d), target, intent(in)    :: SG_domain
     integer,                intent(in)    :: npts_tile(:)  ! number of unstructured points on each tile
     integer,                intent(in)    :: grid_nlev(:)  ! number of levels in each unstructured grid.
     integer,                intent(in)    :: ndivs
     integer,                intent(in)    :: npes_io_group ! number of processors in an io group; only pes
                                                            ! with the same tile_id belong to the same group
     integer,                intent(in)    :: grid_index(:)
     character(len=*), optional, intent(in) :: name
     integer :: ntiles, ntotal_pts, ndivs_used, max_npts, cur_tile, cur_npts
     integer :: n, ts, te, p, pos, tile_id, ngroup, group_id, my_pos, i
     integer :: npes_in_group, is, ie, ntotal_costs, max_cost, cur_cost, costs_left
     integer :: npts_left, ndiv_left, cur_pos, ndiv, prev_cost, ioff
     integer, allocatable :: costs(:), costs_list(:), ndivs_tile(:)
     integer, allocatable :: pe_start(:), pe_end(:), ibegin(:), iend(:)
     real    :: avg_cost
     UG_domain%SG_domain => SG_domain
     ntiles = size(npts_tile(:))

     !--- total number of points must be no less than ndivs
     if(sum(npts_tile) < ndivs) call mpp_error(FATAL, &
          "mpp_define_unstruct_domain: total number of points is less than ndivs")
     !--- we are assuming nlev on each grid is at least one.
     do n = 1, size(grid_nlev(:))
        if(grid_nlev(n) < 1) call mpp_error(FATAL, &
             "mpp_define_unstruct_domain: grid_nlev at some point is less than 1")
     enddo
     !--- costs for each tile.
     allocate(costs(ntiles))
     costs = 0; pos = 0
     do n = 1, ntiles
        do i = 1, npts_tile(n)
           pos = pos + 1
           costs(n) = costs(n) + grid_nlev(pos)
        enddo
     enddo

     !--- compute the number of divisions for each tile.
     ntotal_costs = sum(costs)
     !--- get the upper limit of ndivs for each tile.
     allocate(ndivs_tile(ntiles))
     do n = 1, ntiles
        ndivs_tile(n) = CEILING(real(costs(n)*ndivs)/ntotal_costs)
     enddo

     !--- when too many divisions were assigned, remove divisions from the
     !--- tiles whose cost per division would grow the least.
     ndivs_used = sum(ndivs_tile)
     do while (ndivs_used > ndivs)
        max_cost = 0
        cur_tile = 0
        do n = 1, ntiles
           if( ndivs_tile(n) > 1 ) then
              cur_cost = CEILING(real(costs(n))/(ndivs_tile(n)-1))
              if( max_cost == 0 .OR. cur_cost < max_cost ) then
                 max_cost = cur_cost
                 cur_tile = n
              endif
           endif
        enddo
        ndivs_used = ndivs_used - 1
        ndivs_tile(cur_tile) = ndivs_tile(cur_tile) - 1
     enddo
     !--- divide the points of each tile over its divisions, trying to keep
     !--- the per-division cost close to the running average.
     allocate(pe_start(ntiles), pe_end(ntiles))
     allocate(ibegin(0:ndivs-1), iend(0:ndivs-1), costs_list(0:ndivs-1))
     ts = 0; ioff = 0
     do n = 1, ntiles
        te = ts + ndivs_tile(n) - 1
        pe_start(n) = ts
        pe_end(n)   = te
        ndiv_left  = ndivs_tile(n)
        npts_left  = npts_tile(n)
        costs_left = costs(n)
        cur_pos = 1
        do ndiv = 1, ndivs_tile(n)
           cur_cost = 0
           ibegin(ts+ndiv-1) = cur_pos
           avg_cost = real(costs_left)/ndiv_left
           do i = cur_pos, npts_tile(n)
              cur_cost = cur_cost + grid_nlev(i+ioff)
              costs_left = costs_left - grid_nlev(i+ioff)
              if(npts_left < ndiv_left ) then
                 call mpp_error(FATAL, "mpp_define_unstruct_domain: npts_left < ndiv_left")
              else if(npts_left == ndiv_left ) then
                 cur_pos = i + 1
                 exit
              else if(cur_cost .GE. avg_cost) then
                 prev_cost = cur_cost - grid_nlev(i+ioff)
                 if( cur_cost - avg_cost .LE. avg_cost - prev_cost ) then
                    cur_pos = i + 1
                 else  ! stopping one point earlier balances the cost better
                    cur_pos = i
                    cur_cost = prev_cost
                    costs_left = costs_left + grid_nlev(i+ioff)
                    npts_left = npts_left + 1
                 endif
                 exit
              endif
              npts_left = npts_left - 1
           enddo
           iend(ts+ndiv-1) = cur_pos - 1
           costs_list(ts+ndiv-1) = cur_cost
           ndiv_left = ndiv_left - 1
           npts_left = npts_left - 1
        enddo
        ts = te + 1
        ioff = ioff + npts_tile(n)
     enddo
     allocate(UG_domain%list(0:ndivs-1))
     do p = 0, ndivs-1
        UG_domain%list(p)%compute%begin = ibegin(p)
        UG_domain%list(p)%compute%end   = iend(p)
        UG_domain%list(p)%compute%size  = UG_domain%list(p)%compute%end - UG_domain%list(p)%compute%begin + 1
        UG_domain%list(p)%compute%max_size = 0
        UG_domain%list(p)%pos = p
        UG_domain%list(p)%pe  = p + mpp_root_pe()
        ioff = 0
        do n = 1, ntiles
           if( p .GE. pe_start(n) .AND. p .LE. pe_end(n) ) then
              UG_domain%list(p)%tile_id = n
              is = UG_domain%list(p)%compute%begin + ioff
              ie = UG_domain%list(p)%compute%end   + ioff
              UG_domain%list(p)%compute%begin_index = minval(grid_index(is:ie))
              UG_domain%list(p)%compute%end_index   = maxval(grid_index(is:ie))
              exit
           endif
           ioff = ioff + npts_tile(n)
        enddo
     enddo

     !--- write out domain decomposition from root pe
     if(mpp_pe() == mpp_root_pe() .and. present(name)) then
        write(stdout(),*) "unstruct domain name = ", trim(name)
        write(stdout(),*) UG_domain%list(:)%compute%size
     endif

     pos = mpp_pe() - mpp_root_pe()
     UG_domain%pe  = mpp_pe()
     UG_domain%pos = pos
     UG_domain%ntiles  = ntiles
     UG_domain%tile_id = UG_domain%list(pos)%tile_id
     p = pe_start(UG_domain%tile_id)
     UG_domain%tile_root_pe = UG_domain%list(p)%pe
     UG_domain%tile_npes = pe_end(UG_domain%tile_id) - pe_start(UG_domain%tile_id) + 1
     UG_domain%compute = UG_domain%list(pos)%compute
     UG_domain%compute%max_size = MAXVAL( UG_domain%list(:)%compute%size )
     UG_domain%global%begin = 1
     UG_domain%global%end  = npts_tile(UG_domain%tile_id)
     UG_domain%global%size = UG_domain%global%end
     UG_domain%global%max_size = -1   ! currently this is not supposed to be used.
     pos = 0
     do n = 1, UG_domain%tile_id-1
        pos = pos + npts_tile(n)
     enddo
     UG_domain%global%begin_index = grid_index(pos+1)
     UG_domain%global%end_index   = grid_index(pos+npts_tile(n))

     allocate(UG_domain%grid_index(UG_domain%compute%size))
     do n = 1, UG_domain%compute%size
        UG_domain%grid_index(n) = grid_index(pos+UG_domain%compute%begin+n-1)
     enddo
     !--- define io_domain
     allocate(UG_domain%io_domain)
     tile_id = UG_domain%tile_id
     UG_domain%io_domain%pe = UG_domain%pe
     !--- figure out the number of io groups for the current tile
     if(npes_io_group == 0) then
        ngroup = 1
     else
        ngroup = CEILING(real(ndivs_tile(tile_id))/npes_io_group)
     endif
     UG_domain%npes_io_group = npes_io_group
     UG_domain%io_layout = ngroup

     call mpp_compute_extent(1, ndivs_tile(tile_id), ngroup, ibegin(0:ngroup-1), iend(0:ngroup-1))
     my_pos = UG_domain%pe - UG_domain%tile_root_pe + 1
     do n = 0, ngroup-1
        if( my_pos .GE. ibegin(n) .AND. my_pos .LE. iend(n) ) then
           group_id = n
           exit
        endif
     enddo

     UG_domain%io_domain%tile_id      = group_id+1
     UG_domain%io_domain%compute      = UG_domain%compute
     UG_domain%io_domain%pe           = UG_domain%pe
     UG_domain%io_domain%pos          = my_pos - ibegin(group_id) + 1
     UG_domain%io_domain%tile_root_pe = ibegin(group_id) + UG_domain%tile_root_pe - 1

     pos = UG_domain%io_domain%tile_root_pe - mpp_root_pe()
     UG_domain%io_domain%global%begin       = UG_domain%list(pos)%compute%begin
     UG_domain%io_domain%global%begin_index = UG_domain%list(pos)%compute%begin_index
     pos = iend(group_id) + UG_domain%tile_root_pe - mpp_root_pe() - 1
     UG_domain%io_domain%global%end       = UG_domain%list(pos)%compute%end
     UG_domain%io_domain%global%end_index = UG_domain%list(pos)%compute%end_index

     npes_in_group = iend(group_id) - ibegin(group_id) + 1
     allocate(UG_domain%io_domain%list(0:npes_in_group-1))
     do n = 0, npes_in_group-1
        pos = UG_domain%io_domain%tile_root_pe - mpp_root_pe() + n
        UG_domain%io_domain%list(n)%compute = UG_domain%list(pos)%compute
        UG_domain%io_domain%list(n)%pos     = n
        UG_domain%io_domain%list(n)%pe      = UG_domain%list(pos)%pe
        UG_domain%io_domain%list(n)%tile_id = group_id+1
     enddo

     deallocate(costs, costs_list, ndivs_tile, pe_start, pe_end, ibegin, iend)
     call compute_overlap_SG2UG(UG_domain, SG_domain)
     call compute_overlap_UG2SG(UG_domain)

  end subroutine mpp_define_unstruct_domain
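  !> Usage sketch for mpp_define_unstruct_domain. This is illustrative only:
  !! the variable names and sizes below (one tile, 96 unstructured points,
  !! one level per point) are hypothetical, not part of the API.
  !!
  !!   type(domain2d) :: SG_domain            ! an existing structured domain
  !!   type(domainUG) :: UG_domain
  !!   integer :: npts_tile(1), grid_nlev(96), grid_index(96), i
  !!
  !!   npts_tile(1)  = 96                     ! unstructured points on the tile
  !!   grid_nlev(:)  = 1                      ! uniform cost per point
  !!   grid_index(:) = (/ (2*i-1, i=1,96) /)  ! 1-based structured-grid indices
  !!   call mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, &
  !!          grid_nlev, mpp_npes(), 0, grid_index, name="land unstruct")
  !!
  !! Passing npes_io_group=0 places all pes of a tile in a single io group.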
!####################################################################
  subroutine compute_overlap_SG2UG(UG_domain, SG_domain)
     type(domainUG), intent(inout) :: UG_domain
     type(domain2d), intent(in)    :: SG_domain
     integer, dimension(0:size(SG_domain%list(:))-1) :: send_cnt, recv_cnt
     integer, dimension(0:size(SG_domain%list(:))-1) :: send_buffer_pos, recv_buffer_pos
     integer, dimension(:), allocatable :: send_buffer, recv_buffer, index_list, buffer_pos
     integer :: tile_id, nlist, nxg, begin_index, end_index, i, j
     integer :: m, n, list, l, isc, iec, jsc, jec, ibegin, iend, grid_index
     integer :: nrecv, nsend, send_pos, recv_pos, pos

     !--- figure out the recv index information.
     tile_id = UG_domain%tile_id
     nlist = size(SG_domain%list(:))
     nxg = SG_domain%x(1)%global%size
     begin_index = UG_domain%compute%begin_index
     end_index   = UG_domain%compute%end_index
     pos = 0
     recv_cnt = 0
     allocate(index_list(UG_domain%compute%size))
     allocate(send_buffer(UG_domain%compute%size))
     index_list = -1
     do n = 0, nlist-1
        if(SG_domain%list(n)%tile_id(1) .NE. tile_id) cycle
        isc = SG_domain%list(n)%x(1)%compute%begin; iec = SG_domain%list(n)%x(1)%compute%end
        jsc = SG_domain%list(n)%y(1)%compute%begin; jec = SG_domain%list(n)%y(1)%compute%end
        ibegin = (jsc-1)*nxg + isc
        iend   = (jec-1)*nxg + iec
        if(ibegin > end_index .OR. iend < begin_index) cycle
        do l = 1, UG_domain%compute%size
           grid_index = UG_domain%grid_index(l)
           i = mod((grid_index-1), nxg) + 1
           j = (grid_index-1)/nxg + 1
           if( i .GE. isc .AND. i .LE. iec .AND. j .GE. jsc .AND. j .LE. jec ) then
              recv_cnt(n) = recv_cnt(n) + 1
              pos = pos + 1
              if(pos > UG_domain%compute%size) call mpp_error(FATAL, &
                   'compute_overlap_SG2UG: pos > UG_domain%compute%size')
              index_list(pos)  = l
              send_buffer(pos) = grid_index
           endif
        enddo
     enddo

     !--- make sure sum(recv_cnt) == UG_domain%compute%size
     if( UG_domain%compute%size .NE. sum(recv_cnt) ) then
        print*, "pe=", mpp_pe(), UG_domain%compute%size, sum(recv_cnt)
        call mpp_error(FATAL, &
             "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(recv_cnt)")
     endif
     allocate(buffer_pos(0:nlist-1))
     pos = 0
     do list = 0, nlist-1
        buffer_pos(list) = pos
        pos = pos + recv_cnt(list)
     enddo

     nrecv = count( recv_cnt > 0 )
     UG_domain%SG2UG%nrecv = nrecv
     allocate(UG_domain%SG2UG%recv(nrecv))
     nrecv = 0
     do list = 0, nlist-1
        m = mod( SG_domain%pos+nlist-list, nlist )
        if( recv_cnt(m) > 0 ) then
           nrecv = nrecv + 1
           UG_domain%SG2UG%recv(nrecv)%count = recv_cnt(m)
           UG_domain%SG2UG%recv(nrecv)%pe = UG_domain%list(m)%pe
           allocate(UG_domain%SG2UG%recv(nrecv)%i(recv_cnt(m)))
           pos = buffer_pos(m)
           do l = 1, recv_cnt(m)
              pos = pos + 1
              UG_domain%SG2UG%recv(nrecv)%i(l) = index_list(pos)
           enddo
        endif
     enddo

     !--- figure out the send index information.
     send_cnt = recv_cnt
     recv_cnt = 0
     call mpp_alltoall(send_cnt, 1, recv_cnt, 1)
     !--- make sure sum(send_cnt) == UG_domain%compute%size
     if( UG_domain%compute%size .NE. sum(send_cnt) ) call mpp_error(FATAL, &
          "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(send_cnt)")
     allocate(recv_buffer(sum(recv_cnt)))
     send_buffer_pos = 0; recv_buffer_pos = 0
     send_pos = 0; recv_pos = 0
     do n = 0, nlist-1
        if(send_cnt(n) > 0) then
           send_buffer_pos(n) = send_pos
           send_pos = send_pos + send_cnt(n)
        endif
        if(recv_cnt(n) > 0) then
           recv_buffer_pos(n) = recv_pos
           recv_pos = recv_pos + recv_cnt(n)
        endif
     enddo

     call mpp_alltoall(send_buffer, send_cnt, send_buffer_pos, &
                       recv_buffer, recv_cnt, recv_buffer_pos)

     nsend = count( recv_cnt(:) > 0 )
     UG_domain%SG2UG%nsend = nsend
     allocate(UG_domain%SG2UG%send(nsend))
     nsend = 0
     do list = 0, nlist-1
        m = mod( SG_domain%pos+list, nlist )
        if( recv_cnt(m) > 0 ) then
           nsend = nsend + 1
           UG_domain%SG2UG%send(nsend)%count = recv_cnt(m)
           UG_domain%SG2UG%send(nsend)%pe = UG_domain%list(m)%pe
           allocate(UG_domain%SG2UG%send(nsend)%i(recv_cnt(m)))
           allocate(UG_domain%SG2UG%send(nsend)%j(recv_cnt(m)))
           pos = recv_buffer_pos(m)
           do l = 1, recv_cnt(m)
              grid_index = recv_buffer(pos+l)
              UG_domain%SG2UG%send(nsend)%i(l) = mod(grid_index-1,nxg) + 1
              UG_domain%SG2UG%send(nsend)%j(l) = (grid_index-1)/nxg + 1
           enddo
        endif
     enddo
     deallocate(send_buffer, recv_buffer, index_list, buffer_pos)

  end subroutine compute_overlap_SG2UG
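  !> The linearization used above runs fastest in x: grid_index = (j-1)*nxg + i,
  !! so the inverse applied in the loops is i = mod(grid_index-1, nxg) + 1 and
  !! j = (grid_index-1)/nxg + 1. For example, with nxg = 4, grid_index = 7
  !! maps to (i,j) = (3,2).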
!####################################################################
  subroutine compute_overlap_UG2SG(UG_domain)
     type(domainUG), intent(inout) :: UG_domain

     !--- UG2SG is the reverse of SG2UG, so reuse the SG2UG overlap
     !--- information with the send and recv roles swapped.
     UG_domain%UG2SG%nsend =  UG_domain%SG2UG%nrecv
     UG_domain%UG2SG%send  => UG_domain%SG2UG%recv
     UG_domain%UG2SG%nrecv =  UG_domain%SG2UG%nsend
     UG_domain%UG2SG%recv  => UG_domain%SG2UG%send

  end subroutine compute_overlap_UG2SG
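  !> Design note: compute_overlap_UG2SG stores no data of its own; the pointer
  !! assignments above alias the SG2UG overlap lists with the send and recv
  !! roles exchanged, so the index information is kept only once and the two
  !! transfer directions can never drift out of sync.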
!####################################################################
  subroutine mpp_get_UG_SG_domain(UG_domain, SG_domain)
     type(domainUG), intent(inout) :: UG_domain
     type(domain2d), pointer       :: SG_domain

     SG_domain => UG_domain%SG_domain

  end subroutine mpp_get_UG_SG_domain
!####################################################################
  function mpp_get_UG_io_domain(domain)
     type(domainUG), intent(in) :: domain
     type(domainUG), pointer    :: mpp_get_UG_io_domain

     if(ASSOCIATED(domain%io_domain)) then
        mpp_get_UG_io_domain => domain%io_domain
     else
        call mpp_error(FATAL, "mpp_get_UG_io_domain: io_domain is not defined, contact developer")
     endif

  end function mpp_get_UG_io_domain
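  !> Usage sketch (names hypothetical): parallel I/O is organized around the
  !! io domain rather than the full unstructured domain, e.g.
  !!
  !!   type(domainUG), pointer :: io_dom
  !!   io_dom => mpp_get_UG_io_domain(UG_domain)
  !!   ! the root pe of each io group gathers and writes its group's data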
!#####################################################################
  subroutine mpp_get_UG_compute_domain( domain, begin, end, size)
     type(domainUG),    intent(in)  :: domain
     integer, optional, intent(out) :: begin, end, size

     if( PRESENT(begin) ) begin = domain%compute%begin
     if( PRESENT(end)   ) end   = domain%compute%end
     if( PRESENT(size)  ) size  = domain%compute%size

  end subroutine mpp_get_UG_compute_domain
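  !> Usage sketch: all arguments are optional, so callers may request any
  !! subset (names hypothetical):
  !!
  !!   integer :: is, ie, nlocal
  !!   call mpp_get_UG_compute_domain(UG_domain, begin=is, end=ie, size=nlocal)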
!#####################################################################
  subroutine mpp_get_UG_global_domain( domain, begin, end, size)
     type(domainUG),    intent(in)  :: domain
     integer, optional, intent(out) :: begin, end, size

     if( PRESENT(begin) ) begin = domain%global%begin
     if( PRESENT(end)   ) end   = domain%global%end
     if( PRESENT(size)  ) size  = domain%global%size

  end subroutine mpp_get_UG_global_domain
!#####################################################################
  subroutine mpp_get_UG_compute_domains( domain, begin, end, size )
     type(domainUG),    intent(in)  :: domain
     integer, optional, intent(out) :: begin(:), end(:), size(:)

     !--- we use shape instead of size for the error checks because
     !--- size is also the name of an argument.
     if( PRESENT(begin) )then
        if( any(shape(begin).NE.shape(domain%list)) ) &
             call mpp_error( FATAL, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
        begin(:) = domain%list(:)%compute%begin
     end if
     if( PRESENT(end) )then
        if( any(shape(end).NE.shape(domain%list)) ) &
             call mpp_error( FATAL, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
        end(:) = domain%list(:)%compute%end
     end if
     if( PRESENT(size) )then
        if( any(shape(size).NE.shape(domain%list)) ) &
             call mpp_error( FATAL, 'mpp_get_UG_compute_domains: size array size does not match domain.' )
        size(:) = domain%list(:)%compute%size
     end if

  end subroutine mpp_get_UG_compute_domains
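  !> Usage sketch: each array argument must have one element per pe of the
  !! domain. Names below are hypothetical:
  !!
  !!   integer :: npes
  !!   integer, allocatable :: bgn(:), fin(:), sz(:)
  !!   npes = mpp_get_UG_domain_npes(UG_domain)
  !!   allocate(bgn(npes), fin(npes), sz(npes))
  !!   call mpp_get_UG_compute_domains(UG_domain, begin=bgn, end=fin, size=sz)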
!#####################################################################
  subroutine mpp_get_UG_domains_index( domain, begin, end)
     type(domainUG), intent(in)  :: domain
     integer,        intent(out) :: begin(:), end(:)

     !--- we use shape instead of size for the error checks because
     !--- size is used as an argument name elsewhere.
     if( any(shape(begin).NE.shape(domain%list)) ) &
          call mpp_error( FATAL, 'mpp_get_UG_domains_index: begin array size does not match domain.' )
     begin(:) = domain%list(:)%compute%begin_index
     if( any(shape(end).NE.shape(domain%list)) ) &
          call mpp_error( FATAL, 'mpp_get_UG_domains_index: end array size does not match domain.' )
     end(:) = domain%list(:)%compute%end_index

  end subroutine mpp_get_UG_domains_index
!#####################################################################
  function mpp_get_UG_domain_ntiles(domain)
     type(domainUG), intent(in) :: domain
     integer :: mpp_get_UG_domain_ntiles

     mpp_get_UG_domain_ntiles = domain%ntiles

  end function mpp_get_UG_domain_ntiles
!#######################################################################
  subroutine mpp_get_ug_domain_tile_list(domain, tiles)
     type(domainUG), intent(in)    :: domain
     integer,        intent(inout) :: tiles(:)
     integer :: i

     if( size(tiles(:)).NE.size(domain%list(:)) ) &
          call mpp_error( FATAL, 'mpp_get_ug_domain_tile_list: tiles array size does not match domain.' )
     do i = 1, size(tiles(:))
        tiles(i) = domain%list(i-1)%tile_id
     end do

  end subroutine mpp_get_ug_domain_tile_list
!#####################################################################
  function mpp_get_UG_domain_tile_id(domain)
     type(domainUG), intent(in) :: domain
     integer :: mpp_get_UG_domain_tile_id

     mpp_get_UG_domain_tile_id = domain%tile_id

  end function mpp_get_UG_domain_tile_id
!####################################################################
  function mpp_get_UG_domain_npes(domain)
     type(domainUG), intent(in) :: domain
     integer :: mpp_get_UG_domain_npes

     mpp_get_UG_domain_npes = size(domain%list(:))

  end function mpp_get_UG_domain_npes
!####################################################################
  subroutine mpp_get_UG_domain_pelist( domain, pelist)
     type(domainUG), intent(in)  :: domain
     integer,        intent(out) :: pelist(:)

     if( size(pelist(:)).NE.size(domain%list(:)) ) &
          call mpp_error( FATAL, 'mpp_get_UG_domain_pelist: pelist array size does not match domain.' )
     pelist(:) = domain%list(:)%pe

  end subroutine mpp_get_UG_domain_pelist
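  !> Usage sketch (names hypothetical): size the pelist from the domain, e.g.
  !! before collective calls on the domain's pes:
  !!
  !!   integer, allocatable :: pes(:)
  !!   allocate(pes(mpp_get_UG_domain_npes(UG_domain)))
  !!   call mpp_get_UG_domain_pelist(UG_domain, pes)
  !!   call mpp_set_current_pelist(pes)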
!###################################################################
  subroutine mpp_get_UG_domain_tile_pe_inf( domain, root_pe, npes, pelist)
     type(domainUG),    intent(in)  :: domain
     integer, optional, intent(out) :: root_pe, npes, pelist(:)

     if(PRESENT(root_pe)) root_pe = domain%tile_root_pe
     if(PRESENT(npes))    npes    = domain%tile_npes
     if(PRESENT(pelist)) then
        if( size(pelist(:)) .NE. domain%tile_npes ) &
             call mpp_error( FATAL, 'mpp_get_UG_domain_tile_pe_inf: pelist array size does not match domain.' )
        pelist(:) = domain%list(domain%pos:domain%pos+domain%tile_npes-1)%pe
     endif

  end subroutine mpp_get_UG_domain_tile_pe_inf
!####################################################################
  subroutine mpp_get_UG_domain_grid_index( domain, grid_index)
     type(domainUG), intent(in)    :: domain
     integer,        intent(inout) :: grid_index(:)

     if( size(grid_index(:)).NE.size(domain%grid_index(:)) ) &
          call mpp_error( FATAL, 'mpp_get_UG_domain_grid_index: grid_index array size does not match domain.' )
     grid_index(:) = domain%grid_index(:)

  end subroutine mpp_get_UG_domain_grid_index
!###################################################################
  subroutine mpp_define_null_UG_domain(domain)
     type(domainUG), intent(inout) :: domain

     domain%global%begin  = -1; domain%global%end  = -1; domain%global%size = 0
     domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
     domain%pe = NULL_PE
     domain%ntiles = -1
     domain%pos = -1
     domain%tile_id = -1
     domain%tile_root_pe = -1

  end subroutine mpp_define_null_UG_domain
!##############################################################################
  subroutine mpp_broadcast_domain_ug( domain )
     !broadcast domain (useful only outside the context of its own pelist)
     type(domainUG), intent(inout) :: domain
     integer, allocatable :: pes(:)
     logical :: native          !true if I am on the pelist of this domain
     integer :: listsize, listpos
     integer :: n
     integer, dimension(7) :: msg, info   !pe and compute domain of each item in list

     if( .NOT.module_is_initialized ) &
          call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_ug: You must first call mpp_domains_init.' )

     !get the current pelist
     allocate( pes(0:mpp_npes()-1) )
     call mpp_get_current_pelist(pes)

     !am I part of this domain?
     native = ASSOCIATED(domain%list)

     !get the list size from the native pes and share it with all pes
     listsize = 0
     if( native ) listsize = size(domain%list(:))
     call mpp_max(listsize)

     if( .NOT.native )then
        !initialize domain%list and set null values in message
        allocate( domain%list(0:listsize-1) )
        domain%pe  = NULL_PE
        domain%pos = -1
        domain%compute%begin       =  1
        domain%compute%end         = -1
        domain%compute%begin_index =  1
        domain%compute%end_index   = -1
        domain%global%begin        = -1
        domain%global%end          = -1
        domain%tile_id             = -1
        domain%tile_root_pe        = -1
     end if

     !initialize values in info
     info(1) = domain%pe
     info(2) = domain%pos
     info(3) = domain%tile_id
     call mpp_get_UG_compute_domain( domain, info(4), info(5))
     info(6) = domain%compute%begin_index
     info(7) = domain%compute%end_index

     !broadcast your info across current pelist and unpack if needed
     listpos = 0
     do n = 0, mpp_npes()-1
        msg = info
        if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
        call mpp_broadcast( msg, 7, pes(n) )
        !no need to unpack message if native
        !no need to unpack message from non-native PE
        if( .NOT.native .AND. msg(1).NE.NULL_PE )then
           domain%list(listpos)%pe                  = msg(1)
           domain%list(listpos)%pos                 = msg(2)
           domain%list(listpos)%tile_id             = msg(3)
           domain%list(listpos)%compute%begin       = msg(4)
           domain%list(listpos)%compute%end         = msg(5)
           domain%list(listpos)%compute%begin_index = msg(6)
           domain%list(listpos)%compute%end_index   = msg(7)
           listpos = listpos + 1
           if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'ls,le=', msg(4:5)
        end if
     end do
     deallocate(pes)

  end subroutine mpp_broadcast_domain_ug

!------------------------------------------------------------------------------
  function mpp_domain_UG_is_tile_root_pe(domain) result(is_root)
     type(domainUG), intent(in) :: domain
     logical(INT_KIND) :: is_root

     if (domain%pe .eq. domain%tile_root_pe) then
        is_root = .true.
     else
        is_root = .false.
     endif

  end function mpp_domain_UG_is_tile_root_pe

!------------------------------------------------------------------------------
!HELP: There needs to be a subroutine to return the "io_layout" for
!      an unstructured domain, so I made one.  Someone should check
!      to see if this is correct.
  function mpp_get_io_domain_UG_layout(domain) result(io_layout)
     type(domainUG), intent(in) :: domain
     integer(INT_KIND) :: io_layout

     io_layout = domain%io_layout

  end function mpp_get_io_domain_UG_layout

!------------------------------------------------------------------
  subroutine deallocate_unstruct_overlap_type(overlap)
     type(unstruct_overlap_type), intent(inout) :: overlap

     if(associated(overlap%i)) deallocate(overlap%i)
     if(associated(overlap%j)) deallocate(overlap%j)

  end subroutine deallocate_unstruct_overlap_type

!------------------------------------------------------------------
  subroutine deallocate_unstruct_pass_type(passobj)
     type(unstruct_pass_type), intent(inout) :: passobj
     integer :: n

     do n = 1, passobj%nsend
        call deallocate_unstruct_overlap_type(passobj%send(n))
     enddo
     do n = 1, passobj%nrecv
        call deallocate_unstruct_overlap_type(passobj%recv(n))
     enddo

     if(associated(passobj%send)) deallocate(passobj%send)
     if(associated(passobj%recv)) deallocate(passobj%recv)

  end subroutine deallocate_unstruct_pass_type

!------------------------------------------------------------------
  subroutine mpp_deallocate_domainUG(domain)
     type(domainUG), intent(inout) :: domain

     if (associated(domain%list)) then
        deallocate(domain%list)
        domain%list => null()
     endif

     if (associated(domain%io_domain)) then
        if (associated(domain%io_domain%list)) then
           deallocate(domain%io_domain%list)
           domain%io_domain%list => null()
        endif
        deallocate(domain%io_domain)
        domain%io_domain => null()
     endif

     call deallocate_unstruct_pass_type(domain%SG2UG)
     call deallocate_unstruct_pass_type(domain%UG2SG)

     if (associated(domain%grid_index)) then
        deallocate(domain%grid_index)
        domain%grid_index => null()
     endif

     if (associated(domain%SG_domain)) then
        domain%SG_domain => null()
     endif

  end subroutine mpp_deallocate_domainUG

!###################################################################
!> Overload the .eq. for UG
  function mpp_domainUG_eq( a, b )
     logical :: mpp_domainUG_eq
     type(domainUG), intent(in) :: a, b

     if (associated(a%SG_domain) .and. associated(b%SG_domain)) then
        if (a%SG_domain .ne. b%SG_domain) then
           mpp_domainUG_eq = .false.
           return
        endif
     elseif (associated(a%SG_domain) .and. .not. associated(b%SG_domain)) then
        mpp_domainUG_eq = .false.
        return
     elseif (.not. associated(a%SG_domain) .and. associated(b%SG_domain)) then
        mpp_domainUG_eq = .false.
        return
     endif

     mpp_domainUG_eq = (a%npes_io_group .EQ. b%npes_io_group) .AND. &
                       (a%pos           .EQ. b%pos)           .AND. &
                       (a%ntiles        .EQ. b%ntiles)        .AND. &
                       (a%tile_id       .EQ. b%tile_id)       .AND. &
                       (a%tile_npes     .EQ. b%tile_npes)     .AND. &
                       (a%tile_root_pe  .EQ. b%tile_root_pe)

     if(.not. mpp_domainUG_eq) return

     mpp_domainUG_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
                         a%compute%end  .EQ.b%compute%end   .AND. &
                         a%global%begin .EQ.b%global%begin  .AND. &
                         a%global%end   .EQ.b%global%end    .AND. &
                         a%SG2UG%nsend  .EQ.b%SG2UG%nsend   .AND. &
                         a%SG2UG%nrecv  .EQ.b%SG2UG%nrecv   .AND. &
                         a%UG2SG%nsend  .EQ.b%UG2SG%nsend   .AND. &
                         a%UG2SG%nrecv  .EQ.b%UG2SG%nrecv )

  end function mpp_domainUG_eq

!> Overload the .ne. for UG
  function mpp_domainUG_ne( a, b )
     logical :: mpp_domainUG_ne
     type(domainUG), intent(in) :: a, b

     mpp_domainUG_ne = .NOT. ( a.EQ.b )

  end function mpp_domainUG_ne

#undef  MPP_TYPE_
#define MPP_TYPE_ real(DOUBLE_KIND)
#undef  mpp_pass_SG_to_UG_2D_
#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r8_2d
#undef  mpp_pass_SG_to_UG_3D_
#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r8_3d
#undef  mpp_pass_UG_to_SG_2D_
#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r8_2d
#undef  mpp_pass_UG_to_SG_3D_
#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r8_3d
#include <mpp_unstruct_pass_data.h>

#undef  MPP_TYPE_
#define MPP_TYPE_ real(FLOAT_KIND)
#undef  mpp_pass_SG_to_UG_2D_
#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r4_2d
#undef  mpp_pass_SG_to_UG_3D_
#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r4_3d
#undef  mpp_pass_UG_to_SG_2D_
#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r4_2d
#undef  mpp_pass_UG_to_SG_3D_
#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r4_3d
#include <mpp_unstruct_pass_data.h>

#undef  MPP_TYPE_
#define MPP_TYPE_ integer(INT_KIND)
#undef  mpp_pass_SG_to_UG_2D_
#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_i4_2d
#undef  mpp_pass_SG_to_UG_3D_
#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_i4_3d
#undef  mpp_pass_UG_to_SG_2D_
#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_i4_2d
#undef  mpp_pass_UG_to_SG_3D_
#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_i4_3d
#include <mpp_unstruct_pass_data.h>

#undef  MPP_TYPE_
#define MPP_TYPE_ logical(INT_KIND)
#undef  mpp_pass_SG_to_UG_2D_
#define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_l4_2d
#undef  mpp_pass_SG_to_UG_3D_
#define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_l4_3d
#undef  mpp_pass_UG_to_SG_2D_
#define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_l4_2d
#undef  mpp_pass_UG_to_SG_3D_
#define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_l4_3d
#include <mpp_unstruct_pass_data.h>

#undef  MPP_GLOBAL_FIELD_UG_2D_
#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r8_2d
#undef  MPP_GLOBAL_FIELD_UG_3D_
#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r8_3d
#undef  MPP_GLOBAL_FIELD_UG_4D_
#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r8_4d
#undef  MPP_GLOBAL_FIELD_UG_5D_
#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r8_5d
#undef  MPP_TYPE_
#define MPP_TYPE_ real(DOUBLE_KIND)
#include <mpp_global_field_ug.h>

#ifndef no_8byte_integers
#undef  MPP_GLOBAL_FIELD_UG_2D_
#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i8_2d
#undef  MPP_GLOBAL_FIELD_UG_3D_
#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i8_3d
#undef  MPP_GLOBAL_FIELD_UG_4D_
#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i8_4d
#undef  MPP_GLOBAL_FIELD_UG_5D_
#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i8_5d
#undef  MPP_TYPE_
#define MPP_TYPE_ integer(LONG_KIND)
#include <mpp_global_field_ug.h>
#endif

#undef  MPP_GLOBAL_FIELD_UG_2D_
#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r4_2d
#undef  MPP_GLOBAL_FIELD_UG_3D_
#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r4_3d
#undef  MPP_GLOBAL_FIELD_UG_4D_
#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r4_4d
#undef  MPP_GLOBAL_FIELD_UG_5D_
#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r4_5d
#undef  MPP_TYPE_
#define MPP_TYPE_ real(FLOAT_KIND)
#include <mpp_global_field_ug.h>

#undef  MPP_GLOBAL_FIELD_UG_2D_
#define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i4_2d
#undef  MPP_GLOBAL_FIELD_UG_3D_
#define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i4_3d
#undef  MPP_GLOBAL_FIELD_UG_4D_
#define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i4_4d
#undef  MPP_GLOBAL_FIELD_UG_5D_
#define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i4_5d
#undef  MPP_TYPE_
#define MPP_TYPE_ integer(INT_KIND)
#include <mpp_global_field_ug.h>
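!> Note on the preprocessor blocks above: <mpp_unstruct_pass_data.h> and
!! <mpp_global_field_ug.h> are type-generic templates; each #define/#include
!! pair instantiates the routines once per type/kind combination. A minimal
!! sketch of the same pattern (file and macro names here are illustrative
!! only, not part of FMS):
!!
!!   ! in example_template.h:
!!   !   subroutine EXAMPLE_COPY_(src, dst)
!!   !      MPP_TYPE_, intent(in)  :: src(:)
!!   !      MPP_TYPE_, intent(out) :: dst(:)
!!   !      dst = src
!!   !   end subroutine EXAMPLE_COPY_
!!
!!   #undef  MPP_TYPE_
!!   #define MPP_TYPE_ real(DOUBLE_KIND)
!!   #undef  EXAMPLE_COPY_
!!   #define EXAMPLE_COPY_ example_copy_r8
!!   #include <example_template.h>   ! instantiates example_copy_r8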