1 !***********************************************************************
2 !* GNU Lesser General Public License
4 !* This file
is part of the GFDL Flexible Modeling System (FMS).
6 !* FMS
is free software: you can redistribute it and/or modify it under
7 !* the terms of the GNU Lesser General Public License as published by
8 !* the Free Software Foundation, either
version 3 of the License, or (at
9 !* your option) any later
version.
11 !* FMS
is distributed in the hope that it
will be useful, but WITHOUT
12 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 !* You should have
received a copy of the GNU Lesser General Public
17 !* License along with FMS. If
not, see <http://www.gnu.org/licenses/>.
18 !***********************************************************************
!> Rank-2 entry point for nonblocking halo updates: views the 2-D field
!> as a 3-D array through a Cray pointer (ptr/field3D) and forwards all
!> arguments to the generic mpp_start_update_domains, returning the
!> nonblocking update id produced by the 3-D implementation.
!> NOTE(review): this copy shows extraction damage -- the MPP_TYPE_
!> declarations of field/field3D and the ptr = LOC(field) assignment
!> appear to be missing; restore from upstream FMS before compiling.
19 function MPP_START_UPDATE_DOMAINS_2D_(
field, domain, flags, position, &
20 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete)
21 type(domain2D), intent(inout) :: domain
23 integer, intent(in), optional :: flags
24 integer, intent(in), optional :: position
25 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
26 character(
len=*), intent(in), optional ::
name 27 integer, intent(in), optional :: tile_count
28 integer, intent(in), optional :: update_id
29 logical, intent(in), optional :: complete
30 integer :: MPP_START_UPDATE_DOMAINS_2D_
! Cray pointer: field3D aliases the caller's 2-D field with an added
! unit-extent third dimension so the 3-D routine can be reused.
33 pointer( ptr, field3D )
36 MPP_START_UPDATE_DOMAINS_2D_ = mpp_start_update_domains(field3D, domain, flags, position, &
37 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete)
40 end function MPP_START_UPDATE_DOMAINS_2D_
!> Core scalar routine: starts a nonblocking halo update of a 3-D field
!> on a 2-D domain and returns an integer update id to be handed later
!> to mpp_complete_update_domains. Visible logic: validates optional
!> halo widths against the domain's defined halos, rejects E/N-cell
!> positions on rotated (NINETY/MINUS_NINETY) mosaics, buffers field
!> addresses and ke extents per tile until the last field of a
!> "complete" group arrives, checks size/halo/flag consistency across
!> the buffered fields, optionally reuses a caller-supplied update_id
!> (verifying its stored halo/position settings match), then posts the
!> communication via mpp_start_do_update when the domain needs updating.
!> NOTE(review): this copy shows extraction damage -- declaration lines
!> (field, f_addrs, isize/jsize, text, d_type, ...), many end-if lines,
!> and several condition lines are missing; restore from upstream FMS
!> before compiling. Also note the typo 'mpp_complte_UPDATE_DOMAINS_3D'
!> in the runtime error string (left untouched here).
42 function MPP_START_UPDATE_DOMAINS_3D_(
field, domain, flags, position, &
43 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
45 type(domain2D), intent(inout) :: domain
47 integer, intent(in), optional :: flags
48 integer, intent(in), optional :: position
49 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
50 character(
len=*), intent(in), optional ::
name 51 integer, intent(in), optional :: tile_count
52 integer, intent(in), optional :: update_id
53 logical, intent(in), optional :: complete
54 integer :: MPP_START_UPDATE_DOMAINS_3D_
58 integer :: update_whalo, update_ehalo, update_shalo, update_nhalo,
update_flags, update_position
60 logical :: set_mismatch, is_complete
61 logical :: do_update, reuse_id_update
! save'd scalars remember the first buffered field's geometry so later
! fields in the same group can be checked for consistency.
63 integer, save ::
pos, whalosz, ehalosz, shalosz, nhalosz, update_flags_saved
65 integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
67 type(overlapSpec), pointer :: update =>
NULL()
! --- validate requested halo widths against the domain definition ---
73 if(present(whalo)) then
75 if(abs(update_whalo) > domain%whalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D: " 76 "optional argument whalo should not be larger than the whalo when define domain.")
78 update_whalo = domain%whalo
80 if(present(ehalo)) then
82 if(abs(update_ehalo) > domain%ehalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D: " 83 "optional argument ehalo should not be larger than the ehalo when define domain.")
85 update_ehalo = domain%ehalo
87 if(present(shalo)) then
89 if(abs(update_shalo) > domain%shalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D: " 90 "optional argument shalo should not be larger than the shalo when define domain.")
92 update_shalo = domain%shalo
94 if(present(nhalo)) then
96 if(abs(update_nhalo) > domain%nhalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D: " 97 "optional argument nhalo should not be larger than the nhalo when define domain.")
99 update_nhalo = domain%nhalo
105 update_position = CENTER
106 if(present(position)) then
107 !--- when there
is NINETY or MINUS_NINETY rotation for
some contact, the salar data can
not be on E or N-cell,
108 if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
109 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS_3D: hen there is NINETY or MINUS_NINETY rotation, ' 110 'can not use scalar version update_domain for data on E or N-cell' )
111 update_position = position
114 max_ntile = domain%max_ntile_pe
115 ntile =
size(domain%x(:))
117 if(PRESENT(complete)) then
118 is_complete = complete
123 if(ntile>MAX_TILES) then
124 write(
text,
'(i2)' ) MAX_TILES
125 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS_3D: MAX_TILES=' 127 if(.NOT. present(tile_count) ) call
mpp_error(FATAL,
"MPP_UPDATE_3D: " 128 "optional argument tile_count should be present when number of tiles on this pe is more than 1")
! do_update fires only for the last tile of a completed field group.
132 do_update = (
tile == ntile) .AND. is_complete
135 if(list > MAX_DOMAIN_FIELDS)then
136 write(
text,
'(i2)' ) MAX_DOMAIN_FIELDS
137 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS: MAX_DOMAIN_FIELDS=' 142 !make sure the
field is not called mpp_start_update_domains. Currently we only
check the address at
tile = 1.
147 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS_3D is called again before calling ' 148 'mpp_complte_UPDATE_DOMAINS_3D for field ' 154 if(list == 1 .AND.
tile == 1 )then
156 whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
! --- later fields must match the first field's saved geometry ---
159 set_mismatch = .false.
162 set_mismatch = set_mismatch .OR. (update_position /=
pos)
163 set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
164 set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
165 set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
166 set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
167 set_mismatch = set_mismatch .OR. (update_flags_saved /=
update_flags)
169 write(
text,
'(i2)' ) list
170 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: Incompatible
field at count '
181 " can
not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
! --- reuse a caller-supplied update id after validating its settings ---
184 if( PRESENT(update_id) ) then
185 if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
186 write(
text,'(
a,i8,
a,i8)' ) 'optional argument update_id =', update_id, &
187 '
is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
188 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '
190 current_id = update_id
191 reuse_id_update = .
true.
192 !--- when reuse the update_id, make sure update_flag, halo
size and update_position are still the same
194 nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
195 nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
196 nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
197 nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
198 nonblock_data(current_id)%update_position .NE. update_position ) then
199 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: mismatch for optional argument for
field '
202 reuse_id_update = .
false.
206 ' greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
207 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS: '
219 nonblock_data(current_id)%field_addrs(1:l_size) = f_addrs(1:l_size,1)
220 MPP_START_UPDATE_DOMAINS_3D_ = current_id
222 ke_max = maxval(ke_list(1:l_size,1:ntile))
223 if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
224 update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
225 call mpp_start_do_update(current_id, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
! --- reset the per-group buffers once the update has been posted ---
228 l_size=0; f_addrs=-9999;
isize=0;
jsize=0; ke_list=0
230 if(present(update_id)) then
231 MPP_START_UPDATE_DOMAINS_3D_ = update_id
233 MPP_START_UPDATE_DOMAINS_3D_ = 0
238 end function MPP_START_UPDATE_DOMAINS_3D_
!> Rank-4 entry point: aliases the 4-D field as a 3-D array (levels
!> folded into the third dimension) via Cray pointer ptr/field3D and
!> forwards to the generic mpp_start_update_domains, returning its id.
!> NOTE(review): extraction damage -- the MPP_TYPE_ field/field3D
!> declarations and ptr assignment are missing; restore from upstream.
240 !
########################################################################################## 241 function MPP_START_UPDATE_DOMAINS_4D_(
field, domain, flags, position, &
242 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
243 type(domain2D), intent(inout) :: domain
245 integer, intent(in), optional :: flags
246 integer, intent(in), optional :: position
247 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
248 character(
len=*), intent(in), optional ::
name 249 integer, intent(in), optional :: tile_count
250 integer, intent(in), optional :: update_id
251 logical, intent(in), optional :: complete
252 integer :: MPP_START_UPDATE_DOMAINS_4D_
255 pointer( ptr, field3D )
258 MPP_START_UPDATE_DOMAINS_4D_ = mpp_start_update_domains(field3D, domain, flags, position, &
259 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete)
262 end function MPP_START_UPDATE_DOMAINS_4D_
!> Rank-5 entry point: aliases the 5-D field as a 3-D array via Cray
!> pointer ptr/field3D and forwards to the generic
!> mpp_start_update_domains, returning its nonblocking update id.
!> NOTE(review): extraction damage -- the MPP_TYPE_ field/field3D
!> declarations and ptr assignment are missing; restore from upstream.
264 !##########################################################################################
265 function MPP_START_UPDATE_DOMAINS_5D_(
field, domain, flags, position, &
266 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete)
267 type(domain2D), intent(inout) :: domain
269 integer, intent(in), optional :: flags
270 integer, intent(in), optional :: position
271 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
272 character(
len=*), intent(in), optional ::
name 273 integer, intent(in), optional :: tile_count
274 integer, intent(in), optional :: update_id
275 logical, intent(in), optional :: complete
276 integer :: MPP_START_UPDATE_DOMAINS_5D_
279 pointer( ptr, field3D )
282 MPP_START_UPDATE_DOMAINS_5D_ = mpp_start_update_domains(field3D, domain, flags, position, &
283 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
286 end function MPP_START_UPDATE_DOMAINS_5D_
!> Rank-2 completion wrapper: aliases the 2-D field as 3-D via Cray
!> pointer ptr/field3D and forwards id_update plus all arguments to the
!> generic mpp_complete_update_domains, which waits on the nonblocking
!> halo exchange started earlier.
!> NOTE(review): extraction damage -- the MPP_TYPE_ field/field3D
!> declarations and ptr assignment are missing; restore from upstream.
288 !##################################################################################
289 subroutine MPP_COMPLETE_UPDATE_DOMAINS_2D_( id_update,
field, domain, flags, position, &
290 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
291 integer, intent(in) :: id_update
292 type(domain2D), intent(inout) :: domain
294 integer, intent(in), optional :: flags
295 integer, intent(in), optional :: position
296 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
297 character(
len=*), intent(in), optional ::
name 298 integer, intent(in), optional :: tile_count
299 logical, intent(in), optional :: complete
302 pointer( ptr, field3D )
304 call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
305 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
307 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_2D_
!> Core scalar completion routine: finishes the nonblocking halo update
!> identified by id_update. Visible logic: re-validates optional halo
!> widths against the domain, re-derives the update position, buffers
!> field addresses/ke extents per tile, verifies that address, halo
!> widths, position, flags and field count all match what was recorded
!> in nonblock_data(id_update) at start time, then waits on the
!> exchange via mpp_complete_do_update and resets the group buffers.
!> NOTE(review): extraction damage -- declaration lines (field, f_addrs,
!> text, d_type, update_position, ...), end-ifs, and several condition
!> lines are missing; restore from upstream FMS before compiling.
309 !##################################################################################
310 subroutine MPP_COMPLETE_UPDATE_DOMAINS_3D_( id_update,
field, domain, flags, position, &
311 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
312 integer, intent(in) :: id_update
313 type(domain2D), intent(inout) :: domain
315 integer, intent(in), optional :: flags
316 integer, intent(in), optional :: position
317 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
318 character(
len=*), intent(in), optional ::
name 319 integer, intent(in), optional :: tile_count
320 logical, intent(in), optional :: complete
323 integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
325 type(overlapSpec), pointer :: update =>
NULL()
327 logical :: is_complete
330 integer, save :: list=0, l_size=0
331 integer, save :: ke_list(MAX_DOMAIN_FIELDS, MAX_TILES)=0
! --- validate requested halo widths against the domain definition ---
336 if(present(whalo)) then
338 if(abs(update_whalo) > domain%whalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 339 "optional argument whalo should not be larger than the whalo when define domain.")
341 update_whalo = domain%whalo
343 if(present(ehalo)) then
345 if(abs(update_ehalo) > domain%ehalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 346 "optional argument ehalo should not be larger than the ehalo when define domain.")
348 update_ehalo = domain%ehalo
350 if(present(shalo)) then
352 if(abs(update_shalo) > domain%shalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 353 "optional argument shalo should not be larger than the shalo when define domain.")
355 update_shalo = domain%shalo
357 if(present(nhalo)) then
359 if(abs(update_nhalo) > domain%nhalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 360 "optional argument nhalo should not be larger than the nhalo when define domain.")
362 update_nhalo = domain%nhalo
365 update_position = CENTER
366 if(present(position)) update_position = position
370 max_ntile = domain%max_ntile_pe
371 ntile =
size(domain%x(:))
373 if(PRESENT(complete)) then
374 is_complete = complete
379 if(ntile>MAX_TILES) then
380 write(
text,
'(i2)' ) MAX_TILES
381 call
mpp_error(FATAL,
'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_TILES=' 383 if(.NOT. present(tile_count) ) call
mpp_error(FATAL,
"MPP_UPDATE_3D: " 384 "optional argument tile_count should be present when number of tiles on this pe is more than 1")
387 do_update = (
tile == ntile) .AND. is_complete
389 if(list > MAX_DOMAIN_FIELDS)then
390 write(
text,
'(i2)' ) MAX_DOMAIN_FIELDS
391 call
mpp_error(FATAL,
'MPP_COMPLETE_UPDATE_DOMAINS_3D: MAX_DOMAIN_FIELDS=' 394 !-- make sure the f_addrs match the
one at mpp_start_update_domains
397 call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 398 "mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
404 !
check to make sure the consistency of halo
size, position and flags.
406 "mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
407 if(
nonblock_data(id_update)%update_whalo .NE. update_whalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 408 "mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
409 if(
nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 410 "mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
411 if(
nonblock_data(id_update)%update_shalo .NE. update_shalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 412 "mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
413 if(
nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 414 "mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
415 if(
nonblock_data(id_update)%update_position .NE. update_position ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D: " 416 "mismatch of optional argument position between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
425 "mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
! --- wait on the exchange, then reset per-group bookkeeping ---
427 if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) ) then
428 update => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, update_position)
429 ke_max = maxval(ke_list(1:l_size,1:ntile))
430 call mpp_complete_do_update(id_update, f_addrs(1:l_size,1:ntile), domain, update, d_type, &
435 l_size=0; f_addrs=-9999; ke_list=0
436 !--- For the
last call of mpp_complete_update_domains
447 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_3D_
!> Rank-4 completion wrapper: aliases the 4-D field as 3-D via Cray
!> pointer ptr/field3D and forwards to the generic
!> mpp_complete_update_domains.
!> NOTE(review): extraction damage -- the MPP_TYPE_ field/field3D
!> declarations and ptr assignment are missing; restore from upstream.
449 !##################################################################################
450 subroutine MPP_COMPLETE_UPDATE_DOMAINS_4D_( id_update,
field, domain, flags, position, &
451 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
452 integer, intent(in) :: id_update
453 type(domain2D), intent(inout) :: domain
455 integer, intent(in), optional :: flags
456 integer, intent(in), optional :: position
457 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
458 character(
len=*), intent(in), optional ::
name 459 integer, intent(in), optional :: tile_count
460 logical, intent(in), optional :: complete
463 pointer( ptr, field3D )
465 call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
466 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
468 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_4D_
!> Rank-5 completion wrapper: aliases the 5-D field as 3-D via Cray
!> pointer ptr/field3D and forwards to the generic
!> mpp_complete_update_domains.
!> NOTE(review): extraction damage -- the MPP_TYPE_ field/field3D
!> declarations and ptr assignment are missing; restore from upstream.
470 !##################################################################################
471 subroutine MPP_COMPLETE_UPDATE_DOMAINS_5D_( id_update,
field, domain, flags, position, &
472 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
473 integer, intent(in) :: id_update
474 type(domain2D), intent(inout) :: domain
476 integer, intent(in), optional :: flags
477 integer, intent(in), optional :: position
478 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
479 character(
len=*), intent(in), optional ::
name 480 integer, intent(in), optional :: tile_count
481 logical, intent(in), optional :: complete
484 pointer( ptr, field3D )
486 call mpp_complete_update_domains(id_update, field3D, domain, flags, position, &
487 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
489 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_5D_
!> Rank-2 vector entry point: aliases the (u,v) component pair as 3-D
!> arrays via Cray pointers ptrx/ptry and forwards to the generic
!> vector mpp_start_update_domains, returning its nonblocking id.
!> NOTE(review): extraction damage -- the field3Dx/field3Dy MPP_TYPE_
!> declarations and pointer assignments are missing; restore upstream.
492 function MPP_START_UPDATE_DOMAINS_2D_V_( fieldx, fieldy, domain, flags, gridtype, &
493 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
494 !updates data domain of 3D
field whose computational domains have been computed
495 MPP_TYPE_, intent(inout) :: fieldx(:,:), fieldy(:,:)
496 type(domain2D), intent(inout) :: domain
497 integer, intent(in), optional :: flags, gridtype
498 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
499 character(
len=*), intent(in), optional ::
name 500 integer, intent(in), optional :: tile_count
501 integer, intent(in), optional :: update_id
502 logical, intent(in), optional :: complete
503 integer :: MPP_START_UPDATE_DOMAINS_2D_V_
506 pointer( ptrx, field3Dx )
507 pointer( ptry, field3Dy )
511 MPP_START_UPDATE_DOMAINS_2D_V_ = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
512 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
516 end function MPP_START_UPDATE_DOMAINS_2D_V_
!> Core vector routine: starts a nonblocking halo update for a (u,v)
!> field pair on a staggered grid and returns the update id. Visible
!> logic: validates optional halo widths, defaults gridtype to AGRID,
!> rejects a north-fold/offset combination, buffers both component
!> addresses and ke extents per tile, checks size/halo/gridtype
!> consistency across the group, optionally reuses a caller-supplied
!> update_id (verifying stored halo/gridtype settings), maps
!> DGRID_NE/DGRID_SW to the corresponding CGRID types with exchange_uv,
!> resolves per-component overlap specs (position_x/position_y), and
!> posts communication via mpp_start_do_update -- with updatex/updatey
!> swapped when exchange_uv is set.
!> NOTE(review): extraction damage -- many declaration, end-if and
!> condition lines are missing; restore from upstream FMS before
!> compiling. The runtime string typo 'mpp_complte_UPDATE_DOMAINS_V' is
!> left untouched.
518 !###################################################################################
519 function MPP_START_UPDATE_DOMAINS_3D_V_( fieldx, fieldy, domain, flags, gridtype, &
520 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
521 !updates data domain of 3D
field whose computational domains have been computed
522 MPP_TYPE_, intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
523 type(domain2D), intent(inout) :: domain
524 integer, intent(in), optional :: flags, gridtype
525 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
526 character(
len=*), intent(in), optional ::
name 527 integer, intent(in), optional :: tile_count
528 integer, intent(in), optional :: update_id
529 logical, intent(in), optional :: complete
531 integer :: MPP_START_UPDATE_DOMAINS_3D_V_
532 integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
534 logical :: do_update, is_complete, set_mismatch
536 logical :: exchange_uv, reuse_id_update
538 integer, save :: whalosz, ehalosz, shalosz, nhalosz
540 integer, save :: ke_list (MAX_DOMAIN_FIELDS, MAX_TILES)=0
543 type(overlapSpec), pointer :: updatex =>
NULL()
544 type(overlapSpec), pointer :: updatey =>
NULL()
! --- validate requested halo widths against the domain definition ---
550 if(present(whalo)) then
552 if(abs(update_whalo) > domain%whalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 553 "optional argument whalo should not be larger than the whalo when define domain.")
555 update_whalo = domain%whalo
557 if(present(ehalo)) then
559 if(abs(update_ehalo) > domain%ehalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 560 "optional argument ehalo should not be larger than the ehalo when define domain.")
562 update_ehalo = domain%ehalo
564 if(present(shalo)) then
566 if(abs(update_shalo) > domain%shalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 567 "optional argument shalo should not be larger than the shalo when define domain.")
569 update_shalo = domain%shalo
571 if(present(nhalo)) then
573 if(abs(update_nhalo) > domain%nhalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 574 "optional argument nhalo should not be larger than the nhalo when define domain.")
576 update_nhalo = domain%nhalo
579 grid_offset_type = AGRID
580 if( PRESENT(gridtype) ) grid_offset_type = gridtype
583 if( PRESENT(flags) ) then
585 ! The following
test is so that SCALAR_PAIR can be used alone with the
586 ! same default update pattern as without.
594 if( BTEST(
update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(grid_offset_type,SOUTH) ) &
595 call
mpp_error( FATAL,
'MPP_START_UPDATE_DOMAINS_V: Incompatible grid offset and fold.' )
597 max_ntile = domain%max_ntile_pe
598 ntile =
size(domain%x(:))
601 if(PRESENT(complete)) then
602 is_complete = complete
607 if(ntile>MAX_TILES) then
608 write(
text,
'(i2)' ) MAX_TILES
609 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS_V: MAX_TILES=' 611 if(.NOT. present(tile_count) ) call
mpp_error(FATAL,
"MPP_UPDATE_3D_V: " 612 "optional argument tile_count should be present when number of tiles on some pe is more than 1")
616 do_update = (
tile == ntile) .AND. is_complete
618 if(list > MAX_DOMAIN_FIELDS)then
619 write(
text,
'(i2)' ) MAX_DOMAIN_FIELDS
620 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS_V: MAX_DOMAIN_FIELDS=' 623 f_addrsx(list,
tile) = LOC(fieldx)
624 f_addrsy(list,
tile) = LOC(fieldy)
631 call
mpp_error(FATAL,
'MPP_START_UPDATE_DOMAINS_V is called again before calling ' 632 'mpp_complte_UPDATE_DOMAINS_V for field ' 638 ke_list(list,
tile) =
size(fieldx,3)
640 if(list == 1 .AND.
tile == 1)then
643 offset_type = grid_offset_type
644 whalosz = update_whalo; ehalosz = update_ehalo; shalosz = update_shalo; nhalosz = update_nhalo
! --- later fields must match the first field's saved geometry ---
646 set_mismatch = .
false.
647 set_mismatch = set_mismatch .OR. (
isize(1) /=
size(fieldx,1))
648 set_mismatch = set_mismatch .OR. (
jsize(1) /=
size(fieldx,2))
649 set_mismatch = set_mismatch .OR. (
isize(2) /=
size(fieldy,1))
650 set_mismatch = set_mismatch .OR. (
jsize(2) /=
size(fieldy,2))
651 set_mismatch = set_mismatch .OR. (grid_offset_type /= offset_type)
652 set_mismatch = set_mismatch .OR. (update_whalo /= whalosz)
653 set_mismatch = set_mismatch .OR. (update_ehalo /= ehalosz)
654 set_mismatch = set_mismatch .OR. (update_shalo /= shalosz)
655 set_mismatch = set_mismatch .OR. (update_nhalo /= nhalosz)
657 write(
text,'(i2)' ) list
658 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: Incompatible
field at count '
667 " can
not be called in the middle of mpp_start_group_update/mpp_complete_group_update call")
! --- reuse a caller-supplied update id after validating its settings ---
669 if( PRESENT(update_id) ) then
670 reuse_id_update = .
true.
671 if( update_id < 1 .OR. update_id > MAX_NONBLOCK_UPDATE ) then
672 write(
text,'(
a,i8,
a,i8)' ) 'optional argument update_id =', update_id, &
673 '
is less than 1 or greater than MAX_NONBLOCK_UPDATE =', MAX_NONBLOCK_UPDATE
674 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: '
676 current_id = update_id
677 !--- when reuse the update_id, make sure update_flag, halo
size and update_position are still the same
679 nonblock_data(current_id)%update_whalo .NE. update_whalo .OR. &
680 nonblock_data(current_id)%update_ehalo .NE. update_ehalo .OR. &
681 nonblock_data(current_id)%update_shalo .NE. update_shalo .OR. &
682 nonblock_data(current_id)%update_nhalo .NE. update_nhalo .OR. &
683 nonblock_data(current_id)%update_gridtype .NE. grid_offset_type ) then
684 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: mismatch for optional argument for
field '
687 reuse_id_update = .
false.
692 call
mpp_error(FATAL,'MPP_START_UPDATE_DOMAINS_V: '
703 nonblock_data(current_id)%field_addrs(1:l_size) = f_addrsx(1:l_size,1)
704 nonblock_data(current_id)%field_addrs2(1:l_size) = f_addrsy(1:l_size,1)
705 MPP_START_UPDATE_DOMAINS_3D_V_ = current_id
706 if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
! D-grid components are exchanged as C-grid with u/v swapped.
707 exchange_uv = .
false.
708 if(grid_offset_type == DGRID_NE) then
710 grid_offset_type = CGRID_NE
711 else
if( grid_offset_type == DGRID_SW ) then
713 grid_offset_type = CGRID_SW
716 select
case(grid_offset_type)
720 case (BGRID_NE, BGRID_SW)
723 case (CGRID_NE, CGRID_SW)
727 call
mpp_error(FATAL, "mpp_update_domains2D_nonblock.h: invalid value of grid_offset_type")
729 updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
730 updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
732 ke_max = maxval(ke_list(1:l_size,1:ntile))
734 call mpp_start_do_update(current_id, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
735 updatey, updatex, d_type, ke_max, ke_list(1:l_size,1:ntile), grid_offset_type, &
738 call mpp_start_do_update(current_id, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
739 updatex, updatey, d_type, ke_max, ke_list(1:l_size,1:ntile), grid_offset_type, &
! --- reset the per-group buffers once the update has been posted ---
743 l_size=0; f_addrsx=-9999; f_addrsy=-9999;
isize=0;
jsize=0; ke_list=0
745 if(present(update_id)) then
746 MPP_START_UPDATE_DOMAINS_3D_V_ = update_id
748 MPP_START_UPDATE_DOMAINS_3D_V_ = 0
754 end function MPP_START_UPDATE_DOMAINS_3D_V_
!> Rank-4 vector entry point: aliases the (u,v) pair as 3-D arrays via
!> Cray pointers ptrx/ptry and forwards to the generic vector
!> mpp_start_update_domains, returning its nonblocking update id.
!> NOTE(review): extraction damage -- the field3Dx/field3Dy MPP_TYPE_
!> declarations and pointer assignments are missing; restore upstream.
756 function MPP_START_UPDATE_DOMAINS_4D_V_( fieldx, fieldy, domain, flags, gridtype, &
757 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
758 !updates data domain of 3D
field whose computational domains have been computed
759 MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
760 type(domain2D), intent(inout) :: domain
761 integer, intent(in), optional :: flags, gridtype
762 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
763 character(
len=*), intent(in), optional ::
name 764 integer, intent(in), optional :: tile_count
765 integer, intent(in), optional :: update_id
766 logical, intent(in), optional :: complete
767 integer :: MPP_START_UPDATE_DOMAINS_4D_V_
770 pointer( ptrx, field3Dx )
771 pointer( ptry, field3Dy )
775 MPP_START_UPDATE_DOMAINS_4D_V_ = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
776 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
780 end function MPP_START_UPDATE_DOMAINS_4D_V_
!> Rank-5 vector entry point: aliases the (u,v) pair as 3-D arrays via
!> Cray pointers ptrx/ptry and forwards to the generic vector
!> mpp_start_update_domains, returning its nonblocking update id.
!> NOTE(review): extraction damage -- the field3Dx/field3Dy MPP_TYPE_
!> declarations and pointer assignments are missing; restore upstream.
782 function MPP_START_UPDATE_DOMAINS_5D_V_( fieldx, fieldy, domain, flags, gridtype, &
783 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
784 !updates data domain of 3D
field whose computational domains have been computed
785 MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
786 type(domain2D), intent(inout) :: domain
787 integer, intent(in), optional :: flags, gridtype
788 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
789 character(
len=*), intent(in), optional ::
name 790 integer, intent(in), optional :: tile_count
791 integer, intent(in), optional :: update_id
792 logical, intent(in), optional :: complete
793 integer :: MPP_START_UPDATE_DOMAINS_5D_V_
796 pointer( ptrx, field3Dx )
797 pointer( ptry, field3Dy )
801 MPP_START_UPDATE_DOMAINS_5D_V_ = mpp_start_update_domains(field3Dx, field3Dy, domain, flags, gridtype, &
802 whalo, ehalo, shalo, nhalo,
name, tile_count, update_id, complete )
806 end function MPP_START_UPDATE_DOMAINS_5D_V_
!> Rank-2 vector completion wrapper: aliases the (u,v) pair as 3-D via
!> Cray pointers ptrx/ptry and forwards id_update plus all arguments to
!> the generic vector mpp_complete_update_domains.
!> NOTE(review): extraction damage -- the field3Dx/field3Dy MPP_TYPE_
!> declarations and pointer assignments are missing; restore upstream.
808 !
#################################################################################### 809 subroutine MPP_COMPLETE_UPDATE_DOMAINS_2D_V_( id_update, fieldx, fieldy, domain, flags, gridtype, &
810 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
811 !updates data domain of 3D
field whose computational domains have been computed
812 integer, intent(in) :: id_update
813 MPP_TYPE_, intent(inout) :: fieldx(:,:), fieldy(:,:)
814 type(domain2D), intent(inout) :: domain
815 integer, intent(in), optional :: flags, gridtype
816 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
817 character(
len=*), intent(in), optional ::
name 818 integer, intent(in), optional :: tile_count
819 logical, intent(in), optional :: complete
823 pointer( ptrx, field3Dx )
824 pointer( ptry, field3Dy )
828 call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
829 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
833 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_2D_V_
!> Core vector completion routine: finishes the nonblocking (u,v) halo
!> update identified by id_update. Visible logic: re-validates optional
!> halo widths, re-derives grid_offset_type (default AGRID), verifies
!> that halo widths, flags and gridtype match the values recorded in
!> nonblock_data(id_update) at start time, checks that both component
!> addresses and the field count match the start call, re-resolves the
!> D-grid -> C-grid mapping with exchange_uv, looks up the per-component
!> overlap specs, waits on the exchange via mpp_complete_do_update
!> (updatex/updatey swapped when exchange_uv is set), and resets the
!> per-group bookkeeping.
!> NOTE(review): extraction damage -- many declaration, end-if and
!> condition lines are missing; restore from upstream FMS before
!> compiling. Note the error prefixes reuse the *_START_* name for the
!> halo-width checks (as in upstream).
835 !####################################################################################
836 subroutine MPP_COMPLETE_UPDATE_DOMAINS_3D_V_( id_update, fieldx, fieldy, domain, flags, gridtype, &
837 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
838 !updates data domain of 3D
field whose computational domains have been computed
839 integer, intent(in) :: id_update
840 MPP_TYPE_, intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
841 type(domain2D), intent(inout) :: domain
842 integer, intent(in), optional :: flags, gridtype
843 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
844 character(
len=*), intent(in), optional ::
name 845 integer, intent(in), optional :: tile_count
846 logical, intent(in), optional :: complete
848 integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
850 logical :: do_update, is_complete
852 logical :: exchange_uv
854 integer, save :: l_size=0, list=0
855 integer, save :: ke_list (MAX_DOMAIN_FIELDS, MAX_TILES)=0
858 type(overlapSpec), pointer :: updatex =>
NULL()
859 type(overlapSpec), pointer :: updatey =>
NULL()
! --- validate requested halo widths against the domain definition ---
862 if(present(whalo)) then
864 if(abs(update_whalo) > domain%whalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 865 "optional argument whalo should not be larger than the whalo when define domain.")
867 update_whalo = domain%whalo
869 if(present(ehalo)) then
871 if(abs(update_ehalo) > domain%ehalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 872 "optional argument ehalo should not be larger than the ehalo when define domain.")
874 update_ehalo = domain%ehalo
876 if(present(shalo)) then
878 if(abs(update_shalo) > domain%shalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 879 "optional argument shalo should not be larger than the shalo when define domain.")
881 update_shalo = domain%shalo
883 if(present(nhalo)) then
885 if(abs(update_nhalo) > domain%nhalo ) call
mpp_error(FATAL,
"MPP_START_UPDATE_DOMAINS_3D_V: " 886 "optional argument nhalo should not be larger than the nhalo when define domain.")
888 update_nhalo = domain%nhalo
891 grid_offset_type = AGRID
892 if( PRESENT(gridtype) ) grid_offset_type = gridtype
895 if( PRESENT(flags) ) then
897 ! The following
test is so that SCALAR_PAIR can be used alone with the
898 ! same default update pattern as without.
906 !
check to make sure the consistency of halo
size, position and flags.
908 "mismatch of optional argument flag between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
909 if(
nonblock_data(id_update)%update_whalo .NE. update_whalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D_V: " 910 "mismatch of optional argument whalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
911 if(
nonblock_data(id_update)%update_ehalo .NE. update_ehalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D_V: " 912 "mismatch of optional argument ehalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
913 if(
nonblock_data(id_update)%update_shalo .NE. update_shalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D_V: " 914 "mismatch of optional argument shalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
915 if(
nonblock_data(id_update)%update_nhalo .NE. update_nhalo ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D_V: " 916 "mismatch of optional argument nhalo between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
917 if(
nonblock_data(id_update)%update_gridtype .NE. grid_offset_type ) call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_3D_V: " 918 "mismatch of optional argument gridtype between MPP_COMPLETE_UPDATE_DOMAINS and MPP_START_UPDATE_DOMAINS")
920 max_ntile = domain%max_ntile_pe
921 ntile =
size(domain%x(:))
924 if(PRESENT(complete)) then
925 is_complete = complete
930 if(ntile>MAX_TILES) then
931 write(
text,
'(i2)' ) MAX_TILES
932 call
mpp_error(FATAL,
'MPP_UPDATE_3D_V: MAX_TILES=' 934 if(.NOT. present(tile_count) ) call
mpp_error(FATAL,
"MPP_UPDATE_3D_V: " 935 "optional argument tile_count should be present when number of tiles on some pe is more than 1")
939 do_update = (
tile == ntile) .AND. is_complete
941 if(list > MAX_DOMAIN_FIELDS)then
942 write(
text,
'(i2)' ) MAX_DOMAIN_FIELDS
943 call
mpp_error(FATAL,
'MPP_UPDATE_3D_V: MAX_DOMAIN_FIELDS=' 946 f_addrsx(list,
tile) = LOC(fieldx)
947 f_addrsy(list,
tile) = LOC(fieldy)
948 !-- make sure the f_addrs match the
one at mpp_start_update_domains
952 call
mpp_error(FATAL,
"MPP_COMPLETE_UPDATE_DOMAINS_V: " 953 "mismatch of address between mpp_start_update_domains and mpp_complete_update_domains")
957 ke_list(list,
tile) =
size(fieldx,3)
965 "mismatch of number of fields between mpp_start_update_domains and mpp_complete_update_domains")
967 if( domain_update_is_needed(domain, update_whalo, update_ehalo, update_shalo, update_nhalo) )then
! D-grid components are exchanged as C-grid with u/v swapped.
968 exchange_uv = .
false.
969 if(grid_offset_type == DGRID_NE) then
971 grid_offset_type = CGRID_NE
972 else
if( grid_offset_type == DGRID_SW ) then
974 grid_offset_type = CGRID_SW
977 select
case(grid_offset_type)
981 case (BGRID_NE, BGRID_SW)
984 case (CGRID_NE, CGRID_SW)
988 call
mpp_error(FATAL,
"mpp_update_domains2D.h: invalid value of grid_offset_type")
990 updatex => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_x)
991 updatey => search_update_overlap(domain, update_whalo, update_ehalo, update_shalo, update_nhalo, position_y)
993 ke_max = maxval(ke_list(1:l_size,1:ntile))
995 call mpp_complete_do_update(id_update, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
996 updatey, updatex, d_type, ke_max, ke_list(1:l_size,1:ntile), &
999 call mpp_complete_do_update(id_update, f_addrsx(1:l_size,1:ntile), f_addrsy(1:l_size,1:ntile), domain, &
1000 updatex, updatey, d_type, ke_max, ke_list(1:l_size,1:ntile), &
! --- reset the per-group buffers once the exchange has completed ---
1007 l_size=0; f_addrsx=-9999; f_addrsy=-9999; ke_list=0
1008 !--- For the
last call of mpp_complete_update_domains
1020 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_3D_V_
1022 !####################################################################################
1023 subroutine MPP_COMPLETE_UPDATE_DOMAINS_4D_V_( id_update, fieldx, fieldy, domain, flags, gridtype, &
1024 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
1025 !updates data domain of 3D
field whose computational domains have been computed
1026 integer, intent(in) :: id_update
1027 MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
1028 type(domain2D), intent(inout) :: domain
1029 integer, intent(in), optional :: flags, gridtype
1030 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
1031 character(
len=*), intent(in), optional ::
name 1032 integer, intent(in), optional :: tile_count
1033 logical, intent(in), optional :: complete
1037 pointer( ptrx, field3Dx )
1038 pointer( ptry, field3Dy )
1042 call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
1043 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
1047 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_4D_V_
1049 !####################################################################################
1050 subroutine MPP_COMPLETE_UPDATE_DOMAINS_5D_V_( id_update, fieldx, fieldy, domain, flags, gridtype, &
1051 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
1052 !updates data domain of 3D
field whose computational domains have been computed
1053 integer, intent(in) :: id_update
1054 MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
1055 type(domain2D), intent(inout) :: domain
1056 integer, intent(in), optional :: flags, gridtype
1057 integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
1058 character(
len=*), intent(in), optional ::
name 1059 integer, intent(in), optional :: tile_count
1060 logical, intent(in), optional :: complete
1064 pointer( ptrx, field3Dx )
1065 pointer( ptry, field3Dy )
1069 call mpp_complete_update_domains(id_update, field3Dx, field3Dy, domain, flags, gridtype, &
1070 whalo, ehalo, shalo, nhalo,
name, tile_count, complete )
1074 end subroutine MPP_COMPLETE_UPDATE_DOMAINS_5D_V_
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version will
type(ext_fieldtype), dimension(:), pointer, save, private field
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, 
position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz l_size integer
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If not
subroutine, public copy(self, rhs)
integer nonblock_buffer_pos
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT end
integer(long), parameter true
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************! this routine is used to retrieve scalar boundary data for symmetric domain. subroutine MPP_GET_BOUNDARY_2D_(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &position, complete, tile_count) type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::field(:,:) MPP_TYPE_, intent(inout), optional ::ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:) integer, intent(in), optional ::flags, position, tile_count logical, intent(in), optional ::complete MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_, allocatable, dimension(:,:) ::ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D integer ::xcount, ycount integer ::ntile logical ::need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer integer(LONG_KIND), dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save ::f_addrs=-9999 integer(LONG_KIND), dimension(4, MAX_DOMAIN_FIELDS, MAX_TILES), save ::b_addrs=-9999 integer, save ::bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags integer ::buffer_size(4) integer ::max_ntile, tile, update_position, ishift, jshift logical ::do_update, is_complete, set_mismatch character(len=3) ::text MPP_TYPE_ ::d_type type(overlapSpec), pointer ::bound=> NULL() ntile
character(len=max_len_name), dimension(max_num_field) field_name
integer(long), parameter false
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible MPP_TYPE_
integer current_id_update
l_size ! loop over number of fields ke do je do ie to je n if(.NOT. d_comm%R_do_buf(list)) cycle from_pe
character(len=128) version
l_size ! loop over number of fields ke do je do ie to is
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_GLOBAL_FIELD_2D_(domain, local, global, flags, position, tile_count, default_data) type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::local(:,:) MPP_TYPE_, intent(out) ::global(:,:) integer, intent(in), optional ::flags integer, intent(in), optional ::position integer, intent(in), optional ::tile_count MPP_TYPE_, intent(in), optional ::default_data MPP_TYPE_ ::local3D(size(local, 1), size(local, 2), 1) MPP_TYPE_ ::global3D(size(global, 1), size(global, 2), 1) pointer(lptr, local3D) pointer(gptr, global3D) lptr=LOC(local) gptr=LOC(global) call mpp_global_field(domain, local3D, global3D, flags, position, tile_count, default_data) end subroutine MPP_GLOBAL_FIELD_2D_ subroutine MPP_GLOBAL_FIELD_3D_(domain, local, global, flags, position, tile_count, default_data)!get a global field from a local field!local field may be on compute OR data domain type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::local(:,:,:) MPP_TYPE_, intent(out) ::global(:,:,:) integer, intent(in), optional ::flags integer, intent(in), optional ::position integer, intent(in), optional ::tile_count MPP_TYPE_, intent(in), optional ::default_data integer ::ishift, jshift integer ::tile integer ::isize, jsize tile=1;if(PRESENT(tile_count)) tile=tile_count call mpp_get_domain_shift(domain, ishift, jshift, position) ! The alltoallw method requires that local and global be contiguous. ! We presume that `local` is contiguous if it matches the data domain;! 
`global` is presumed to always be contiguous. ! Ideally we would use the F2015 function IS_CONTIGUOUS() to validate ! contiguity, but it is not yet suppored in many compilers. ! Also worth noting that many of the nD-> conversion also assumes so there many be other issues here isize
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call mpp_error(FATAL, 'MPP_TRANSMIT:you cannot transmit to ANY_PE using MPI.') else if(to_pe.NE.NULL_PE) then !no other valid cases except NULL_PE call mpp_error(FATAL
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! 
Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this case
real(double), parameter one
logical function received(this, seqno)
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, 
position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_flags
type(tms), dimension(nblks), private last
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! 
Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this ! since that attempts to gather all data on PE size(field%axes(:)) axsiz(i)
! NOTE(review): extraction-garbled copy of subroutine MPP_TRANSMIT_, a
! message-passing primitive reminiscent of both MPI and SHMEM: each call
! puts put_data to to_pe and gets get_data from from_pe.  The license
! header lost its '!' comment markers, and the code between the
! "truly non-blocking send" comment and 'end subroutine MPP_TRANSMIT_'
! (the actual send/receive calls) was destroyed -- what remains there is
! comment text fused with stray identifiers.  Restore this region from the
! upstream source rather than editing it in place.
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible intent(inout) a
! Module-level pool of per-update bookkeeping for the non-blocking
! (start/complete) domain-update interface; presumably allocated during
! mpp_domains initialization -- TODO confirm against upstream source.
! Repair: the '::' separator was missing from the garbled fragment; it is
! required when attributes (dimension, allocatable) are present.
type(nonblock_type), dimension(:), allocatable :: nonblock_data
! -*-f90-*-
!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************

!> 2D wrapper for mpp_update_nest_fine: promotes the field and the four
!> halo buffers to rank-3 views (trailing extent 1) via Cray pointers and
!> forwards everything to the 3D implementation.  No data is copied.
subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
                                    flags, complete, position, extra_halo, name, tile_count)
  MPP_TYPE_,              intent(in)    :: field(:,:)    !< fine-grid field to update
  type(nest_domain_type), intent(inout) :: nest_domain   !< coarse/fine nesting descriptor
  MPP_TYPE_,              intent(inout) :: wbuffer(:,:)  !< west  halo buffer
  MPP_TYPE_,              intent(inout) :: ebuffer(:,:)  !< east  halo buffer
  MPP_TYPE_,              intent(inout) :: sbuffer(:,:)  !< south halo buffer
  MPP_TYPE_,              intent(inout) :: nbuffer(:,:)  !< north halo buffer
  integer,          intent(in), optional :: flags
  logical,          intent(in), optional :: complete
  integer,          intent(in), optional :: position
  integer,          intent(in), optional :: extra_halo
  character(len=*), intent(in), optional :: name
  integer,          intent(in), optional :: tile_count

  ! Rank-3 aliases of the rank-2 actuals; the trailing dimension of 1 lets
  ! the 3D routine treat a 2D field as a single-level 3D field.
  MPP_TYPE_ :: field3D(size(field, 1), size(field, 2), 1)
  MPP_TYPE_ :: wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1)
  MPP_TYPE_ :: ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1)
  MPP_TYPE_ :: sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1)
  MPP_TYPE_ :: nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1)
  pointer(ptr,   field3D)
  pointer(ptr_w, wbuffer3D)
  pointer(ptr_e, ebuffer3D)
  pointer(ptr_s, sbuffer3D)
  pointer(ptr_n, nbuffer3D)

  ptr   = LOC(field)
  ptr_w = LOC(wbuffer)
  ptr_e = LOC(ebuffer)
  ptr_s = LOC(sbuffer)
  ptr_n = LOC(nbuffer)

  call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
                            flags, complete, position, extra_halo, name, tile_count)
  return
end subroutine MPP_UPDATE_NEST_FINE_2D_

! NOTE(review): the 3D entry point below lists its buffer dummies in the
! order (wbuffer, sbuffer, ebuffer, nbuffer), while the 2D wrapper above
! passes (w, e, s, n) positionally through the generic interface -- confirm
! against the generic declaration that east/south are not being swapped.
subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &
                                    flags, complete, &
! NOTE(review): extraction-garbled interior of subroutine
! MPP_UPDATE_NEST_FINE_3D_.  The dummy-argument declarations are readable,
! but everything after "update=>" collapsed into bare identifiers
! (set_mismatch, tile, update_position, ..., loop fragments); the pointer
! initialization, remaining locals, and the routine body were lost and
! cannot be reconstructed from this chunk.  Restore from upstream source.
position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz jsize
l_size ! loop over number of fields ke do je do ie pos
! NOTE(review): garbled fragment -- "subroutine, public" is not valid
! Fortran syntax.  This looks like a visibility statement (public :: some)
! fused with a subroutine header during extraction; recover the original
! two statements from upstream before editing.
subroutine, public some(xmap, some_arr, grid_id)
! Two-slot buffer of var_state_type (declared elsewhere in this module);
! presumably current/previous state -- TODO confirm against upstream source.
! Repair: the '::' separator was missing from the garbled fragment; it is
! required because the declaration carries a dimension attribute.
type(var_state_type), dimension(2) :: state
! Count of non-blocking group updates; incremented/decremented by code not
! visible in this chunk -- TODO confirm.  Declared with the modern '::'
! separator (behavior identical to the attribute-free old-style form).
integer :: num_nonblock_group_update
! NOTE(review): a second, equally garbled expansion of the MPP_TRANSMIT_
! template (these .inc/.h templates are expanded once per type/kind, so a
! near-identical copy appears earlier in this file).  As in that copy, the
! code between the "truly non-blocking send" comment and
! 'end subroutine MPP_TRANSMIT_' was destroyed during extraction.  Restore
! this region from the upstream source rather than editing it in place.
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! 
subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible intent(inout) MPP_BROADCAST begin