!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
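! Illustrative use of mpp_define_nest_domains (a sketch, not part of this file's code;
! the domain variables, tile numbers and index ranges below are hypothetical):
!
!   type(nest_domain_type) :: atm_nest
!   ! fine tile 2 covers coarse-grid cells (3:12, 4:13) of tile 1; with a refinement
!   ! of 3 the fine grid is 30x30 points, indexed (1:30, 1:30).
!   call mpp_define_nest_domains(atm_nest, domain_fine, domain_coarse,  &
!                                tile_fine=2, tile_coarse=1,            &
!                                istart_fine=1, iend_fine=30,           &
!                                jstart_fine=1, jend_fine=30,           &
!                                istart_coarse=3, iend_coarse=12,       &
!                                jstart_coarse=4, jend_coarse=13,       &
!                                extra_halo=0, name="atm_nest")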
!#############################################################################
! Currently the contact will be limited to overlap contact.
subroutine mpp_define_nest_domains(nest_domain, domain_fine, domain_coarse, tile_fine, tile_coarse, &
                                   istart_fine, iend_fine, jstart_fine, jend_fine,                  &
                                   istart_coarse, iend_coarse, jstart_coarse, jend_coarse,          &
                                   extra_halo, name)
  type(nest_domain_type),     intent(inout) :: nest_domain
  type(domain2D), target,     intent(in   ) :: domain_fine, domain_coarse
  integer,                    intent(in   ) :: tile_fine, tile_coarse
  integer,                    intent(in   ) :: istart_fine, iend_fine, jstart_fine, jend_fine
  integer,                    intent(in   ) :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
  integer,          optional, intent(in   ) :: extra_halo
  character(len=*), optional, intent(in   ) :: name

  !--- local variables (declarations restored from the usage below)
  integer              :: n, npes, npes_fine, npes_coarse
  integer              :: nx_coarse, ny_coarse, nx_fine, ny_fine
  integer              :: x_refine, y_refine, extra_halo_local
  integer, allocatable :: pes(:), pes_fine(:), pes_coarse(:)

  if(present(name)) then
     if(len_trim(name) > NAME_LENGTH) then
        call mpp_error(FATAL, "mpp_domains_define.inc(mpp_define_nest_domain): "// &
             "the len_trim of optional argument name ="//trim(name)// &
             " is greater than NAME_LENGTH, change the argument name or increase NAME_LENGTH")
     endif
     nest_domain%name = name
  endif

  extra_halo_local = 0
  if(present(extra_halo)) then
     if(extra_halo .NE. 0) call mpp_error(FATAL, &
          "mpp_define_nest_domains.inc: only support extra_halo=0, contact developer")
     extra_halo_local = extra_halo
  endif
  nest_domain%tile_fine     = tile_fine
  nest_domain%tile_coarse   = tile_coarse
  nest_domain%istart_fine   = istart_fine
  nest_domain%iend_fine     = iend_fine
  nest_domain%jstart_fine   = jstart_fine
  nest_domain%jend_fine     = jend_fine
  nest_domain%istart_coarse = istart_coarse
  nest_domain%iend_coarse   = iend_coarse
  nest_domain%jstart_coarse = jstart_coarse
  nest_domain%jend_coarse   = jend_coarse

  ! since it is overlap contact, ie_fine > is_fine, je_fine > js_fine
  ! and ie_coarse > is_coarse, je_coarse > js_coarse
  ! if( tile_fine .NE. 1 ) call mpp_error(FATAL, "mpp_define_nest_domains.inc: only support tile_fine = 1, contact developer")
  if( iend_fine .LE. istart_fine .OR. jend_fine .LE. jstart_fine ) then
     call mpp_error(FATAL, "mpp_define_nest_domains.inc: ie_fine <= is_fine or je_fine <= js_fine")
  endif
  if( iend_coarse .LE. istart_coarse .OR. jend_coarse .LE. jstart_coarse ) then
     call mpp_error(FATAL, "mpp_define_nest_domains.inc: ie_coarse <= is_coarse or je_coarse <= js_coarse")
  endif

  npes = mpp_npes()
  allocate( pes(mpp_npes()) )
  call mpp_get_current_pelist(pes)

  npes_coarse = size(domain_coarse%list(:))
  npes_fine   = size(domain_fine%list(:))
  !--- pes_fine and pes_coarse should be subsets of pelist
  allocate( pes_coarse(npes_coarse) )
  allocate( pes_fine  (npes_fine  ) )
  do n = 1, npes_coarse
     pes_coarse(n) = domain_coarse%list(n-1)%pe
     if( .NOT. ANY(pes(:) == pes_coarse(n)) ) then
        call mpp_error(FATAL, "mpp_domains_define.inc: pelist_coarse is not subset of pelist")
     endif
  enddo
  do n = 1, npes_fine
     pes_fine(n) = domain_fine%list(n-1)%pe
     if( .NOT. ANY(pes(:) == pes_fine(n)) ) then
        call mpp_error(FATAL, "mpp_domains_define.inc: pelist_fine is not subset of pelist")
     endif
  enddo

  allocate(nest_domain%pelist_fine(npes_fine))
  allocate(nest_domain%pelist_coarse(npes_coarse))
  nest_domain%pelist_fine   = pes_fine
  nest_domain%pelist_coarse = pes_coarse
  nest_domain%is_fine_pe    = ANY(pes_fine(:)   == mpp_pe())
  nest_domain%is_coarse_pe  = ANY(pes_coarse(:) == mpp_pe())
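  ! Note: is_fine_pe and is_coarse_pe are independent checks, so a PE that appears in
  ! both pelists (the fully overlapped case, npes == npes_coarse) takes part in both
  ! sides of the overlap computations below.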
  !--- We are assuming the fine grid is fully overlapped with the coarse grid.
  if( nest_domain%is_fine_pe ) then
     if( iend_fine - istart_fine + 1 .NE. domain_fine%x(1)%global%size .OR. &
         jend_fine - jstart_fine + 1 .NE. domain_fine%y(1)%global%size ) then
        call mpp_error(FATAL, "mpp_domains_define.inc: The fine global domain is not covered by coarse domain")
     endif
  endif

  if( npes == npes_coarse ) then
     !--- every PE in the current pelist is a coarse-grid PE
  else if( npes_fine + npes_coarse == npes ) then
     !--- the fine-grid and coarse-grid pelists partition the current pelist
  else
     call mpp_error(FATAL, "mpp_domains_define.inc: size(pelist_coarse) .NE. size(pelist) and "// &
          "size(pelist_coarse)+size(pelist_fine) .NE. size(pelist)")
  endif
  nx_coarse = iend_coarse - istart_coarse + 1
  ny_coarse = jend_coarse - jstart_coarse + 1
  nx_fine   = iend_fine   - istart_fine   + 1
  ny_fine   = jend_fine   - jstart_fine   + 1

  if( mod(nx_fine,nx_coarse) .NE. 0 ) call mpp_error(FATAL, &
       "mpp_domains_define.inc: The refinement in x-direction is not integer for nest domain")
  x_refine = nx_fine/nx_coarse
  if( mod(ny_fine,ny_coarse) .NE. 0 ) call mpp_error(FATAL, &
       "mpp_domains_define.inc: The refinement in y-direction is not integer for nest domain")
  y_refine = ny_fine/ny_coarse

  !--- coarse grid and fine grid should be both symmetric or both non-symmetric.
  if(domain_coarse%symmetry .AND. .NOT. domain_fine%symmetry) then
     call mpp_error(FATAL, "mpp_domains_define.inc: coarse grid domain is symmetric, fine grid domain is not")
  endif
  if(.NOT. domain_coarse%symmetry .AND. domain_fine%symmetry) then
     call mpp_error(FATAL, "mpp_domains_define.inc: fine grid domain is symmetric, coarse grid domain is not")
  endif
  nest_domain%x_refine = x_refine
  nest_domain%y_refine = y_refine
  nest_domain%domain_fine   => domain_fine
  nest_domain%domain_coarse => domain_coarse

  allocate( nest_domain%C2F_T, nest_domain%C2F_C, nest_domain%C2F_E, nest_domain%C2F_N )
  nest_domain%C2F_T%next => NULL()
  nest_domain%C2F_C%next => NULL()
  nest_domain%C2F_N%next => NULL()
  nest_domain%C2F_E%next => NULL()
  allocate( nest_domain%F2C_T, nest_domain%F2C_C, nest_domain%F2C_E, nest_domain%F2C_N )

  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_T, CENTER, trim(nest_domain%name))
  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_E, EAST,   trim(nest_domain%name))
  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_C, CORNER, trim(nest_domain%name))
  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_N, NORTH,  trim(nest_domain%name))

  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_T, extra_halo_local, CENTER, trim(nest_domain%name))
  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_E, extra_halo_local, EAST,   trim(nest_domain%name))
  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_C, extra_halo_local, CORNER, trim(nest_domain%name))
  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_N, extra_halo_local, NORTH,  trim(nest_domain%name))

  deallocate(pes, pes_fine, pes_coarse)

end subroutine mpp_define_nest_domains
!###############################################################################
subroutine compute_overlap_coarse_to_fine(nest_domain, overlap, extra_halo, position, name)
  type(nest_domain_type), intent(inout) :: nest_domain
  type(nestSpec),         intent(inout) :: overlap
  integer,                intent(in   ) :: extra_halo
  integer,                intent(in   ) :: position
  character(len=*),       intent(in   ) :: name

  type(domain2D),     pointer     :: domain_fine  => NULL()
  type(domain2D),     pointer     :: domain_coarse=> NULL()
  type(overlap_type), allocatable :: overLaplist(:)
  integer, allocatable :: isl_coarse(:), iel_coarse(:), jsl_coarse(:), jel_coarse(:)
  integer, allocatable :: isl_fine(:), iel_fine(:), jsl_fine(:), jel_fine(:)
  integer, allocatable :: msg1(:), msg2(:)
  integer :: tile_fine, tile_coarse
  integer :: istart_fine, iend_fine, jstart_fine, jend_fine
  integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
  integer :: whalo, ehalo, shalo, nhalo
  integer :: isg_fine, ieg_fine, jsg_fine, jeg_fine
  integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
  integer :: is_coarse, ie_coarse, js_coarse, je_coarse
  integer :: isc_fine, iec_fine, jsc_fine, jec_fine
  integer :: isd_fine, ied_fine, jsd_fine, jed_fine
  integer :: isc_east, iec_east, jsc_east, jec_east
  integer :: isc_west, iec_west, jsc_west, jec_west
  integer :: isc_south, iec_south, jsc_south, jec_south
  integer :: isc_north, iec_north, jsc_north, jec_north
  !--- additional locals (inferred from the code below)
  integer :: x_refine, y_refine, ishift, jshift
  integer :: npes, npes_fine, npes_coarse
  integer :: n, m, l, count, dir, nrecv, nsend, from_pe, msgsize, outunit
  domain_fine   => nest_domain%domain_fine
  domain_coarse => nest_domain%domain_coarse
  call mpp_get_domain_shift(domain_coarse, ishift, jshift, position)
  tile_fine     = nest_domain%tile_fine
  tile_coarse   = nest_domain%tile_coarse
  istart_fine   = nest_domain%istart_fine
  iend_fine     = nest_domain%iend_fine
  jstart_fine   = nest_domain%jstart_fine
  jend_fine     = nest_domain%jend_fine
  istart_coarse = nest_domain%istart_coarse
  iend_coarse   = nest_domain%iend_coarse + ishift
  jstart_coarse = nest_domain%jstart_coarse
  jend_coarse   = nest_domain%jend_coarse + jshift
  x_refine      = nest_domain%x_refine
  y_refine      = nest_domain%y_refine

  npes        = mpp_npes()
  npes_fine   = size(nest_domain%pelist_fine(:))
  npes_coarse = size(nest_domain%pelist_coarse(:))
  whalo = domain_fine%whalo + extra_halo
  ehalo = domain_fine%ehalo + extra_halo
  shalo = domain_fine%shalo + extra_halo
  nhalo = domain_fine%nhalo + extra_halo

  allocate(isl_coarse(npes_coarse), iel_coarse(npes_coarse))
  allocate(jsl_coarse(npes_coarse), jel_coarse(npes_coarse))
  allocate(isl_fine  (npes_fine  ), iel_fine  (npes_fine  ))
  allocate(jsl_fine  (npes_fine  ), jel_fine  (npes_fine  ))

  call mpp_get_global_domain  (domain_fine,   xbegin=isg_fine,   xend=ieg_fine, &
                               ybegin=jsg_fine, yend=jeg_fine, position=position)
  call mpp_get_compute_domain (domain_coarse, xbegin=isc_coarse, xend=iec_coarse, &
                               ybegin=jsc_coarse, yend=jec_coarse, position=position)
  call mpp_get_compute_domain (domain_fine,   xbegin=isc_fine,   xend=iec_fine, &
                               ybegin=jsc_fine, yend=jec_fine, position=position)
  call mpp_get_compute_domains(domain_coarse, xbegin=isl_coarse, xend=iel_coarse, &
                               ybegin=jsl_coarse, yend=jel_coarse, position=position)
  call mpp_get_compute_domains(domain_fine,   xbegin=isl_fine,   xend=iel_fine, &
                               ybegin=jsl_fine, yend=jel_fine, position=position)

  overlap%extra_halo = extra_halo
  if( nest_domain%is_coarse_pe ) then
     overlap%xbegin = isc_coarse - domain_coarse%whalo
     overlap%xend   = iec_coarse + domain_coarse%ehalo
     overlap%ybegin = jsc_coarse - domain_coarse%shalo
     overlap%yend   = jec_coarse + domain_coarse%nhalo
  else
     overlap%xbegin = isc_fine - domain_fine%whalo
     overlap%xend   = iec_fine + domain_fine%ehalo
     overlap%ybegin = jsc_fine - domain_fine%shalo
     overlap%yend   = jec_fine + domain_fine%nhalo
  endif

  isd_fine = isc_fine - whalo
  ied_fine = iec_fine + ehalo
  jsd_fine = jsc_fine - shalo
  jed_fine = jec_fine + nhalo
  call init_index_type(overlap%west)
  call init_index_type(overlap%east)
  call init_index_type(overlap%south)
  call init_index_type(overlap%north)
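  ! In each index_type below, the *_me bounds index this fine-grid PE's own halo points
  ! (data-domain indices), while the *_you bounds give the coarse-grid region that
  ! supplies them; regions left untouched keep the empty (0,-1) values set by
  ! init_index_type.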
  !--- first compute the halo region and the corresponding index in the coarse grid.
  if( nest_domain%is_fine_pe ) then
     if( ieg_fine == iec_fine .AND. domain_fine%tile_id(1) == tile_fine ) then ! east halo
        is_coarse = iend_coarse
        ie_coarse = iend_coarse + ehalo
        js_coarse = jstart_coarse + ( jsc_fine - jsg_fine )/y_refine
        je_coarse = jstart_coarse + ( jec_fine - jsg_fine )/y_refine
        js_coarse = js_coarse - shalo
        je_coarse = je_coarse + nhalo

        overlap%east%is_me  = iec_fine + 1
        overlap%east%ie_me  = ied_fine
        overlap%east%js_me  = jsd_fine
        overlap%east%je_me  = jed_fine
        overlap%east%is_you = is_coarse
        overlap%east%ie_you = ie_coarse
        overlap%east%js_you = js_coarse
        overlap%east%je_you = je_coarse
     endif

     if( jsg_fine == jsc_fine .AND. domain_fine%tile_id(1) == tile_fine ) then ! south halo
        is_coarse = istart_coarse + ( isc_fine - isg_fine )/x_refine
        ie_coarse = istart_coarse + ( iec_fine - isg_fine )/x_refine
        is_coarse = is_coarse - whalo
        ie_coarse = ie_coarse + ehalo
        js_coarse = jstart_coarse - shalo
        je_coarse = jstart_coarse

        overlap%south%is_me  = isd_fine
        overlap%south%ie_me  = ied_fine
        overlap%south%js_me  = jsd_fine
        overlap%south%je_me  = jsc_fine-1
        overlap%south%is_you = is_coarse
        overlap%south%ie_you = ie_coarse
        overlap%south%js_you = js_coarse
        overlap%south%je_you = je_coarse
     endif

     if( isg_fine == isc_fine .AND. domain_fine%tile_id(1) == tile_fine ) then ! west halo
        is_coarse = istart_coarse - whalo
        ie_coarse = istart_coarse
        js_coarse = jstart_coarse + ( jsc_fine - jsg_fine )/y_refine
        je_coarse = jstart_coarse + ( jec_fine - jsg_fine )/y_refine
        js_coarse = js_coarse - shalo
        je_coarse = je_coarse + nhalo

        overlap%west%is_me  = isd_fine
        overlap%west%ie_me  = isc_fine-1
        overlap%west%js_me  = jsd_fine
        overlap%west%je_me  = jed_fine
        overlap%west%is_you = is_coarse
        overlap%west%ie_you = ie_coarse
        overlap%west%js_you = js_coarse
        overlap%west%je_you = je_coarse
     endif

     if( jeg_fine == jec_fine .AND. domain_fine%tile_id(1) == tile_fine ) then ! north halo
        is_coarse = istart_coarse + ( isc_fine - isg_fine )/x_refine
        ie_coarse = istart_coarse + ( iec_fine - isg_fine )/x_refine
        is_coarse = is_coarse - whalo
        ie_coarse = ie_coarse + ehalo
        js_coarse = jend_coarse
        je_coarse = jend_coarse + nhalo

        overlap%north%is_me  = isd_fine
        overlap%north%ie_me  = ied_fine
        overlap%north%js_me  = jec_fine+1
        overlap%north%je_me  = jed_fine
        overlap%north%is_you = is_coarse
        overlap%north%ie_you = ie_coarse
        overlap%north%js_you = js_coarse
        overlap%north%je_you = je_coarse
     endif
     !-------------------------------------------------------------------------
     !--- loop over the coarse-grid PEs to set up the receiving information:
     !--- at most one list entry is created per coarse PE, and each halo
     !--- rectangle that overlaps that PE is inserted into it.
     !-------------------------------------------------------------------------
     nrecv = 0
     allocate(overLaplist(npes_coarse))
     do n = 1, npes_coarse
        if( domain_coarse%list(n-1)%tile_id(1) .NE. tile_coarse ) cycle
        count = 0

        !--- east halo receiving
        dir = 1   ! direction flag (encoding assumed: 1=east, 2=south, 3=west, 4=north)
        is_coarse = overlap%east%is_you
        ie_coarse = overlap%east%ie_you
        js_coarse = overlap%east%js_you
        je_coarse = overlap%east%je_you
        if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
           is_coarse = max( is_coarse, isl_coarse(n) )
           ie_coarse = min( ie_coarse, iel_coarse(n) )
           js_coarse = max( js_coarse, jsl_coarse(n) )
           je_coarse = min( je_coarse, jel_coarse(n) )
           if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
              if( count == 0 ) then
                 nrecv = nrecv + 1
                 call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
              endif
              count = count + 1
              call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
                                       is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
           endif
        endif

        !--- south halo receiving
        dir = 2
        is_coarse = overlap%south%is_you
        ie_coarse = overlap%south%ie_you
        js_coarse = overlap%south%js_you
        je_coarse = overlap%south%je_you
        if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
           is_coarse = max( is_coarse, isl_coarse(n) )
           ie_coarse = min( ie_coarse, iel_coarse(n) )
           js_coarse = max( js_coarse, jsl_coarse(n) )
           je_coarse = min( je_coarse, jel_coarse(n) )
           if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
              if( count == 0 ) then
                 nrecv = nrecv + 1
                 call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
              endif
              count = count + 1
              call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
                                       is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
           endif
        endif

        !--- west halo receiving
        dir = 3
        is_coarse = overlap%west%is_you
        ie_coarse = overlap%west%ie_you
        js_coarse = overlap%west%js_you
        je_coarse = overlap%west%je_you
        if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
           is_coarse = max( is_coarse, isl_coarse(n) )
           ie_coarse = min( ie_coarse, iel_coarse(n) )
           js_coarse = max( js_coarse, jsl_coarse(n) )
           je_coarse = min( je_coarse, jel_coarse(n) )
           if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
              if( count == 0 ) then
                 nrecv = nrecv + 1
                 call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
              endif
              count = count + 1
              call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
                                       is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
           endif
        endif

        !--- north halo receiving
        dir = 4
        is_coarse = overlap%north%is_you
        ie_coarse = overlap%north%ie_you
        js_coarse = overlap%north%js_you
        je_coarse = overlap%north%je_you
        if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
           is_coarse = max( is_coarse, isl_coarse(n) )
           ie_coarse = min( ie_coarse, iel_coarse(n) )
           js_coarse = max( js_coarse, jsl_coarse(n) )
           je_coarse = min( je_coarse, jel_coarse(n) )
           if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
              if( count == 0 ) then
                 nrecv = nrecv + 1
                 call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
              endif
              count = count + 1
              call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
                                       is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
           endif
        endif
     enddo

     !--- copy the overlapping into nest_domain data.
     overlap%nrecv = nrecv
     if( nrecv > 0 ) then
        allocate(overlap%recv(nrecv))
        do n = 1, nrecv
           call copy_nest_overlap( overlap%recv(n), overLaplist(n) )
           call deallocate_nest_overlap( overLaplist(n) )
        enddo
     endif
     if(allocated(overLaplist)) deallocate(overLaplist)
  endif  ! nest_domain%is_fine_pe
  !-----------------------------------------------------------------------
  !--- on the coarse-grid PEs, compute the candidate sending region for each
  !--- side of the fine-grid halo, restricted to this PE's compute domain.
  !-----------------------------------------------------------------------
  if( nest_domain%is_coarse_pe ) then
     if(domain_coarse%tile_id(1) == tile_coarse) then
        isc_east = iend_coarse
        iec_east = iend_coarse + ehalo
        jsc_east = jstart_coarse - shalo
        jec_east = jend_coarse + nhalo
        isc_east = max(isc_coarse, isc_east)
        iec_east = min(iec_coarse, iec_east)
        jsc_east = max(jsc_coarse, jsc_east)
        jec_east = min(jec_coarse, jec_east)

        isc_south = istart_coarse - whalo
        iec_south = iend_coarse + ehalo
        jsc_south = jstart_coarse - shalo
        jec_south = jstart_coarse
        isc_south = max(isc_coarse, isc_south)
        iec_south = min(iec_coarse, iec_south)
        jsc_south = max(jsc_coarse, jsc_south)
        jec_south = min(jec_coarse, jec_south)

        isc_west = istart_coarse - whalo
        iec_west = istart_coarse
        jsc_west = jstart_coarse - shalo
        jec_west = jend_coarse + nhalo
        isc_west = max(isc_coarse, isc_west)
        iec_west = min(iec_coarse, iec_west)
        jsc_west = max(jsc_coarse, jsc_west)
        jec_west = min(jec_coarse, jec_west)

        isc_north = istart_coarse - whalo
        iec_north = iend_coarse + ehalo
        jsc_north = jend_coarse
        jec_north = jend_coarse + nhalo
        isc_north = max(isc_coarse, isc_north)
        iec_north = min(iec_coarse, iec_north)
        jsc_north = max(jsc_coarse, jsc_north)
        jec_north = min(jec_coarse, jec_north)
     else
        isc_west  = 0; iec_west  = -1; jsc_west  = 0; jec_west  = -1
        isc_east  = 0; iec_east  = -1; jsc_east  = 0; jec_east  = -1
        isc_south = 0; iec_south = -1; jsc_south = 0; jec_south = -1
        isc_north = 0; iec_north = -1; jsc_north = 0; jec_north = -1
     endif
     !--------------------------------------------------------------------------------
     !--- loop over the fine-grid PEs to set up the sending information: at most one
     !--- list entry is created per fine PE, and each halo rectangle that this coarse
     !--- PE supplies to that fine PE is inserted into it.
     !--------------------------------------------------------------------------------
     nsend = 0
     allocate(overLaplist(npes_fine))
     do n = 1, npes_fine
        if( domain_fine%list(n-1)%tile_id(1) .NE. tile_fine ) cycle
        count = 0

        !--- east halo sending
        dir = 1   ! direction flag (encoding assumed: 1=east, 2=south, 3=west, 4=north)
        if( ieg_fine == iel_fine(n) ) then
           if( iec_east .GE. isc_east .AND. jec_east .GE. jsc_east ) then
              is_coarse = iend_coarse
              ie_coarse = iend_coarse + ehalo
              js_coarse = jstart_coarse + ( jsl_fine(n) - jsg_fine )/y_refine
              je_coarse = jstart_coarse + ( jel_fine(n) - jsg_fine )/y_refine
              js_coarse = js_coarse - shalo
              je_coarse = je_coarse + nhalo
              is_coarse = max(isc_east, is_coarse)
              ie_coarse = min(iec_east, ie_coarse)
              js_coarse = max(jsc_east, js_coarse)
              je_coarse = min(jec_east, je_coarse)
              if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
                 if( count == 0 ) then
                    nsend = nsend + 1
                    call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
                 endif
                 count = count + 1
                 call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
                                          is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
              endif
           endif
        endif

        !--- south halo sending
        dir = 2
        if( jsg_fine == jsl_fine(n) ) then
           if( iec_south .GE. isc_south .AND. jec_south .GE. jsc_south ) then
              is_coarse = istart_coarse + ( isl_fine(n) - isg_fine )/x_refine
              ie_coarse = istart_coarse + ( iel_fine(n) - isg_fine )/x_refine
              is_coarse = is_coarse - whalo   ! extend in x with the west/east halo widths, matching the receive side
              ie_coarse = ie_coarse + ehalo
              js_coarse = jstart_coarse - shalo
              je_coarse = jstart_coarse
              is_coarse = max(isc_south, is_coarse)
              ie_coarse = min(iec_south, ie_coarse)
              js_coarse = max(jsc_south, js_coarse)
              je_coarse = min(jec_south, je_coarse)
              if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
                 if( count == 0 ) then
                    nsend = nsend + 1
                    call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
                 endif
                 count = count + 1
                 call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
                                          is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
              endif
           endif
        endif

        !--- west halo sending
        dir = 3
        if( isg_fine == isl_fine(n) ) then
           if( iec_west .GE. isc_west .AND. jec_west .GE. jsc_west ) then
              is_coarse = istart_coarse - whalo
              ie_coarse = istart_coarse
              js_coarse = jstart_coarse + ( jsl_fine(n) - jsg_fine )/y_refine
              je_coarse = jstart_coarse + ( jel_fine(n) - jsg_fine )/y_refine
              js_coarse = js_coarse - shalo
              je_coarse = je_coarse + nhalo
              is_coarse = max(isc_west, is_coarse)
              ie_coarse = min(iec_west, ie_coarse)
              js_coarse = max(jsc_west, js_coarse)
              je_coarse = min(jec_west, je_coarse)
              if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
                 if( count == 0 ) then
                    nsend = nsend + 1
                    call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
                 endif
                 count = count + 1
                 call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
                                          is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
              endif
           endif
        endif

        !--- north halo sending
        dir = 4
        if( jeg_fine == jel_fine(n) ) then
           if( iec_north .GE. isc_north .AND. jec_north .GE. jsc_north ) then
              is_coarse = istart_coarse + ( isl_fine(n) - isg_fine )/x_refine
              ie_coarse = istart_coarse + ( iel_fine(n) - isg_fine )/x_refine
              is_coarse = is_coarse - whalo
              ie_coarse = ie_coarse + ehalo
              js_coarse = jend_coarse
              je_coarse = jend_coarse + nhalo
              is_coarse = max(isc_north, is_coarse)
              ie_coarse = min(iec_north, ie_coarse)
              js_coarse = max(jsc_north, js_coarse)
              je_coarse = min(jec_north, je_coarse)
              if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
                 if( count == 0 ) then
                    nsend = nsend + 1
                    call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
                 endif
                 count = count + 1
                 call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
                                          is_coarse, ie_coarse, js_coarse, je_coarse, dir, ZERO)
              endif
           endif
        endif
     enddo

     !--- copy the overlapping into nest_domain data.
     overlap%nsend = nsend
     if( nsend > 0 ) then
        allocate(overlap%send(nsend))
        do n = 1, nsend
           call copy_nest_overlap( overlap%send(n), overLaplist(n) )
           call deallocate_nest_overlap( overLaplist(n) )
        enddo
     endif
     if(allocated(overLaplist)) deallocate(overLaplist)
  endif  ! nest_domain%is_coarse_pe
  deallocate(isl_coarse, iel_coarse, jsl_coarse, jel_coarse)
  deallocate(isl_fine, iel_fine, jsl_fine, jel_fine)
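  !--- consistency check: each receiving PE compares the message size it expects from
  !--- every sending PE with the size that sender actually computed; a mismatch aborts,
  !--- which catches send/recv overlap regions that do not line up.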
  allocate(msg1(0:npes-1), msg2(0:npes-1) )
  msg1 = 0
  msg2 = 0
  do m = 1, overlap%nrecv
     msgsize = 0
     do n = 1, overlap%recv(m)%count
        msgsize = msgsize + (overlap%recv(m)%ie(n)-overlap%recv(m)%is(n)+1)* &
                            (overlap%recv(m)%je(n)-overlap%recv(m)%js(n)+1)
     enddo
     from_pe = overlap%recv(m)%pe
     l = from_pe - mpp_root_pe()
     call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
     msg2(l) = msgsize
  enddo

  do m = 1, overlap%nsend
     msgsize = 0
     do n = 1, overlap%send(m)%count
        msgsize = msgsize + (overlap%send(m)%ie(n)-overlap%send(m)%is(n)+1)* &
                            (overlap%send(m)%je(n)-overlap%send(m)%js(n)+1)
     enddo
     call mpp_send( msgsize, plen=1, to_pe=overlap%send(m)%pe, tag=COMM_TAG_1)
  enddo
  call mpp_sync_self(check=EVENT_RECV)

  do m = 0, npes-1
     if(msg1(m) .NE. msg2(m)) then
        print*, "compute_overlap_coarse_to_fine: My pe = ", mpp_pe(), ", name =", trim(name), ", from pe=", &
                m + mpp_root_pe(), ": send size = ", msg1(m), ", recv size = ", msg2(m)
        call mpp_error(FATAL, "mpp_define_nest_domains.inc(compute_overlap_coarse_to_fine): send and recv size mismatch")
     endif
  enddo
  call mpp_sync_self()

  outunit = stdout()
  write(outunit,*) "NOTE from compute_overlap_coarse_to_fine: "// &
                   "message sizes are matched between send and recv for "//trim(name)
  deallocate(msg1, msg2)

end subroutine compute_overlap_coarse_to_fine
!###############################################################################
!--- This routine computes the send and recv information for the overlapped nesting
!--- region. The data is assumed on T-cell center.
subroutine compute_overlap_fine_to_coarse(nest_domain, overlap, position, name)
  type(nest_domain_type), intent(inout) :: nest_domain
  type(nestSpec),         intent(inout) :: overlap
  integer,                intent(in   ) :: position
  character(len=*),       intent(in   ) :: name

  type(domain2D),     pointer     :: domain_fine  => NULL()
  type(domain2D),     pointer     :: domain_coarse=> NULL()
  type(overlap_type), allocatable :: overLaplist(:)
  integer, allocatable :: isl_coarse(:), iel_coarse(:), jsl_coarse(:), jel_coarse(:)
  integer, allocatable :: isl_fine(:), iel_fine(:), jsl_fine(:), jel_fine(:)
  integer, allocatable :: msg1(:), msg2(:)
  integer :: tile_fine, tile_coarse
  integer :: istart_fine, iend_fine, jstart_fine, jend_fine
  integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
  integer :: whalo, ehalo, shalo, nhalo
  integer :: isg_fine, ieg_fine, jsg_fine, jeg_fine
  integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
  integer :: is_coarse, ie_coarse, js_coarse, je_coarse
  integer :: is_fine, ie_fine, js_fine, je_fine
  integer :: isc_fine, iec_fine, jsc_fine, jec_fine
  integer :: is_you, ie_you, js_you, je_you
  !--- additional locals (inferred from the code below)
  integer :: x_refine, y_refine, ishift, jshift
  integer :: npes, npes_fine, npes_coarse
  integer :: n, m, l, dir, nrecv, nsend, from_pe, msgsize, outunit
  domain_fine   => nest_domain%domain_fine
  domain_coarse => nest_domain%domain_coarse
  tile_fine     = nest_domain%tile_fine
  tile_coarse   = nest_domain%tile_coarse
  istart_fine   = nest_domain%istart_fine
  iend_fine     = nest_domain%iend_fine
  jstart_fine   = nest_domain%jstart_fine
  jend_fine     = nest_domain%jend_fine
  istart_coarse = nest_domain%istart_coarse
  iend_coarse   = nest_domain%iend_coarse
  jstart_coarse = nest_domain%jstart_coarse
  jend_coarse   = nest_domain%jend_coarse
  x_refine      = nest_domain%x_refine
  y_refine      = nest_domain%y_refine

  npes        = mpp_npes()
  npes_fine   = size(nest_domain%pelist_fine(:))
  npes_coarse = size(nest_domain%pelist_coarse(:))

  allocate(isl_coarse(npes_coarse), iel_coarse(npes_coarse) )
  allocate(jsl_coarse(npes_coarse), jel_coarse(npes_coarse) )
  allocate(isl_fine(npes_fine), iel_fine(npes_fine) )
  allocate(jsl_fine(npes_fine), jel_fine(npes_fine) )

  call mpp_get_compute_domain (domain_coarse, xbegin=isc_coarse, xend=iec_coarse, ybegin=jsc_coarse, yend=jec_coarse)
  call mpp_get_compute_domain (domain_fine,   xbegin=isc_fine,   xend=iec_fine,   ybegin=jsc_fine,   yend=jec_fine)
  call mpp_get_compute_domains(domain_coarse, xbegin=isl_coarse, xend=iel_coarse, ybegin=jsl_coarse, yend=jel_coarse)
  call mpp_get_compute_domains(domain_fine,   xbegin=isl_fine,   xend=iel_fine,   ybegin=jsl_fine,   yend=jel_fine)
  call mpp_get_domain_shift   (domain_coarse, ishift, jshift, position)

  if( nest_domain%is_fine_pe ) then
     overlap%xbegin = isc_fine - domain_fine%whalo
     overlap%xend   = iec_fine + domain_fine%ehalo + ishift
     overlap%ybegin = jsc_fine - domain_fine%shalo
     overlap%yend   = jec_fine + domain_fine%nhalo + jshift
  else
     overlap%xbegin = isc_coarse - domain_coarse%whalo
     overlap%xend   = iec_coarse + domain_coarse%ehalo + ishift
     overlap%ybegin = jsc_coarse - domain_coarse%shalo
     overlap%yend   = jec_coarse + domain_coarse%nhalo + jshift
  endif

  call init_index_type(overlap%center)
  !-----------------------------------------------------------------------------------------
  !--- on the fine-grid PEs, compute the send information: the portion of this PE's
  !--- compute domain that lies over each coarse-grid PE's piece of the nest region.
  !-----------------------------------------------------------------------------------------
  if( nest_domain%is_fine_pe ) then
     nsend = 0
     dir   = 0   ! single (center) overlap region for fine-to-coarse
     allocate(overLaplist(npes_coarse))
     do n = 1, npes_coarse
        if(domain_coarse%list(n-1)%tile_id(1) == tile_coarse) then
           is_coarse = max( istart_coarse, isl_coarse(n) )
           ie_coarse = min( iend_coarse,   iel_coarse(n) )
           js_coarse = max( jstart_coarse, jsl_coarse(n) )
           je_coarse = min( jend_coarse,   jel_coarse(n) )
           if(ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
              is_fine = istart_fine + (is_coarse - istart_coarse)     * x_refine
              ie_fine = istart_fine + (ie_coarse - istart_coarse + 1) * x_refine - 1
              js_fine = jstart_fine + (js_coarse - jstart_coarse)     * y_refine
              je_fine = jstart_fine + (je_coarse - jstart_coarse + 1) * y_refine - 1

              is_fine = max(isc_fine, is_fine)
              ie_fine = min(iec_fine, ie_fine)
              js_fine = max(jsc_fine, js_fine)
              je_fine = min(jec_fine, je_fine)
              if( ie_fine .GE. is_fine .AND. je_fine .GE. js_fine ) then
                 nsend = nsend + 1
                 call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
                 call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_coarse(n), &
                                          is_fine, ie_fine+ishift, js_fine, je_fine+jshift, dir, ZERO)
              endif
           endif
        endif
     enddo

     overlap%nsend = nsend
     if( nsend > 0 ) then
        allocate(overlap%send(nsend))
        do n = 1, nsend
           call copy_nest_overlap(overlap%send(n), overLaplist(n) )
           call deallocate_nest_overlap(overLaplist(n))
        enddo
     endif
     if(allocated(overLaplist)) deallocate(overLaplist)
  endif
  !--------------------------------------------------------------------------------
  !--- on the coarse-grid PEs, compute the recv information: the portion of the
  !--- nest region that lies in this PE's compute domain and the fine-grid PEs
  !--- that will supply it.
  !--------------------------------------------------------------------------------
  if( nest_domain%is_coarse_pe ) then
     nrecv = 0
     dir   = 0   ! single (center) overlap region for fine-to-coarse
     if(domain_coarse%tile_id(1) == tile_coarse) then
        is_coarse = max( istart_coarse, isc_coarse )
        ie_coarse = min( iend_coarse,   iec_coarse )
        js_coarse = max( jstart_coarse, jsc_coarse )
        je_coarse = min( jend_coarse,   jec_coarse )

        if(ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
           is_fine = istart_fine + (is_coarse - istart_coarse)     * x_refine
           ie_fine = istart_fine + (ie_coarse - istart_coarse + 1) * x_refine - 1
           js_fine = jstart_fine + (js_coarse - jstart_coarse)     * y_refine
           je_fine = jstart_fine + (je_coarse - jstart_coarse + 1) * y_refine - 1

           overlap%center%is_me  = is_coarse; overlap%center%ie_me  = ie_coarse + ishift
           overlap%center%js_me  = js_coarse; overlap%center%je_me  = je_coarse + jshift
           overlap%center%is_you = is_fine;   overlap%center%ie_you = ie_fine + ishift
           overlap%center%js_you = js_fine;   overlap%center%je_you = je_fine + jshift

           allocate(overLaplist(npes_fine))
           do n = 1, npes_fine
              is_you = max(isl_fine(n), is_fine)
              ie_you = min(iel_fine(n), ie_fine)
              js_you = max(jsl_fine(n), js_fine)
              je_you = min(jel_fine(n), je_fine)
              if( ie_you .GE. is_you .AND. je_you .GE. js_you ) then
                 nrecv = nrecv + 1
                 call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
                 call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_fine(n), &
                                          is_you, ie_you+ishift, js_you, je_you+jshift, dir, ZERO)
              endif
           enddo
        endif
     endif

     overlap%nrecv = nrecv
     if( nrecv > 0 ) then
        allocate(overlap%recv(nrecv))
        do n = 1, nrecv
           call copy_nest_overlap(overlap%recv(n), overLaplist(n) )
           call deallocate_nest_overlap( overLaplist(n) )
        enddo
     endif
     if(allocated(overLaplist)) deallocate(overLaplist)
  endif

  deallocate(isl_coarse, iel_coarse, jsl_coarse, jel_coarse)
  deallocate(isl_fine, iel_fine, jsl_fine, jel_fine)
  !--- make sure the send and recv sizes match between the sending and receiving PEs.
  allocate(msg1(0:npes-1), msg2(0:npes-1) )
  msg1 = 0
  msg2 = 0
  do m = 1, overlap%nrecv
     msgsize = 0
     do n = 1, overlap%recv(m)%count
        msgsize = msgsize + (overlap%recv(m)%ie(n)-overlap%recv(m)%is(n)+1)* &
                            (overlap%recv(m)%je(n)-overlap%recv(m)%js(n)+1)
     enddo
     from_pe = overlap%recv(m)%pe
     l = from_pe - mpp_root_pe()
     call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2)
     msg2(l) = msgsize
  enddo

  do m = 1, overlap%nsend
     msgsize = 0
     do n = 1, overlap%send(m)%count
        msgsize = msgsize + (overlap%send(m)%ie(n)-overlap%send(m)%is(n)+1)* &
                            (overlap%send(m)%je(n)-overlap%send(m)%js(n)+1)
     enddo
     call mpp_send( msgsize, plen=1, to_pe=overlap%send(m)%pe, tag=COMM_TAG_2)
  enddo
  call mpp_sync_self(check=EVENT_RECV)

  do m = 0, npes-1
     if(msg1(m) .NE. msg2(m)) then
        print*, "compute_overlap_fine_to_coarse: My pe = ", mpp_pe(), ", name =", trim(name), ", from pe=", &
                m + mpp_root_pe(), ": send size = ", msg1(m), ", recv size = ", msg2(m)
        call mpp_error(FATAL, "mpp_define_nest_domains.inc(compute_overlap_fine_to_coarse): send and recv size mismatch")
     endif
  enddo
  call mpp_sync_self()

  outunit = stdout()
  write(outunit,*) "NOTE from compute_overlap_fine_to_coarse: "// &
                   "message sizes are matched between send and recv for "//trim(name)
  deallocate(msg1, msg2)

end subroutine compute_overlap_fine_to_coarse
!!$subroutine set_overlap_fine_to_coarse(nest_domain, position)
!!$   type(nest_domain_type), intent(inout) :: nest_domain
!!$   integer,                intent(in   ) :: position
!!$
!!$   call mpp_get_domain_shift(domain, ishift, jshift, position)
!!$   update_in => nest_domain%F2C_T
!!$   select case(position)
!!$   case (CORNER)
!!$      update_out => nest_domain%F2C_C
!!$   case (EAST)
!!$      update_out => nest_domain%F2C_E
!!$   case (NORTH)
!!$      update_out => nest_domain%F2C_N
!!$   case default
!!$      call mpp_error(FATAL, "mpp_domains_define.inc(set_overlap_fine_to_coarse): the position should be CORNER, EAST or NORTH")
!!$   end select
!!$
!!$   nsend = update_in%nsend
!!$   nrecv = update_in%nrecv
!!$   update_out%pe    = update_in%pe
!!$   update_out%nsend = nsend
!!$   update_out%nrecv = nrecv
!!$
!!$   if( nsend > 0 ) then
!!$      allocate(update_out%send(nsend))
!!$      count = update_in%send(n)%count
!!$      call allocate_overlap_type(update_out%send(n), update_in%count, overlap_in%type)
!!$      update_out%send(n)%ie      (count) = update_in%send(n)%ie      (count) + ishift
!!$      update_out%send(n)%je      (count) = update_in%send(n)%je      (count) + jshift
!!$      update_out%send(n)%tileMe  (count) = update_in%send(n)%tileMe  (count)
!!$      update_out%send(n)%dir     (count) = update_in%send(n)%dir     (count)
!!$      update_out%send(n)%rotation(count) = update_in%send(n)%rotation(count)
!!$   endif
!!$
!!$   if( nrecv > 0 ) then
!!$      allocate(update_out%recv(nrecv))
!!$      count = update_in%recv(n)%count
!!$      call allocate_overlap_type(update_out%recv(n), update_in%count, overlap_in%type)
!!$      update_out%recv(n)%ie      (count) = update_in%recv(n)%ie      (count) + ishift
!!$      update_out%recv(n)%je      (count) = update_in%recv(n)%je      (count) + jshift
!!$      update_out%recv(n)%tileMe  (count) = update_in%recv(n)%tileMe  (count)
!!$      update_out%recv(n)%dir     (count) = update_in%recv(n)%dir     (count)
!!$      update_out%recv(n)%rotation(count) = update_in%recv(n)%rotation(count)
!!$   endif
!!$
!!$end subroutine set_overlap_fine_to_coarse
!###############################################################################
subroutine init_index_type (indexData )
  type(index_type), intent(inout) :: indexData

  indexData%is_me  = 0
  indexData%ie_me  = -1
  indexData%js_me  = 0
  indexData%je_me  = -1
  indexData%is_you = 0
  indexData%ie_you = -1
  indexData%js_you = 0
  indexData%je_you = -1

end subroutine init_index_type
subroutine allocate_nest_overlap(overlap, count)
  type(overlap_type), intent(inout) :: overlap
  integer,            intent(in   ) :: count

  overlap%count = 0
  overlap%pe    = NULL_PE
  if( ASSOCIATED(overlap%is) ) call mpp_error(FATAL, &
       "mpp_define_nest_domains.inc: overlap is already been allocated")

  allocate(overlap%is      (count) )
  allocate(overlap%ie      (count) )
  allocate(overlap%js      (count) )
  allocate(overlap%je      (count) )
  allocate(overlap%dir     (count) )
  allocate(overlap%rotation(count) )
  allocate(overlap%msgsize (count) )

end subroutine allocate_nest_overlap
!##############################################################################
subroutine deallocate_nest_overlap(overlap)
  type(overlap_type), intent(inout) :: overlap

  overlap%count = 0
  overlap%pe    = NULL_PE
  deallocate(overlap%is)
  deallocate(overlap%ie)
  deallocate(overlap%js)
  deallocate(overlap%je)
  deallocate(overlap%dir)
  deallocate(overlap%rotation)
  deallocate(overlap%msgsize)

end subroutine deallocate_nest_overlap
!##############################################################################
subroutine insert_nest_overlap(overlap, pe, is, ie, js, je, dir, rotation)
  type(overlap_type), intent(inout) :: overlap
  integer,            intent(in   ) :: pe
  integer,            intent(in   ) :: is, ie, js, je
  integer,            intent(in   ) :: dir, rotation
  integer                           :: count

  if( overlap%count == 0 ) then
     overlap%pe = pe
  else
     if(overlap%pe .NE. pe) call mpp_error(FATAL, &
          "mpp_define_nest_domains.inc: mismatch on pe")
  endif
  overlap%count = overlap%count+1
  count = overlap%count
  if(count > size(overlap%is(:))) call mpp_error(FATAL, &
       "mpp_define_nest_domains.inc: overlap%count > size(overlap%is), contact developer")
  overlap%is      (count) = is
  overlap%ie      (count) = ie
  overlap%js      (count) = js
  overlap%je      (count) = je
  overlap%dir     (count) = dir
  overlap%rotation(count) = rotation
  overlap%msgsize (count) = (ie-is+1)*(je-js+1)

end subroutine insert_nest_overlap
!#########################################################
subroutine copy_nest_overlap(overlap_out, overlap_in)
  type(overlap_type), intent(inout) :: overlap_out
  type(overlap_type), intent(in   ) :: overlap_in

  if(overlap_in%count == 0) call mpp_error(FATAL, &
       "mpp_define_nest_domains.inc: overlap_in%count is 0")
  if( ASSOCIATED(overlap_out%is) ) call mpp_error(FATAL, &
       "mpp_define_nest_domains.inc: overlap_out is already been allocated")

  call allocate_nest_overlap(overlap_out, overlap_in%count)
  overlap_out%count = overlap_in%count
  overlap_out%pe    = overlap_in%pe

  overlap_out%is      (:) = overlap_in%is      (1:overlap_in%count)
  overlap_out%ie      (:) = overlap_in%ie      (1:overlap_in%count)
  overlap_out%js      (:) = overlap_in%js      (1:overlap_in%count)
  overlap_out%je      (:) = overlap_in%je      (1:overlap_in%count)
  overlap_out%msgsize (:) = overlap_in%msgsize (1:overlap_in%count)
  overlap_out%dir     (:) = overlap_in%dir     (1:overlap_in%count)
  overlap_out%rotation(:) = overlap_in%rotation(1:overlap_in%count)

end subroutine copy_nest_overlap
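! Typical lifecycle of the overlap helpers above (a sketch; the PE number, the index
! ranges and the variable names are illustrative):
!
!   type(overlap_type) :: ov, packed
!   call allocate_nest_overlap(ov, MAXOVERLAP)              ! reserve MAXOVERLAP slots
!   call insert_nest_overlap(ov, pe=4, is=3, ie=12, js=4, je=13, dir=0, rotation=ZERO)
!   !... further inserts must use the same pe ...
!   call copy_nest_overlap(packed, ov)                      ! exact-size copy into packed
!   call deallocate_nest_overlap(ov)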
!#######################################################################
!--- this routine finds the C2F nestSpec whose halo size matches the input extra_halo;
!--- if none exists yet, a new one is computed and appended to the list.
function search_C2F_nest_overlap(nest_domain, extra_halo, position)
  type(nest_domain_type), intent(inout) :: nest_domain
  integer,                intent(in   ) :: extra_halo
  integer,                intent(in   ) :: position
  type(nestSpec), pointer    :: search_C2F_nest_overlap
  type(nestSpec), pointer    :: update_ref
  character(len=NAME_LENGTH) :: name

  name = nest_domain%name
  select case(position)
  case (CENTER)
     update_ref => nest_domain%C2F_T
  case (CORNER)
     update_ref => nest_domain%C2F_C
  case (NORTH)
     update_ref => nest_domain%C2F_N
  case (EAST)
     update_ref => nest_domain%C2F_E
  case default
     call mpp_error(FATAL, &
          "mpp_define_nest_domains.inc(search_C2F_nest_overlap): position should be CENTER|CORNER|EAST|NORTH")
  end select

  search_C2F_nest_overlap => update_ref
  do
     if(extra_halo == search_C2F_nest_overlap%extra_halo) then
        exit   ! found an entry with the requested halo size
     endif
     !--- if not found, switch to the next entry in the list
     if(.NOT. ASSOCIATED(search_C2F_nest_overlap%next)) then
        allocate(search_C2F_nest_overlap%next)
        search_C2F_nest_overlap => search_C2F_nest_overlap%next
        call compute_overlap_coarse_to_fine(nest_domain, search_C2F_nest_overlap, extra_halo, position, name)
        exit
     else
        search_C2F_nest_overlap => search_C2F_nest_overlap%next
     endif
  enddo

  update_ref => NULL()

end function search_C2F_nest_overlap
!#######################################################################
!--- this routine returns the F2C nestSpec for the given position.
function search_F2C_nest_overlap(nest_domain, position)
  type(nest_domain_type), intent(inout) :: nest_domain
  integer,                intent(in   ) :: position
  type(nestSpec), pointer :: search_F2C_nest_overlap

  select case(position)
  case (CENTER)
     search_F2C_nest_overlap => nest_domain%F2C_T
  case (CORNER)
     search_F2C_nest_overlap => nest_domain%F2C_C
  case (NORTH)
     search_F2C_nest_overlap => nest_domain%F2C_N
  case (EAST)
     search_F2C_nest_overlap => nest_domain%F2C_E
  case default
     call mpp_error(FATAL, &
          "mpp_define_nest_domains.inc(search_F2C_nest_overlap): position should be CENTER|CORNER|EAST|NORTH")
  end select

end function search_F2C_nest_overlap

!################################################################
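! Sketch of how a fine-grid PE might size a receive buffer for a coarse-to-fine
! update (WEST and the variable names are illustrative; CENTER is the default position):
!
!   integer :: isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c
!   real, allocatable :: wbuffer(:,:)
!   call mpp_get_C2F_index(nest_domain, isw_f, iew_f, jsw_f, jew_f, &
!                          isw_c, iew_c, jsw_c, jew_c, WEST, position=CENTER)
!   if( iew_c .GE. isw_c .AND. jew_c .GE. jsw_c ) then
!      allocate(wbuffer(isw_c:iew_c, jsw_c:jew_c))
!   else
!      allocate(wbuffer(1,1))   ! dummy buffer when this PE has no west halo to fill
!   endif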
subroutine mpp_get_C2F_index(nest_domain, is_fine, ie_fine, js_fine, je_fine, &
                             is_coarse, ie_coarse, js_coarse, je_coarse, dir, position)
  type(nest_domain_type), intent(in ) :: nest_domain
  integer,                intent(out) :: is_fine, ie_fine, js_fine, je_fine
  integer,                intent(out) :: is_coarse, ie_coarse, js_coarse, je_coarse
  integer,                intent(in ) :: dir
  integer, optional,      intent(in ) :: position

  integer                 :: update_position
  type(nestSpec), pointer :: update => NULL()

  update_position = CENTER
  if(present(position)) update_position = position

  select case(update_position)
  case (CENTER)
     update => nest_domain%C2F_T
  case (EAST)
     update => nest_domain%C2F_E
  case (CORNER)
     update => nest_domain%C2F_C
  case (NORTH)
     update => nest_domain%C2F_N
  case default
     call mpp_error(FATAL, "mpp_define_nest_domains.inc(mpp_get_C2F_index): invalid option argument position")
  end select

  select case(dir)
  case (WEST)
     is_fine   = update%west%is_me
     ie_fine   = update%west%ie_me
     js_fine   = update%west%js_me
     je_fine   = update%west%je_me
     is_coarse = update%west%is_you
     ie_coarse = update%west%ie_you
     js_coarse = update%west%js_you
     je_coarse = update%west%je_you
  case (EAST)
     is_fine   = update%east%is_me
     ie_fine   = update%east%ie_me
     js_fine   = update%east%js_me
     je_fine   = update%east%je_me
     is_coarse = update%east%is_you
     ie_coarse = update%east%ie_you
     js_coarse = update%east%js_you
     je_coarse = update%east%je_you
  case (SOUTH)
     is_fine   = update%south%is_me
     ie_fine   = update%south%ie_me
     js_fine   = update%south%js_me
     je_fine   = update%south%je_me
     is_coarse = update%south%is_you
     ie_coarse = update%south%ie_you
     js_coarse = update%south%js_you
     je_coarse = update%south%je_you
  case (NORTH)
     is_fine   = update%north%is_me
     ie_fine   = update%north%ie_me
     js_fine   = update%north%js_me
     je_fine   = update%north%je_me
     is_coarse = update%north%is_you
     ie_coarse = update%north%ie_you
     js_coarse = update%north%js_you
     je_coarse = update%north%je_you
  case default
     call mpp_error(FATAL, "mpp_define_nest_domains.inc: invalid value for argument dir")
  end select

end subroutine mpp_get_C2F_index

!################################################################
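! Sketch of sizing the buffer a coarse-grid PE passes to the fine-to-coarse update
! (the variable names are illustrative):
!
!   integer :: is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f
!   real, allocatable :: nest_dat(:,:)
!   call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, &
!                          is_f, ie_f, js_f, je_f, position=CENTER)
!   if( ie_f .GE. is_f .AND. je_f .GE. js_f ) then
!      allocate(nest_dat(is_f:ie_f, js_f:je_f))   ! receives the fine-grid data
!   else
!      allocate(nest_dat(1,1))
!   endif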
subroutine mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, &
                             is_fine, ie_fine, js_fine, je_fine, position)
  type(nest_domain_type), intent(in ) :: nest_domain
  integer,                intent(out) :: is_fine, ie_fine, js_fine, je_fine
  integer,                intent(out) :: is_coarse, ie_coarse, js_coarse, je_coarse
  integer, optional,      intent(in ) :: position

  integer                 :: update_position
  type(nestSpec), pointer :: update => NULL()

  update_position = CENTER
  if(present(position)) update_position = position

  select case(update_position)
  case (CENTER)
     update => nest_domain%F2C_T
  case (EAST)
     update => nest_domain%F2C_E
  case (CORNER)
     update => nest_domain%F2C_C
  case (NORTH)
     update => nest_domain%F2C_N
  case default
     call mpp_error(FATAL, "mpp_define_nest_domains.inc(mpp_get_F2C_index): invalid option argument position")
  end select

  is_fine   = update%center%is_you
  ie_fine   = update%center%ie_you
  js_fine   = update%center%js_you
  je_fine   = update%center%je_you
  is_coarse = update%center%is_me
  ie_coarse = update%center%ie_me
  js_coarse = update%center%js_me
  je_coarse = update%center%je_me

end subroutine mpp_get_F2C_index
integer, parameter coarse
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version will
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz l_size integer
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_WRITE_UNLIMITED_AXIS_1D_(unit, field, domain, data, nelems_io) integer, intent(in) ::unit type(fieldtype), intent(inout) ::field type(domain2D), intent(inout) ::domain MPP_TYPE_, intent(inout) ::data(:) integer, intent(in) ::nelems_io(:) ! number of compressed elements from each ! member of the io_domain. It MUST have the ! same order as the io_domain pelist. integer, allocatable ::pelist(:) integer ::i, j, nelems, npes type(domain2d), pointer ::io_domain=> allocatable
subroutine, public copy(self, rhs)
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT end
integer, pointer refinement
integer(long), parameter true
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************! this routine is used to retrieve scalar boundary data for symmetric domain. subroutine MPP_GET_BOUNDARY_2D_(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &position, complete, tile_count) type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::field(:,:) MPP_TYPE_, intent(inout), optional ::ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:) integer, intent(in), optional ::flags, position, tile_count logical, intent(in), optional ::complete MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_, allocatable, dimension(:,:) ::ebuffer2D, sbuffer2D, wbuffer2D, nbuffer2D integer ::xcount, ycount integer ::ntile logical ::need_ebuffer, need_sbuffer, need_wbuffer, need_nbuffer integer(LONG_KIND), dimension(MAX_DOMAIN_FIELDS, MAX_TILES), save ::f_addrs=-9999 integer(LONG_KIND), dimension(4, MAX_DOMAIN_FIELDS, MAX_TILES), save ::b_addrs=-9999 integer, save ::bsize(4)=0, isize=0, jsize=0, ksize=0, pos, list=0, l_size=0, upflags integer ::buffer_size(4) integer ::max_ntile, tile, update_position, ishift, jshift logical ::do_update, is_complete, set_mismatch character(len=3) ::text MPP_TYPE_ ::d_type type(overlapSpec), pointer ::bound=> NULL() ntile
integer(long), parameter false
integer, parameter, public west
l_size ! loop over number of fields ke do je do ie to je n if(.NOT. d_comm%R_do_buf(list)) cycle from_pe
l_size ! loop over number of fields ke do je do ie to to_pe
character(len=128) version
l_size ! loop over number of fields ke do je do ie to is
integer, parameter, public global
integer, parameter, public center
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible intent(inout) & T
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call mpp_error(FATAL, 'MPP_TRANSMIT:you cannot transmit to ANY_PE using MPI.') else if(to_pe.NE.NULL_PE) then !no other valid cases except NULL_PE call mpp_error(FATAL
    subroutine READ_RECORD_CORE_( unit, field, nwords, data, start, axsiz )
      integer,         intent(in)    :: unit
      type(fieldtype), intent(in)    :: field
      integer,         intent(in)    :: nwords
      MPP_TYPE_,       intent(inout) :: data(nwords)
      integer,         intent(in)    :: start(:), axsiz(:)

      integer(SHORT_KIND) :: i2vals(nwords)
!rab used in conjunction with transfer intrinsic to determine size of a variable
      integer(KIND=1) :: one_byte(8)
      integer         :: word_sz
!#ifdef __sgi
      integer(INT_KIND) :: ivals(nwords)
      real(FLOAT_KIND)  :: rvals(nwords)
!#else
!      integer :: ivals(nwords)
!      real    :: rvals(nwords)
!#endif
      real(DOUBLE_KIND) :: r8vals(nwords)
      pointer( ptr1, i2vals )
      pointer( ptr2, ivals )
      pointer( ptr3, rvals )
      pointer( ptr4, r8vals )

      if( mpp_io_stack_size < nwords )call mpp_io_set_stack_size(nwords)
      call mpp_error( FATAL, 'MPP_READ currently requires use_netCDF option' )
    end subroutine READ_RECORD_CORE_

    subroutine READ_RECORD_( unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in )
!routine that is finally called by all mpp_read routines to perform the read
!a non-netCDF record contains:
!      field ID
!      a set of 4 coordinates (is:ie, js:je) giving the data subdomain
!      a timelevel and a timestamp (=NULLTIME if field is static)
!      3D real data (stored as 1D)
!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above
!in a global direct access file, record position on PE is given by %record.

!Treatment of timestamp:
!      We assume that static fields have been passed without a timestamp.
!      Here that is converted into a timestamp of NULLTIME.
!      For non-netCDF fields, field is treated no differently, but is written
!      with a timestamp of NULLTIME. There is no check in the code to prevent
!      the user from repeatedly writing a static field.

      integer,         intent(in)           :: unit, nwords
      type(fieldtype), intent(in)           :: field
      MPP_TYPE_,       intent(inout)        :: data(nwords)
      integer,         intent(in), optional :: time_level
      type(domain2D),  intent(in), optional :: domain
      integer,         intent(in), optional :: position, tile_count
      integer,         intent(in), optional :: start_in(:), axsiz_in(:)

      integer, dimension(size(field%axes(:))) :: start, axsiz
      integer :: tlevel !, subdomain(4)
      integer :: i, error, is, ie, js, je, isg, ieg, jsg, jeg
      type(domain2d), pointer :: io_domain => NULL()

!if start_in and axsiz_in are both present, the data domain and compute domain
!must refer to the subdomain being passed: in this case start(:) and axsiz(:)
!are taken from start_in(:) and axsiz_in(:) rather than being computed from
!size(field%axes(:)), since that attempts to gather all data on one PE.
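!A hedged sketch of how a field record reaches READ_RECORD_ above through the
!public mpp_io_mod interfaces (mpp_open, mpp_get_info, mpp_get_fields, mpp_read,
!mpp_close). The file name and buffer shape are hypothetical; only the interface
!and flag names are assumed from mpp_io_mod.
!
!    program read_record_sketch
!      use mpp_mod,    only: mpp_init, mpp_exit
!      use mpp_io_mod, only: mpp_io_init, mpp_open, mpp_close, mpp_get_info, &
!                            mpp_get_fields, mpp_read, fieldtype, &
!                            MPP_RDONLY, MPP_NETCDF, MPP_MULTI, MPP_SINGLE
!      implicit none
!      integer :: unit, ndim, nvar, natt, ntime
!      type(fieldtype), allocatable :: vars(:)
!      real :: buffer(96,96,10)                 !hypothetical global field shape
!
!      call mpp_init()
!      call mpp_io_init()
!      call mpp_open( unit, 'INPUT/example.nc', action=MPP_RDONLY, form=MPP_NETCDF, &
!                     threading=MPP_MULTI, fileset=MPP_SINGLE )
!      call mpp_get_info( unit, ndim, nvar, natt, ntime )
!      allocate( vars(nvar) )
!      call mpp_get_fields( unit, vars )
!      !read time level 1 of the first field; the time level is optional for static fields
!      call mpp_read( unit, vars(1), buffer, 1 )
!      call mpp_close( unit )
!      call mpp_exit()
!    end program read_record_sketch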