FV3 Bundle
mpp_define_nest_domains.inc
1 ! -*-f90-*-
2 
3 
4 !***********************************************************************
5 !* GNU Lesser General Public License
6 !*
7 !* This file is part of the GFDL Flexible Modeling System (FMS).
8 !*
9 !* FMS is free software: you can redistribute it and/or modify it under
10 !* the terms of the GNU Lesser General Public License as published by
11 !* the Free Software Foundation, either version 3 of the License, or (at
12 !* your option) any later version.
13 !*
14 !* FMS is distributed in the hope that it will be useful, but WITHOUT
15 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 !* for more details.
18 !*
19 !* You should have received a copy of the GNU Lesser General Public
20 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
21 !***********************************************************************
22 
23 !#############################################################################
24 ! Currently the contact is limited to an overlap contact, i.e. the fine grid lies entirely inside the coarse grid.
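! A minimal, hypothetical usage sketch (not part of this file): it assumes domain_coarse and
! domain_fine were already created with mpp_define_domains, and that the nest covers coarse
! cells (13:44, 13:44) of coarse tile 1 with a 3x refinement on fine tile 7. All numbers,
! tile ids and the pelist handling are illustrative only.
!!$  type(nest_domain_type) :: nest_domain
!!$  integer, allocatable   :: my_pelist(:)
!!$
!!$  allocate(my_pelist(mpp_npes()))
!!$  call mpp_get_current_pelist(my_pelist)
!!$  call mpp_define_nest_domains(nest_domain, domain_fine, domain_coarse,        &
!!$           tile_fine=7, tile_coarse=1,                                         &
!!$           istart_fine=1,    iend_fine=96,   jstart_fine=1,    jend_fine=96,   &
!!$           istart_coarse=13, iend_coarse=44, jstart_coarse=13, jend_coarse=44, &
!!$           pelist=my_pelist, extra_halo=0, name="my_nest")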
25 subroutine mpp_define_nest_domains(nest_domain, domain_fine, domain_coarse, tile_fine, tile_coarse, &
26  istart_fine, iend_fine, jstart_fine, jend_fine, &
27  istart_coarse, iend_coarse, jstart_coarse, jend_coarse, &
28  pelist, extra_halo, name)
29  type(nest_domain_type), intent(inout) :: nest_domain
30  type(domain2D), target, intent(in ) :: domain_fine, domain_coarse
31  integer, intent(in ) :: tile_fine, tile_coarse
32  integer, intent(in ) :: istart_fine, iend_fine, jstart_fine, jend_fine
33  integer, intent(in ) :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
34  integer, optional, intent(in ) :: pelist(:)
35  integer, optional, intent(in ) :: extra_halo
36  character(len=*), optional, intent(in ) :: name
37 
38  logical :: concurrent
39  integer :: n
40  integer :: nx_coarse, ny_coarse
 41  integer :: nx_fine, ny_fine
 42  integer :: x_refine, y_refine
 43  integer :: npes, npes_fine, npes_coarse
44  integer :: extra_halo_local
45  integer, allocatable :: pes(:)
46  integer, allocatable :: pes_coarse(:)
47  integer, allocatable :: pes_fine(:)
48 
49 
50  if(PRESENT(name)) then
51  if(len_trim(name) > NAME_LENGTH) then
52  call mpp_error(FATAL, "mpp_domains_define.inc(mpp_define_nest_domain): "// &
53  "the len_trim of optional argument name ="//trim(name)// &
54  " is greater than NAME_LENGTH, change the argument name or increase NAME_LENGTH")
55  endif
56  nest_domain%name = name
57  endif
58 
59  extra_halo_local = 0
60  if(present(extra_halo)) then
61  if(extra_halo .NE. 0) call mpp_error(FATAL, "mpp_define_nest_domains.inc: only support extra_halo=0, contact developer")
62  extra_halo_local = extra_halo
63  endif
64 
65  nest_domain%tile_fine = tile_fine
66  nest_domain%tile_coarse = tile_coarse
67  nest_domain%istart_fine = istart_fine
68  nest_domain%iend_fine = iend_fine
69  nest_domain%jstart_fine = jstart_fine
70  nest_domain%jend_fine = jend_fine
71  nest_domain%istart_coarse = istart_coarse
72  nest_domain%iend_coarse = iend_coarse
73  nest_domain%jstart_coarse = jstart_coarse
74  nest_domain%jend_coarse = jend_coarse
75 
76  ! since it is overlap contact, ie_fine > is_fine, je_fine > js_fine
77  ! and ie_coarse>is_coarse, je_coarse>js_coarse
78 
79 ! if( tile_fine .NE. 1 ) call mpp_error(FATAL, "mpp_define_nest_domains.inc: only support tile_fine = 1, contact developer")
80 
81  if( iend_fine .LE. istart_fine .OR. jend_fine .LE. jstart_fine ) then
82  call mpp_error(FATAL, "mpp_define_nest_domains.inc: ie_fine <= is_fine or je_fine <= js_fine "// &
83  " for domain "//trim(nest_domain%name) )
84  endif
85  if( iend_coarse .LE. istart_coarse .OR. jend_coarse .LE. jstart_coarse ) then
86  call mpp_error(FATAL, "mpp_define_nest_domains.inc: ie_coarse <= is_coarse or je_coarse <= js_coarse "// &
87  " for nest domain "//trim(nest_domain%name) )
88  endif
89 
 90  !--- check the pelist: either domain_coarse%pelist = pelist or
 91  !--- domain_coarse%pelist + domain_fine%pelist = pelist
92  if( PRESENT(pelist) )then
93  allocate( pes(size(pelist(:))) )
94  pes = pelist
95  else
96  allocate( pes(mpp_npes()) )
97  call mpp_get_current_pelist(pes)
98  end if
99 
100  npes = size(pes)
101  npes_coarse = size(domain_coarse%list(:))
102  npes_fine = size(domain_fine%list(:))
103  !--- pes_fine and pes_coarse should be subset of pelist
104  allocate( pes_coarse(npes_coarse) )
105  allocate( pes_fine (npes_fine ) )
106  do n = 1, npes_coarse
107  pes_coarse(n) = domain_coarse%list(n-1)%pe
108  if( .NOT. ANY(pes(:) == pes_coarse(n)) ) then
109  call mpp_error(FATAL, "mpp_domains_define.inc: pelist_coarse is not subset of pelist")
110  endif
111  enddo
112  do n = 1, npes_fine
113  pes_fine(n) = domain_fine%list(n-1)%pe
114  if( .NOT. ANY(pes(:) == pes_fine(n)) ) then
115  call mpp_error(FATAL, "mpp_domains_define.inc: pelist_fine is not subset of pelist")
116  endif
117  enddo
118 
119  allocate(nest_domain%pelist_fine(npes_fine))
120  allocate(nest_domain%pelist_coarse(npes_coarse))
121  nest_domain%pelist_fine = pes_fine
122  nest_domain%pelist_coarse = pes_coarse
123  nest_domain%is_fine_pe = ANY(pes_fine(:) == mpp_pe())
124  nest_domain%is_coarse_pe = ANY(pes_coarse(:) == mpp_pe())
125 
 126  !--- We assume the fine grid is fully overlapped by (entirely contained within) the coarse grid.
127  if( nest_domain%is_fine_pe ) then
128  if( iend_fine - istart_fine + 1 .NE. domain_fine%x(1)%global%size .OR. &
129  jend_fine - jstart_fine + 1 .NE. domain_fine%y(1)%global%size ) then
130  call mpp_error(FATAL, "mpp_domains_define.inc: The fine global domain is not covered by coarse domain")
131  endif
132  endif
 133  ! First compute the send and recv information from fine to coarse.
134  if( npes == npes_coarse ) then
135  concurrent = .false.
136  else if( npes_fine + npes_coarse == npes ) then
137  concurrent = .true.
138  else
139  call mpp_error(FATAL, "mpp_domains_define.inc: size(pelist_coarse) .NE. size(pelist) and "// &
140  "size(pelist_coarse)+size(pelist_fine) .NE. size(pelist)")
141  endif
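  ! Illustrative PE counts (hypothetical) for the two supported layouts:
  !   serial nesting:     npes = 96, npes_coarse = 96, npes_fine = 32  ->  concurrent = .false.
  !   concurrent nesting: npes = 96, npes_coarse = 64, npes_fine = 32  ->  concurrent = .true.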
142 
 143  !--- confirm that the refinement ratio is an integer.
144  nx_coarse = iend_coarse - istart_coarse + 1
145  ny_coarse = jend_coarse - jstart_coarse + 1
146  nx_fine = iend_fine - istart_fine + 1
147  ny_fine = jend_fine - jstart_fine + 1
148 
149  if( mod(nx_fine,nx_coarse) .NE. 0 ) call mpp_error(FATAL, &
150  "mpp_domains_define.inc: The refinement in x-direction is not integer for nest domain"//trim(nest_domain%name) )
151  x_refine = nx_fine/nx_coarse
152  if( mod(ny_fine,ny_coarse) .NE. 0 ) call mpp_error(FATAL, &
153  "mpp_domains_define.inc: The refinement in y-direction is not integer for nest domain"//trim(nest_domain%name) )
154  y_refine = ny_fine/ny_coarse
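  ! Worked example with hypothetical sizes: a 32x32 coarse footprint covered by a 96x96 fine
  ! grid gives x_refine = 96/32 = 3 and y_refine = 3; a 96x40 pairing would abort above
  ! because mod(96,40) /= 0.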
155 
 156  !--- the coarse grid and fine grid must both be symmetric or both be non-symmetric.
157  if(domain_coarse%symmetry .AND. .NOT. domain_fine%symmetry) then
158  call mpp_error(FATAL, "mpp_domains_define.inc: coarse grid domain is symmetric, fine grid domain is not")
159  endif
160 
161  if(.NOT. domain_coarse%symmetry .AND. domain_fine%symmetry) then
162  call mpp_error(FATAL, "mpp_domains_define.inc: fine grid domain is symmetric, coarse grid domain is not")
163  endif
164 
165  nest_domain%x_refine = x_refine
166  nest_domain%y_refine = y_refine
167  nest_domain%domain_fine => domain_fine
168  nest_domain%domain_coarse => domain_coarse
169 
170  allocate( nest_domain%C2F_T, nest_domain%C2F_C, nest_domain%C2F_E, nest_domain%C2F_N )
171  nest_domain%C2F_T%next => NULL()
172  nest_domain%C2F_C%next => NULL()
173  nest_domain%C2F_N%next => NULL()
174  nest_domain%C2F_E%next => NULL()
175  allocate( nest_domain%F2C_T, nest_domain%F2C_C, nest_domain%F2C_E, nest_domain%F2C_N )
176 
177  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_T, CENTER, trim(nest_domain%name)//" T-cell")
178  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_E, EAST, trim(nest_domain%name)//" E-cell")
179  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_C, CORNER, trim(nest_domain%name)//" C-cell")
180  call compute_overlap_fine_to_coarse(nest_domain, nest_domain%F2C_N, NORTH, trim(nest_domain%name)//" N-cell")
181 
182  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_T, extra_halo_local, CENTER, trim(nest_domain%name)//" T-cell")
183  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_E, extra_halo_local, EAST, trim(nest_domain%name)//" E-cell")
184  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_C, extra_halo_local, CORNER, trim(nest_domain%name)//" C-cell")
185  call compute_overlap_coarse_to_fine(nest_domain, nest_domain%C2F_N, extra_halo_local, NORTH, trim(nest_domain%name)//" N-cell")
186 
187  deallocate(pes, pes_fine, pes_coarse)
188 
189 
190 end subroutine mpp_define_nest_domains
191 
192 !###############################################################################
193 subroutine compute_overlap_coarse_to_fine(nest_domain, overlap, extra_halo, position, name)
194  type(nest_domain_type), intent(inout) :: nest_domain
195  type(nestSpec), intent(inout) :: overlap
196  integer, intent(in ) :: extra_halo
197  integer, intent(in ) :: position
198  character(len=*), intent(in ) :: name
199 
200  type(domain2D), pointer :: domain_fine =>NULL()
201  type(domain2D), pointer :: domain_coarse=>NULL()
202  type(overlap_type), allocatable :: overlapList(:)
203  logical :: is_first
204  integer :: tile_fine, tile_coarse
205  integer :: istart_fine, iend_fine, jstart_fine, jend_fine
206  integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
207  integer :: whalo, ehalo, shalo, nhalo
208  integer :: npes, npes_fine, npes_coarse, n, m
209  integer :: isg_fine, ieg_fine, jsg_fine, jeg_fine
210  integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
211  integer :: is_coarse, ie_coarse, js_coarse, je_coarse
212  integer :: isc_fine, iec_fine, jsc_fine, jec_fine
213  integer :: isd_fine, ied_fine, jsd_fine, jed_fine
214  integer :: isc_east, iec_east, jsc_east, jec_east
215  integer :: isc_west, iec_west, jsc_west, jec_west
216  integer :: isc_south, iec_south, jsc_south, jec_south
217  integer :: isc_north, iec_north, jsc_north, jec_north
218  integer :: x_refine, y_refine, ishift, jshift
219  integer :: nsend, nrecv, dir, from_pe, l
220  integer :: is, ie, js, je, msgsize
221  integer, allocatable :: msg1(:), msg2(:)
222  integer, allocatable :: isl_coarse(:), iel_coarse(:), jsl_coarse(:), jel_coarse(:)
223  integer, allocatable :: isl_fine(:), iel_fine(:), jsl_fine(:), jel_fine(:)
224  integer :: outunit
225 
226  outunit = stdout()
227  domain_fine => nest_domain%domain_fine
228  domain_coarse => nest_domain%domain_coarse
229  call mpp_get_domain_shift (domain_coarse, ishift, jshift, position)
230  tile_fine = nest_domain%tile_fine
231  tile_coarse = nest_domain%tile_coarse
232  istart_fine = nest_domain%istart_fine
233  iend_fine = nest_domain%iend_fine
234  jstart_fine = nest_domain%jstart_fine
235  jend_fine = nest_domain%jend_fine
236  istart_coarse = nest_domain%istart_coarse
237  iend_coarse = nest_domain%iend_coarse + ishift
238  jstart_coarse = nest_domain%jstart_coarse
239  jend_coarse = nest_domain%jend_coarse + jshift
240  x_refine = nest_domain%x_refine
241  y_refine = nest_domain%y_refine
242  npes = mpp_npes()
243  npes_fine = size(nest_domain%pelist_fine(:))
244  npes_coarse = size(nest_domain%pelist_coarse(:))
245  whalo = domain_fine%whalo + extra_halo
246  ehalo = domain_fine%ehalo + extra_halo
247  shalo = domain_fine%shalo + extra_halo
248  nhalo = domain_fine%nhalo + extra_halo
249 
250 
251  allocate(isl_coarse(npes_coarse), iel_coarse(npes_coarse))
252  allocate(jsl_coarse(npes_coarse), jel_coarse(npes_coarse))
253  allocate(isl_fine (npes_fine ), iel_fine (npes_fine ))
254  allocate(jsl_fine (npes_fine ), jel_fine (npes_fine ))
255 
256 
257  call mpp_get_global_domain (domain_fine, xbegin=isg_fine, xend=ieg_fine, &
258  ybegin=jsg_fine, yend=jeg_fine, position=position)
259  call mpp_get_compute_domain (domain_coarse, xbegin=isc_coarse, xend=iec_coarse, &
260  ybegin=jsc_coarse, yend=jec_coarse, position=position)
261  call mpp_get_compute_domain (domain_fine, xbegin=isc_fine, xend=iec_fine, &
262  ybegin=jsc_fine, yend=jec_fine, position=position)
263  call mpp_get_compute_domains(domain_coarse, xbegin=isl_coarse, xend=iel_coarse, &
264  ybegin=jsl_coarse, yend=jel_coarse, position=position)
265  call mpp_get_compute_domains(domain_fine, xbegin=isl_fine, xend=iel_fine, &
266  ybegin=jsl_fine, yend=jel_fine, position=position)
267 
268  overlap%extra_halo = extra_halo
269  if( nest_domain%is_coarse_pe ) then
270  overlap%xbegin = isc_coarse - domain_coarse%whalo
271  overlap%xend = iec_coarse + domain_coarse%ehalo
272  overlap%ybegin = jsc_coarse - domain_coarse%shalo
273  overlap%yend = jec_coarse + domain_coarse%nhalo
274  else
275  overlap%xbegin = isc_fine - domain_fine%whalo
276  overlap%xend = iec_fine + domain_fine%ehalo
277  overlap%ybegin = jsc_fine - domain_fine%shalo
278  overlap%yend = jec_fine + domain_fine%nhalo
279  endif
280 
281  isd_fine = isc_fine - whalo
282  ied_fine = iec_fine + ehalo
283  jsd_fine = jsc_fine - shalo
284  jed_fine = jec_fine + nhalo
285 
286  overlap%nsend = 0
287  overlap%nrecv = 0
288  call init_index_type(overlap%west)
289  call init_index_type(overlap%east)
290  call init_index_type(overlap%south)
291  call init_index_type(overlap%north)
292 
 293  !--- first compute the halo regions and the corresponding indices in the coarse grid.
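  ! Worked example of the fine-to-coarse index mapping used below (hypothetical numbers):
  ! with jstart_coarse = 13, jsg_fine = 1, jsc_fine = 49, jec_fine = 96 and y_refine = 3,
  ! the east-halo rows map to js_coarse = 13 + (49-1)/3 = 29 and je_coarse = 13 + (96-1)/3 = 44,
  ! before the shalo/nhalo extension.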
294  if( nest_domain%is_fine_pe ) then
295  if( ieg_fine == iec_fine .AND. domain_fine%tile_id(1) == tile_fine ) then ! east halo
296  is_coarse = iend_coarse
297  ie_coarse = iend_coarse + ehalo
298  js_coarse = jstart_coarse + ( jsc_fine - jsg_fine )/y_refine
299  je_coarse = jstart_coarse + ( jec_fine - jsg_fine )/y_refine
300  js_coarse = js_coarse - shalo
301  je_coarse = je_coarse + nhalo
302 
303  overlap%east%is_me = iec_fine + 1
304  overlap%east%ie_me = ied_fine
305  overlap%east%js_me = jsd_fine
306  overlap%east%je_me = jed_fine
307  overlap%east%is_you = is_coarse
308  overlap%east%ie_you = ie_coarse
309  overlap%east%js_you = js_coarse
310  overlap%east%je_you = je_coarse
311  endif
312 
313  if( jsg_fine == jsc_fine .AND. domain_fine%tile_id(1) == tile_fine) then ! south
314  is_coarse = istart_coarse + ( isc_fine - isg_fine )/x_refine
315  ie_coarse = istart_coarse + ( iec_fine - isg_fine )/x_refine
316  is_coarse = is_coarse - whalo
317  ie_coarse = ie_coarse + ehalo
318  js_coarse = jstart_coarse - shalo
319  je_coarse = jstart_coarse
320  overlap%south%is_me = isd_fine
321  overlap%south%ie_me = ied_fine
322  overlap%south%js_me = jsd_fine
323  overlap%south%je_me = jsc_fine-1
324  overlap%south%is_you = is_coarse
325  overlap%south%ie_you = ie_coarse
326  overlap%south%js_you = js_coarse
327  overlap%south%je_you = je_coarse
328  endif
329 
330  if( isg_fine == isc_fine .AND. domain_fine%tile_id(1) == tile_fine) then ! west
331  is_coarse = istart_coarse - whalo
332  ie_coarse = istart_coarse
333  js_coarse = jstart_coarse + ( jsc_fine - jsg_fine )/y_refine
334  je_coarse = jstart_coarse + ( jec_fine - jsg_fine )/y_refine
335  js_coarse = js_coarse - shalo
336  je_coarse = je_coarse + nhalo
337  overlap%west%is_me = isd_fine
338  overlap%west%ie_me = isc_fine-1
339  overlap%west%js_me = jsd_fine
340  overlap%west%je_me = jed_fine
341  overlap%west%is_you = is_coarse
342  overlap%west%ie_you = ie_coarse
343  overlap%west%js_you = js_coarse
344  overlap%west%je_you = je_coarse
345  endif
346 
347  if( jeg_fine == jec_fine .AND. domain_fine%tile_id(1) == tile_fine) then ! north
348  is_coarse = istart_coarse + ( isc_fine - isg_fine )/x_refine
349  ie_coarse = istart_coarse + ( iec_fine - isg_fine )/x_refine
350  is_coarse = is_coarse - whalo
351  ie_coarse = ie_coarse + ehalo
352  js_coarse = jend_coarse
353  je_coarse = jend_coarse + nhalo
354  overlap%north%is_me = isd_fine
355  overlap%north%ie_me = ied_fine
356  overlap%north%js_me = jec_fine+1
357  overlap%north%je_me = jed_fine
358  overlap%north%is_you = is_coarse
359  overlap%north%ie_you = ie_coarse
360  overlap%north%js_you = js_coarse
361  overlap%north%je_you = je_coarse
362  endif
363 
364  allocate(overLaplist(npes_coarse))
365 
366  !-------------------------------------------------------------------------
367  !
368  ! Receiving
369  !
370  !-------------------------------------------------------------------------
371  !--- loop through coarse pelist
372  nrecv = 0
373  do n = 1, npes_coarse
374  if( domain_coarse%list(n-1)%tile_id(1) .NE. tile_coarse ) cycle
375  is_first = .true.
376  !--- east halo receiving
377  is_coarse = overlap%east%is_you
378  ie_coarse = overlap%east%ie_you
379  js_coarse = overlap%east%js_you
380  je_coarse = overlap%east%je_you
381  if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
382  dir = 1
383  is_coarse = max( is_coarse, isl_coarse(n) )
384  ie_coarse = min( ie_coarse, iel_coarse(n) )
385  js_coarse = max( js_coarse, jsl_coarse(n) )
386  je_coarse = min( je_coarse, jel_coarse(n) )
387  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
388  if(is_first) then
389  nrecv = nrecv + 1
390  call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
391  is_first = .false.
392  endif
393  call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
394  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
395  endif
396  endif
397 
398  !--- south halo receiving
399  is_coarse = overlap%south%is_you
400  ie_coarse = overlap%south%ie_you
401  js_coarse = overlap%south%js_you
402  je_coarse = overlap%south%je_you
403  if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
404  dir = 3
405  is_coarse = max( is_coarse, isl_coarse(n) )
406  ie_coarse = min( ie_coarse, iel_coarse(n) )
407  js_coarse = max( js_coarse, jsl_coarse(n) )
408  je_coarse = min( je_coarse, jel_coarse(n) )
409 
410  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
411  if(is_first) then
412  nrecv = nrecv + 1
413  call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
414  is_first = .false.
415  endif
416  call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
417  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
418  endif
419  endif
420 
421  !--- west halo receiving
422  is_coarse = overlap%west%is_you
423  ie_coarse = overlap%west%ie_you
424  js_coarse = overlap%west%js_you
425  je_coarse = overlap%west%je_you
426  if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
427  dir = 5
428  is_coarse = max( is_coarse, isl_coarse(n) )
429  ie_coarse = min( ie_coarse, iel_coarse(n) )
430  js_coarse = max( js_coarse, jsl_coarse(n) )
431  je_coarse = min( je_coarse, jel_coarse(n) )
432 
433  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
434  if(is_first) then
435  nrecv = nrecv + 1
436  call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
437  is_first = .false.
438  endif
439  call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
440  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
441  endif
442  endif
443 
444  !--- north halo receiving
445  is_coarse = overlap%north%is_you
446  ie_coarse = overlap%north%ie_you
447  js_coarse = overlap%north%js_you
448  je_coarse = overlap%north%je_you
449  if( je_coarse .GE. js_coarse .AND. ie_coarse .GE. is_coarse ) then
450  dir = 7
451  is_coarse = max( is_coarse, isl_coarse(n) )
452  ie_coarse = min( ie_coarse, iel_coarse(n) )
453  js_coarse = max( js_coarse, jsl_coarse(n) )
454  je_coarse = min( je_coarse, jel_coarse(n) )
455 
456  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
457  if(is_first) then
458  nrecv = nrecv + 1
459  call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
460  is_first = .false.
461  endif
462  call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_coarse(n), &
463  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
464  endif
465  endif
466  enddo
467 
 468  !--- copy the overlaps into the nest_domain data.
469  overlap%nrecv = nrecv
470  if( nrecv > 0 ) then
471  allocate(overlap%recv(nrecv))
472  do n = 1, nrecv
473  call copy_nest_overlap( overlap%recv(n), overLaplist(n) )
474  call deallocate_nest_overlap( overLaplist(n) )
475  enddo
476  endif
477  if(allocated(overlaplist))deallocate(overlapList)
478  endif
479  !-----------------------------------------------------------------------
480  !
481  ! Sending
482  !
483  !-----------------------------------------------------------------------
484 
485  if( nest_domain%is_coarse_pe ) then
486  nsend = 0
487  if(domain_coarse%tile_id(1) == tile_coarse) then
488  isc_east = iend_coarse
489  iec_east = iend_coarse + ehalo
490  jsc_east = jstart_coarse - shalo
491  jec_east = jend_coarse + nhalo
492  isc_east = max(isc_coarse, isc_east)
493  iec_east = min(iec_coarse, iec_east)
494  jsc_east = max(jsc_coarse, jsc_east)
495  jec_east = min(jec_coarse, jec_east)
496 
497  isc_south = istart_coarse - whalo
498  iec_south = iend_coarse + ehalo
499  jsc_south = jstart_coarse - shalo
500  jec_south = jstart_coarse
501  isc_south = max(isc_coarse, isc_south)
502  iec_south = min(iec_coarse, iec_south)
503  jsc_south = max(jsc_coarse, jsc_south)
504  jec_south = min(jec_coarse, jec_south)
505 
506  isc_west = istart_coarse - whalo
507  iec_west = istart_coarse
508  jsc_west = jstart_coarse - shalo
509  jec_west = jend_coarse + nhalo
510  isc_west = max(isc_coarse, isc_west)
511  iec_west = min(iec_coarse, iec_west)
512  jsc_west = max(jsc_coarse, jsc_west)
513  jec_west = min(jec_coarse, jec_west)
514 
515  isc_north = istart_coarse - whalo
516  iec_north = iend_coarse + ehalo
517  jsc_north = jend_coarse
518  jec_north = jend_coarse + nhalo
519  isc_north = max(isc_coarse, isc_north)
520  iec_north = min(iec_coarse, iec_north)
521  jsc_north = max(jsc_coarse, jsc_north)
522  jec_north = min(jec_coarse, jec_north)
523  else
524  isc_west = 0; iec_west = -1; jsc_west = 0; jec_west = -1
 525  isc_east = 0; iec_east = -1; jsc_east = 0; jec_east = -1
526  isc_south = 0; iec_south = -1; jsc_south = 0; jec_south = -1
527  isc_north = 0; iec_north = -1; jsc_north = 0; jec_north = -1
528  endif
529 
530  allocate(overLaplist(npes_fine))
531 
532  do n = 1, npes_fine
533  if( domain_fine%list(n-1)%tile_id(1) .NE. tile_fine ) cycle
534  is_first = .true.
535 
536  !--- to_pe's east
537  if( ieg_fine == iel_fine(n) ) then
538  dir = 1
539  if( iec_east .GE. isc_east .AND. jec_east .GE. jsc_east ) then
540  is_coarse = iend_coarse
541  ie_coarse = iend_coarse + ehalo
542  js_coarse = jstart_coarse + ( jsl_fine(n) - jsg_fine )/y_refine
543  je_coarse = jstart_coarse + ( jel_fine(n) - jsg_fine )/y_refine
544  js_coarse = js_coarse - shalo
545  je_coarse = je_coarse + nhalo
546  is_coarse = max(isc_east, is_coarse)
547  ie_coarse = min(iec_east, ie_coarse)
548  js_coarse = max(jsc_east, js_coarse)
549  je_coarse = min(jec_east, je_coarse)
550  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
551  if(is_first) then
552  nsend = nsend + 1
553  call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
554  is_first = .false.
555  endif
556  call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
557  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
558  endif
559  endif
560  endif
561 
562  !--- to_pe's south
563  if( jsg_fine == jsl_fine(n) ) then
564  dir = 3
565  if( iec_south .GE. isc_south .AND. jec_south .GE. jsc_south ) then
566  is_coarse = istart_coarse + ( isl_fine(n) - isg_fine )/x_refine
567  ie_coarse = istart_coarse + ( iel_fine(n) - isg_fine )/x_refine
 568  is_coarse = is_coarse - whalo
 569  ie_coarse = ie_coarse + ehalo
570  js_coarse = jstart_coarse - shalo
571  je_coarse = jstart_coarse
572  is_coarse = max(isc_south, is_coarse)
573  ie_coarse = min(iec_south, ie_coarse)
574  js_coarse = max(jsc_south, js_coarse)
575  je_coarse = min(jec_south, je_coarse)
576  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
577  if(is_first) then
578  nsend = nsend + 1
579  call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
580  is_first = .false.
581  endif
582  call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
583  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
584  endif
585  endif
586  endif
587 
588  !--- to_pe's west
589  if( isg_fine == isl_fine(n) ) then
590  dir = 5
591  if( iec_west .GE. isc_west .AND. jec_west .GE. jsc_west ) then
592  is_coarse = istart_coarse - whalo
593  ie_coarse = istart_coarse
594  js_coarse = jstart_coarse + ( jsl_fine(n) - jsg_fine )/y_refine
595  je_coarse = jstart_coarse + ( jel_fine(n) - jsg_fine )/y_refine
596  js_coarse = js_coarse - shalo
597  je_coarse = je_coarse + nhalo
598  is_coarse = max(isc_west, is_coarse)
599  ie_coarse = min(iec_west, ie_coarse)
600  js_coarse = max(jsc_west, js_coarse)
601  je_coarse = min(jec_west, je_coarse)
602  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
603  if(is_first) then
604  nsend = nsend + 1
605  call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
606  is_first = .false.
607  endif
608  call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
609  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
610  endif
611  endif
612  endif
613 
614  !--- to_pe's north
615  if( jeg_fine == jel_fine(n) ) then
616  dir = 7
617  if( iec_north .GE. isc_north .AND. jec_north .GE. jsc_north ) then
618  is_coarse = istart_coarse + ( isl_fine(n) - isg_fine )/x_refine
619  ie_coarse = istart_coarse + ( iel_fine(n) - isg_fine )/x_refine
 620  is_coarse = is_coarse - whalo
 621  ie_coarse = ie_coarse + ehalo
622  js_coarse = jend_coarse
623  je_coarse = jend_coarse + nhalo
624  is_coarse = max(isc_north, is_coarse)
625  ie_coarse = min(iec_north, ie_coarse)
626  js_coarse = max(jsc_north, js_coarse)
627  je_coarse = min(jec_north, je_coarse)
628  if( ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
629  if(is_first) then
630  nsend = nsend + 1
631  call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
632  is_first = .false.
633  endif
634  call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_fine(n), &
635  is_coarse, ie_coarse, js_coarse, je_coarse , dir, ZERO)
636  endif
637  endif
638  endif
639  enddo
640 
 641  !--- copy the overlaps into the nest_domain data.
642  overlap%nsend = nsend
643  if( nsend > 0 ) then
644  allocate(overlap%send(nsend))
645  do n = 1, nsend
646  call copy_nest_overlap( overlap%send(n), overLaplist(n) )
647  call deallocate_nest_overlap( overLaplist(n) )
648  enddo
649  endif
650  if(allocated(overlaplist))deallocate(overLaplist)
651  endif
652 
653 
654  deallocate(isl_coarse, iel_coarse, jsl_coarse, jel_coarse)
655  deallocate(isl_fine, iel_fine, jsl_fine, jel_fine)
656 
657  if(debug_message_passing) then
658  allocate(msg1(0:npes-1), msg2(0:npes-1) )
659  msg1 = 0
660  msg2 = 0
661  do m = 1, overlap%nrecv
662  msgsize = 0
663  do n = 1, overlap%recv(m)%count
664  is = overlap%recv(m)%is(n); ie = overlap%recv(m)%ie(n)
665  js = overlap%recv(m)%js(n); je = overlap%recv(m)%je(n)
666  msgsize = msgsize + (ie-is+1)*(je-js+1)
667  end do
668  from_pe = overlap%recv(m)%pe
669  l = from_pe-mpp_root_pe()
670  call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1)
671  msg2(l) = msgsize
672  enddo
673 
674  do m = 1, overlap%nsend
675  msgsize = 0
676  do n = 1, overlap%send(m)%count
677  is = overlap%send(m)%is(n); ie = overlap%send(m)%ie(n)
678  js = overlap%send(m)%js(n); je = overlap%send(m)%je(n)
679  msgsize = msgsize + (ie-is+1)*(je-js+1)
680  end do
681  call mpp_send( msgsize, plen=1, to_pe=overlap%send(m)%pe, tag=COMM_TAG_1)
682  enddo
683  call mpp_sync_self(check=EVENT_RECV)
684 
685  do m = 0, npes-1
686  if(msg1(m) .NE. msg2(m)) then
687  print*, "compute_overlap_coarse_to_fine: My pe = ", mpp_pe(), ",name =", trim(name),", from pe=", &
688  m+mpp_root_pe(), ":send size = ", msg1(m), ", recv size = ", msg2(m)
689  call mpp_error(FATAL, "mpp_compute_overlap_coarse_to_fine: mismatch on send and recv size")
690  endif
691  enddo
692  call mpp_sync_self()
693  write(outunit,*)"NOTE from compute_overlap_coarse_to_fine: "// &
694  "message sizes are matched between send and recv for "//trim(name)
695  deallocate(msg1, msg2)
696  endif
697 
698 
699 end subroutine compute_overlap_coarse_to_fine
700 
701 !###############################################################################
 702 !-- This routine computes the send and recv information between the overlapped nesting
 703 !-- regions. The grid-cell location of the data is given by the position argument.
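! Hypothetical coarse-side sketch (not part of this file): after this routine has filled
! overlap%center, a coarse PE can query the footprint through mpp_get_F2C_index (defined
! below) and size a buffer for the incoming fine-grid data. The name buffer is illustrative.
!!$  integer :: is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f
!!$  real, allocatable :: buffer(:,:)
!!$  call mpp_get_F2C_index(nest_domain, is_c, ie_c, js_c, je_c, is_f, ie_f, js_f, je_f, position=CENTER)
!!$  if( ie_f .GE. is_f .AND. je_f .GE. js_f ) allocate(buffer(is_f:ie_f, js_f:je_f))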
704 subroutine compute_overlap_fine_to_coarse(nest_domain, overlap, position, name)
705  type(nest_domain_type), intent(inout) :: nest_domain
706  type(nestSpec), intent(inout) :: overlap
707  integer, intent(in ) :: position
708  character(len=*), intent(in ) :: name
709 
710  !--- local variables
711 
712  type(domain2D), pointer :: domain_fine =>NULL()
713  type(domain2D), pointer :: domain_coarse=>NULL()
714  type(overlap_type), allocatable :: overlapList(:)
715  logical :: is_first
716  integer :: tile_fine, tile_coarse
717  integer :: istart_fine, iend_fine, jstart_fine, jend_fine
718  integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse
719  integer :: whalo, ehalo, shalo, nhalo
720  integer :: npes, npes_fine, npes_coarse, n, m
721  integer :: isg_fine, ieg_fine, jsg_fine, jeg_fine
722  integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
723  integer :: is_coarse, ie_coarse, js_coarse, je_coarse
724  integer :: is_fine, ie_fine, js_fine, je_fine
725  integer :: isc_fine, iec_fine, jsc_fine, jec_fine
726  integer :: is_you, ie_you, js_you, je_you
727  integer :: x_refine, y_refine, ishift, jshift
728  integer :: nsend, nrecv, dir, from_pe, l
729  integer :: is, ie, js, je, msgsize
730  integer, allocatable :: msg1(:), msg2(:)
731  integer, allocatable :: isl_coarse(:), iel_coarse(:), jsl_coarse(:), jel_coarse(:)
732  integer, allocatable :: isl_fine(:), iel_fine(:), jsl_fine(:), jel_fine(:)
733  integer :: outunit
734 
735  outunit = stdout()
736  domain_fine => nest_domain%domain_fine
737  domain_coarse => nest_domain%domain_coarse
738  tile_fine = nest_domain%tile_fine
739  tile_coarse = nest_domain%tile_coarse
740  istart_fine = nest_domain%istart_fine
741  iend_fine = nest_domain%iend_fine
742  jstart_fine = nest_domain%jstart_fine
743  jend_fine = nest_domain%jend_fine
744  istart_coarse = nest_domain%istart_coarse
745  iend_coarse = nest_domain%iend_coarse
746  jstart_coarse = nest_domain%jstart_coarse
747  jend_coarse = nest_domain%jend_coarse
748  x_refine = nest_domain%x_refine
749  y_refine = nest_domain%y_refine
750  npes = mpp_npes()
751  npes_fine = size(nest_domain%pelist_fine(:))
752  npes_coarse = size(nest_domain%pelist_coarse(:))
753 
754 
755  allocate(isl_coarse(npes_coarse), iel_coarse(npes_coarse) )
756  allocate(jsl_coarse(npes_coarse), jel_coarse(npes_coarse) )
757  allocate(isl_fine(npes_fine), iel_fine(npes_fine) )
758  allocate(jsl_fine(npes_fine), jel_fine(npes_fine) )
759 
760  call mpp_get_compute_domain (domain_coarse, xbegin=isc_coarse, xend=iec_coarse, ybegin=jsc_coarse, yend=jec_coarse)
761  call mpp_get_compute_domain (domain_fine, xbegin=isc_fine, xend=iec_fine, ybegin=jsc_fine, yend=jec_fine)
762  call mpp_get_compute_domains(domain_coarse, xbegin=isl_coarse, xend=iel_coarse, ybegin=jsl_coarse, yend=jel_coarse)
763  call mpp_get_compute_domains(domain_fine, xbegin=isl_fine, xend=iel_fine, ybegin=jsl_fine, yend=jel_fine)
764  call mpp_get_domain_shift (domain_coarse, ishift, jshift, position)
765  overlap%center%is_you = 0; overlap%center%ie_you = -1
766  overlap%center%js_you = 0; overlap%center%je_you = -1
767 
768  if( nest_domain%is_fine_pe ) then
769  overlap%xbegin = isc_fine - domain_fine%whalo
770  overlap%xend = iec_fine + domain_fine%ehalo + ishift
771  overlap%ybegin = jsc_fine - domain_fine%shalo
772  overlap%yend = jec_fine + domain_fine%nhalo + jshift
773  else
774  overlap%xbegin = isc_coarse - domain_coarse%whalo
775  overlap%xend = iec_coarse + domain_coarse%ehalo + ishift
776  overlap%ybegin = jsc_coarse - domain_coarse%shalo
777  overlap%yend = jec_coarse + domain_coarse%nhalo + jshift
778  endif
779 
780  overlap%nsend = 0
781  overlap%nrecv = 0
782  call init_index_type(overlap%center)
783 
784  !-----------------------------------------------------------------------------------------
785  !
 786  ! Sending from fine to coarse.
 787  ! Compute the send information from the fine grid to the coarse grid. Only the interior
 788  ! of the fine grid needs to be sent to the coarse grid.
789  !-----------------------------------------------------------------------------------------
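  ! Worked example of the coarse-to-fine index mapping used in the loop below (hypothetical
  ! numbers): with istart_fine = 1, istart_coarse = 13 and x_refine = 3, coarse columns
  ! is_coarse = 20 .. ie_coarse = 27 map to fine columns is_fine = 1 + (20-13)*3 = 22 through
  ! ie_fine = 1 + (27-13+1)*3 - 1 = 45, i.e. 8 coarse cells cover 24 fine cells.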
790  nsend = 0
791  if( nest_domain%is_fine_pe ) then
792  allocate(overLaplist(npes_coarse))
793  do n = 1, npes_coarse
794  if(domain_coarse%list(n-1)%tile_id(1) == tile_coarse) then
795  is_coarse = max( istart_coarse, isl_coarse(n) )
796  ie_coarse = min( iend_coarse, iel_coarse(n) )
797  js_coarse = max( jstart_coarse, jsl_coarse(n) )
798  je_coarse = min( jend_coarse, jel_coarse(n) )
799  if(ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
800  is_fine = istart_fine + (is_coarse - istart_coarse) * x_refine
801  ie_fine = istart_fine + (ie_coarse - istart_coarse + 1) * x_refine - 1
802  js_fine = jstart_fine + (js_coarse - jstart_coarse) * y_refine
803  je_fine = jstart_fine + (je_coarse - jstart_coarse + 1) * y_refine - 1
804  dir = 0
805  is_fine = max(isc_fine, is_fine)
806  ie_fine = min(iec_fine, ie_fine)
807  js_fine = max(jsc_fine, js_fine)
808  je_fine = min(jec_fine, je_fine)
809  if( ie_fine .GE. is_fine .AND. je_fine .GE. js_fine ) then
810  nsend = nsend + 1
811  call allocate_nest_overlap(overLaplist(nsend), MAXOVERLAP)
812  call insert_nest_overlap(overLaplist(nsend), nest_domain%pelist_coarse(n), &
813  is_fine, ie_fine+ishift, js_fine, je_fine+jshift, dir, ZERO)
814  endif
815  endif
816  endif
817  enddo
818  overlap%nsend = nsend
819  if(nsend > 0) then
820  allocate(overlap%send(nsend))
821  do n = 1, nsend
822  call copy_nest_overlap(overlap%send(n), overlaplist(n) )
823  call deallocate_nest_overlap(overlaplist(n))
824  enddo
825  endif
826  if(allocated(overlaplist))deallocate(overlaplist)
827  endif
828 
829  !--------------------------------------------------------------------------------
 830  ! Compute the recv information from the fine grid to the coarse grid. Only the interior
 831  ! of the fine grid needs to be received on the coarse grid.
832  !--------------------------------------------------------------------------------
833 
834  if( nest_domain%is_coarse_pe ) then
835  nrecv = 0
836  if(domain_coarse%tile_id(1) == tile_coarse) then
837  is_coarse = max( istart_coarse, isc_coarse )
838  ie_coarse = min( iend_coarse, iec_coarse )
839  js_coarse = max( jstart_coarse, jsc_coarse )
840  je_coarse = min( jend_coarse, jec_coarse )
841 
842  if(ie_coarse .GE. is_coarse .AND. je_coarse .GE. js_coarse ) then
843  is_fine = istart_fine + (is_coarse - istart_coarse) * x_refine
844  ie_fine = istart_fine + (ie_coarse - istart_coarse + 1) * x_refine - 1
845  js_fine = jstart_fine + (js_coarse - jstart_coarse) * y_refine
846  je_fine = jstart_fine + (je_coarse - jstart_coarse + 1) * y_refine - 1
847  overlap%center%is_me = is_coarse; overlap%center%ie_me = ie_coarse + ishift
848  overlap%center%js_me = js_coarse; overlap%center%je_me = je_coarse + jshift
849  overlap%center%is_you = is_fine; overlap%center%ie_you = ie_fine + ishift
850  overlap%center%js_you = js_fine; overlap%center%je_you = je_fine + jshift
851  dir = 0
852  allocate(overLaplist(npes_fine))
853  do n = 1, npes_fine
854  is_you = max(isl_fine(n), is_fine)
855  ie_you = min(iel_fine(n), ie_fine)
856  js_you = max(jsl_fine(n), js_fine)
857  je_you = min(jel_fine(n), je_fine)
858  if( ie_you .GE. is_you .AND. je_you .GE. js_you ) then
859  nrecv = nrecv + 1
860  call allocate_nest_overlap(overLaplist(nrecv), MAXOVERLAP)
861  call insert_nest_overlap(overLaplist(nrecv), nest_domain%pelist_fine(n), &
862  is_you, ie_you+ishift, js_you, je_you+jshift , dir, ZERO)
863  endif
864  enddo
865  endif
866  endif
867  overlap%nrecv = nrecv
868  if(nrecv > 0) then
869  allocate(overlap%recv(nrecv))
870  do n = 1, nrecv
871  call copy_nest_overlap(overlap%recv(n), overlaplist(n) )
872  call deallocate_nest_overlap( overLaplist(n) )
873  enddo
874  endif
875  if(allocated(overlaplist))deallocate(overlaplist)
876 
877  endif
878 
879  deallocate(isl_coarse, iel_coarse, jsl_coarse, jel_coarse)
880  deallocate(isl_fine, iel_fine, jsl_fine, jel_fine)
881 
882  if(debug_message_passing) then
883  allocate(msg1(0:npes-1), msg2(0:npes-1) )
884  msg1 = 0
885  msg2 = 0
886  do m = 1, overlap%nrecv
887  msgsize = 0
888  do n = 1, overlap%recv(m)%count
889  is = overlap%recv(m)%is(n); ie = overlap%recv(m)%ie(n)
890  js = overlap%recv(m)%js(n); je = overlap%recv(m)%je(n)
891  msgsize = msgsize + (ie-is+1)*(je-js+1)
892  end do
893  from_pe = overlap%recv(m)%pe
894  l = from_pe-mpp_root_pe()
895  call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2)
896  msg2(l) = msgsize
897  enddo
898 
899  do m = 1, overlap%nsend
900  msgsize = 0
901  do n = 1, overlap%send(m)%count
902  is = overlap%send(m)%is(n); ie = overlap%send(m)%ie(n)
903  js = overlap%send(m)%js(n); je = overlap%send(m)%je(n)
904  msgsize = msgsize + (ie-is+1)*(je-js+1)
905  end do
906  call mpp_send( msgsize, plen=1, to_pe=overlap%send(m)%pe, tag=COMM_TAG_2)
907  enddo
908  call mpp_sync_self(check=EVENT_RECV)
909 
910  do m = 0, npes-1
911  if(msg1(m) .NE. msg2(m)) then
912  print*, "compute_overlap_fine_to_coarse: My pe = ", mpp_pe(), ",name =", trim(name),", from pe=", &
913  m+mpp_root_pe(), ":send size = ", msg1(m), ", recv size = ", msg2(m)
 914  call mpp_error(FATAL, "mpp_compute_overlap_fine_to_coarse: mismatch on send and recv size")
915  endif
916  enddo
917  call mpp_sync_self()
918  write(outunit,*)"NOTE from compute_overlap_fine_to_coarse: "// &
919  "message sizes are matched between send and recv for "//trim(name)
920  deallocate(msg1, msg2)
921  endif
922 
923 end subroutine compute_overlap_fine_to_coarse
924 
925 
926 
927 
928 
929 !!$subroutine set_overlap_fine_to_coarse(nest_domain, position)
930 !!$ type(nest_domain_type), intent(inout) :: nest_domain
931 !!$ integer, intent(in ) :: position
932 !!$
933 !!$
934 !!$ call mpp_get_domain_shift(domain, ishift, jshift, position)
935 !!$ update_in => nest_domain%F2C_T
936 !!$ select case(position)
937 !!$ case (CORNER)
938 !!$ update_out => nest_domain%F2C_C
939 !!$ case (EAST)
940 !!$ update_out => nest_domain%F2C_E
941 !!$ case (NORTH)
942 !!$ update_out => nest_domain%F2C_N
943 !!$ case default
944 !!$ call mpp_error(FATAL, "mpp_domains_define.inc(set_overlap_fine_to_coarse): the position should be CORNER, EAST or NORTH")
945 !!$ end select
946 !!$
947 !!$ nsend = update_in%nsend
948 !!$ nrecv = update_in%nrecv
949 !!$ update_out%pe = update_in%pe
950 !!$ update_out%nsend = nsend
951 !!$ update_out%nrecv = nrecv
952 !!$
953 !!$ if( nsend > 0 ) then
954 !!$ allocate(update_out%send(nsend))
955 !!$ do n = 1, nsend
956 !!$ count = update_in%send(n)%count
957 !!$ call allocate_overlap_type(update_out%send(n), update_in%count, overlap_in%type)
958 !!$ do m = 1, count
959 !!$ update_out%send(n)%is (count) = update_in%send(n)%is (count)
960 !!$ update_out%send(n)%ie (count) = update_in%send(n)%ie (count) + ishift
961 !!$ update_out%send(n)%js (count) = update_in%send(n)%js (count)
962 !!$ update_out%send(n)%je (count) = update_in%send(n)%je (count) + jshift
963 !!$ update_out%send(n)%tileMe (count) = update_in%send(n)%tileMe (count)
964 !!$ update_out%send(n)%dir (count) = update_in%send(n)%dir (count)
965 !!$ update_out%send(n)%rotation(count) = update_in%send(n)%rotation(count)
966 !!$ enddo
967 !!$ enddo
968 !!$ endif
969 !!$
970 !!$
971 !!$ if( nrecv > 0 ) then
972 !!$ allocate(update_out%recv(nrecv))
973 !!$ do n = 1, nrecv
974 !!$ count = update_in%recv(n)%count
975 !!$ call allocate_overlap_type(update_out%recv(n), update_in%count, overlap_in%type)
976 !!$ do m = 1, count
977 !!$ update_out%recv(n)%is (count) = update_in%recv(n)%is (count)
978 !!$ update_out%recv(n)%ie (count) = update_in%recv(n)%ie (count) + ishift
979 !!$ update_out%recv(n)%js (count) = update_in%recv(n)%js (count)
980 !!$ update_out%recv(n)%je (count) = update_in%recv(n)%je (count) + jshift
981 !!$ update_out%recv(n)%tileMe (count) = update_in%recv(n)%tileMe (count)
982 !!$ update_out%recv(n)%dir (count) = update_in%recv(n)%dir (count)
983 !!$ update_out%recv(n)%rotation(count) = update_in%recv(n)%rotation(count)
984 !!$ enddo
985 !!$ enddo
986 !!$ endif
987 !!$
988 !!$end subroutine set_overlap_fine_to_coarse
989 
990 
991 !###############################################################################
992 
993 subroutine init_index_type (indexData )
994  type(index_type), intent(inout) :: indexData
995 
996  indexData%is_me = 0
997  indexData%ie_me = -1
998  indexData%js_me = 0
999  indexData%je_me = -1
1000  indexData%is_you = 0
1001  indexData%ie_you = -1
1002  indexData%js_you = 0
1003  indexData%je_you = -1
1004 
1005 end subroutine init_index_type
1006 
1007 subroutine allocate_nest_overlap(overlap, count)
1008  type(overlap_type), intent(inout) :: overlap
1009  integer, intent(in ) :: count
1010 
1011  overlap%count = 0
1012  overlap%pe = NULL_PE
1013  if( ASSOCIATED(overlap%is) ) call mpp_error(FATAL, &
 1014  "mpp_define_nest_domains.inc: overlap has already been allocated")
1015 
1016  allocate(overlap%is (count) )
1017  allocate(overlap%ie (count) )
1018  allocate(overlap%js (count) )
1019  allocate(overlap%je (count) )
1020  allocate(overlap%dir (count) )
1021  allocate(overlap%rotation (count) )
1022  allocate(overlap%msgsize (count) )
1023 
1024 end subroutine allocate_nest_overlap
1025 
1026 !##############################################################################
1027 subroutine deallocate_nest_overlap(overlap)
1028  type(overlap_type), intent(inout) :: overlap
1029 
1030  overlap%count = 0
1031  overlap%pe = NULL_PE
1032  deallocate(overlap%is)
1033  deallocate(overlap%ie)
1034  deallocate(overlap%js)
1035  deallocate(overlap%je)
1036  deallocate(overlap%dir)
1037  deallocate(overlap%rotation)
1038  deallocate(overlap%msgsize)
1039 
1040 end subroutine deallocate_nest_overlap
1041 
1042 !##############################################################################
1043 subroutine insert_nest_overlap(overlap, pe, is, ie, js, je, dir, rotation)
1044  type(overlap_type), intent(inout) :: overlap
1045  integer, intent(in ) :: pe
1046  integer, intent(in ) :: is, ie, js, je
1047  integer, intent(in ) :: dir, rotation
1048  integer :: count
1049 
1050  if( overlap%count == 0 ) then
1051  overlap%pe = pe
1052  else
1053  if(overlap%pe .NE. pe) call mpp_error(FATAL, &
1054  "mpp_define_nest_domains.inc: mismatch on pe")
1055  endif
1056  overlap%count = overlap%count+1
1057  count = overlap%count
1058  if(count > size(overlap%is(:))) call mpp_error(FATAL, &
1059  "mpp_define_nest_domains.inc: overlap%count > size(overlap%is), contact developer")
1060  overlap%is (count) = is
1061  overlap%ie (count) = ie
1062  overlap%js (count) = js
1063  overlap%je (count) = je
1064  overlap%dir (count) = dir
1065  overlap%rotation (count) = rotation
1066  overlap%msgsize (count) = (ie-is+1)*(je-js+1)
1067 
1068 end subroutine insert_nest_overlap
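! Minimal sketch of the overlap_type helper lifecycle used throughout this file (the temporary
! tmp and the pe/index values are illustrative; overlap%recv(1) is assumed to exist):
!!$  type(overlap_type) :: tmp
!!$  call allocate_nest_overlap(tmp, MAXOVERLAP)                    ! reserve MAXOVERLAP slots
!!$  call insert_nest_overlap(tmp, pe=4, is=13, ie=20, js=13, je=20, dir=1, rotation=ZERO)
!!$  call copy_nest_overlap(overlap%recv(1), tmp)                   ! deep copy of the filled slots
!!$  call deallocate_nest_overlap(tmp)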
1069 
1070 
1071 !#########################################################
1072 subroutine copy_nest_overlap(overlap_out, overlap_in)
1073  type(overlap_type), intent(inout) :: overlap_out
1074  type(overlap_type), intent(in) :: overlap_in
1075 
1076  if(overlap_in%count == 0) call mpp_error(FATAL, &
1077  "mpp_define_nest_domains.inc: overlap_in%count is 0")
1078 
1079  if(associated(overlap_out%is)) call mpp_error(FATAL, &
 1080  "mpp_define_nest_domains.inc: overlap_out has already been allocated")
1081 
1082  call allocate_nest_overlap(overlap_out, overlap_in%count)
1083  overlap_out%count = overlap_in%count
1084  overlap_out%pe = overlap_in%pe
1085 
1086  overlap_out%is(:) = overlap_in%is(1:overlap_in%count)
1087  overlap_out%ie(:) = overlap_in%ie(1:overlap_in%count)
1088  overlap_out%js(:) = overlap_in%js(1:overlap_in%count)
1089  overlap_out%je(:) = overlap_in%je(1:overlap_in%count)
1091  overlap_out%dir(:) = overlap_in%dir(1:overlap_in%count)
1092  overlap_out%rotation(:) = overlap_in%rotation(1:overlap_in%count)
1093  overlap_out%msgsize(:) = overlap_in%msgsize(1:overlap_in%count)
1094 
1095 
1096 end subroutine copy_nest_overlap
1097 
1098 
1099 !#######################################################################
 1100  ! This routine finds the nestSpec whose extra halo matches the input extra_halo for the
 1101  ! given position; if none exists yet, a new one is computed and appended to the list.
1102 function search_C2F_nest_overlap(nest_domain, extra_halo, position)
1103  type(nest_domain_type), intent(inout) :: nest_domain
1104  integer, intent(in) :: extra_halo
1105  integer, intent(in) :: position
1106  type(nestSpec), pointer :: search_C2F_nest_overlap
1107  type(nestSpec), pointer :: update_ref
1108  character(len=128) :: name
1109 
1110  select case(position)
1111  case (CENTER)
1112  name = trim(nest_domain%name)//" T-cell"
1113  update_ref => nest_domain%C2F_T
 1114  case (CORNER)
  name = trim(nest_domain%name)//" C-cell"
 1115  update_ref => nest_domain%C2F_C
 1116  case (NORTH)
  name = trim(nest_domain%name)//" N-cell"
 1117  update_ref => nest_domain%C2F_N
 1118  case (EAST)
  name = trim(nest_domain%name)//" E-cell"
 1119  update_ref => nest_domain%C2F_E
1120  case default
1121  call mpp_error(FATAL,"mpp_define_nest_domains.inc(search_C2F_nest_overlap): position should be CENTER|CORNER|EAST|NORTH")
1122  end select
1123 
1124  search_C2F_nest_overlap => update_ref
1125 
1126  do
1127  if(extra_halo == search_C2F_nest_overlap%extra_halo) then
1128  exit ! found domain
1129  endif
1130  !--- if not found, switch to next
1131  if(.NOT. ASSOCIATED(search_C2F_nest_overlap%next)) then
1132  allocate(search_C2F_nest_overlap%next)
1133  search_C2F_nest_overlap => search_C2F_nest_overlap%next
1134  call compute_overlap_coarse_to_fine(nest_domain, search_C2F_nest_overlap, extra_halo, position, name)
1135  exit
1136  else
1137  search_C2F_nest_overlap => search_C2F_nest_overlap%next
1138  end if
1139 
1140  end do
1141 
1142  update_ref => NULL()
1143 
1144  end function search_C2F_nest_overlap
1145 
1146 !#######################################################################
 1147  ! This routine returns the fine-to-coarse nestSpec for the given grid position.
1149  function search_F2C_nest_overlap(nest_domain, position)
1150  type(nest_domain_type), intent(inout) :: nest_domain
1151  integer, intent(in) :: position
1152  type(nestSpec), pointer :: search_F2C_nest_overlap
1153 
1154  select case(position)
1155  case (CENTER)
1156  search_F2C_nest_overlap => nest_domain%F2C_T
1157  case (CORNER)
1158  search_F2C_nest_overlap => nest_domain%F2C_C
1159  case (NORTH)
1160  search_F2C_nest_overlap => nest_domain%F2C_N
1161  case (EAST)
1162  search_F2C_nest_overlap => nest_domain%F2C_E
1163  case default
1164  call mpp_error(FATAL,"mpp_define_nest_domains.inc(search_F2C_nest_overlap): position should be CENTER|CORNER|EAST|NORTH")
1165  end select
1166 
1167  end function search_F2C_nest_overlap
1168 
1169  !################################################################
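! Hypothetical fine-side sketch (not part of this file): retrieve the west-halo footprint
! computed by compute_overlap_coarse_to_fine and size a buffer for the coarse data. The
! variable names are illustrative.
!!$  integer :: isw_f, iew_f, jsw_f, jew_f, isw_c, iew_c, jsw_c, jew_c
!!$  real, allocatable :: wbuffer(:,:)
!!$  call mpp_get_C2F_index(nest_domain, isw_f, iew_f, jsw_f, jew_f, &
!!$                         isw_c, iew_c, jsw_c, jew_c, WEST, position=CENTER)
!!$  if( iew_c .GE. isw_c .AND. jew_c .GE. jsw_c ) allocate(wbuffer(isw_c:iew_c, jsw_c:jew_c))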
1170  subroutine mpp_get_C2F_index(nest_domain, is_fine, ie_fine, js_fine, je_fine, &
1171  is_coarse, ie_coarse, js_coarse, je_coarse, dir, position)
1172 
1173  type(nest_domain_type), intent(in ) :: nest_domain
1174  integer, intent(out) :: is_fine, ie_fine, js_fine, je_fine
1175  integer, intent(out) :: is_coarse, ie_coarse, js_coarse, je_coarse
1176  integer, intent(in ) :: dir
1177  integer, optional, intent(in ) :: position
1178 
1179  integer :: update_position
1180  type(nestSpec), pointer :: update => NULL()
1181 
1182  update_position = CENTER
1183  if(present(position)) update_position = position
1184 
1185  select case(update_position)
1186  case (CENTER)
1187  update => nest_domain%C2F_T
1188  case (EAST)
1189  update => nest_domain%C2F_E
1190  case (CORNER)
1191  update => nest_domain%C2F_C
1192  case (NORTH)
1193  update => nest_domain%C2F_N
1194  case default
1195  call mpp_error(FATAL, "mpp_define_nest_domains.inc(mpp_get_C2F_index): invalid option argument position")
1196  end select
1197 
1198  select case(dir)
1199  case(WEST)
1200  is_fine = update%west%is_me
1201  ie_fine = update%west%ie_me
1202  js_fine = update%west%js_me
1203  je_fine = update%west%je_me
1204  is_coarse = update%west%is_you
1205  ie_coarse = update%west%ie_you
1206  js_coarse = update%west%js_you
1207  je_coarse = update%west%je_you
1208  case(EAST)
1209  is_fine = update%east%is_me
1210  ie_fine = update%east%ie_me
1211  js_fine = update%east%js_me
1212  je_fine = update%east%je_me
1213  is_coarse = update%east%is_you
1214  ie_coarse = update%east%ie_you
1215  js_coarse = update%east%js_you
1216  je_coarse = update%east%je_you
1217  case(SOUTH)
1218  is_fine = update%south%is_me
1219  ie_fine = update%south%ie_me
1220  js_fine = update%south%js_me
1221  je_fine = update%south%je_me
1222  is_coarse = update%south%is_you
1223  ie_coarse = update%south%ie_you
1224  js_coarse = update%south%js_you
1225  je_coarse = update%south%je_you
1226  case(NORTH)
1227  is_fine = update%north%is_me
1228  ie_fine = update%north%ie_me
1229  js_fine = update%north%js_me
1230  je_fine = update%north%je_me
1231  is_coarse = update%north%is_you
1232  ie_coarse = update%north%ie_you
1233  js_coarse = update%north%js_you
1234  je_coarse = update%north%je_you
1235  case default
1236  call mpp_error(FATAL, "mpp_define_nest_domains.inc: invalid value for argument dir")
1237  end select
1238 
1239 
1240  end subroutine mpp_get_C2F_index
1241 
1242  !################################################################
1243  subroutine mpp_get_F2C_index(nest_domain, is_coarse, ie_coarse, js_coarse, je_coarse, &
1244  is_fine, ie_fine, js_fine, je_fine, position)
1245 
1246  type(nest_domain_type), intent(in ) :: nest_domain
1247  integer, intent(out) :: is_fine, ie_fine, js_fine, je_fine
1248  integer, intent(out) :: is_coarse, ie_coarse, js_coarse, je_coarse
1249  integer, optional, intent(in ) :: position
1250 
1251  integer :: update_position
1252  type(nestSpec), pointer :: update => NULL()
1253 
1254  update_position = CENTER
1255  if(present(position)) update_position = position
1256 
1257  select case(update_position)
1258  case (CENTER)
1259  update => nest_domain%F2C_T
1260  case (EAST)
1261  update => nest_domain%F2C_E
1262  case (CORNER)
1263  update => nest_domain%F2C_C
1264  case (NORTH)
1265  update => nest_domain%F2C_N
1266  case default
1267  call mpp_error(FATAL, "mpp_define_nest_domains.inc(mpp_get_F2C_index): invalid option argument position")
1268  end select
1269 
1270  is_fine = update%center%is_you
1271  ie_fine = update%center%ie_you
1272  js_fine = update%center%js_you
1273  je_fine = update%center%je_you
1274  is_coarse = update%center%is_me
1275  ie_coarse = update%center%ie_me
1276  js_coarse = update%center%js_me
1277  je_coarse = update%center%je_me
1278 
1279  end subroutine mpp_get_F2C_index
1280 
1281 
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version will
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz l_size integer
integer, private je
Definition: fms_io.F90:494
integer, parameter recv
subroutine MPP_WRITE_UNLIMITED_AXIS_1D_(unit, field, domain, data, nelems_io)
  integer,         intent(in)    :: unit
  type(fieldtype), intent(inout) :: field
  type(domain2D),  intent(inout) :: domain
  MPP_TYPE_,       intent(inout) :: data(:)
  integer,         intent(in)    :: nelems_io(:) ! number of compressed elements from each
                                                 ! member of the io_domain; it MUST have the
                                                 ! same order as the io_domain pelist
  ! (local declarations truncated in this cross-reference)
int npes
Definition: threadloc.c:26
subroutine, public copy(self, rhs)
integer, pointer refinement
integer(long), parameter true
! This routine is used to retrieve scalar boundary data for a symmetric domain.
subroutine MPP_GET_BOUNDARY_2D_(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
                                position, complete, tile_count)
  type(domain2D), intent(in)              :: domain
  MPP_TYPE_,      intent(in)              :: field(:,:)
  MPP_TYPE_,      intent(inout), optional :: ebuffer(:), sbuffer(:), wbuffer(:), nbuffer(:)
  integer,        intent(in),    optional :: flags, position, tile_count
  logical,        intent(in),    optional :: complete
  ! (local declarations truncated in this cross-reference)
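For orientation, here is an illustrative use of the generic mpp_get_boundary interface that the 2-D routine above implements. This is a sketch and not FMS code: the subroutine name is hypothetical, the double-precision variant is assumed, and the buffer extent and the use of the NORTH position constant are assumptions about how the shared north edge of a symmetric-domain field would be fetched.

  subroutine get_north_edge(field, domain, nedge)
    use mpp_domains_mod, only : domain2D, mpp_get_boundary, mpp_get_compute_domain, NORTH
    implicit none
    real(8),        intent(in)               :: field(:,:)
    type(domain2D), intent(in)               :: domain
    real(8), allocatable, intent(out)        :: nedge(:)
    integer :: isc, iec, jsc, jec

    call mpp_get_compute_domain(domain, isc, iec, jsc, jec)
    allocate(nedge(iec-isc+1))      ! assumed extent of the shared north edge
    ! only the requested (optional) buffer is filled
    call mpp_get_boundary(field, domain, nbuffer=nedge, position=NORTH)
  end subroutine get_north_edge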
integer(long), parameter false
integer, parameter send
character(len=32) name
integer, parameter, public west
integer, parameter m
character(len=128) version
type
Definition: c2f.py:15
integer, parameter, public global
integer, parameter, public center
integer, private ie
Definition: fms_io.F90:494
subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz)
  integer,         intent(in)    :: unit
  type(fieldtype), intent(in)    :: field
  integer,         intent(in)    :: nwords
  MPP_TYPE_,       intent(inout) :: data(nwords)
  integer,         intent(in)    :: start(:), axsiz(:)

subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)
! The routine finally called by all mpp_read routines to perform the read.
! A non-netCDF record contains: the field ID; a set of 4 coordinates (is:ie, js:je) giving
! the data subdomain; a timelevel and a timestamp (=NULLTIME if the field is static); and
! the 3-D real data (stored as 1-D). If you are using direct-access I/O, the RECL argument
! to OPEN must be large enough for the above. In a global direct-access file, the record
! position on a PE is given by %record.
! Treatment of timestamp: static fields are assumed to have been passed without a
! timestamp, which is converted here into a timestamp of NULLTIME. For non-netCDF files the
! field is treated no differently, but is written with a timestamp of NULLTIME; there is no
! check in the code to prevent the user from repeatedly writing a static field.
  integer,         intent(in)           :: unit, nwords
  type(fieldtype), intent(in)           :: field
  MPP_TYPE_,       intent(inout)        :: data(nwords)
  integer,         intent(in), optional :: time_level
  type(domain2D),  intent(in), optional :: domain
  integer,         intent(in), optional :: position, tile_count
  integer,         intent(in), optional :: start_in(:), axsiz_in(:)
  ! (local declarations truncated in this cross-reference)
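The record layout described above maps naturally onto a single unformatted direct-access READ. The sketch below is illustrative only and not FMS code: the subroutine name is hypothetical, the exact ordering and width of the header words are assumptions, and in practice the layout is whatever mpp_write produced, with RECL in the corresponding OPEN covering the header plus the data.

  subroutine read_one_record(unit, rec_no, nwords, field_id, is, ie, js, je, tlevel, tstamp, data)
    implicit none
    integer, intent(in)  :: unit, rec_no, nwords
    integer, intent(out) :: field_id, is, ie, js, je, tlevel
    real,    intent(out) :: tstamp
    real,    intent(out) :: data(nwords)   ! 3-D field flattened to 1-D

    ! one record = field ID, subdomain corners, time level, timestamp, then the data
    read(unit, rec=rec_no) field_id, is, ie, js, je, tlevel, tstamp, data
  end subroutine read_one_record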
integer, parameter, public east
integer, parameter x_refine
Definition: mosaic.F90:50
logical function received(this, seqno)
logical debug_message_passing
integer, parameter, public north
#define max(a, b)
Definition: mosaic_util.h:33
integer, dimension(:), allocatable pelist
#define min(a, b)
Definition: mosaic_util.h:32
integer, parameter, public south
integer, parameter y_refine
Definition: mosaic.F90:50
integer, parameter, public information