FV3 Bundle
mpp_unstruct_domain.inc
1 !***********************************************************************
2 !* GNU Lesser General Public License
3 !*
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
5 !*
6 !* FMS is free software: you can redistribute it and/or modify it under
7 !* the terms of the GNU Lesser General Public License as published by
8 !* the Free Software Foundation, either version 3 of the License, or (at
9 !* your option) any later version.
10 !*
11 !* FMS is distributed in the hope that it will be useful, but WITHOUT
12 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 !* for more details.
15 !*
16 !* You should have received a copy of the GNU Lesser General Public
17 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
18 !***********************************************************************
19 
20  !#####################################################################
21  subroutine mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, ndivs, npes_io_group, grid_index, name)
22  type(domainUG), intent(inout) :: UG_domain
23  type(domain2d), target, intent(in) :: SG_domain
24  integer, intent(in) :: npts_tile(:) ! number of unstructured points on each tile
 25  integer, intent(in) :: grid_nlev(:) ! number of levels at each unstructured grid point.
26  integer, intent(in) :: ndivs
 27  integer, intent(in) :: npes_io_group ! number of processors in an I/O group. Only PEs with
 28  ! the same tile_id are in the same group
29  integer, intent(in) :: grid_index(:)
30  character(len=*), optional, intent(in) :: name
31  integer, dimension(size(npts_tile(:))) :: ndivs_tile, pe_start, pe_end
32  integer, dimension(0:ndivs-1) :: ibegin, iend, costs_list
33  integer :: ntiles, ntotal_pts, ndivs_used, max_npts, cur_tile, cur_npts
34  integer :: n, ts, te, p, pos, tile_id, ngroup, group_id, my_pos, i
35  integer :: npes_in_group, is, ie, ntotal_costs, max_cost, cur_cost, costs_left
36  integer :: npts_left, ndiv_left, cur_pos, ndiv, prev_cost, ioff
37  real :: avg_cost
38  integer :: costs(size(npts_tile(:)))
39 
40  UG_domain%SG_domain => SG_domain
41  ntiles = size(npts_tile(:))
42  UG_domain%ntiles = ntiles
43 
44  !--- total number of points must be no less than ndivs
45  if(sum(npts_tile)<ndivs) call mpp_error(FATAL, "mpp_define_unstruct_domain: total number of points is less than ndivs")
46  !--- We are assuming nlev on each grid is at least one.
47  do n = 1, size(grid_nlev(:))
48  if(grid_nlev(n) < 1) call mpp_error(FATAL, "mpp_define_unstruct_domain: grid_nlev at some point is less than 1")
49  enddo
50 
51  !-- costs for each tile.
52  pos = 0
53  do n = 1, ntiles
54  costs(n) = 0
55  do i = 1, npts_tile(n)
56  pos = pos + 1
57  costs(n) = costs(n) + grid_nlev(pos)
58  enddo
59  enddo
60  ! compute number of divisions for each tile.
61  ntotal_costs = sum(costs)
62  !--- get the upper limit of ndivs for each tile.
63  do n = 1, ntiles
64  ndivs_tile(n) = CEILING(real(costs(n)*ndivs)/ntotal_costs)
65  enddo
66 
67  ndivs_used = sum(ndivs_tile)
68  do while (ndivs_used > ndivs)
69  max_cost = 0
70  cur_tile = 0
71  do n = 1, ntiles
72  if( ndivs_tile(n) > 1 ) then
73  cur_cost = CEILING(real(costs(n))/(ndivs_tile(n)-1))
74  if( max_cost == 0 .OR. cur_cost<max_cost) then
75  max_cost = cur_cost
76  cur_tile = n
77  endif
78  endif
79  enddo
80  ndivs_used = ndivs_used-1
81  ndivs_tile(cur_tile) = ndivs_tile(cur_tile) - 1
82  enddo
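 ! Worked example of this step (illustrative numbers, not taken from this file):
 ! with costs = (/100, 80, 60/) and ndivs = 4, ntotal_costs = 240 and the ceiling
 ! estimate gives ndivs_tile = (/2, 2, 1/), which sums to 5 > ndivs. The loop above
 ! then drops one division from the eligible tile whose per-division cost after the
 ! drop is smallest (tile 2: CEILING(80/1.) = 80 vs tile 1: CEILING(100/1.) = 100),
 ! leaving ndivs_tile = (/2, 1, 1/).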
83 
84  te = -1
85  ioff = 0
86  do n = 1, ntiles
87  ts = te + 1
88  te = ts + ndivs_tile(n) - 1
89  costs_left = costs(n)
90  ndiv_left = ndivs_tile(n)
91  npts_left = npts_tile(n)
92  cur_pos = 1
93  do ndiv = 1, ndivs_tile(n)
94  cur_cost = 0
95  ibegin(ts+ndiv-1) = cur_pos
96  avg_cost = real(costs_left)/ndiv_left
97  do i = cur_pos, npts_tile(n)
98  cur_cost = cur_cost + grid_nlev(i+ioff)
99  costs_left = costs_left - grid_nlev(i+ioff)
100  if(npts_left < ndiv_left ) then
101  call mpp_error(FATAL, "mpp_define_unstruct_domain: npts_left < ndiv_left")
102  else if(npts_left == ndiv_left ) then
103  cur_pos = i + 1
104  exit
105  else if(cur_cost .GE. avg_cost) then
106  prev_cost = cur_cost - grid_nlev(i+ioff)
107  if(i==cur_pos) then
108  cur_pos = i + 1
109  exit
110  else if( cur_cost - avg_cost .LE. avg_cost - prev_cost ) then
111  cur_pos = i + 1
112  exit
113  else
114  cur_pos = i
115  cur_cost = prev_cost
116  costs_left = costs_left + grid_nlev(i+ioff)
117  npts_left = npts_left+1
118  exit
119  endif
120  endif
121  npts_left = npts_left-1
122  enddo
123  iend(ts+ndiv-1) = cur_pos - 1
124  costs_list(ts+ndiv-1) = cur_cost
125  ndiv_left = ndiv_left-1
126  npts_left = npts_left-1
127  enddo
128  pe_start(n) = ts
129  pe_end(n) = te
130  ioff = ioff+ npts_tile(n)
131  enddo
132  allocate(UG_domain%list(0:ndivs-1))
133  do p = 0, ndivs-1
134  UG_domain%list(p)%compute%begin = ibegin(p)
135  UG_domain%list(p)%compute%end = iend(p)
136  UG_domain%list(p)%compute%size = UG_domain%list(p)%compute%end - UG_domain%list(p)%compute%begin + 1
137  UG_domain%list(p)%compute%max_size = 0
138  UG_domain%list(p)%pos = p
139  UG_domain%list(p)%pe = p + mpp_root_pe()
140  pos = 0
141  do n = 1, ntiles
142  if( p .GE. pe_start(n) .AND. p .LE. pe_end(n) ) then
143  UG_domain%list(p)%tile_id = n
144  exit
145  endif
146  pos = pos + npts_tile(n)
147  enddo
148  is = UG_domain%list(p)%compute%begin+pos
149  ie = UG_domain%list(p)%compute%end+pos
150  UG_domain%list(p)%compute%begin_index = minval(grid_index(is:ie))
151  UG_domain%list(p)%compute%end_index = maxval(grid_index(is:ie))
152  enddo
153 
154  !--- write out domain decomposition from root pe
155  if(mpp_pe() == mpp_root_pe() .and. present(name)) then
156  write(stdout(),*) "unstruct domain name = ", trim(name)
157  write(stdout(),*) UG_domain%list(:)%compute%size
158  endif
159 
160  pos = mpp_pe() - mpp_root_pe()
161  UG_domain%pe = mpp_pe()
162  UG_domain%pos = pos
163  UG_domain%tile_id = UG_domain%list(pos)%tile_id
164  p = pe_start(UG_domain%tile_id)
165  UG_domain%tile_root_pe = UG_domain%list(p)%pe
166  UG_domain%tile_npes = pe_end(UG_domain%tile_id) - pe_start(UG_domain%tile_id) + 1
167  UG_domain%compute = UG_domain%list(pos)%compute
168  UG_domain%compute%max_size = MAXVAL( UG_domain%list(:)%compute%size )
169  UG_domain%global%begin = 1
170  UG_domain%global%end = npts_tile(UG_domain%tile_id)
171  UG_domain%global%size = UG_domain%global%end - UG_domain%global%begin + 1
172  UG_domain%global%max_size = -1 ! currently this is not supposed to be used.
173  pos = 0
174  do n = 1, UG_domain%tile_id-1
175  pos = pos + npts_tile(n)
176  enddo
177  UG_domain%global%begin_index = grid_index(pos+1)
178  UG_domain%global%end_index = grid_index(pos+npts_tile(n))
179 
180  allocate(UG_domain%grid_index(UG_domain%compute%size))
181  do n = 1, UG_domain%compute%size
182  UG_domain%grid_index(n) = grid_index(pos+UG_domain%compute%begin+n-1)
183  enddo
184 
185  !--- define io_domain
186  allocate(UG_domain%io_domain)
187  tile_id = UG_domain%tile_id
188  UG_domain%io_domain%pe = UG_domain%pe
 189  !--- figure out the number of I/O groups for the current tile
190  if(npes_io_group == 0) then
191  ngroup = 1
192  else
193  ngroup = CEILING(real(ndivs_tile(tile_id))/ npes_io_group)
194  endif
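 ! For example (illustrative values, not from this file): with ndivs_tile(tile_id) = 10
 ! ranks on this tile and npes_io_group = 4, ngroup = CEILING(10/4.) = 3 I/O groups.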
195 
196 !----------
197 !ug support
198  UG_domain%npes_io_group = npes_io_group
199  UG_domain%io_layout = ngroup
200 !----------
201 
202  call mpp_compute_extent(1, ndivs_tile(tile_id), ngroup, ibegin(0:ngroup-1), iend(0:ngroup-1))
203  my_pos = UG_domain%pe - UG_domain%tile_root_pe + 1
204  do n = 0, ngroup-1
205  if( my_pos .GE. ibegin(n) .AND. my_pos .LE. iend(n) ) then
206  group_id = n
207  exit
208  endif
209  enddo
210 
211  UG_domain%io_domain%tile_id = group_id+1
212  UG_domain%io_domain%compute = UG_domain%compute
213  UG_domain%io_domain%pe = UG_domain%pe
214  UG_domain%io_domain%pos = my_pos - ibegin(group_id) + 1
215  UG_domain%io_domain%tile_root_pe = ibegin(group_id) + UG_domain%tile_root_pe - 1
216  pos = UG_domain%io_domain%tile_root_pe - mpp_root_pe()
217  UG_domain%io_domain%global%begin = UG_domain%list(pos)%compute%begin
218  UG_domain%io_domain%global%begin_index = UG_domain%list(pos)%compute%begin_index
219  pos = iend(group_id) + UG_domain%tile_root_pe - mpp_root_pe() - 1
220  UG_domain%io_domain%global%end = UG_domain%list(pos)%compute%end
221  UG_domain%io_domain%global%end_index = UG_domain%list(pos)%compute%end_index
222  UG_domain%io_domain%global%size = UG_domain%io_domain%global%end - UG_domain%io_domain%global%begin + 1
223 
224  npes_in_group = iend(group_id) - ibegin(group_id) + 1
225  allocate(UG_domain%io_domain%list(0:npes_in_group-1))
226  do n = 0, npes_in_group-1
227  pos = UG_domain%io_domain%tile_root_pe - mpp_root_pe() + n
228  UG_domain%io_domain%list(n)%compute = UG_domain%list(pos)%compute
229  UG_domain%io_domain%list(n)%pos = n
230  UG_domain%io_domain%list(n)%pe = UG_domain%list(pos)%pe
231  UG_domain%io_domain%list(n)%tile_id = group_id+1
232  enddo
233 
234  call compute_overlap_SG2UG(UG_domain, SG_domain)
235  call compute_overlap_UG2SG(UG_domain)
236 
237  return
238 
239  end subroutine mpp_define_unstruct_domain
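 !> Example usage (a minimal, hedged sketch; the names and sizes below are
 !! illustrative assumptions, not taken from this file). It assumes SG_domain has
 !! already been defined with mpp_define_domains and that grid_index lists the
 !! global (j-1)*nxg+i index of each unstructured point, ordered tile by tile:
 !!
 !!   type(domain2D) :: SG_domain            ! structured domain, already defined
 !!   type(domainUG) :: UG_domain            ! unstructured domain to be built
 !!   integer :: npts_tile(6)                ! points per tile, e.g. from a land mask
 !!   integer, allocatable :: grid_nlev(:)   ! levels per point, size sum(npts_tile)
 !!   integer, allocatable :: grid_index(:)  ! global SG index of each point
 !!
 !!   call mpp_define_unstruct_domain(UG_domain, SG_domain, npts_tile, grid_nlev, &
 !!                                   mpp_npes(), 0, grid_index, name="land UG domain")
 !!
 !! Passing npes_io_group = 0 places all PEs of a tile into a single I/O group.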
240 
241 
242  !####################################################################
243  subroutine compute_overlap_SG2UG(UG_domain, SG_domain)
244  type(domainUG), intent(inout) :: UG_domain
245  type(domain2d), intent(in) :: SG_domain
246  integer, dimension(0:size(SG_domain%list(:))-1) :: send_cnt, recv_cnt
247  integer, dimension(0:size(SG_domain%list(:))-1) :: send_buffer_pos, recv_buffer_pos
248  integer, dimension(:), allocatable :: send_buffer, recv_buffer, index_list
249  integer, dimension(:), allocatable :: buffer_pos
250  integer :: tile_id, nlist, nxg, begin_index, end_index, i, j
251  integer :: m, n, list, l, isc, iec, jsc, jec, ibegin, iend, grid_index
252  integer :: nrecv, nsend, send_pos, recv_pos, pos
253 
254  !--- figure out the recv index information.
255  tile_id = UG_domain%tile_id
256  nlist = size(SG_domain%list(:))
257  nxg = SG_domain%x(1)%global%size
258  begin_index = UG_domain%compute%begin_index
259  end_index = UG_domain%compute%end_index
260  pos = 0
261  recv_cnt = 0
262  allocate(index_list(UG_domain%compute%size))
263  allocate(send_buffer(UG_domain%compute%size))
264  index_list = -1
265  do n = 0, nlist-1
266  if(SG_domain%list(n)%tile_id(1) .NE. tile_id) cycle
267  isc = SG_domain%list(n)%x(1)%compute%begin; iec = SG_domain%list(n)%x(1)%compute%end
268  jsc = SG_domain%list(n)%y(1)%compute%begin; jec = SG_domain%list(n)%y(1)%compute%end
269  ibegin = (jsc-1)*nxg + isc
270  iend = (jec-1)*nxg + iec
271  if(ibegin > end_index .OR. iend < begin_index) cycle
272  do l = 1, UG_domain%compute%size
273  grid_index = UG_domain%grid_index(l)
274  i = mod((grid_index-1), nxg) + 1
275  j = (grid_index-1)/nxg + 1
276  if( i .GE. isc .AND. i .LE. iec .and. j .GE. jsc .AND. j .LE. jec ) then
277  recv_cnt(n) = recv_cnt(n) + 1
278  pos = pos + 1
279  if(pos > UG_domain%compute%size) call mpp_error(FATAL, &
280  'compute_overlap_SG2UG: pos > UG_domain%compute%size')
281  index_list(pos) = l
282  send_buffer(pos) = grid_index
283  endif
284  enddo
285  enddo
286 
287  !--- make sure sum(recv_cnt) == UG_domain%compute%size
288  if( UG_domain%compute%size .NE. sum(recv_cnt) ) then
289  print*,"pe=", mpp_pe(), UG_domain%compute%size, sum(recv_cnt)
290  call mpp_error(FATAL, &
291  "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(recv_cnt)")
292  endif
293  allocate(buffer_pos(0:nlist-1))
294  pos = 0
295  do list = 0,nlist-1
296  buffer_pos(list) = pos
297  pos = pos + recv_cnt(list)
298  enddo
299 
300  nrecv = count( recv_cnt > 0 )
301  UG_domain%SG2UG%nrecv = nrecv
302  allocate(UG_domain%SG2UG%recv(nrecv))
303  nrecv = 0
304  pos = 0
305  do list = 0,nlist-1
306  m = mod( SG_domain%pos+nlist-list, nlist )
307  if( recv_cnt(m) > 0 ) then
308  nrecv = nrecv+1
309  UG_domain%SG2UG%recv(nrecv)%count = recv_cnt(m)
310  UG_domain%SG2UG%recv(nrecv)%pe = UG_domain%list(m)%pe
311  allocate(UG_domain%SG2UG%recv(nrecv)%i(recv_cnt(m)))
312  pos = buffer_pos(m)
313  do l = 1, recv_cnt(m)
314  pos = pos + 1
315  UG_domain%SG2UG%recv(nrecv)%i(l) = index_list(pos)
316  enddo
317  endif
318  enddo
319 
320  !--- figure out the send index information.
321  send_cnt = recv_cnt
322  recv_cnt = 0
323  call mpp_alltoall(send_cnt,1,recv_cnt,1)
324  !--- make sure sum(send_cnt) == UG_domain%compute%size
325  if( UG_domain%compute%size .NE. sum(send_cnt) ) call mpp_error(FATAL, &
326  "compute_overlap_SG2UG: UG_domain%compute%size .NE. sum(send_cnt)")
327  allocate(recv_buffer(sum(recv_cnt)))
328  send_buffer_pos = 0; recv_buffer_pos = 0
329  send_pos = 0; recv_pos = 0
330  do n = 0, nlist-1
331  if(send_cnt(n) > 0) then
332  send_buffer_pos(n) = send_pos
333  send_pos = send_pos + send_cnt(n)
334  endif
335  if(recv_cnt(n) > 0) then
336  recv_buffer_pos(n) = recv_pos
337  recv_pos = recv_pos + recv_cnt(n)
338  endif
339  enddo
340 
341  call mpp_alltoall(send_buffer, send_cnt, send_buffer_pos, &
342  recv_buffer, recv_cnt, recv_buffer_pos)
343 
344  nsend = count( recv_cnt(:) > 0 )
345  UG_domain%SG2UG%nsend = nsend
346  allocate(UG_domain%SG2UG%send(nsend))
347  nsend = 0
348  isc = SG_domain%x(1)%compute%begin
349  jsc = SG_domain%y(1)%compute%begin
350  do list = 0,nlist-1
351  m = mod( SG_domain%pos+list, nlist )
352  if( recv_cnt(m) > 0 ) then
353  nsend = nsend+1
354  UG_domain%SG2UG%send(nsend)%count = recv_cnt(m)
355  UG_domain%SG2UG%send(nsend)%pe = UG_domain%list(m)%pe
356  allocate(UG_domain%SG2UG%send(nsend)%i(recv_cnt(m)))
357  allocate(UG_domain%SG2UG%send(nsend)%j(recv_cnt(m)))
358  pos = recv_buffer_pos(m)
359  do l = 1, recv_cnt(m)
360  grid_index = recv_buffer(pos+l)
361  UG_domain%SG2UG%send(nsend)%i(l) = mod(grid_index-1,nxg) + 1
362  UG_domain%SG2UG%send(nsend)%j(l) = (grid_index-1)/nxg + 1
363  enddo
364  endif
365  enddo
366  deallocate(send_buffer, recv_buffer, index_list, buffer_pos)
367 
368 return
369 
370  end subroutine compute_overlap_SG2UG
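 ! Note on the index mapping used above (a sketch; nxg is the global i-extent of the
 ! structured grid): an unstructured point carrying global index grid_index maps to
 ! the structured point
 !   i = mod(grid_index-1, nxg) + 1
 !   j = (grid_index-1)/nxg + 1
 ! so, for illustration, nxg = 96 and grid_index = 100 give (i,j) = (4,2).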
371 
372  !####################################################################
373  subroutine compute_overlap_UG2SG(UG_domain)
374  type(domainUG), intent(inout) :: UG_domain
375 
376  !--- UG2SG is the reverse of SG2UG
377  UG_domain%UG2SG%nsend = UG_domain%SG2UG%nrecv
378  UG_domain%UG2SG%send => UG_domain%SG2UG%recv
379  UG_domain%UG2SG%nrecv = UG_domain%SG2UG%nsend
380  UG_domain%UG2SG%recv => UG_domain%SG2UG%send
381 
382  return
383 
384  end subroutine compute_overlap_UG2SG
385 
386  !####################################################################
387  subroutine mpp_get_UG_SG_domain(UG_domain,SG_domain)
388  type(domainUG), intent(inout) :: UG_domain
389  type(domain2d), pointer :: SG_domain
390 
391  SG_domain => UG_domain%SG_domain
392 
393  return
394 
395  end subroutine mpp_get_UG_SG_domain
396 
397  !####################################################################
398  function mpp_get_UG_io_domain(domain)
399  type(domainUG), intent(in) :: domain
400  type(domainUG), pointer :: mpp_get_UG_io_domain
401 
402  if(ASSOCIATED(domain%io_domain)) then
403  mpp_get_UG_io_domain => domain%io_domain
404  else
405  call mpp_error(FATAL, "mpp_get_UG_io_domain: io_domain is not defined, contact developer")
406  endif
407 
408  end function mpp_get_UG_io_domain
409 
410  !#####################################################################
411  subroutine mpp_get_UG_compute_domain( domain, begin, end, size)
412  type(domainUG), intent(in) :: domain
413  integer, intent(out), optional :: begin, end, size
414 
415  if( PRESENT(begin) )begin = domain%compute%begin
416  if( PRESENT(end) )end = domain%compute%end
417  if( PRESENT(size) )size = domain%compute%size
418  return
419  end subroutine mpp_get_UG_compute_domain
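 ! Example (hedged sketch; the names is, ie and npts are illustrative):
 !   integer :: is, ie, npts
 !   call mpp_get_UG_compute_domain(UG_domain, begin=is, end=ie, size=npts)
 ! returns the compute-domain bounds and size of the calling PE.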
420 
421  !#####################################################################
422  subroutine mpp_get_UG_global_domain( domain, begin, end, size)
423  type(domainUG), intent(in) :: domain
424  integer, intent(out), optional :: begin, end, size
425 
426  if( PRESENT(begin) )begin = domain%global%begin
427  if( PRESENT(end) )end = domain%global%end
428  if( PRESENT(size) )size = domain%global%size
429  return
430  end subroutine mpp_get_UG_global_domain
431 
432  !#####################################################################
433  subroutine mpp_get_UG_compute_domains( domain, begin, end, size )
434  type(domainUG), intent(in) :: domain
435  integer, intent(out), optional, dimension(:) :: begin, end, size
436 
437  !we use shape instead of size for error checks because size is used as an argument
438  if( PRESENT(begin) )then
439  if( any(shape(begin).NE.shape(domain%list)) ) &
440  call mpp_error( FATAL, 'mpp_get_UG_compute_domains: begin array size does not match domain.' )
441  begin(:) = domain%list(:)%compute%begin
442  end if
443  if( PRESENT(end) )then
444  if( any(shape(end).NE.shape(domain%list)) ) &
445  call mpp_error( FATAL, 'mpp_get_UG_compute_domains: end array size does not match domain.' )
446  end(:) = domain%list(:)%compute%end
447  end if
448  if( PRESENT(size) )then
449  if( any(shape(size).NE.shape(domain%list)) ) &
450  call mpp_error( FATAL, 'mpp_get_UG_compute_domains: size array size does not match domain.' )
451  size(:) = domain%list(:)%compute%size
452  end if
453  return
454  end subroutine mpp_get_UG_compute_domains
455 
456  !#####################################################################
457  subroutine mpp_get_UG_domains_index( domain, begin, end)
458  type(domainUG), intent(in) :: domain
459  integer, intent(out), dimension(:) :: begin, end
460 
461  !we use shape instead of size for error checks because size is used as an argument
462  if( any(shape(begin).NE.shape(domain%list)) ) &
 463  call mpp_error( FATAL, 'mpp_get_UG_domains_index: begin array size does not match domain.' )
464  begin(:) = domain%list(:)%compute%begin_index
465  if( any(shape(end).NE.shape(domain%list)) ) &
 466  call mpp_error( FATAL, 'mpp_get_UG_domains_index: end array size does not match domain.' )
467  end(:) = domain%list(:)%compute%end_index
468  return
469  end subroutine mpp_get_UG_domains_index
470 
471  !#####################################################################
472  function mpp_get_UG_domain_ntiles(domain)
473  type(domainUG), intent(in) :: domain
474  integer :: mpp_get_UG_domain_ntiles
475 
476  mpp_get_UG_domain_ntiles = domain%ntiles
477  return
478  end function mpp_get_UG_domain_ntiles
479 
480  !#######################################################################
481  subroutine mpp_get_ug_domain_tile_list(domain, tiles)
482  type(domainUG), intent(in) :: domain
483  integer, intent(inout) :: tiles(:)
484  integer :: i
485 
486  if( size(tiles(:)).NE.size(domain%list(:)) ) &
487  call mpp_error( FATAL, 'mpp_get_ug_domain_tile_list: tiles array size does not match domain.' )
488  do i = 1, size(tiles(:))
489  tiles(i) = domain%list(i-1)%tile_id
490  end do
491 
492  end subroutine mpp_get_ug_domain_tile_list
493 
494  !#####################################################################
495  function mpp_get_UG_domain_tile_id(domain)
496  type(domainUG), intent(in) :: domain
497  integer :: mpp_get_UG_domain_tile_id
498 
499  mpp_get_UG_domain_tile_id = domain%tile_id
500  return
501  end function mpp_get_UG_domain_tile_id
502 
503  !####################################################################
504  function mpp_get_UG_domain_npes(domain)
505  type(domainUG), intent(in) :: domain
506  integer :: mpp_get_UG_domain_npes
507 
508  mpp_get_UG_domain_npes = size(domain%list(:))
509  return
510 
511  end function mpp_get_UG_domain_npes
512 
513 
514  !####################################################################
515  subroutine mpp_get_UG_domain_pelist( domain, pelist)
516  type(domainUG), intent(in) :: domain
517  integer, intent(out) :: pelist(:)
518 
519  if( size(pelist(:)).NE.size(domain%list(:)) ) &
520  call mpp_error( FATAL, 'mpp_get_UG_domain_pelist: pelist array size does not match domain.' )
521 
522  pelist(:) = domain%list(:)%pe
523  return
524 
525  end subroutine mpp_get_UG_domain_pelist
526 
527  !###################################################################
528  subroutine mpp_get_UG_domain_tile_pe_inf( domain, root_pe, npes, pelist)
529  type(domainUG), intent(in) :: domain
530  integer, optional, intent(out) :: root_pe, npes
531  integer, optional, intent(out) :: pelist(:)
532 
533  if(present(root_pe)) root_pe = domain%tile_root_pe
 534  if(present(npes)) npes = domain%tile_npes
535 
536  if(present(pelist)) then
537  if( size(pelist(:)).NE. domain%tile_npes ) &
538  call mpp_error( FATAL, 'mpp_get_UG_domain_tile_pe_inf: pelist array size does not match domain.' )
539  pelist(:) = domain%list(domain%pos:domain%pos+domain%tile_npes-1)%pe
540  endif
541  return
542 
543  end subroutine mpp_get_UG_domain_tile_pe_inf
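 ! Example (hedged sketch; names are illustrative): query the root PE and PE count
 ! of the calling PE's tile, then fetch its pelist:
 !   integer :: tile_root, tile_npes
 !   integer, allocatable :: tile_pelist(:)
 !   call mpp_get_UG_domain_tile_pe_inf(UG_domain, root_pe=tile_root, npes=tile_npes)
 !   allocate(tile_pelist(tile_npes))
 !   call mpp_get_UG_domain_tile_pe_inf(UG_domain, pelist=tile_pelist)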
544 
545 
546  !####################################################################
547  subroutine mpp_get_UG_domain_grid_index( domain, grid_index)
548  type(domainUG), intent(in) :: domain
549  integer, intent(out) :: grid_index(:)
550 
551  if( size(grid_index(:)).NE.size(domain%grid_index(:)) ) &
552  call mpp_error( FATAL, 'mpp_get_UG_domain_grid_index: grid_index array size does not match domain.' )
553 
554  grid_index(:) = domain%grid_index(:)
555  return
556 
557  end subroutine mpp_get_UG_domain_grid_index
558 
559  !###################################################################
560  subroutine mpp_define_null_UG_domain(domain)
561  type(domainUG), intent(inout) :: domain
562 
563  domain%global%begin = -1; domain%global%end = -1; domain%global%size = 0
564  domain%compute%begin = -1; domain%compute%end = -1; domain%compute%size = 0
565  domain%pe = NULL_PE
566  domain%ntiles = -1
567  domain%pos = -1
568  domain%tile_id = -1
569  domain%tile_root_pe = -1
570 
571  end subroutine mpp_define_null_UG_domain
572 
573 !##############################################################################
574  subroutine mpp_broadcast_domain_ug( domain )
575 !broadcast domain (useful only outside the context of its own pelist)
576  type(domainUG), intent(inout) :: domain
577  integer, allocatable :: pes(:)
578  logical :: native !true if I'm on the pelist of this domain
579  integer :: listsize, listpos
580  integer :: n
581  integer, dimension(7) :: msg, info !pe and compute domain of each item in list
582  integer :: errunit
583 
584  errunit = stderr()
585  if( .NOT.module_is_initialized ) &
586  call mpp_error( FATAL, 'MPP_BROADCAST_DOMAIN_ug: You must first call mpp_domains_init.' )
587 
588 !get the current pelist
589  allocate( pes(0:mpp_npes()-1) )
590  call mpp_get_current_pelist(pes)
591 
592 !am I part of this domain?
593  native = ASSOCIATED(domain%list)
594 
595 !set local list size
596  if( native )then
597  listsize = size(domain%list(:))
598  else
599  listsize = 0
600  end if
601  call mpp_max(listsize)
602 
603  if( .NOT.native )then
604 !initialize domain%list and set null values in message
605  allocate( domain%list(0:listsize-1) )
606  domain%pe = NULL_PE
607  domain%pos = -1
608  domain%ntiles = -1
609  domain%compute%begin = 1
610  domain%compute%end = -1
611  domain%compute%begin_index = 1
612  domain%compute%end_index = -1
613  domain%global %begin = -1
614  domain%global %end = -1
615  domain%tile_id = -1
616  domain%tile_root_pe = -1
617  end if
618 !initialize values in info
619  info(1) = domain%pe
620  info(2) = domain%pos
621  info(3) = domain%tile_id
622  call mpp_get_UG_compute_domain( domain, info(4), info(5))
623  info(6) = domain%compute%begin_index
624  info(7) = domain%compute%end_index
625 !broadcast your info across current pelist and unpack if needed
626  listpos = 0
627  do n = 0,mpp_npes()-1
628  msg = info
629  if( mpp_pe().EQ.pes(n) .AND. debug )write( errunit,* )'PE ', mpp_pe(), 'broadcasting msg ', msg
630  call mpp_broadcast( msg, 7, pes(n) )
 631 !unpack only when this PE is outside the domain (not native) and the
 632 !message came from a PE that is on the domain (msg(1) /= NULL_PE)
633  if( .NOT.native .AND. msg(1).NE.NULL_PE )then
634  domain%list(listpos)%pe = msg(1)
635  domain%list(listpos)%pos = msg(2)
636  domain%list(listpos)%tile_id = msg(3)
637  domain%list(listpos)%compute%begin = msg(4)
638  domain%list(listpos)%compute%end = msg(5)
639  domain%list(listpos)%compute%begin_index = msg(6)
640  domain%list(listpos)%compute%end_index = msg(7)
641  listpos = listpos + 1
642  if( debug )write( errunit,* )'PE ', mpp_pe(), 'received domain from PE ', msg(1), 'ls,le=', msg(4:5)
643  end if
644  end do
645 
646  end subroutine mpp_broadcast_domain_ug
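 ! Example (hedged sketch; UG_domain is an illustrative name): when two PE lists have
 ! been merged into the current pelist, PEs outside the domain's own pelist can learn
 ! its decomposition:
 !   call mpp_set_current_pelist()            ! the union pelist
 !   call mpp_broadcast_domain_ug(UG_domain)
 ! Non-member PEs come back with domain%list filled with the pe, pos, tile_id and
 ! compute-domain bounds of every member.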
647 
648 !------------------------------------------------------------------------------
649 function mpp_domain_UG_is_tile_root_pe(domain) result(is_root)
650 
651  !<Inputs/Outputs
652  type(domainUG),intent(in) :: domain
653  logical(INT_KIND) :: is_root
654 
655  if (domain%pe .eq. domain%tile_root_pe) then
656  is_root = .true.
657  else
658  is_root = .false.
659  endif
660 
661  return
662 end function mpp_domain_UG_is_tile_root_pe
663 
664 !------------------------------------------------------------------------------
665 !HELP: There needs to be a subroutine to return the "io_layout" for
666 ! an unstructured domain, so I made one. Someone should check
667 ! to see if this is correct.
668 function mpp_get_io_domain_UG_layout(domain) result(io_layout)
669 
670  !<Inputs/Outputs
671  type(domainUG),intent(in) :: domain
672  integer(INT_KIND) :: io_layout
673 
674  io_layout = domain%io_layout
675 
676  return
 677 end function mpp_get_io_domain_UG_layout
678 
679 
680 !------------------------------------------------------------------
681 subroutine deallocate_unstruct_overlap_type(overlap)
682  type(unstruct_overlap_type), intent(inout) :: overlap
683 
684  if(associated(overlap%i)) deallocate(overlap%i)
685  if(associated(overlap%j)) deallocate(overlap%j)
686 
687 end subroutine deallocate_unstruct_overlap_type
688 
689 !------------------------------------------------------------------
690 subroutine deallocate_unstruct_pass_type(passobj)
691  type(unstruct_pass_type), intent(inout) :: passobj
692  integer :: n
693 
694  do n = 1, passobj%nsend
695  call deallocate_unstruct_overlap_type(passobj%send(n))
696  enddo
697  do n = 1, passobj%nrecv
698  call deallocate_unstruct_overlap_type(passobj%recv(n))
699  enddo
700 
701  if(associated(passobj%send)) deallocate(passobj%send)
702  if(associated(passobj%recv)) deallocate(passobj%recv)
703 
704 end subroutine deallocate_unstruct_pass_type
705 
706 !------------------------------------------------------------------
707 subroutine mpp_deallocate_domainUG(domain)
708 
709  !<Inputs/Outputs
710  type(domainUG),intent(inout) :: domain
711 
712  !<Local variables
713  integer(INT_KIND) :: i !<Loop variable.
714 
715  if (associated(domain%list)) then
716  deallocate(domain%list)
717  domain%list => null()
718  endif
719 
720  if (associated(domain%io_domain)) then
721  if (associated(domain%io_domain%list)) then
722  deallocate(domain%io_domain%list)
723  domain%io_domain%list => null()
724  endif
725  deallocate(domain%io_domain)
726  domain%io_domain => null()
727  endif
728 
729  call deallocate_unstruct_pass_type(domain%SG2UG)
730  call deallocate_unstruct_pass_type(domain%UG2SG)
731 
732  if (associated(domain%grid_index)) then
733  deallocate(domain%grid_index)
734  domain%grid_index => null()
735  endif
736 
737  if (associated(domain%SG_domain)) then
738  domain%SG_domain => null()
739  endif
740 
741  return
742 end subroutine mpp_deallocate_domainUG
743 
744  !###################################################################
745  !> Overload the .eq. for UG
746  function mpp_domainUG_eq( a, b )
747  logical :: mpp_domainUG_eq
748  type(domainUG), intent(in) :: a, b
749 
750  if (associated(a%SG_domain) .and. associated(b%SG_domain)) then
751  if (a%SG_domain .ne. b%SG_domain) then
752  mpp_domainUG_eq = .false.
753  return
754  endif
755  elseif (associated(a%SG_domain) .and. .not. associated(b%SG_domain)) then
756  mpp_domainUG_eq = .false.
757  return
758  elseif (.not. associated(a%SG_domain) .and. associated(b%SG_domain)) then
759  mpp_domainUG_eq = .false.
760  return
761  endif
762 
763  mpp_domainUG_eq = (a%npes_io_group .EQ. b%npes_io_group) .AND. &
764  (a%pos .EQ. b%pos) .AND. &
765  (a%ntiles .EQ. b%ntiles) .AND. &
766  (a%tile_id .EQ. b%tile_id) .AND. &
767  (a%tile_npes .EQ. b%tile_npes) .AND. &
768  (a%tile_root_pe .EQ. b%tile_root_pe)
769 
770  if(.not. mpp_domainUG_eq) return
771 
772  mpp_domainUG_eq = ( a%compute%begin.EQ.b%compute%begin .AND. &
773  a%compute%end .EQ.b%compute%end .AND. &
774  a%global%begin .EQ.b%global%begin .AND. &
775  a%global%end .EQ.b%global%end .AND. &
776  a%SG2UG%nsend .EQ.b%SG2UG%nsend .AND. &
777  a%SG2UG%nrecv .EQ.b%SG2UG%nrecv .AND. &
778  a%UG2SG%nsend .EQ.b%UG2SG%nsend .AND. &
779  a%UG2SG%nrecv .EQ.b%UG2SG%nrecv &
780  )
781 
782  return
783  end function mpp_domainUG_eq
784 
785  !> Overload the .ne. for UG
786  function mpp_domainUG_ne( a, b )
787  logical :: mpp_domainUG_ne
788  type(domainUG), intent(in) :: a, b
789 
790  mpp_domainUG_ne = .NOT. ( a.EQ.b )
791  return
792  end function mpp_domainUG_ne
793 
794 #undef MPP_TYPE_
795 #define MPP_TYPE_ real(DOUBLE_KIND)
796 #undef mpp_pass_SG_to_UG_2D_
797 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r8_2d
798 #undef mpp_pass_SG_to_UG_3D_
799 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r8_3d
800 #undef mpp_pass_UG_to_SG_2D_
801 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r8_2d
802 #undef mpp_pass_UG_to_SG_3D_
803 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r8_3d
804 #include <mpp_unstruct_pass_data.h>
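 ! The #define/#include blocks above and below are a preprocessor "template":
 ! mpp_unstruct_pass_data.h is written in terms of MPP_TYPE_ and the mpp_pass_*_
 ! macro names, and each block instantiates the same routines for another type/kind.
 ! A hedged sketch of the idiom with illustrative names (not part of FMS):
 !
 !   #undef  MY_TYPE_
 !   #define MY_TYPE_ real(DOUBLE_KIND)
 !   #undef  MY_ROUTINE_
 !   #define MY_ROUTINE_ my_routine_r8
 !   #include <my_template.h>   ! the header body uses MY_TYPE_ and MY_ROUTINE_,
 !                              ! e.g.  subroutine MY_ROUTINE_(x)
 !                              !         MY_TYPE_, intent(inout) :: x(:)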
805 
806 #ifdef OVERLOAD_R4
807 #undef MPP_TYPE_
808 #define MPP_TYPE_ real(FLOAT_KIND)
809 #undef mpp_pass_SG_to_UG_2D_
810 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_r4_2d
811 #undef mpp_pass_SG_to_UG_3D_
812 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_r4_3d
813 #undef mpp_pass_UG_to_SG_2D_
814 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_r4_2d
815 #undef mpp_pass_UG_to_SG_3D_
816 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_r4_3d
817 #include <mpp_unstruct_pass_data.h>
818 #endif
819 
820 #undef MPP_TYPE_
821 #define MPP_TYPE_ integer(INT_KIND)
822 #undef mpp_pass_SG_to_UG_2D_
823 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_i4_2d
824 #undef mpp_pass_SG_to_UG_3D_
825 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_i4_3d
826 #undef mpp_pass_UG_to_SG_2D_
827 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_i4_2d
828 #undef mpp_pass_UG_to_SG_3D_
829 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_i4_3d
830 #include <mpp_unstruct_pass_data.h>
831 
832 #undef MPP_TYPE_
833 #define MPP_TYPE_ logical(INT_KIND)
834 #undef mpp_pass_SG_to_UG_2D_
835 #define mpp_pass_SG_to_UG_2D_ mpp_pass_SG_to_UG_l4_2d
836 #undef mpp_pass_SG_to_UG_3D_
837 #define mpp_pass_SG_to_UG_3D_ mpp_pass_SG_to_UG_l4_3d
838 #undef mpp_pass_UG_to_SG_2D_
839 #define mpp_pass_UG_to_SG_2D_ mpp_pass_UG_to_SG_l4_2d
840 #undef mpp_pass_UG_to_SG_3D_
841 #define mpp_pass_UG_to_SG_3D_ mpp_pass_UG_to_SG_l4_3d
842 #include <mpp_unstruct_pass_data.h>
843 
844 #undef MPP_GLOBAL_FIELD_UG_2D_
845 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r8_2d
846 #undef MPP_GLOBAL_FIELD_UG_3D_
847 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r8_3d
848 #undef MPP_GLOBAL_FIELD_UG_4D_
849 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r8_4d
850 #undef MPP_GLOBAL_FIELD_UG_5D_
851 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r8_5d
852 #undef MPP_TYPE_
853 #define MPP_TYPE_ real(DOUBLE_KIND)
854 #include <mpp_global_field_ug.h>
855 
856 #ifndef no_8byte_integers
857 #undef MPP_GLOBAL_FIELD_UG_2D_
858 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i8_2d
859 #undef MPP_GLOBAL_FIELD_UG_3D_
860 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i8_3d
861 #undef MPP_GLOBAL_FIELD_UG_4D_
862 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i8_4d
863 #undef MPP_GLOBAL_FIELD_UG_5D_
864 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i8_5d
865 #undef MPP_TYPE_
866 #define MPP_TYPE_ integer(LONG_KIND)
867 #include <mpp_global_field_ug.h>
868 #endif
869 
870 #ifdef OVERLOAD_R4
871 #undef MPP_GLOBAL_FIELD_UG_2D_
872 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_r4_2d
873 #undef MPP_GLOBAL_FIELD_UG_3D_
874 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_r4_3d
875 #undef MPP_GLOBAL_FIELD_UG_4D_
876 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_r4_4d
877 #undef MPP_GLOBAL_FIELD_UG_5D_
878 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_r4_5d
879 #undef MPP_TYPE_
880 #define MPP_TYPE_ real(FLOAT_KIND)
881 #include <mpp_global_field_ug.h>
882 #endif
883 
884 #undef MPP_GLOBAL_FIELD_UG_2D_
885 #define MPP_GLOBAL_FIELD_UG_2D_ mpp_global_field2D_ug_i4_2d
886 #undef MPP_GLOBAL_FIELD_UG_3D_
887 #define MPP_GLOBAL_FIELD_UG_3D_ mpp_global_field2D_ug_i4_3d
888 #undef MPP_GLOBAL_FIELD_UG_4D_
889 #define MPP_GLOBAL_FIELD_UG_4D_ mpp_global_field2D_ug_i4_4d
890 #undef MPP_GLOBAL_FIELD_UG_5D_
891 #define MPP_GLOBAL_FIELD_UG_5D_ mpp_global_field2D_ug_i4_5d
892 #undef MPP_TYPE_
893 #define MPP_TYPE_ integer(INT_KIND)
894 #include <mpp_global_field_ug.h>
895 
896 