FV3 Bundle
mpp_do_global_field.h
subroutine MPP_DO_GLOBAL_FIELD_3D_( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
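! On return, each participating PE's "global" array holds the assembled
! field: every PE packs its compute-domain portion, copies it into its own
! "global" directly, and exchanges packed blocks with the other PEs of the
! tile. "flags" can restrict the gather to a row (x) or column (y) of the
! decomposition, or deliver the result to the tile root PE only.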
  type(domain2D), intent(in) :: domain
  MPP_TYPE_, intent(in) :: local(:,:,:)
  integer, intent(in) :: tile, ishift, jshift
  MPP_TYPE_, intent(out) :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
  integer, intent(in), optional :: flags
  MPP_TYPE_, intent(in), optional :: default_data

  integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
  integer :: ke, isc, iec, jsc, jec, is, ie, js, je, nword_me
  integer :: ipos, jpos
  logical :: xonly, yonly, root_only, global_on_this_pe
  MPP_TYPE_ :: clocal ((domain%x(1)%compute%size+ishift) *(domain%y(1)%compute%size+jshift) *size(local,3))
  MPP_TYPE_ :: cremote((domain%x(1)%compute%max_size+ishift)*(domain%y(1)%compute%max_size+jshift)*size(local,3))
  integer :: stackuse
  character(len=8) :: text

  pointer( ptr_local, clocal )
  pointer( ptr_remote, cremote )

  stackuse = size(clocal(:))+size(cremote(:))
  if( stackuse.GT.mpp_domains_stack_size )then
    write( text, '(i8)' )stackuse
    call mpp_error( FATAL, &
      'MPP_DO_GLOBAL_FIELD user stack overflow: call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
  end if

  ptr_local = LOC(mpp_domains_stack)
  ptr_remote = LOC(mpp_domains_stack(size(clocal(:))+1))
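  ! clocal and cremote are Cray-pointer aliases into the shared
  ! mpp_domains_stack workspace (hence the stack-size check above): clocal
  ! holds this PE's packed compute-domain data, cremote the largest block
  ! that can arrive from any other PE.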

  if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )

  xonly = .FALSE.
  yonly = .FALSE.
  root_only = .FALSE.
  if( PRESENT(flags) ) then
    xonly = BTEST(flags,EAST)
    yonly = BTEST(flags,SOUTH)
    if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
      'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
    if(xonly .AND. yonly) then
      xonly = .false.; yonly = .false.
    endif
    root_only = BTEST(flags, ROOT_GLOBAL)
    if( (xonly .or. yonly) .AND. root_only ) then
      call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
        'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
      root_only = .FALSE.
    endif
  endif
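  ! Flag summary: XUPDATE (tested via the EAST bit) requests a gather along
  ! x only, YUPDATE (SOUTH bit) along y only; setting both is the same as a
  ! full gather. ROOT_GLOBAL delivers the result to the tile root PE only
  ! and is ignored, with a warning, when combined with a one-directional
  ! gather.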

  global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe
  ipos = 0; jpos = 0
  if(global_on_this_pe ) then
    if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
      'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
    if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
      if(xonly) then
        if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
           size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
          call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
        jpos = -domain%y(tile)%compute%begin + 1
      else if(yonly) then
        if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
           size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
          call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
        ipos = -domain%x(tile)%compute%begin + 1
      else
        call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
      endif
    endif
  endif

  if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
    !local is on compute domain
    ioff = -domain%x(tile)%compute%begin + 1
    joff = -domain%y(tile)%compute%begin + 1
  else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
    !local is on data domain
    ioff = -domain%x(tile)%data%begin + 1
    joff = -domain%y(tile)%data%begin + 1
  else
    call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
  end if

  ke = size(local,3)
  isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
  jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift

  nword_me = (iec-isc+1)*(jec-jsc+1)*ke
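  ! nword_me is the number of words this PE contributes: its compute-domain
  ! extent (plus ishift/jshift for staggered grid positions) times the
  ! number of vertical levels.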

! make contiguous array from compute domain
  m = 0
  if(global_on_this_pe) then
    !z1l: initialize global = 0 to support mask domain
    if(PRESENT(default_data)) then
      global = default_data
    else
#ifdef LOGICAL_VARIABLE
      global = .false.
#else
      global = 0
#endif
    endif

    do k = 1, ke
      do j = jsc, jec
        do i = isc, iec
          m = m + 1
          clocal(m) = local(i+ioff,j+joff,k)
          global(i+ipos,j+jpos,k) = clocal(m) !always fill local domain directly
        end do
      end do
    end do
  else
    do k = 1, ke
      do j = jsc, jec
        do i = isc, iec
          m = m + 1
          clocal(m) = local(i+ioff,j+joff,k)
        end do
      end do
    end do
  endif

! If this PE holds more than one tile, no tile on this PE is decomposed
! across PEs, so the direct copy above already filled the global field and
! we can just return.
  if(size(domain%x(:))>1) then
    !--- the following is needed to avoid deadlock.
    if( tile == size(domain%x(:)) ) call mpp_sync_self( )
    return
  end if
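! From here on, the PEs of this tile exchange their packed compute-domain
! blocks; the communication pattern below depends on xonly/yonly/root_only.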

  root_pe = mpp_root_pe()

!fill off-domains (note loops begin at an offset of 1)
  if( xonly )then
    nd = size(domain%x(1)%list(:))
    do n = 1,nd-1
      lpos = mod(domain%x(1)%pos+nd-n,nd)
      rpos = mod(domain%x(1)%pos   +n,nd)
      from_pe = domain%x(1)%list(rpos)%pe
      rpos = from_pe - root_pe ! for concurrent run, root_pe may not be 0.
      if (from_pe == NULL_PE) then
        nwords = 0
      else
        nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
               * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
      endif
      ! Force use of scalar, integer pointer interface
      call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%x(1)%list(lpos)%pe, &
                         get_data=cremote(1), glen=nwords, from_pe=from_pe )
      m = 0
      if (from_pe /= NULL_PE) then
        is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
        do k = 1, ke
          do j = jsc, jec
            do i = is, ie
              m = m + 1
              global(i,j+jpos,k) = cremote(m)
            end do
          end do
        end do
      endif
      call mpp_sync_self() !-ensure MPI_ISEND is done.
    end do
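    ! Ring schedule: at step n each PE sends to the PE n positions to its
    ! left in the axis list (lpos) and receives from the PE n positions to
    ! its right (rpos), so every pair on the axis exchanges exactly once
    ! and the pattern cannot deadlock.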
  else if( yonly )then
    nd = size(domain%y(1)%list(:))
    do n = 1,nd-1
      lpos = mod(domain%y(1)%pos+nd-n,nd)
      rpos = mod(domain%y(1)%pos   +n,nd)
      from_pe = domain%y(1)%list(rpos)%pe
      rpos = from_pe - root_pe
      if (from_pe == NULL_PE) then
        nwords = 0
      else
        nwords = (domain%list(rpos)%x(1)%compute%size+ishift) &
               * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
      endif
      ! Force use of scalar, integer pointer interface
      call mpp_transmit( put_data=clocal(1), plen=nword_me, to_pe=domain%y(1)%list(lpos)%pe, &
                         get_data=cremote(1), glen=nwords, from_pe=from_pe )
      m = 0
      if (from_pe /= NULL_PE) then
        js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift
        do k = 1,ke
          do j = js, je
            do i = isc, iec
              m = m + 1
              global(i+ipos,j,k) = cremote(m)
            end do
          end do
        end do
      endif
      call mpp_sync_self() !-ensure MPI_ISEND is done.
    end do
  else
    tile_id = domain%tile_id(1)
    nd = size(domain%list(:))
    if(root_only) then
      if(domain%pe .NE. domain%tile_root_pe) then
        call mpp_send( clocal(1), plen=nword_me, to_pe=domain%tile_root_pe, tag=COMM_TAG_1 )
      else
        do n = 1,nd-1
          rpos = mod(domain%pos+n,nd)
          if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle
          nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
          call mpp_recv(cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_1 )
          m = 0
          is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
          js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift

          do k = 1,ke
            do j = js, je
              do i = is, ie
                m = m + 1
                global(i,j,k) = cremote(m)
              end do
            end do
          end do
        end do
      endif
    else
      do n = 1,nd-1
        lpos = mod(domain%pos+nd-n,nd)
        if( domain%list(lpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
        call mpp_send( clocal(1), plen=nword_me, to_pe=domain%list(lpos)%pe, tag=COMM_TAG_2 )
      end do
      do n = 1,nd-1
        rpos = mod(domain%pos+n,nd)
        if( domain%list(rpos)%tile_id(1) .NE. tile_id ) cycle ! global field only within tile
        nwords = (domain%list(rpos)%x(1)%compute%size+ishift) * (domain%list(rpos)%y(1)%compute%size+jshift) * ke
        call mpp_recv( cremote(1), glen=nwords, from_pe=domain%list(rpos)%pe, tag=COMM_TAG_2 )
        m = 0
        is = domain%list(rpos)%x(1)%compute%begin; ie = domain%list(rpos)%x(1)%compute%end+ishift
        js = domain%list(rpos)%y(1)%compute%begin; je = domain%list(rpos)%y(1)%compute%end+jshift

        do k = 1,ke
          do j = js, je
            do i = is, ie
              m = m + 1
              global(i,j,k) = cremote(m)
            end do
          end do
        end do
      end do
    endif
  end if

  call mpp_sync_self()

  return
end subroutine MPP_DO_GLOBAL_FIELD_3D_
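
! A minimal usage sketch (illustrative only, not part of this file): this
! worker is normally reached through the generic mpp_global_field interface
! after mpp_domains_init and mpp_define_domains. The array extents below
! (isd:ied etc.) are hypothetical.
!
!   real :: local (isd:ied, jsd:jed, nz)  ! field on this PE's data domain
!   real :: global(isg:ieg, jsg:jeg, nz)  ! whole-domain field, filled on return
!   call mpp_global_field( domain, local, global )
!   call mpp_global_field( domain, local, global, flags=XUPDATE ) ! x-row gather only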


subroutine MPP_DO_GLOBAL_FIELD_A2A_3D_( domain, local, global, tile, ishift, jshift, flags, default_data)
!get a global field from a local field
!local field may be on compute OR data domain
  type(domain2D), intent(in) :: domain
  integer, intent(in) :: tile, ishift, jshift
  MPP_TYPE_, intent(in), contiguous, target :: local(:,:,:)
  MPP_TYPE_, intent(out), contiguous, target :: global(domain%x(tile)%global%begin:,domain%y(tile)%global%begin:,:)
  integer, intent(in), optional :: flags
  MPP_TYPE_, intent(in), optional :: default_data

  integer :: i, j, k, m, n, nd, nwords, lpos, rpos, ioff, joff, from_pe, root_pe, tile_id
  integer :: ke, isc, iec, jsc, jec, is, ie, js, je
  integer :: ipos, jpos
  logical :: xonly, yonly, root_only, global_on_this_pe

  ! Alltoallw vectors
  MPP_TYPE_, dimension(:), pointer :: plocal, pglobal

  integer, dimension(:), allocatable :: sendcounts(:), recvcounts(:)
  integer, dimension(:), allocatable :: sdispls(:), rdispls(:)
  type(mpp_type), allocatable :: sendtypes(:), recvtypes(:)
  integer, dimension(3) :: array_of_subsizes, array_of_starts
  integer :: n_sends, n_ax, pe
  integer :: isg, jsg
  integer, allocatable :: pelist(:), axis_pelist(:), pelist_idx(:)

  if (.NOT.module_is_initialized) &
    call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: must first call mpp_domains_init.' )

  ! Validate flag consistency and configure the function
  xonly = .FALSE.
  yonly = .FALSE.
  root_only = .FALSE.
  if( PRESENT(flags) ) then
    xonly = BTEST(flags,EAST)
    yonly = BTEST(flags,SOUTH)
    if( .NOT.xonly .AND. .NOT.yonly )call mpp_error( WARNING, &
      'MPP_GLOBAL_FIELD: you must have flags=XUPDATE, YUPDATE or XUPDATE+YUPDATE' )
    if(xonly .AND. yonly) then
      xonly = .false.; yonly = .false.
    endif
    root_only = BTEST(flags, ROOT_GLOBAL)
    if( (xonly .or. yonly) .AND. root_only ) then
      call mpp_error( WARNING, 'MPP_GLOBAL_FIELD: flags = XUPDATE+GLOBAL_ROOT_ONLY or ' // &
        'flags = YUPDATE+GLOBAL_ROOT_ONLY is not supported, will ignore GLOBAL_ROOT_ONLY' )
      root_only = .FALSE.
    endif
  endif

  global_on_this_pe = .NOT. root_only .OR. domain%pe == domain%tile_root_pe

  ! Calculate offset for truncated global fields
  ! NOTE: We do not check contiguity of global subarrays, and assume that
  ! they have been copied to a contiguous array.
  ipos = 0; jpos = 0
  if(global_on_this_pe ) then
    if(size(local,3).NE.size(global,3) ) call mpp_error( FATAL, &
      'MPP_GLOBAL_FIELD: mismatch of third dimension size of global and local')
    if( size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. size(global,2).NE.(domain%y(tile)%global%size+jshift))then
      if(xonly) then
        if(size(global,1).NE.(domain%x(tile)%global%size+ishift) .OR. &
           size(global,2).NE.(domain%y(tile)%compute%size+jshift)) &
          call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for xonly global field.' )
        jpos = -domain%y(tile)%compute%begin + 1
      else if(yonly) then
        if(size(global,1).NE.(domain%x(tile)%compute%size+ishift) .OR. &
           size(global,2).NE.(domain%y(tile)%global%size+jshift)) &
          call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain for yonly global field.' )
        ipos = -domain%x(tile)%compute%begin + 1
      else
        call mpp_error( FATAL, 'MPP_GLOBAL_FIELD: incoming arrays do not match domain.' )
      endif
    endif
  endif

  ! NOTE: Since local is assumed to contiguously match the data domain, this
  ! is not a useful check. But maybe someday we can support compute domains.
  if( size(local,1).EQ.(domain%x(tile)%compute%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%compute%size+jshift) )then
    !local is on compute domain
    ioff = -domain%x(tile)%compute%begin
    joff = -domain%y(tile)%compute%begin
  else if( size(local,1).EQ.(domain%x(tile)%memory%size+ishift) .AND. size(local,2).EQ.(domain%y(tile)%memory%size+jshift) )then
    !local is on data domain
    ioff = -domain%x(tile)%data%begin
    joff = -domain%y(tile)%data%begin
  else
    call mpp_error( FATAL, 'MPP_GLOBAL_FIELD_: incoming field array must match either compute domain or memory domain.' )
  end if
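  ! Note: unlike MPP_DO_GLOBAL_FIELD_3D_ above, these offsets omit the
  ! "+ 1" because they feed array_of_starts below, which (following the
  ! MPI subarray convention) appears to be taken as zero-based by
  ! mpp_type_create.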

  ke = size(local,3)
  isc = domain%x(tile)%compute%begin; iec = domain%x(tile)%compute%end+ishift
  jsc = domain%y(tile)%compute%begin; jec = domain%y(tile)%compute%end+jshift
  isg = domain%x(1)%global%begin; jsg = domain%y(1)%global%begin

  if(global_on_this_pe) then
    !z1l: initialize global = 0 to support mask domain
    if(PRESENT(default_data)) then
      global = default_data
    else
#ifdef LOGICAL_VARIABLE
      global = .false.
#else
      global = 0
#endif
    endif
  endif

  ! If this PE holds more than one tile, no tile on this PE is decomposed
  ! across PEs, so we can just return.
  if(size(domain%x(:))>1) then
    !--- the following is needed to avoid deadlock.
    if( tile == size(domain%x(:)) ) call mpp_sync_self( )
    return
  end if

  root_pe = mpp_root_pe()

  ! Generate the pelist
  ! TODO: Add these to the domain API
  if (xonly) then
    n_ax = size(domain%x(1)%list(:))
    allocate(axis_pelist(n_ax))
    axis_pelist = [ (domain%x(1)%list(i)%pe, i = 0, n_ax-1) ]

    nd = count(axis_pelist >= 0)
    allocate(pelist(nd), pelist_idx(0:nd-1))
    pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
    pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))

    deallocate(axis_pelist)
  else if (yonly) then
    n_ax = size(domain%y(1)%list(:))
    allocate(axis_pelist(n_ax))
    axis_pelist = [ (domain%y(1)%list(i)%pe, i = 0, n_ax-1) ]

    nd = count(axis_pelist >= 0)
    allocate(pelist(nd), pelist_idx(0:nd-1))
    pelist = pack(axis_pelist, mask=(axis_pelist >= 0))
    pelist_idx = pack([(i, i=0, n_ax-1)], mask=(axis_pelist >= 0))

    deallocate(axis_pelist)
  else
    nd = size(domain%list(:))
    allocate(pelist(nd), pelist_idx(0:nd-1))
    call mpp_get_pelist(domain, pelist)
    pelist_idx = [ (i, i=0, nd-1) ]
  end if
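  ! pelist now holds the PE ids taking part in the collective: the PEs of
  ! this PE's x- or y-axis (entries with pe < 0, i.e. NULL_PE, dropped) or
  ! of the whole 2D domain list. pelist_idx keeps the original list indices
  ! so each sender's compute-domain extents can be looked up below.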

  ! Allocate message data buffers
  allocate(sendcounts(0:nd-1))
  allocate(sdispls(0:nd-1))
  allocate(sendtypes(0:nd-1))
  sendcounts(:) = 0
  sdispls(:) = 0
  sendtypes(:) = mpp_byte

  allocate(recvcounts(0:nd-1))
  allocate(rdispls(0:nd-1))
  allocate(recvtypes(0:nd-1))
  recvcounts(:) = 0
  rdispls(:) = 0
  recvtypes(:) = mpp_byte

  array_of_subsizes = [iec - isc + 1, jec - jsc + 1, size(local, 3)]
  array_of_starts = [isc + ioff, jsc + joff, 0]

  n_sends = merge(1, nd, root_only) ! 1 if root_only else nd
  do n = 0, n_sends - 1
    sendcounts(n) = 1

    call mpp_type_create( &
      local, &
      array_of_subsizes, &
      array_of_starts, &
      sendtypes(n) &
    )
  end do
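  ! Each send is described by a single derived-type message (count 1) whose
  ! subarray locates this PE's compute-domain block inside local; with
  ! root_only, only the first pelist entry (presumably the tile root)
  ! receives a message.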

  ! Receive configuration
  if (global_on_this_pe) then
    do n = 0, nd - 1
      recvcounts(n) = 1
      pe = pelist_idx(n)

      if (xonly) then
        is = domain%x(1)%list(pe)%compute%begin
        ie = domain%x(1)%list(pe)%compute%end + ishift
        js = jsc; je = jec
      else if (yonly) then
        is = isc; ie = iec
        js = domain%y(1)%list(pe)%compute%begin
        je = domain%y(1)%list(pe)%compute%end + jshift
      else
        is = domain%list(pe)%x(1)%compute%begin
        ie = domain%list(pe)%x(1)%compute%end + ishift
        js = domain%list(pe)%y(1)%compute%begin
        je = domain%list(pe)%y(1)%compute%end + jshift
      end if

      array_of_subsizes = [ie - is + 1, je - js + 1, ke]
      array_of_starts = [is - isg + ipos, js - jsg + jpos, 0]

      call mpp_type_create( &
        global, &
        array_of_subsizes, &
        array_of_starts, &
        recvtypes(n) &
      )
    end do
  end if
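  ! Each receiver defines one matching subarray type per sender, placing
  ! that sender's compute-domain block at its position inside global
  ! (shifted by ipos/jpos for one-directional gathers).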

  plocal(1:size(local)) => local
  pglobal(1:size(global)) => global

  call mpp_alltoall(plocal, sendcounts, sdispls, sendtypes, &
                    pglobal, recvcounts, rdispls, recvtypes, &
                    pelist)
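  ! A single alltoall with per-rank derived types replaces the
  ! point-to-point transmit/recv loops of MPP_DO_GLOBAL_FIELD_3D_; the
  ! displacements stay zero because each type already encodes its offset.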

  plocal => null()
  pglobal => null()

  ! Cleanup
  deallocate(pelist)
  deallocate(sendcounts, sdispls, sendtypes)
  deallocate(recvcounts, rdispls, recvtypes)

  call mpp_sync_self()

end subroutine MPP_DO_GLOBAL_FIELD_A2A_3D_