FV3 Bundle
mpp_do_updateV.h
1 ! -*-f90-*-
2 !***********************************************************************
3 !* GNU Lesser General Public License
4 !*
5 !* This file is part of the GFDL Flexible Modeling System (FMS).
6 !*
7 !* FMS is free software: you can redistribute it and/or modify it under
8 !* the terms of the GNU Lesser General Public License as published by
9 !* the Free Software Foundation, either version 3 of the License, or (at
10 !* your option) any later version.
11 !*
12 !* FMS is distributed in the hope that it will be useful, but WITHOUT
13 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 !* for more details.
16 !*
17 !* You should have received a copy of the GNU Lesser General Public
18 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
19 !***********************************************************************
20  subroutine MPP_DO_UPDATE_3D_V_(f_addrsx,f_addrsy, domain, update_x, update_y, &
21  d_type, ke, gridtype, flags)
22 !updates data domain of 3D field whose computational domains have been computed
23  integer(LONG_KIND), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
24  type(domain2d), intent(in) :: domain
25  type(overlapSpec), intent(in) :: update_x, update_y
26  integer, intent(in) :: ke
27  MPP_TYPE_, intent(in) :: d_type ! creates unique interface
28  integer, intent(in) :: gridtype
29  integer, intent(in), optional :: flags
30 
31  MPP_TYPE_ :: fieldx(update_x%xbegin:update_x%xend, update_x%ybegin:update_x%yend,ke)
32  MPP_TYPE_ :: fieldy(update_y%xbegin:update_y%xend, update_y%ybegin:update_y%yend,ke)
33  pointer(ptr_fieldx, fieldx)
34  pointer(ptr_fieldy, fieldy)
35 
36  integer :: update_flags
37  integer :: l_size, l, i, j, k, is, ie, js, je, n, m
38  integer :: pos, nlist, msgsize, isd, ied, jsd, jed
39  integer :: to_pe, from_pe, midpoint
40  integer :: tMe, dir
41 
42  integer :: send_start_pos, nsend
43  integer :: send_msgsize(2*MAXLIST)
44  integer :: send_pe(2*MAXLIST)
45  integer, allocatable :: msg1(:), msg2(:), msg3(:)
46  logical :: send(8), recv(8), update_edge_only
47  MPP_TYPE_ :: buffer(size(mpp_domains_stack(:)))
48  pointer(ptr,buffer )
49  integer :: buffer_pos
50  character(len=8) :: text
51  integer :: buffer_recv_size, shift
52  integer :: rank_x, rank_y, ind_x, ind_y, cur_rank
53  integer :: nsend_x, nsend_y, nrecv_x, nrecv_y, outunit
54 
55  outunit = stdout()
56  update_flags = XUPDATE+YUPDATE !default
57  if( PRESENT(flags) ) then
58  update_flags = flags
59  ! The following test is so that SCALAR_PAIR can be used alone with the
60  ! same default update pattern as without.
61  if (BTEST(update_flags,SCALAR_BIT)) then
62  if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
63  .OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
64  update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
65  end if
66  end if
67 
68  if( BTEST(update_flags,NORTH) .AND. BTEST(domain%fold,NORTH) .AND. BTEST(gridtype,SOUTH) ) &
69  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: Incompatible grid offset and fold.' )
70 
71  update_edge_only = BTEST(update_flags, EDGEONLY)
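  !--- direction index convention used for send/recv below (as set up by the
  !--- assignments that follow): odd entries are the four edges (1=east,
  !--- 3=south, 5=west, 7=north); even entries are the corners shared by the
  !--- two adjacent edges (2=SE, 4=SW, 6=NW, 8=NE).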
72  recv = .false.
73  recv(1) = BTEST(update_flags,EAST)
74  recv(3) = BTEST(update_flags,SOUTH)
75  recv(5) = BTEST(update_flags,WEST)
76  recv(7) = BTEST(update_flags,NORTH)
77  if( update_edge_only ) then
78  if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
79  recv(1) = .true.
80  recv(3) = .true.
81  recv(5) = .true.
82  recv(7) = .true.
83  endif
84  else
85  recv(2) = recv(1) .AND. recv(3)
86  recv(4) = recv(3) .AND. recv(5)
87  recv(6) = recv(5) .AND. recv(7)
88  recv(8) = recv(7) .AND. recv(1)
89  endif
90 
91  send = recv
92 
93  l_size = size(f_addrsx,1)
94  nlist = size(domain%list(:))
95  ptr = LOC(mpp_domains_stack)
96 
97 !--- send/recv list sizes from the x- and y-direction overlap specs
98  nsend_x = update_x%nsend
99  nsend_y = update_y%nsend
100  nrecv_x = update_x%nrecv
101  nrecv_y = update_y%nrecv
102 
103  if(debug_message_passing) then
104  allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
105  msg1 = 0
106  msg2 = 0
107  msg3 = 0
108  cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
109 
110  do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
111  msgsize = 0
112  if(cur_rank == rank_x) then
113  from_pe = update_x%recv(ind_x)%pe
114  do n = 1, update_x%recv(ind_x)%count
115  dir = update_x%recv(ind_x)%dir(n)
116  if(recv(dir)) then
117  is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
118  js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
119  msgsize = msgsize + (ie-is+1)*(je-js+1)
120  end if
121  end do
122  ind_x = ind_x+1
123  if(ind_x .LE. nrecv_x) then
124  rank_x = update_x%recv(ind_x)%pe - domain%pe
125  if(rank_x .LE.0) rank_x = rank_x + nlist
126  else
127  rank_x = -1
128  endif
129  endif
130  if(cur_rank == rank_y) then
131  from_pe = update_y%recv(ind_y)%pe
132  do n = 1, update_y%recv(ind_y)%count
133  dir = update_y%recv(ind_y)%dir(n)
134  if(recv(dir)) then
135  is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
136  js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
137  msgsize = msgsize + (ie-is+1)*(je-js+1)
138  end if
139  end do
140  ind_y = ind_y+1
141  if(ind_y .LE. nrecv_y) then
142  rank_y = update_y%recv(ind_y)%pe - domain%pe
143  if(rank_y .LE.0) rank_y = rank_y + nlist
144  else
145  rank_y = -1
146  endif
147  endif
148  cur_rank = max(rank_x, rank_y)
149  m = from_pe-mpp_root_pe()
150  msg2(m) = msgsize
151  end do
152 
153  cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
154  do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
155  msgsize = 0
156  if(cur_rank == rank_x) then
157  to_pe = update_x%send(ind_x)%pe
158  do n = 1, update_x%send(ind_x)%count
159  dir = update_x%send(ind_x)%dir(n)
160  if( send(dir) ) then
161  is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
162  js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
163  msgsize = msgsize + (ie-is+1)*(je-js+1)
164  end if
165  end do
166  ind_x = ind_x+1
167  if(ind_x .LE. nsend_x) then
168  rank_x = update_x%send(ind_x)%pe - domain%pe
169  if(rank_x .LT.0) rank_x = rank_x + nlist
170  else
171  rank_x = nlist+1
172  endif
173  endif
174  if(cur_rank == rank_y) then
175  to_pe = update_y%send(ind_y)%pe
176  do n = 1, update_y%send(ind_y)%count
177  dir = update_y%send(ind_y)%dir(n)
178  if( send(dir) ) then
179  is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
180  js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
181  msgsize = msgsize + (ie-is+1)*(je-js+1)
182  end if
183  end do
184  ind_y = ind_y+1
185  if(ind_y .LE. nsend_y) then
186  rank_y = update_y%send(ind_y)%pe - domain%pe
187  if(rank_y .LT.0) rank_y = rank_y + nlist
188  else
189  rank_y = nlist+1
190  endif
191  endif
192  m = to_pe-mpp_root_pe()
193  msg3(m) = msgsize
194  cur_rank = min(rank_x, rank_y)
195  enddo
196  call mpp_alltoall(msg3, 1, msg1, 1)
197 ! call mpp_sync_self(check=EVENT_RECV)
198  do m = 0, nlist-1
199  if(msg1(m) .NE. msg2(m)) then
200  print*, "My pe = ", mpp_pe(), ",domain name =", trim(domain%name), ",from pe=", &
201  domain%list(m)%pe, ":send size = ", msg1(m), ", recv size = ", msg2(m)
202  call mpp_error(FATAL, "mpp_do_updateV: mismatch on send and recv size")
203  endif
204  enddo
205 
206 ! call mpp_sync_self()
207  write(outunit,*)"NOTE from mpp_do_updateV: message sizes are matched between send and recv for domain " &
208  //trim(domain%name)
209  deallocate(msg1, msg2, msg3)
210  endif
211 
212  !--- recv
213  buffer_pos = 0
214  cur_rank = get_rank_recv(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
215  call mpp_clock_begin(recv_clock)
216  do while (ind_x .LE. nrecv_x .OR. ind_y .LE. nrecv_y)
217  msgsize = 0
218  select case(gridtype)
219  case(BGRID_NE, BGRID_SW, AGRID)
220  if(cur_rank == rank_x) then
221  from_pe = update_x%recv(ind_x)%pe
222  do n = 1, update_x%recv(ind_x)%count
223  dir = update_x%recv(ind_x)%dir(n)
224  if(recv(dir)) then
225  tMe = update_x%recv(ind_x)%tileMe(n)
226  is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
227  js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
228  msgsize = msgsize + (ie-is+1)*(je-js+1)
229  end if
230  end do
231  msgsize = msgsize*2
232  ind_x = ind_x+1
233  ind_y = ind_x
234  if(ind_x .LE. nrecv_x) then
235  rank_x = update_x%recv(ind_x)%pe - domain%pe
236  if(rank_x .LE.0) rank_x = rank_x + nlist
237  else
238  rank_x = -1
239  endif
240  rank_y = rank_x
241  endif
242  case(CGRID_NE, CGRID_SW)
243  if(cur_rank == rank_x) then
244  from_pe = update_x%recv(ind_x)%pe
245  do n = 1, update_x%recv(ind_x)%count
246  dir = update_x%recv(ind_x)%dir(n)
247  if(recv(dir)) then
248  is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
249  js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
250  msgsize = msgsize + (ie-is+1)*(je-js+1)
251  end if
252  end do
253  ind_x = ind_x+1
254  if(ind_x .LE. nrecv_x) then
255  rank_x = update_x%recv(ind_x)%pe - domain%pe
256  if(rank_x .LE.0) rank_x = rank_x + nlist
257  else
258  rank_x = -1
259  endif
260  endif
261  if(cur_rank == rank_y) then
262  from_pe = update_y%recv(ind_y)%pe
263  do n = 1, update_y%recv(ind_y)%count
264  dir = update_y%recv(ind_y)%dir(n)
265  if(recv(dir)) then
266  is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
267  js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
268  msgsize = msgsize + (ie-is+1)*(je-js+1)
269  end if
270  end do
271  ind_y = ind_y+1
272  if(ind_y .LE. nrecv_y) then
273  rank_y = update_y%recv(ind_y)%pe - domain%pe
274  if(rank_y .LE.0) rank_y = rank_y + nlist
275  else
276  rank_y = -1
277  endif
278  endif
279  end select
280  cur_rank = max(rank_x, rank_y)
281  msgsize = msgsize*ke*l_size
282 
283  if( msgsize.GT.0 )then
284  mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
285  if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
286  write( text,'(i8)' )mpp_domains_stack_hwm
287  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: mpp_domains_stack overflow, '// &
288  'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
289  end if
290  call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., tag=COMM_TAG_2 )
291  buffer_pos = buffer_pos + msgsize
292  end if
293  end do
294  call mpp_clock_end(recv_clock)
295  buffer_recv_size = buffer_pos
296  send_start_pos = buffer_pos
297 
298  !--- send
299  cur_rank = get_rank_send(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
300  nsend = 0
301  call mpp_clock_begin(pack_clock)
302  do while (ind_x .LE. nsend_x .OR. ind_y .LE. nsend_y)
303  pos = buffer_pos
304  !--- make sure the domain stack size is big enough
305  msgsize = 0
306  if(cur_rank == rank_x) then
307  do n = 1, update_x%send(ind_x)%count
308  dir = update_x%send(ind_x)%dir(n)
309  if( send(dir) ) msgsize = msgsize + update_x%send(ind_x)%msgsize(n)
310  enddo
311  endif
312  if(cur_rank == rank_y) then
313  do n = 1, update_y%send(ind_y)%count
314  dir = update_y%send(ind_y)%dir(n)
315  if( send(dir) ) msgsize = msgsize + update_y%send(ind_y)%msgsize(n)
316  enddo
317  endif
318 
319  if( msgsize.GT.0 )then
320  msgsize = msgsize*ke*l_size
321  mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
322  if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
323  write( text,'(i8)' )mpp_domains_stack_hwm
324  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: mpp_domains_stack overflow, ' // &
325  'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
326  end if
327  end if
328  select case( gridtype )
329  case(BGRID_NE, BGRID_SW, AGRID)
330  if(cur_rank == rank_x) then
331  to_pe = update_x%send(ind_x)%pe
332  do n = 1, update_x%send(ind_x)%count
333  dir = update_x%send(ind_x)%dir(n)
334  if( send(dir) ) then
335  tMe = update_x%send(ind_x)%tileMe(n)
336 
337  is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
338  js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
339  select case( update_x%send(ind_x)%rotation(n) )
340  case(ZERO)
341  do l=1,l_size ! loop over number of fields
342  ptr_fieldx = f_addrsx(l,tMe)
343  ptr_fieldy = f_addrsy(l,tMe)
344  do k = 1,ke
345  do j = js, je
346  do i = is, ie
347  pos = pos + 2
348  buffer(pos-1) = fieldx(i,j,k)
349  buffer(pos) = fieldy(i,j,k)
350  end do
351  end do
352  end do
353  end do
354  case( MINUS_NINETY )
355  if( BTEST(update_flags,SCALAR_BIT) ) then
356  do l=1,l_size ! loop over number of fields
357  ptr_fieldx = f_addrsx(l,tMe)
358  ptr_fieldy = f_addrsy(l,tMe)
359  do k = 1,ke
360  do i = is, ie
361  do j = je, js, -1
362  pos = pos + 2
363  buffer(pos-1) = fieldy(i,j,k)
364  buffer(pos) = fieldx(i,j,k)
365  end do
366  end do
367  end do
368  end do
369  else
370  do l=1,l_size ! loop over number of fields
371  ptr_fieldx = f_addrsx(l,tMe)
372  ptr_fieldy = f_addrsy(l,tMe)
373  do k = 1,ke
374  do i = is, ie
375  do j = je, js, -1
376  pos = pos + 2
377  buffer(pos-1) = -fieldy(i,j,k)
378  buffer(pos) = fieldx(i,j,k)
379  end do
380  end do
381  end do
382  end do
383  end if
384  case( NINETY )
385  if( BTEST(update_flags,SCALAR_BIT) ) then
386  do l=1,l_size ! loop over number of fields
387  ptr_fieldx = f_addrsx(l,tMe)
388  ptr_fieldy = f_addrsy(l,tMe)
389  do k = 1,ke
390  do i = ie, is, -1
391  do j = js, je
392  pos = pos + 2
393  buffer(pos-1) = fieldy(i,j,k)
394  buffer(pos) = fieldx(i,j,k)
395  end do
396  end do
397  end do
398  end do
399  else
400  do l=1,l_size ! loop over number of fields
401  ptr_fieldx = f_addrsx(l,tMe)
402  ptr_fieldy = f_addrsy(l,tMe)
403  do k = 1,ke
404  do i = ie, is, -1
405  do j = js, je
406  pos = pos + 2
407  buffer(pos-1) = fieldy(i,j,k)
408  buffer(pos) = -fieldx(i,j,k)
409  end do
410  end do
411  end do
412  end do
413  end if
414  case( ONE_HUNDRED_EIGHTY )
415  if( BTEST(update_flags,SCALAR_BIT) ) then
416  do l=1,l_size ! loop over number of fields
417  ptr_fieldx = f_addrsx(l,tMe)
418  ptr_fieldy = f_addrsy(l,tMe)
419  do k = 1,ke
420  do j = je, js, -1
421  do i = ie, is, -1
422  pos = pos + 2
423  buffer(pos-1) = fieldx(i,j,k)
424  buffer(pos) = fieldy(i,j,k)
425  end do
426  end do
427  end do
428  end do
429  else
430  do l=1,l_size ! loop over number of fields
431  ptr_fieldx = f_addrsx(l,tMe)
432  ptr_fieldy = f_addrsy(l,tMe)
433  do k = 1,ke
434  do j = je, js, -1
435  do i = ie, is, -1
436  pos = pos + 2
437  buffer(pos-1) = -fieldx(i,j,k)
438  buffer(pos) = -fieldy(i,j,k)
439  end do
440  end do
441  end do
442  end do
443  end if
444  end select ! select case( rotation(n) )
445  end if ! if( send(dir) )
446  end do ! do n = 1, update_x%send(ind_x)%count
447  ind_x = ind_x+1
448  ind_y = ind_x
449  if(ind_x .LE. nsend_x) then
450  rank_x = update_x%send(ind_x)%pe - domain%pe
451  if(rank_x .LT.0) rank_x = rank_x + nlist
452  else
453  rank_x = nlist+1
454  endif
455  rank_y = rank_x
456  endif
457  case(CGRID_NE, CGRID_SW)
458  if(cur_rank == rank_x) then
459  to_pe = update_x%send(ind_x)%pe
460  do n = 1, update_x%send(ind_x)%count
461  dir = update_x%send(ind_x)%dir(n)
462  if( send(dir) ) then
463  tMe = update_x%send(ind_x)%tileMe(n)
464  is = update_x%send(ind_x)%is(n); ie = update_x%send(ind_x)%ie(n)
465  js = update_x%send(ind_x)%js(n); je = update_x%send(ind_x)%je(n)
466  select case( update_x%send(ind_x)%rotation(n) )
467  case(ZERO)
468  do l=1,l_size ! loop over number of fields
469  ptr_fieldx = f_addrsx(l, tMe)
470  ptr_fieldy = f_addrsy(l, tMe)
471  do k = 1,ke
472  do j = js, je
473  do i = is, ie
474  pos = pos + 1
475  buffer(pos) = fieldx(i,j,k)
476  end do
477  end do
478  end do
479  end do
480  case(MINUS_NINETY)
481  if( BTEST(update_flags,SCALAR_BIT) ) then
482  do l=1,l_size ! loop over number of fields
483  ptr_fieldx = f_addrsx(l, tMe)
484  ptr_fieldy = f_addrsy(l, tMe)
485  do k = 1,ke
486  do i = is, ie
487  do j = je, js, -1
488  pos = pos + 1
489  buffer(pos) = fieldy(i,j,k)
490  end do
491  end do
492  end do
493  end do
494  else
495  do l=1,l_size ! loop over number of fields
496  ptr_fieldx = f_addrsx(l, tMe)
497  ptr_fieldy = f_addrsy(l, tMe)
498  do k = 1,ke
499  do i = is, ie
500  do j = je, js, -1
501  pos = pos + 1
502  buffer(pos) = -fieldy(i,j,k)
503  end do
504  end do
505  end do
506  end do
507  end if
508  case(NINETY)
509  do l=1,l_size ! loop over number of fields
510  ptr_fieldx = f_addrsx(l, tMe)
511  ptr_fieldy = f_addrsy(l, tMe)
512  do k = 1, ke
513  do i = ie, is, -1
514  do j = js, je
515  pos = pos + 1
516  buffer(pos) = fieldy(i,j,k)
517  end do
518  end do
519  end do
520  end do
521  case(ONE_HUNDRED_EIGHTY)
522  if( BTEST(update_flags,SCALAR_BIT) ) then
523  do l=1,l_size ! loop over number of fields
524  ptr_fieldx = f_addrsx(l, tMe)
525  ptr_fieldy = f_addrsy(l, tMe)
526  do k = 1,ke
527  do j = je, js, -1
528  do i = ie, is, -1
529  pos = pos + 1
530  buffer(pos) = fieldx(i,j,k)
531  end do
532  end do
533  end do
534  end do
535  else
536  do l=1,l_size ! loop over number of fields
537  ptr_fieldx = f_addrsx(l, tMe)
538  ptr_fieldy = f_addrsy(l, tMe)
539  do k = 1,ke
540  do j = je, js, -1
541  do i = ie, is, -1
542  pos = pos + 1
543  buffer(pos) = -fieldx(i,j,k)
544  end do
545  end do
546  end do
547  end do
548  end if
549  end select
550  end if
551  end do
552  ind_x = ind_x+1
553  if(ind_x .LE. nsend_x) then
554  rank_x = update_x%send(ind_x)%pe - domain%pe
555  if(rank_x .LT.0) rank_x = rank_x + nlist
556  else
557  rank_x = nlist+1
558  endif
559  endif
560  if(cur_rank == rank_y) then
561  to_pe = update_y%send(ind_y)%pe
562  do n = 1, update_y%send(ind_y)%count
563  dir = update_y%send(ind_y)%dir(n)
564  if( send(dir) ) then
565  tMe = update_y%send(ind_y)%tileMe(n)
566  is = update_y%send(ind_y)%is(n); ie = update_y%send(ind_y)%ie(n)
567  js = update_y%send(ind_y)%js(n); je = update_y%send(ind_y)%je(n)
568  select case( update_y%send(ind_y)%rotation(n) )
569  case(ZERO)
570  do l=1,l_size ! loop over number of fields
571  ptr_fieldx = f_addrsx(l, tMe)
572  ptr_fieldy = f_addrsy(l, tMe)
573  do k = 1,ke
574  do j = js, je
575  do i = is, ie
576  pos = pos + 1
577  buffer(pos) = fieldy(i,j,k)
578  end do
579  end do
580  end do
581  end do
582  case(MINUS_NINETY)
583  do l=1,l_size ! loop over number of fields
584  ptr_fieldx = f_addrsx(l, tMe)
585  ptr_fieldy = f_addrsy(l, tMe)
586  do k = 1,ke
587  do i = is, ie
588  do j = je, js, -1
589  pos = pos + 1
590  buffer(pos) = fieldx(i,j,k)
591  end do
592  end do
593  end do
594  end do
595  case(NINETY)
596  if( BTEST(update_flags,SCALAR_BIT) ) then
597  do l=1,l_size ! loop over number of fields
598  ptr_fieldx = f_addrsx(l, tMe)
599  ptr_fieldy = f_addrsy(l, tMe)
600  do k = 1,ke
601  do i = ie, is, -1
602  do j = js, je
603  pos = pos + 1
604  buffer(pos) = fieldx(i,j,k)
605  end do
606  end do
607  end do
608  end do
609  else
610  do l=1,l_size ! loop over number of fields
611  ptr_fieldx = f_addrsx(l, tMe)
612  ptr_fieldy = f_addrsy(l, tMe)
613  do k = 1,ke
614  do i = ie, is, -1
615  do j = js, je
616  pos = pos + 1
617  buffer(pos) = -fieldx(i,j,k)
618  end do
619  end do
620  end do
621  end do
622  end if
623  case(ONE_HUNDRED_EIGHTY)
624  if( BTEST(update_flags,SCALAR_BIT) ) then
625  do l=1,l_size ! loop over number of fields
626  ptr_fieldx = f_addrsx(l, tMe)
627  ptr_fieldy = f_addrsy(l, tMe)
628  do k = 1,ke
629  do j = je, js, -1
630  do i = ie, is, -1
631  pos = pos + 1
632  buffer(pos) = fieldy(i,j,k)
633  end do
634  end do
635  end do
636  end do
637  else
638  do l=1,l_size ! loop over number of fields
639  ptr_fieldx = f_addrsx(l, tMe)
640  ptr_fieldy = f_addrsy(l, tMe)
641  do k = 1,ke
642  do j = je, js, -1
643  do i = ie, is, -1
644  pos = pos + 1
645  buffer(pos) = -fieldy(i,j,k)
646  end do
647  end do
648  end do
649  end do
650  end if
651  end select
652  endif
653  enddo
654  ind_y = ind_y+1
655  if(ind_y .LE. nsend_y) then
656  rank_y = update_y%send(ind_y)%pe - domain%pe
657  if(rank_y .LT.0) rank_y = rank_y + nlist
658  else
659  rank_y = nlist+1
660  endif
661  endif
662  end select
663  cur_rank = min(rank_x, rank_y)
664  nsend = nsend + 1
665  send_pe(nsend) = to_pe
666  send_msgsize(nsend) = pos - buffer_pos
667  buffer_pos = pos
668  end do
669 
670  buffer_pos = send_start_pos
671  call mpp_clock_end(pack_clock)
672  call mpp_clock_begin(send_clock)
673  do m = 1, nsend
674  msgsize = send_msgsize(m)
675  if( msgsize.GT.0 )then
676  call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=send_pe(m), tag=COMM_TAG_2 )
677  buffer_pos = buffer_pos + msgsize
678  end if
679  end do
680  call mpp_clock_end(send_clock)
681 
682 !unpack recv
683 !unpack halos in reverse order
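  !--- walking the recv list backward lets each message's start offset be
  !--- recovered by subtracting its size from the running buffer position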
684  call mpp_clock_begin(wait_clock)
685  call mpp_sync_self(check=EVENT_RECV)
686  call mpp_clock_end(wait_clock)
687  buffer_pos = buffer_recv_size
688  cur_rank = get_rank_unpack(domain, update_x, update_y, rank_x, rank_y, ind_x, ind_y)
689 
690  call mpp_clock_begin(unpk_clock)
691  do while (ind_x > 0 .OR. ind_y > 0)
692  pos = buffer_pos
693  select case ( gridtype )
694  case(BGRID_NE, BGRID_SW, AGRID)
695  if(cur_rank == rank_x) then
696  do n = update_x%recv(ind_x)%count, 1, -1
697  dir = update_x%recv(ind_x)%dir(n)
698  if( recv(dir) ) then
699  tMe = update_x%recv(ind_x)%tileMe(n)
700  is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
701  js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
702  msgsize = (ie-is+1)*(je-js+1)*ke*2*l_size
703  pos = buffer_pos - msgsize
704  buffer_pos = pos
705  do l=1, l_size ! loop over number of fields
706  ptr_fieldx = f_addrsx(l, tMe)
707  ptr_fieldy = f_addrsy(l, tMe)
708  do k = 1,ke
709  do j = js, je
710  do i = is, ie
711  pos = pos + 2
712  fieldx(i,j,k) = buffer(pos-1)
713  fieldy(i,j,k) = buffer(pos)
714  end do
715  end do
716  end do
717  end do
718  end if ! end if( recv(dir) )
719  end do ! do n = update_x%recv(ind_x)%count, 1, -1
720  ind_x = ind_x-1
721  ind_y = ind_x
722  if(ind_x .GT. 0) then
723  rank_x = update_x%recv(ind_x)%pe - domain%pe
724  if(rank_x .LE.0) rank_x = rank_x + nlist
725  else
726  rank_x = nlist+1
727  endif
728  rank_y = rank_x
729  endif
730  case(CGRID_NE, CGRID_SW)
731  if(cur_rank == rank_y) then
732  do n = update_y%recv(ind_y)%count, 1, -1
733  dir = update_y%recv(ind_y)%dir(n)
734  if( recv(dir) ) then
735  tMe = update_y%recv(ind_y)%tileMe(n)
736  is = update_y%recv(ind_y)%is(n); ie = update_y%recv(ind_y)%ie(n)
737  js = update_y%recv(ind_y)%js(n); je = update_y%recv(ind_y)%je(n)
738  msgsize = (ie-is+1)*(je-js+1)*ke*l_size
739  pos = buffer_pos - msgsize
740  buffer_pos = pos
741  do l=1,l_size ! loop over number of fields
742  ptr_fieldx = f_addrsx(l, tMe)
743  ptr_fieldy = f_addrsy(l, tMe)
744  do k = 1,ke
745  do j = js, je
746  do i = is, ie
747  pos = pos + 1
748  fieldy(i,j,k) = buffer(pos)
749  end do
750  end do
751  end do
752  end do
753  end if
754  end do
755  ind_y = ind_y-1
756  if(ind_y .GT. 0) then
757  rank_y = update_y%recv(ind_y)%pe - domain%pe
758  if(rank_y .LE.0) rank_y = rank_y + nlist
759  else
760  rank_y = nlist+1
761  endif
762  endif
763  if(cur_rank == rank_x) then
764  do n = update_x%recv(ind_x)%count, 1, -1
765  dir = update_x%recv(ind_x)%dir(n)
766  if( recv(dir) ) then
767  tMe = update_x%recv(ind_x)%tileMe(n)
768  is = update_x%recv(ind_x)%is(n); ie = update_x%recv(ind_x)%ie(n)
769  js = update_x%recv(ind_x)%js(n); je = update_x%recv(ind_x)%je(n)
770  msgsize = (ie-is+1)*(je-js+1)*ke*l_size
771  pos = buffer_pos - msgsize
772  buffer_pos = pos
773  do l=1,l_size ! loop over number of fields
774  ptr_fieldx = f_addrsx(l, tMe)
775  ptr_fieldy = f_addrsy(l, tMe)
776  do k = 1,ke
777  do j = js, je
778  do i = is, ie
779  pos = pos + 1
780  fieldx(i,j,k) = buffer(pos)
781  end do
782  end do
783  end do
784  end do
785  end if
786  end do
787  ind_x = ind_x-1
788  if(ind_x .GT. 0) then
789  rank_x = update_x%recv(ind_x)%pe - domain%pe
790  if(rank_x .LE.0) rank_x = rank_x + nlist
791  else
792  rank_x = nlist+1
793  endif
794  endif
795  end select
796  cur_rank = min(rank_x, rank_y)
797  end do
798  call mpp_clock_end(unpk_clock)
799 
800  ! ---northern boundary fold
801  shift = 0
802  if(domain%symmetry) shift = 1
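  ! symmetric (staggered) domains carry one extra point on the global edge;
  ! the shift below accounts for it when locating the fold row/column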
803  if( BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then
804  isd = domain%x(1)%compute%begin - update_x%whalo;
805  ied = domain%x(1)%compute%end + update_x%ehalo;
806  jsd = domain%y(1)%compute%begin - update_y%shalo;
807  jed = domain%y(1)%compute%end + update_y%nhalo;
808 
809  j = domain%y(1)%global%end+shift
810  if( jsd .LE. j .AND. j.LE.jed+shift )then !fold is within domain
811  !poles set to 0: BGRID only
812  if( gridtype.EQ.BGRID_NE )then
813  midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
814  j = domain%y(1)%global%end+shift
815  is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
816  if( .NOT. domain%symmetry ) is = is - 1
817  do i = is ,ie, midpoint
818  if( isd.LE.i .AND. i.LE. ied+shift )then
819  do l=1,l_size
820  ptr_fieldx = f_addrsx(l, 1)
821  ptr_fieldy = f_addrsy(l, 1)
822  do k = 1,ke
823  fieldx(i,j,k) = 0.
824  fieldy(i,j,k) = 0.
825  end do
826  end do
827  end if
828  end do
829  endif
830 
831  ! the following code block corrects an error where the halo data coming from
832  ! the other half of the fold may have the wrong sign
833  !off west edge, when update north or west direction
834  j = domain%y(1)%global%end+shift
835  if ( recv(7) .OR. recv(5) ) then
836  select case(gridtype)
837  case(BGRID_NE)
838  if(domain%symmetry) then
839  is = domain%x(1)%global%begin
840  else
841  is = domain%x(1)%global%begin - 1
842  end if
843  if( is.GT.isd )then
844  if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
845  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
846  do l=1,l_size
847  ptr_fieldx = f_addrsx(l, 1)
848  ptr_fieldy = f_addrsy(l, 1)
849  do k = 1,ke
850  do i = isd,is-1
851  fieldx(i,j,k) = fieldx(2*is-i,j,k)
852  fieldy(i,j,k) = fieldy(2*is-i,j,k)
853  end do
854  end do
855  end do
856  end if
857  case(CGRID_NE)
858  is = domain%x(1)%global%begin
859  if( is.GT.isd )then
860  if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
861  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
862  do l=1,l_size
863  ptr_fieldy = f_addrsy(l, 1)
864  do k = 1,ke
865  do i = isd,is-1
866  fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
867  end do
868  end do
869  end do
870  end if
871  end select
872  end if
873 
874  !off east edge
875  is = domain%x(1)%global%end
876  if(domain%x(1)%cyclic .AND. is.LT.ied )then
877  ie = ied
878  is = is + 1
879  select case(gridtype)
880  case(BGRID_NE)
881  is = is + shift
882  ie = ie + shift
883  do l=1,l_size
884  ptr_fieldx = f_addrsx(l, 1)
885  ptr_fieldy = f_addrsy(l, 1)
886  do k = 1,ke
887  do i = is,ie
888  fieldx(i,j,k) = -fieldx(i,j,k)
889  fieldy(i,j,k) = -fieldy(i,j,k)
890  end do
891  end do
892  end do
893  case(CGRID_NE)
894  do l=1,l_size
895  ptr_fieldy = f_addrsy(l, 1)
896  do k = 1,ke
897  do i = is, ie
898  fieldy(i,j,k) = -fieldy(i,j,k)
899  end do
900  end do
901  end do
902  end select
903  end if
904  end if
905  else if( BTEST(domain%fold,SOUTH) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---southern boundary fold
906  ! NOTE: symmetry is assumed for fold-south boundary
907  j = domain%y(1)%global%begin
908  if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
909  midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
910  !poles set to 0: BGRID only
911  if( gridtype.EQ.BGRID_NE )then
912  j = domain%y(1)%global%begin
913  is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
914  do i = is ,ie, midpoint
915  if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
916  do l=1,l_size
917  ptr_fieldx = f_addrsx(l, 1)
918  ptr_fieldy = f_addrsy(l, 1)
919  do k = 1,ke
920  fieldx(i,j,k) = 0.
921  fieldy(i,j,k) = 0.
922  end do
923  end do
924  end if
925  end do
926  endif
927 
928  ! the following code block corrects an error where the halo data coming from
929  ! the other half of the fold may have the wrong sign
930  !off west edge, when update south or west direction
931  j = domain%y(1)%global%begin
932  if ( recv(3) .OR. recv(5) ) then
933  select case(gridtype)
934  case(BGRID_NE)
935  is = domain%x(1)%global%begin
936  if( is.GT.domain%x(1)%data%begin )then
937 
938  if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
939  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-south BGRID_NE west edge ubound error.' )
940  do l=1,l_size
941  ptr_fieldx = f_addrsx(l, 1)
942  ptr_fieldy = f_addrsy(l, 1)
943  do k = 1,ke
944  do i = domain%x(1)%data%begin,is-1
945  fieldx(i,j,k) = fieldx(2*is-i,j,k)
946  fieldy(i,j,k) = fieldy(2*is-i,j,k)
947  end do
948  end do
949  end do
950  end if
951  case(CGRID_NE)
952  is = domain%x(1)%global%begin
953  if( is.GT.domain%x(1)%data%begin )then
954  if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
955  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-south CGRID_NE west edge ubound error.' )
956  do l=1,l_size
957  ptr_fieldy = f_addrsy(l, 1)
958  do k = 1,ke
959  do i = domain%x(1)%data%begin,is-1
960  fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
961  end do
962  end do
963  end do
964  end if
965  end select
966  end if
967 
968  !off east edge
969  is = domain%x(1)%global%end
970  if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
971  ie = domain%x(1)%data%end
972  is = is + 1
973  select case(gridtype)
974  case(BGRID_NE)
975  is = is + shift
976  ie = ie + shift
977  do l=1,l_size
978  ptr_fieldx = f_addrsx(l, 1)
979  ptr_fieldy = f_addrsy(l, 1)
980  do k = 1,ke
981  do i = is,ie
982  fieldx(i,j,k) = -fieldx(i,j,k)
983  fieldy(i,j,k) = -fieldy(i,j,k)
984  end do
985  end do
986  end do
987  case(CGRID_NE)
988  do l=1,l_size
989  ptr_fieldy = f_addrsy(l, 1)
990  do k = 1,ke
991  do i = is, ie
992  fieldy(i,j,k) = -fieldy(i,j,k)
993  end do
994  end do
995  end do
996  end select
997  end if
998  end if
999  else if( BTEST(domain%fold,WEST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---western boundary fold
1000  ! NOTE: symmetry is assumed for fold-west boundary
1001  i = domain%x(1)%global%begin
1002  if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
1003  midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
1004  !poles set to 0: BGRID only
1005  if( gridtype.EQ.BGRID_NE )then
1006  i = domain%x(1)%global%begin
1007  js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
1008  do j = js ,je, midpoint
1009  if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
1010  do l=1,l_size
1011  ptr_fieldx = f_addrsx(l, 1)
1012  ptr_fieldy = f_addrsy(l, 1)
1013  do k = 1,ke
1014  fieldx(i,j,k) = 0.
1015  fieldy(i,j,k) = 0.
1016  end do
1017  end do
1018  end if
1019  end do
1020  endif
1021 
1022  ! the following code block corrects an error where the halo data coming from
1023  ! the other half of the fold may have the wrong sign
1024  !off south edge, when update south or west direction
1025  i = domain%x(1)%global%begin
1026  if ( recv(3) .OR. recv(5) ) then
1027  select case(gridtype)
1028  case(BGRID_NE)
1029  js = domain%y(1)%global%begin
1030  if( js.GT.domain%y(1)%data%begin )then
1031 
1032  if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
1033  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-west BGRID_NE west edge ubound error.' )
1034  do l=1,l_size
1035  ptr_fieldx = f_addrsx(l, 1)
1036  ptr_fieldy = f_addrsy(l, 1)
1037  do k = 1,ke
1038  do j = domain%y(1)%data%begin,js-1
1039  fieldx(i,j,k) = fieldx(i,2*js-j,k)
1040  fieldy(i,j,k) = fieldy(i,2*js-j,k)
1041  end do
1042  end do
1043  end do
1044  end if
1045  case(CGRID_NE)
1046  js = domain%y(1)%global%begin
1047  if( js.GT.domain%y(1)%data%begin )then
1048  if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
1049  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-west CGRID_NE west edge ubound error.' )
1050  do l=1,l_size
1051  ptr_fieldx = f_addrsx(l, 1)
1052  do k = 1,ke
1053  do j = domain%y(1)%data%begin,js-1
1054  fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
1055  end do
1056  end do
1057  end do
1058  end if
1059  end select
1060  end if
1061 
1062  !off north edge
1063  js = domain%y(1)%global%end
1064  if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
1065  je = domain%y(1)%data%end
1066  js = js + 1
1067  select case(gridtype)
1068  case(BGRID_NE)
1069  js = js + shift
1070  je = je + shift
1071  do l=1,l_size
1072  ptr_fieldx = f_addrsx(l, 1)
1073  ptr_fieldy = f_addrsy(l, 1)
1074  do k = 1,ke
1075  do j = js,je
1076  fieldx(i,j,k) = -fieldx(i,j,k)
1077  fieldy(i,j,k) = -fieldy(i,j,k)
1078  end do
1079  end do
1080  end do
1081  case(CGRID_NE)
1082  do l=1,l_size
1083  ptr_fieldx = f_addrsx(l, 1)
1084  do k = 1,ke
1085  do j = js, je
1086  fieldx(i,j,k) = -fieldx(i,j,k)
1087  end do
1088  end do
1089  end do
1090  end select
1091  end if
1092  end if
1093  else if( BTEST(domain%fold,EAST) .AND. (.NOT.BTEST(update_flags,SCALAR_BIT)) )then ! ---eastern boundary fold
1094  ! NOTE: symmetry is assumed for fold-east boundary
1095  i = domain%x(1)%global%end+shift
1096  if( domain%x(1)%data%begin.LE.i .AND. i.LE.domain%x(1)%data%end+shift )then !fold is within domain
1097  midpoint = (domain%y(1)%global%begin+domain%y(1)%global%end-1+shift)/2
1098  !poles set to 0: BGRID only
1099  if( gridtype.EQ.BGRID_NE )then
1100  i = domain%x(1)%global%end+shift
1101  js = domain%y(1)%global%begin; je = domain%y(1)%global%end+shift
1102  do j = js ,je, midpoint
1103  if( domain%y(1)%data%begin.LE.j .AND. j.LE. domain%y(1)%data%end+shift )then
1104  do l=1,l_size
1105  ptr_fieldx = f_addrsx(l, 1)
1106  ptr_fieldy = f_addrsy(l, 1)
1107  do k = 1,ke
1108  fieldx(i,j,k) = 0.
1109  fieldy(i,j,k) = 0.
1110  end do
1111  end do
1112  end if
1113  end do
1114  endif
1115 
1116  ! the following code block corrects an error where the halo data coming from
1117  ! the other half of the fold may have the wrong sign
1118  !off south edge, when update south or east direction
1119  i = domain%x(1)%global%end+shift
1120  if ( recv(3) .OR. recv(1) ) then
1121  select case(gridtype)
1122  case(BGRID_NE)
1123  js = domain%y(1)%global%begin
1124  if( js.GT.domain%y(1)%data%begin )then
1125 
1126  if( 2*js-domain%y(1)%data%begin.GT.domain%y(1)%data%end+shift ) &
1127  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-east BGRID_NE west edge ubound error.' )
1128  do l=1,l_size
1129  ptr_fieldx = f_addrsx(l, 1)
1130  ptr_fieldy = f_addrsy(l, 1)
1131  do k = 1,ke
1132  do j = domain%y(1)%data%begin,js-1
1133  fieldx(i,j,k) = fieldx(i,2*js-j,k)
1134  fieldy(i,j,k) = fieldy(i,2*js-j,k)
1135  end do
1136  end do
1137  end do
1138  end if
1139  case(CGRID_NE)
1140  js = domain%y(1)%global%begin
1141  if( js.GT.domain%y(1)%data%begin )then
1142  if( 2*js-domain%y(1)%data%begin-1.GT.domain%y(1)%data%end ) &
1143  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-east CGRID_NE west edge ubound error.' )
1144  do l=1,l_size
1145  ptr_fieldx = f_addrsx(l, 1)
1146  do k = 1,ke
1147  do j = domain%y(1)%data%begin,js-1
1148  fieldx(i,j,k) = fieldx(i, 2*js-j-1,k)
1149  end do
1150  end do
1151  end do
1152  end if
1153  end select
1154  end if
1155 
1156  !off north edge
1157  js = domain%y(1)%global%end
1158  if(domain%y(1)%cyclic .AND. js.LT.domain%y(1)%data%end )then
1159  je = domain%y(1)%data%end
1160  js = js + 1
1161  select case(gridtype)
1162  case(BGRID_NE)
1163  js = js + shift
1164  je = je + shift
1165  do l=1,l_size
1166  ptr_fieldx = f_addrsx(l, 1)
1167  ptr_fieldy = f_addrsy(l, 1)
1168  do k = 1,ke
1169  do j = js,je
1170  fieldx(i,j,k) = -fieldx(i,j,k)
1171  fieldy(i,j,k) = -fieldy(i,j,k)
1172  end do
1173  end do
1174  end do
1175  case(CGRID_NE)
1176  do l=1,l_size
1177  ptr_fieldx = f_addrsx(l, 1)
1178  do k = 1,ke
1179  do j = js, je
1180  fieldx(i,j,k) = -fieldx(i,j,k)
1181  end do
1182  end do
1183  end do
1184  end select
1185  end if
1186  end if
1187  end if
1188 
1189  call mpp_clock_begin(wait_clock)
1190  call mpp_sync_self( )
1191  call mpp_clock_end(wait_clock)
1192 
1193  return
1194 
1195  end subroutine MPP_DO_UPDATE_3D_V_
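
The routine above is an internal worker: user code reaches it through the public vector interface mpp_update_domains(fieldx, fieldy, domain, ...), which resolves the field addresses and overlap specs before dispatching here. The sketch below is a minimal, illustrative driver, assuming FMS is built and linked; the grid size, halo widths, and names (nx, ny, nz, u, v, the program name) are placeholders, while the modules, types, and calls used (mpp_mod, mpp_domains_mod, domain2D, mpp_define_layout, mpp_define_domains, mpp_get_data_domain, mpp_update_domains, CGRID_NE) are the standard FMS public API.

  program vector_halo_update_sketch
    use mpp_mod,         only: mpp_init, mpp_exit, mpp_npes
    use mpp_domains_mod, only: mpp_domains_init, mpp_define_layout, mpp_define_domains, &
                               mpp_get_data_domain, mpp_update_domains, domain2D, CGRID_NE
    implicit none
    type(domain2D)     :: domain
    integer            :: layout(2), isd, ied, jsd, jed
    integer, parameter :: nx = 96, ny = 96, nz = 10    ! illustrative sizes
    real, allocatable  :: u(:,:,:), v(:,:,:)

    call mpp_init()
    call mpp_domains_init()

    ! decompose the nx-by-ny global grid over all PEs with 2-point halos
    call mpp_define_layout( (/1,nx,1,ny/), mpp_npes(), layout )
    call mpp_define_domains( (/1,nx,1,ny/), layout, domain, &
                             whalo=2, ehalo=2, shalo=2, nhalo=2, name='sketch' )

    ! allocate on the data (compute+halo) domain and fill the compute part
    call mpp_get_data_domain( domain, isd, ied, jsd, jed )
    allocate( u(isd:ied,jsd:jed,nz), v(isd:ied,jsd:jed,nz) )
    u = 0.0; v = 0.0

    ! paired (vector) halo update; the gridtype argument selects the staggering
    ! branch (BGRID_NE/BGRID_SW, CGRID_NE/CGRID_SW, AGRID) handled above
    call mpp_update_domains( u, v, domain, gridtype=CGRID_NE )

    call mpp_exit()
  end program vector_halo_update_sketch

When the domain is defined with a folded north edge (e.g. a tripolar ocean grid), the same call also exercises the pole and fold-edge sign corrections in the second half of the routine above.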
integer mpp_domains_stack_hwm
real(fp), parameter, public half
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version will
type(ext_fieldtype), dimension(:), pointer, save, private field
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz l_size integer
l_size ! loop over number of fields ke do je do i
integer, private je
Definition: fms_io.F90:494
integer, private jsd
Definition: fms_io.F90:495
integer, parameter recv
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_WRITE_UNLIMITED_AXIS_1D_(unit, field, domain, data, nelems_io) integer, intent(in) ::unit type(fieldtype), intent(inout) ::field type(domain2D), intent(inout) ::domain MPP_TYPE_, intent(inout) ::data(:) integer, intent(in) ::nelems_io(:) ! number of compressed elements from each ! member of the io_domain. It MUST have the ! same order as the io_domain pelist. integer, allocatable ::pelist(:) integer ::i, j, nelems, npes type(domain2d), pointer ::io_domain=> allocatable
subroutine, public copy(self, rhs)
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT end
integer, parameter set
character(len=256) text
Definition: mpp_io.F90:1051
integer(long), parameter true
type(field_mgr_type), dimension(max_fields), private fields
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If not
Definition: mpp_do_updateV.h:5
integer(long), parameter false
from from_pe
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible MPP_TYPE_
l_size ! loop over number of fields ke do j
integer, parameter send
character(len=32) name
integer, parameter, public west
l_size ! loop over number of fields ke do je do ie to je n if(.NOT. d_comm%R_do_buf(list)) cycle from_pe
integer, private jed
Definition: fms_io.F90:495
l_size ! loop over number of fields ke do je do ie to to_pe
integer, parameter m
character(len=128) version
l_size ! loop over number of fields ke do je do ie to is
type
Definition: c2f.py:15
integer, parameter, public global
integer, private ied
Definition: fms_io.F90:495
integer, private ie
Definition: fms_io.F90:494
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call mpp_error(FATAL, 'MPP_TRANSMIT:you cannot transmit to ANY_PE using MPI.') else if(to_pe.NE.NULL_PE) then !no other valid cases except NULL_PE call mpp_error(FATAL
integer error
Definition: mpp.F90:1310
l_size ! loop over number of fields ke do je do ie to je msgsize
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this case
integer, parameter, public east
integer mpp_domains_stack_size
real(fvprc) function, dimension(size(a, 1), size(a, 2)) reverse(A)
logical function received(this, seqno)
logical debug_message_passing
#define LONG_KIND
subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, flags, complete, position, extra_halo, name, tile_count)
The 2-D wrapper declares rank-3 views of the field and of the west/east/south/north buffers (third extent 1) via Cray pointers, associates them with LOC() of the 2-D actual arguments, and forwards to mpp_update_nest_fine with the same optional arguments. The companion MPP_UPDATE_NEST_FINE_3D_ operates on the rank-3 field and buffers directly, using a d_type dummy, a nestSpec pointer named update, and local tile and update_flags bookkeeping.
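The 2-D-to-3-D forwarding trick used by these wrappers can be sketched in isolation as follows; the module and routine names are illustrative stand-ins, not FMS interfaces, and the Cray-pointer declaration mirrors the one used in the source.

  module nest_wrap_sketch
    implicit none
  contains
    ! Hypothetical rank-3 worker standing in for the real 3-D update routine.
    subroutine update_3d_impl(field3D)
      real, intent(inout) :: field3D(:,:,:)
      ! real halo-update work would happen here
    end subroutine update_3d_impl

    ! 2-D wrapper: expose the 2-D actual argument as a rank-3 array with a
    ! third extent of 1 via a Cray pointer (no copy), then forward it.
    subroutine update_2d_wrapper(field)
      real, intent(inout) :: field(:,:)
      real                :: field3D(size(field,1), size(field,2), 1)
      pointer(ptr, field3D)     ! Cray-pointer extension, as in the FMS source
      ptr = LOC(field)          ! point the 3-D view at the 2-D data
      call update_3d_impl(field3D)
    end subroutine update_2d_wrapper
  end module nest_wrap_sketch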
integer, dimension(size(field%axes(:))) :: axsiz ! per-axis read extents, local to READ_RECORD_ (see the READ_RECORD_ entry above)
integer, parameter, public north
#define max(a, b)
Definition: mosaic_util.h:33
integer, parameter, public cyclic
integer pos ! buffer position, advanced in the loops over the number of fields (l_size), ke, and the js:je / is:ie overlap extents
integer, parameter, public order
#define min(a, b)
Definition: mosaic_util.h:32
integer, parameter, public south
integer, private isd
Definition: fms_io.F90:495
integer js ! lower bound of the j loop over the overlap extent (do j = js, je), inside the loops over the number of fields (l_size) and ke
MPP_BROADCAST — noted alongside MPP_TRANSMIT_: an earlier subroutine form did not allow broadcast to a subset of PEs; this version and mpp_transmit will remain backward compatible.
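A minimal sketch of broadcast usage alongside the transmit pattern, assuming the mpp_broadcast generic and mpp_root_pe from mpp_mod; broadcast_field is an illustrative name, and the optional pelist argument (which restricts the broadcast to a subset of PEs) is omitted.

  ! Sketch only: broadcast a buffer from the root PE to every PE.
  subroutine broadcast_field(buf, n)
    use mpp_mod, only : mpp_broadcast, mpp_root_pe
    implicit none
    integer, intent(in)    :: n
    real,    intent(inout) :: buf(n)   ! significant on the root PE on entry, on all PEs on exit
    call mpp_broadcast( buf, n, mpp_root_pe() )
  end subroutine broadcast_field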