FV3 Bundle
mpp_group_update.h
1 !***********************************************************************
2 !* GNU Lesser General Public License
3 !*
4 !* This file is part of the GFDL Flexible Modeling System (FMS).
5 !*
6 !* FMS is free software: you can redistribute it and/or modify it under
7 !* the terms of the GNU Lesser General Public License as published by
8 !* the Free Software Foundation, either version 3 of the License, or (at
9 !* your option) any later version.
10 !*
11 !* FMS is distributed in the hope that it will be useful, but WITHOUT
12 !* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 !* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 !* for more details.
15 !*
16 !* You should have received a copy of the GNU Lesser General Public
17 !* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
18 !***********************************************************************
19 ! -*-f90-*-
20 subroutine MPP_CREATE_GROUP_UPDATE_2D_(group, field, domain, flags, position, &
21  whalo, ehalo, shalo, nhalo)
22  type(mpp_group_update_type), intent(inout) :: group
23  MPP_TYPE_, intent(inout) :: field(:,:)
24  type(domain2D), intent(inout) :: domain
25  integer, intent(in), optional :: flags
26  integer, intent(in), optional :: position
27  integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
28 
29  MPP_TYPE_ :: field3D(size(field,1),size(field,2),1)
30  pointer( ptr, field3D )
31  ptr = LOC(field)
32 
33  call mpp_create_group_update(group, field3D, domain, flags, position, whalo, ehalo, shalo, nhalo)
34 
35  return
36 
37 end subroutine MPP_CREATE_GROUP_UPDATE_2D_
38 
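! The rank-changing wrappers in this file (2D and 4D, scalar and vector) use
! the non-standard but widely supported Cray POINTER extension: field3D is
! overlaid on the caller's array via LOC(), so the rank-3 routine below does
! the real work on the same storage without a copy. This requires the field
! to be contiguous in memory, which FMS assumes throughout.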
39 subroutine MPP_CREATE_GROUP_UPDATE_3D_(group, field, domain, flags, position, whalo, ehalo, shalo, nhalo)
40  type(mpp_group_update_type), intent(inout) :: group
41  MPP_TYPE_, intent(inout) :: field(:,:,:)
42  type(domain2D), intent(inout) :: domain
43  integer, intent(in), optional :: flags
44  integer, intent(in), optional :: position
45  integer, intent(in), optional :: whalo, ehalo, shalo, nhalo ! specify halo region to be updated.
46 
47  integer :: update_position, update_whalo, update_ehalo, update_shalo, update_nhalo
 48  integer :: update_flags, isize, jsize, ksize
 49  integer :: nscalar
50  character(len=3) :: text
51  logical :: set_mismatch, update_edge_only
52  logical :: recv(8)
53 
54  if(group%initialized) then
55  call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_3D: group is already initialized")
56  endif
57 
58  if(present(whalo)) then
59  update_whalo = whalo
60  if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
 61  "optional argument whalo should not be larger than the whalo used to define the domain.")
62  else
63  update_whalo = domain%whalo
64  end if
65  if(present(ehalo)) then
66  update_ehalo = ehalo
67  if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
 68  "optional argument ehalo should not be larger than the ehalo used to define the domain.")
69  else
70  update_ehalo = domain%ehalo
71  end if
72  if(present(shalo)) then
73  update_shalo = shalo
74  if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
 75  "optional argument shalo should not be larger than the shalo used to define the domain.")
76  else
77  update_shalo = domain%shalo
78  end if
79  if(present(nhalo)) then
80  update_nhalo = nhalo
81  if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE: "// &
 82  "optional argument nhalo should not be larger than the nhalo used to define the domain.")
83  else
84  update_nhalo = domain%nhalo
85  end if
86  update_position = CENTER
 87  !--- when there is a NINETY or MINUS_NINETY rotation for some contact, the scalar data can not be on E or N-cells.
88  if(present(position)) then
89  update_position = position
90  if(domain%rotated_ninety .AND. ( position == EAST .OR. position == NORTH ) ) &
 91  call mpp_error(FATAL, 'MPP_CREATE_GROUP_UPDATE_3D: when there is a NINETY or MINUS_NINETY rotation, ' // &
 92  'the scalar version of update_domains can not be used for data on E or N-cells' )
93  end if
94 
95  if( domain%max_ntile_pe > 1 ) then
 96  call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE: multiple tiles per processor are not supported')
97  endif
98 
99  update_flags = XUPDATE+YUPDATE
100  if(present(flags)) update_flags = flags
101 
102  group%nscalar = group%nscalar + 1
103  nscalar = group%nscalar
104  if( nscalar > MAX_DOMAIN_FIELDS)then
105  write( text,'(i2)' ) MAX_DOMAIN_FIELDS
106  call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
107  endif
108 
109  isize = size(field,1); jsize=size(field,2); ksize = size(field,3)
110 
111  group%addrs_s(nscalar) = LOC(field)
112  if( group%nscalar == 1 ) then
113  group%flags_s = update_flags
114  group%whalo_s = update_whalo
115  group%ehalo_s = update_ehalo
116  group%shalo_s = update_shalo
117  group%nhalo_s = update_nhalo
118  group%position = update_position
119  group%isize_s = isize
120  group%jsize_s = jsize
121  group%ksize_s = ksize
122  call mpp_get_memory_domain(domain, group%is_s, group%ie_s, group%js_s, group%je_s, position=position)
123 
124  update_edge_only = BTEST(update_flags, EDGEONLY)
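 !--- recv(1:8) indexes the eight halo sectors clockwise from east:
 !--- 1=E, 2=SE, 3=S, 4=SW, 5=W, 6=NW, 7=N, 8=NE. A corner sector is
 !--- received only when both of its adjacent edges are.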
125  recv(1) = BTEST(update_flags,EAST)
126  recv(3) = BTEST(update_flags,SOUTH)
127  recv(5) = BTEST(update_flags,WEST)
128  recv(7) = BTEST(update_flags,NORTH)
129  if( update_edge_only ) then
130  recv(2) = .false.
131  recv(4) = .false.
132  recv(6) = .false.
133  recv(8) = .false.
134  if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
135  recv(1) = .true.
136  recv(3) = .true.
137  recv(5) = .true.
138  recv(7) = .true.
139  endif
140  else
141  recv(2) = recv(1) .AND. recv(3)
142  recv(4) = recv(3) .AND. recv(5)
143  recv(6) = recv(5) .AND. recv(7)
144  recv(8) = recv(7) .AND. recv(1)
145  endif
146  group%recv_s = recv
147  else
148  set_mismatch = .false.
149  set_mismatch = set_mismatch .OR. (group%flags_s .NE. update_flags)
150  set_mismatch = set_mismatch .OR. (group%whalo_s .NE. update_whalo)
151  set_mismatch = set_mismatch .OR. (group%ehalo_s .NE. update_ehalo)
152  set_mismatch = set_mismatch .OR. (group%shalo_s .NE. update_shalo)
153  set_mismatch = set_mismatch .OR. (group%nhalo_s .NE. update_nhalo)
154  set_mismatch = set_mismatch .OR. (group%position .NE. update_position)
155  set_mismatch = set_mismatch .OR. (group%isize_s .NE. isize)
156  set_mismatch = set_mismatch .OR. (group%jsize_s .NE. jsize)
157  set_mismatch = set_mismatch .OR. (group%ksize_s .NE. ksize)
158 
159  if(set_mismatch)then
160  write( text,'(i2)' ) nscalar
161  call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_3D: Incompatible field at count '//text//' for group update.' )
162  endif
163  endif
164 
165  return
166 
167 end subroutine MPP_CREATE_GROUP_UPDATE_3D_
168 
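! A minimal usage sketch, with assumed caller-side names (not part of this
! file): several same-shaped fields are registered once, and the whole group
! is then exchanged with a single aggregated call. d_type is a dummy scalar
! of the fields' type/kind, used only to resolve the generic interface.
!
!   type(mpp_group_update_type) :: group
!   call mpp_create_group_update(group, t, domain)
!   call mpp_create_group_update(group, q, domain)
!   call mpp_do_group_update(group, domain, t(1,1,1))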
169 
170 subroutine MPP_CREATE_GROUP_UPDATE_4D_(group, field, domain, flags, position, &
171  whalo, ehalo, shalo, nhalo)
172  type(mpp_group_update_type), intent(inout) :: group
173  MPP_TYPE_, intent(inout) :: field(:,:,:,:)
174  type(domain2D), intent(inout) :: domain
175  integer, intent(in), optional :: flags
176  integer, intent(in), optional :: position
177  integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
178 
179  MPP_TYPE_ :: field3D(size(field,1),size(field,2),size(field,3)*size(field,4))
180  pointer( ptr, field3D )
181  ptr = LOC(field)
182 
183  call mpp_create_group_update(group, field3D, domain, flags, position, whalo, ehalo, shalo, nhalo)
184 
185  return
186 
187 end subroutine MPP_CREATE_GROUP_UPDATE_4D_
188 
189 subroutine MPP_CREATE_GROUP_UPDATE_2D_V_( group, fieldx, fieldy, domain, flags, gridtype, &
190  whalo, ehalo, shalo, nhalo)
191 
192  type(mpp_group_update_type), intent(inout) :: group
193  MPP_TYPE_, intent(inout) :: fieldx(:,:), fieldy(:,:)
194  type(domain2D), intent(inout) :: domain
195  integer, intent(in), optional :: flags, gridtype
196  integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
197  MPP_TYPE_ :: field3Dx(size(fieldx,1),size(fieldx,2),1)
198  MPP_TYPE_ :: field3Dy(size(fieldy,1),size(fieldy,2),1)
199  pointer( ptrx, field3Dx )
200  pointer( ptry, field3Dy )
201  ptrx = LOC(fieldx)
202  ptry = LOC(fieldy)
203 
204  call mpp_create_group_update(group, field3Dx, field3Dy, domain, flags, gridtype, &
205  whalo, ehalo, shalo, nhalo)
206 
207  return
208 
209 end subroutine MPP_CREATE_GROUP_UPDATE_2D_V_
210 
211 
212 
213 subroutine MPP_CREATE_GROUP_UPDATE_3D_V_( group, fieldx, fieldy, domain, flags, gridtype, &
214  whalo, ehalo, shalo, nhalo)
215  type(mpp_group_update_type), intent(inout) :: group
216  MPP_TYPE_, intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
217  type(domain2D), intent(inout) :: domain
218  integer, intent(in), optional :: flags, gridtype
219  integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
220 
221  integer :: update_whalo, update_ehalo, update_shalo, update_nhalo
222  integer :: update_flags, isize_x, jsize_x, ksize_x, isize_y, jsize_y, ksize_y
223  integer :: nvector, update_gridtype, position_x, position_y
224  character(len=3) :: text
225  logical :: set_mismatch, update_edge_only
226  logical :: recv(8)
227 
228 
229  if(group%initialized) then
230  call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: group is already initialized")
231  endif
232 
233  if(present(whalo)) then
234  update_whalo = whalo
235  if(abs(update_whalo) > domain%whalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
 236  "optional argument whalo should not be larger than the whalo used to define the domain.")
237  else
238  update_whalo = domain%whalo
239  end if
240  if(present(ehalo)) then
241  update_ehalo = ehalo
242  if(abs(update_ehalo) > domain%ehalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
 243  "optional argument ehalo should not be larger than the ehalo used to define the domain.")
244  else
245  update_ehalo = domain%ehalo
246  end if
247  if(present(shalo)) then
248  update_shalo = shalo
249  if(abs(update_shalo) > domain%shalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
 250  "optional argument shalo should not be larger than the shalo used to define the domain.")
251  else
252  update_shalo = domain%shalo
253  end if
254  if(present(nhalo)) then
255  update_nhalo = nhalo
256  if(abs(update_nhalo) > domain%nhalo ) call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: "// &
 257  "optional argument nhalo should not be larger than the nhalo used to define the domain.")
258  else
259  update_nhalo = domain%nhalo
260  end if
261 
262  update_gridtype = AGRID
263  if(PRESENT(gridtype)) update_gridtype = gridtype
264 
265  if( domain%max_ntile_pe > 1 ) then
 266  call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: multiple tiles per processor are not supported')
267  endif
268 
269  update_flags = XUPDATE+YUPDATE !default
270  if( PRESENT(flags) )update_flags = flags
271  ! The following test is so that SCALAR_PAIR can be used alone with the
272  ! same default update pattern as without.
273  if (BTEST(update_flags,SCALAR_BIT)) then
274  if (.NOT.(BTEST(update_flags,WEST) .OR. BTEST(update_flags,EAST) &
275  .OR. BTEST(update_flags,NORTH) .OR. BTEST(update_flags,SOUTH))) &
276  update_flags = update_flags + XUPDATE+YUPDATE !default with SCALAR_PAIR
277  end if
278 
279  group%nvector = group%nvector + 1
280  nvector = group%nvector
281  if( nvector > MAX_DOMAIN_FIELDS)then
282  write( text,'(i2)' ) MAX_DOMAIN_FIELDS
283  call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: MAX_DOMAIN_FIELDS='//text//' exceeded for group update.' )
284  endif
285 
286  isize_x = size(fieldx,1); jsize_x = size(fieldx,2); ksize_x = size(fieldx,3)
287  isize_y = size(fieldy,1); jsize_y = size(fieldy,2); ksize_y = size(fieldy,3)
288 
289  if(ksize_x .NE. ksize_y) call mpp_error(FATAL, &
290  'MPP_CREATE_GROUP_UPDATE_V: mismatch of ksize between fieldx and fieldy')
291 
292  group%addrs_x(nvector) = LOC(fieldx)
293  group%addrs_y(nvector) = LOC(fieldy)
294 
295  if( group%nvector == 1 ) then
296  group%flags_v = update_flags
297  group%whalo_v = update_whalo
298  group%ehalo_v = update_ehalo
299  group%shalo_v = update_shalo
300  group%nhalo_v = update_nhalo
301  group%gridtype = update_gridtype
302  group%isize_x = isize_x
303  group%jsize_x = jsize_x
304  group%isize_y = isize_y
305  group%jsize_y = jsize_y
306  group%ksize_v = ksize_x
307  update_edge_only = BTEST(update_flags, EDGEONLY)
308  group%nonsym_edge = .false.
309 
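 !--- recv(1:8) uses the same eight-sector encoding as the scalar case:
 !--- 1=E, 2=SE, 3=S, 4=SW, 5=W, 6=NW, 7=N, 8=NE.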
310  recv(1) = BTEST(update_flags,EAST)
311  recv(3) = BTEST(update_flags,SOUTH)
312  recv(5) = BTEST(update_flags,WEST)
313  recv(7) = BTEST(update_flags,NORTH)
314  if( update_edge_only ) then
315  recv(2) = .false.
316  recv(4) = .false.
317  recv(6) = .false.
318  recv(8) = .false.
319  if( .NOT. (recv(1) .OR. recv(3) .OR. recv(5) .OR. recv(7)) ) then
320  recv(1) = .true.
321  recv(3) = .true.
322  recv(5) = .true.
323  recv(7) = .true.
324  endif
325  else
326  recv(2) = recv(1) .AND. recv(3)
327  recv(4) = recv(3) .AND. recv(5)
328  recv(6) = recv(5) .AND. recv(7)
329  recv(8) = recv(7) .AND. recv(1)
330  endif
331  group%recv_x = recv
332  group%recv_y = recv
333 
 334  !--- NONSYMEDGE applies only to non-symmetric domains with CGRID/DGRID staggering
335  if( .not. domain%symmetry .and. (update_gridtype==CGRID_NE .OR. update_gridtype==DGRID_NE)) then
336  group%nonsym_edge = BTEST(update_flags, NONSYMEDGE)
337  endif
338  if( group%nonsym_edge ) then
339  group%recv_x(2:8:2) = .false.
340  group%recv_y(2:8:2) = .false.
341  if(update_gridtype==CGRID_NE) then
342  group%recv_x(3) = .false.
343  group%recv_x(7) = .false.
344  group%recv_y(1) = .false.
345  group%recv_y(5) = .false.
346  else if(update_gridtype==DGRID_NE) then
347  group%recv_x(1) = .false.
348  group%recv_x(5) = .false.
349  group%recv_y(3) = .false.
350  group%recv_y(7) = .false.
351  endif
352  endif
353 
354  select case(group%gridtype)
355  case (AGRID)
356  position_x = CENTER
357  position_y = CENTER
358  case (BGRID_NE, BGRID_SW)
359  position_x = CORNER
360  position_y = CORNER
361  case (CGRID_NE, CGRID_SW)
362  position_x = EAST
363  position_y = NORTH
364  case (DGRID_NE, DGRID_SW)
365  position_x = NORTH
366  position_y = EAST
367  case default
 368  call mpp_error(FATAL, "MPP_CREATE_GROUP_UPDATE_V: invalid value of gridtype")
369  end select
370 
371  call mpp_get_memory_domain(domain, group%is_x, group%ie_x, group%js_x, group%je_x, position=position_x)
372  call mpp_get_memory_domain(domain, group%is_y, group%ie_y, group%js_y, group%je_y, position=position_y)
373  else
374  set_mismatch = .false.
375  set_mismatch = set_mismatch .OR. (group%flags_v .NE. update_flags)
376  set_mismatch = set_mismatch .OR. (group%whalo_v .NE. update_whalo)
377  set_mismatch = set_mismatch .OR. (group%ehalo_v .NE. update_ehalo)
378  set_mismatch = set_mismatch .OR. (group%shalo_v .NE. update_shalo)
379  set_mismatch = set_mismatch .OR. (group%nhalo_v .NE. update_nhalo)
380  set_mismatch = set_mismatch .OR. (group%gridtype .NE. update_gridtype)
381  set_mismatch = set_mismatch .OR. (group%isize_x .NE. isize_x)
382  set_mismatch = set_mismatch .OR. (group%jsize_x .NE. jsize_x)
383  set_mismatch = set_mismatch .OR. (group%isize_y .NE. isize_y)
384  set_mismatch = set_mismatch .OR. (group%jsize_y .NE. jsize_y)
385  set_mismatch = set_mismatch .OR. (group%ksize_v .NE. ksize_x)
386 
387  if(set_mismatch)then
388  write( text,'(i2)' ) nvector
389  call mpp_error(FATAL,'MPP_CREATE_GROUP_UPDATE_V: Incompatible field at count '//text//' for group update.' )
390  endif
391  endif
392 
393  return
394 
395 end subroutine MPP_CREATE_GROUP_UPDATE_3D_V_
396 
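! Vector fields are registered in x/y pairs so the exchange can apply the
! sign flips and component handling needed on folded or rotated boundaries.
! A sketch with assumed caller-side names:
!
!   call mpp_create_group_update(group, u, v, domain, gridtype=DGRID_NE)
!   call mpp_do_group_update(group, domain, u(1,1,1))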
397 subroutine MPP_CREATE_GROUP_UPDATE_4D_V_( group, fieldx, fieldy, domain, flags, gridtype, &
398  whalo, ehalo, shalo, nhalo)
399 
400  type(mpp_group_update_type), intent(inout) :: group
401  MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
402  type(domain2D), intent(inout) :: domain
403  integer, intent(in), optional :: flags, gridtype
404  integer, intent(in), optional :: whalo, ehalo, shalo, nhalo
405  MPP_TYPE_ :: field3Dx(size(fieldx,1),size(fieldx,2),size(fieldx,3)*size(fieldx,4))
406  MPP_TYPE_ :: field3Dy(size(fieldy,1),size(fieldy,2),size(fieldy,3)*size(fieldy,4))
407  pointer( ptrx, field3Dx )
408  pointer( ptry, field3Dy )
409  ptrx = LOC(fieldx)
410  ptry = LOC(fieldy)
411 
412  call mpp_create_group_update(group, field3Dx, field3Dy, domain, flags, gridtype, &
413  whalo, ehalo, shalo, nhalo)
414 
415  return
416 
417 end subroutine MPP_CREATE_GROUP_UPDATE_4D_V_
418 
419 
420 subroutine MPP_DO_GROUP_UPDATE_(group, domain, d_type)
421  type(mpp_group_update_type), intent(inout) :: group
422  type(domain2D), intent(inout) :: domain
423  MPP_TYPE_, intent(in) :: d_type
424 
425  integer :: nscalar, nvector, nlist
426  logical :: recv_y(8)
427  integer :: nsend, nrecv, flags_v
428  integer :: msgsize
429  integer :: from_pe, to_pe, buffer_pos, pos
430  integer :: ksize, is, ie, js, je
431  integer :: n, l, m, i, j, k, buffer_start_pos, nk
432  integer :: shift, gridtype, midpoint
433  integer :: npack, nunpack, rotation, isd
434  character(len=8) :: text
435 
437  MPP_TYPE_ :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
438  MPP_TYPE_ :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
439  MPP_TYPE_ :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
440  pointer(ptr, buffer )
441  pointer(ptr_field, field)
442  pointer(ptr_fieldx, fieldx)
443  pointer(ptr_fieldy, fieldy)
444 
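 !--- Order of operations: post all receives first, pack the send buffers,
 !--- send, wait on the receives, unpack, then patch the folded-north
 !--- boundary. All fields in the group share one aggregated message per
 !--- neighboring PE, which is what makes the grouped update cheaper than
 !--- per-field halo updates.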
445  nscalar = group%nscalar
446  nvector = group%nvector
447  nlist = size(domain%list(:))
448  gridtype = group%gridtype
449 
450  !--- ksize_s must equal ksize_v
451  if(nvector > 0 .AND. nscalar > 0) then
452  if(group%ksize_s .NE. group%ksize_v) then
453  call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: ksize_s and ksize_v are not equal")
454  endif
455  ksize = group%ksize_s
456  else if (nscalar > 0) then
457  ksize = group%ksize_s
458  else if (nvector > 0) then
459  ksize = group%ksize_v
460  else
 461  call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: nscalar and nvector are both 0")
462  endif
463  if(nvector > 0) recv_y = group%recv_y
464 
465  ptr = LOC(mpp_domains_stack)
466 
467  !--- set reset_index_s and reset_index_v to 0
468  group%reset_index_s = 0
469  group%reset_index_v = 0
470 
471  if(.not. group%initialized) call set_group_update(group,domain)
472 
473  nrecv = group%nrecv
474  nsend = group%nsend
475 
476  !---pre-post receive.
477  call mpp_clock_begin(group_recv_clock)
478  do m = 1, nrecv
479  msgsize = group%recv_size(m)
480  from_pe = group%from_pe(m)
481  if( msgsize .GT. 0 )then
482  buffer_pos = group%buffer_pos_recv(m)
483  call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
484  tag=COMM_TAG_1)
485  end if
486  end do
487 
488  !pack the data
489  call mpp_clock_end(group_recv_clock)
490 
491  flags_v = group%flags_v
492  npack = group%npack
493 
494  call mpp_clock_begin(group_pack_clock)
495  !pack the data
496  buffer_start_pos = 0
497 #include <group_update_pack.inc>
498  call mpp_clock_end(group_pack_clock)
499 
500  call mpp_clock_begin(group_send_clock)
501  do n = 1, nsend
502  msgsize = group%send_size(n)
503  if( msgsize .GT. 0 )then
504  buffer_pos = group%buffer_pos_send(n)
505  to_pe = group%to_pe(n)
506  call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1)
507  endif
508  enddo
509  call mpp_clock_end(group_send_clock)
510 
511  if(nrecv>0) then
512  call mpp_clock_begin(group_wait_clock)
513  call mpp_sync_self(check=EVENT_RECV)
514  call mpp_clock_end(group_wait_clock)
515  endif
516 
517  !---unpack the buffer
518  nunpack = group%nunpack
519  call mpp_clock_begin(group_unpk_clock)
520 #include <group_update_unpack.inc>
521  call mpp_clock_end(group_unpk_clock)
522 
523  ! ---northern boundary fold
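 !--- On a folded-north grid (e.g. a tripolar ocean grid) the fold row is
 !--- shared between the two halves of the domain: BGRID pole points are
 !--- zeroed and vector components received across the fold get sign fixes.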
524  shift = 0
525  if(domain%symmetry) shift = 1
526  if( nvector >0 .AND. BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags_v,SCALAR_BIT)) )then
527  j = domain%y(1)%global%end+shift
528  if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
529  !poles set to 0: BGRID only
530  if( gridtype.EQ.BGRID_NE )then
531  midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
532  j = domain%y(1)%global%end+shift
533  is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
534  if( .NOT. domain%symmetry ) is = is - 1
535  do i = is ,ie, midpoint
536  if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
537  do l=1,nvector
538  ptr_fieldx = group%addrs_x(l)
539  ptr_fieldy = group%addrs_y(l)
540  do k = 1,ksize
541  fieldx(i,j,k) = 0.
542  fieldy(i,j,k) = 0.
543  end do
544  end do
545  end if
546  end do
547  endif
 548  ! the following code block corrects an error where the halo data coming from
 549  ! the other half of the fold may have the wrong sign
550  !off west edge, when update north or west direction
551  j = domain%y(1)%global%end+shift
552  if ( recv_y(7) .OR. recv_y(5) ) then
553  select case(gridtype)
554  case(BGRID_NE)
555  if(domain%symmetry) then
556  is = domain%x(1)%global%begin
557  else
558  is = domain%x(1)%global%begin - 1
559  end if
560  if( is.GT.domain%x(1)%data%begin )then
561 
562  if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
563  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
564  do l=1,nvector
565  ptr_fieldx = group%addrs_x(l)
566  ptr_fieldy = group%addrs_y(l)
567  do k = 1,ksize
568  do i = domain%x(1)%data%begin,is-1
569  fieldx(i,j,k) = fieldx(2*is-i,j,k)
570  fieldy(i,j,k) = fieldy(2*is-i,j,k)
571  end do
572  end do
573  end do
574  end if
575  case(CGRID_NE)
576  is = domain%x(1)%global%begin
577  isd = domain%x(1)%compute%begin - group%whalo_v
578  if( is.GT.isd )then
579  if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
580  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
581  do l=1,nvector
582  ptr_fieldy = group%addrs_y(l)
583  do k = 1,ksize
584  do i = isd,is-1
585  fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
586  end do
587  end do
588  end do
589  end if
590  end select
591  end if
592  !off east edge
593  is = domain%x(1)%global%end
594  if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
595  ie = domain%x(1)%compute%end+group%ehalo_v
596  is = is + 1
597  select case(gridtype)
598  case(BGRID_NE)
599  is = is + shift
600  ie = ie + shift
601  do l=1,nvector
602  ptr_fieldx = group%addrs_x(l)
603  ptr_fieldy = group%addrs_y(l)
604  do k = 1,ksize
605  do i = is,ie
606  fieldx(i,j,k) = -fieldx(i,j,k)
607  fieldy(i,j,k) = -fieldy(i,j,k)
608  end do
609  end do
610  end do
611  case(CGRID_NE)
612  do l=1,nvector
613  ptr_fieldy = group%addrs_y(l)
614  do k = 1,ksize
615  do i = is, ie
616  fieldy(i,j,k) = -fieldy(i,j,k)
617  end do
618  end do
619  end do
620  end select
621  end if
622  end if
623  else if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) ) then
624  call mpp_error(FATAL, "MPP_DO_GROUP_UPDATE: this interface does not support folded_south, " // &
 625  "folded_west or folded_east; contact the developers")
626  endif
627 
628  if(nsend>0) then
629  call mpp_clock_begin(group_wait_clock)
630  call mpp_sync_self( )
631  call mpp_clock_end(group_wait_clock)
632  endif
633 
634 end subroutine MPP_DO_GROUP_UPDATE_
635 
636 
637 subroutine MPP_START_GROUP_UPDATE_(group, domain, d_type, reuse_buffer)
638  type(mpp_group_update_type), intent(inout) :: group
639  type(domain2D), intent(inout) :: domain
640  MPP_TYPE_, intent(in) :: d_type
641  logical, optional, intent(in) :: reuse_buffer
642 
643  integer :: nscalar, nvector
644  integer :: nsend, nrecv, flags_v
645  integer :: msgsize, npack, rotation
646  integer :: from_pe, to_pe, buffer_pos, pos
647  integer :: ksize, is, ie, js, je
648  integer :: n, l, m, i, j, k, buffer_start_pos, nk
649  logical :: reuse_buf_pos
650  character(len=8) :: text
651 
652  MPP_TYPE_ :: buffer(size(mpp_domains_stack_nonblock(:)))
653  MPP_TYPE_ :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
654  MPP_TYPE_ :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
655  MPP_TYPE_ :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
656  pointer( ptr, buffer )
657  pointer(ptr_field, field)
658  pointer(ptr_fieldx, fieldx)
659  pointer(ptr_fieldy, fieldy)
660 
661  nscalar = group%nscalar
662  nvector = group%nvector
663 
664  if(nscalar>0) then
665  ksize = group%ksize_s
666  else
667  ksize = group%ksize_v
668  endif
669 
670  !--- set reset_index_s and reset_index_v to 0
671  group%reset_index_s = 0
672  group%reset_index_v = 0
673 
674  reuse_buf_pos = .FALSE.
675  if (PRESENT(reuse_buffer)) reuse_buf_pos = reuse_buffer
676 
677  if (.not. group%initialized) then
678  call set_group_update(group,domain)
679  endif
680 
681  if (.not. reuse_buf_pos) then
682  group%buffer_start_pos = nonblock_group_buffer_pos
 683  nonblock_group_buffer_pos = nonblock_group_buffer_pos + group%tot_msgsize
 684  mpp_domains_stack_hwm = nonblock_group_buffer_pos + 1
 685  if( mpp_domains_stack_hwm .GT. mpp_domains_stack_size ) then
 686  write( text,'(i8)' )mpp_domains_stack_hwm
687  call mpp_error( FATAL, 'set_group_update: mpp_domains_stack overflow, '// &
688  'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
689  end if
690 
691  else if( group%buffer_start_pos < 0 ) then
692  call mpp_error(FATAL, "MPP_START_GROUP_UPDATE: group%buffer_start_pos is not set")
693  endif
694 
695  nrecv = group%nrecv
696  nsend = group%nsend
697 
698  ptr = LOC(mpp_domains_stack_nonblock)
699 
 700  ! Make sure we are not in the middle of an old-style non-blocking halo update.
701  if(num_update>0) call mpp_error(FATAL, "MPP_START_GROUP_UPDATE: can not be called in the middle of "// &
702  "mpp_start_update_domains/mpp_complete_update_domains call")
703 
 703  num_nonblock_group_update = num_nonblock_group_update + 1
 704 
 705 
706  !---pre-post receive.
707  call mpp_clock_begin(nonblock_group_recv_clock)
708  do m = 1, nrecv
709  msgsize = group%recv_size(m)
710  from_pe = group%from_pe(m)
711  if( msgsize .GT. 0 )then
712  buffer_pos = group%buffer_pos_recv(m) + group%buffer_start_pos
713  call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.false., &
714  tag=COMM_TAG_1, request=group%request_recv(m))
715 #ifdef use_libMPI
716  group%type_recv(m) = MPI_TYPE_
717 #endif
718  end if
719  end do
720  call mpp_clock_end(nonblock_group_recv_clock)
721 
722  flags_v = group%flags_v
723 
724  !pack the data
725  call mpp_clock_begin(nonblock_group_pack_clock)
726  npack = group%npack
727  buffer_start_pos = group%buffer_start_pos
728 #include <group_update_pack.inc>
729  call mpp_clock_end(nonblock_group_pack_clock)
730 
731  call mpp_clock_begin(nonblock_group_send_clock)
732  do n = 1, nsend
733  msgsize = group%send_size(n)
734  if( msgsize .GT. 0 )then
735  buffer_pos = group%buffer_pos_send(n) + group%buffer_start_pos
736  to_pe = group%to_pe(n)
737  call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_1, &
738  request=group%request_send(n))
739  endif
740  enddo
741  call mpp_clock_end(nonblock_group_send_clock)
742 
743 end subroutine MPP_START_GROUP_UPDATE_
744 
745 subroutine MPP_COMPLETE_GROUP_UPDATE_(group, domain, d_type)
746  type(mpp_group_update_type), intent(inout) :: group
747  type(domain2D), intent(inout) :: domain
748  MPP_TYPE_, intent(in) :: d_type
749 
750  integer :: nsend, nrecv, nscalar, nvector
751  integer :: k, buffer_pos, msgsize, pos, m, n, l
752  integer :: is, ie, js, je, dir, ksize, i, j
753  integer :: shift, gridtype, midpoint, flags_v
754  integer :: nunpack, rotation, buffer_start_pos, nk, isd
755  logical :: recv_y(8)
756  MPP_TYPE_ :: buffer(size(mpp_domains_stack_nonblock(:)))
757  MPP_TYPE_ :: field (group%is_s:group%ie_s,group%js_s:group%je_s, group%ksize_s)
758  MPP_TYPE_ :: fieldx(group%is_x:group%ie_x,group%js_x:group%je_x, group%ksize_v)
759  MPP_TYPE_ :: fieldy(group%is_y:group%ie_y,group%js_y:group%je_y, group%ksize_v)
760  pointer(ptr, buffer )
761  pointer(ptr_field, field)
762  pointer(ptr_fieldx, fieldx)
763  pointer(ptr_fieldy, fieldy)
764 
765  gridtype = group%gridtype
766  flags_v = group%flags_v
767  nscalar = group%nscalar
768  nvector = group%nvector
769  nrecv = group%nrecv
770  nsend = group%nsend
771  if(nscalar>0) then
772  ksize = group%ksize_s
773  else
774  ksize = group%ksize_v
775  endif
776  if(nvector > 0) recv_y = group%recv_y
777  ptr = LOC(mpp_domains_stack_nonblock)
778 
779  if(num_nonblock_group_update < 1) call mpp_error(FATAL, &
 780  'mpp_start_group_update must be called before calling mpp_complete_group_update')
 781  num_nonblock_group_update = num_nonblock_group_update - 1
 782 
 783 
784  if(nrecv>0) then
785  call mpp_clock_begin(nonblock_group_wait_clock)
786  call mpp_sync_self(check=EVENT_RECV, request=group%request_recv(1:nrecv), &
787  msg_size=group%recv_size(1:nrecv), msg_type=group%type_recv(1:nrecv))
788  call mpp_clock_end(nonblock_group_wait_clock)
789  endif
790 
791  !---unpack the buffer
792  nunpack = group%nunpack
793 
794  call mpp_clock_begin(nonblock_group_unpk_clock)
795  buffer_start_pos = group%buffer_start_pos
796 #include <group_update_unpack.inc>
797  call mpp_clock_end(nonblock_group_unpk_clock)
798 
799  ! ---northern boundary fold
800  shift = 0
801  if(domain%symmetry) shift = 1
802  if( nvector >0 .AND. BTEST(domain%fold,NORTH) .AND. (.NOT.BTEST(flags_v,SCALAR_BIT)) )then
803  j = domain%y(1)%global%end+shift
804  if( domain%y(1)%data%begin.LE.j .AND. j.LE.domain%y(1)%data%end+shift )then !fold is within domain
805  !poles set to 0: BGRID only
806  if( gridtype.EQ.BGRID_NE )then
807  midpoint = (domain%x(1)%global%begin+domain%x(1)%global%end-1+shift)/2
808  j = domain%y(1)%global%end+shift
809  is = domain%x(1)%global%begin; ie = domain%x(1)%global%end+shift
810  if( .NOT. domain%symmetry ) is = is - 1
811  do i = is ,ie, midpoint
812  if( domain%x(1)%data%begin.LE.i .AND. i.LE. domain%x(1)%data%end+shift )then
813  do l=1,nvector
814  ptr_fieldx = group%addrs_x(l)
815  ptr_fieldy = group%addrs_y(l)
816  do k = 1,ksize
817  fieldx(i,j,k) = 0.
818  fieldy(i,j,k) = 0.
819  end do
820  end do
821  end if
822  end do
823  endif
 824  ! the following code block corrects an error where the halo data coming from
 825  ! the other half of the fold may have the wrong sign
826  !off west edge, when update north or west direction
827  j = domain%y(1)%global%end+shift
828  if ( recv_y(7) .OR. recv_y(5) ) then
829  select case(gridtype)
830  case(BGRID_NE)
831  if(domain%symmetry) then
832  is = domain%x(1)%global%begin
833  else
834  is = domain%x(1)%global%begin - 1
835  end if
836  if( is.GT.domain%x(1)%data%begin )then
837 
838  if( 2*is-domain%x(1)%data%begin.GT.domain%x(1)%data%end+shift ) &
839  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north BGRID_NE west edge ubound error.' )
840  do l=1,nvector
841  ptr_fieldx = group%addrs_x(l)
842  ptr_fieldy = group%addrs_y(l)
843  do k = 1,ksize
844  do i = domain%x(1)%data%begin,is-1
845  fieldx(i,j,k) = fieldx(2*is-i,j,k)
846  fieldy(i,j,k) = fieldy(2*is-i,j,k)
847  end do
848  end do
849  end do
850  end if
851  case(CGRID_NE)
852  is = domain%x(1)%global%begin
853  isd = domain%x(1)%compute%begin - group%whalo_v
854  if( is.GT.isd)then
855  if( 2*is-domain%x(1)%data%begin-1.GT.domain%x(1)%data%end ) &
856  call mpp_error( FATAL, 'MPP_DO_UPDATE_V: folded-north CGRID_NE west edge ubound error.' )
857  do l=1,nvector
858  ptr_fieldy = group%addrs_y(l)
859  do k = 1,ksize
860  do i = isd,is-1
861  fieldy(i,j,k) = fieldy(2*is-i-1,j,k)
862  end do
863  end do
864  end do
865  end if
866  end select
867  end if
868  !off east edge
869  is = domain%x(1)%global%end
870  if(domain%x(1)%cyclic .AND. is.LT.domain%x(1)%data%end )then
871  ie = domain%x(1)%compute%end+group%ehalo_v
872  is = is + 1
873  select case(gridtype)
874  case(BGRID_NE)
875  is = is + shift
876  ie = ie + shift
877  do l=1,nvector
878  ptr_fieldx = group%addrs_x(l)
879  ptr_fieldy = group%addrs_y(l)
880  do k = 1,ksize
881  do i = is,ie
882  fieldx(i,j,k) = -fieldx(i,j,k)
883  fieldy(i,j,k) = -fieldy(i,j,k)
884  end do
885  end do
886  end do
887  case(CGRID_NE)
888  do l=1,nvector
889  ptr_fieldy = group%addrs_y(l)
890  do k = 1,ksize
891  do i = is, ie
892  fieldy(i,j,k) = -fieldy(i,j,k)
893  end do
894  end do
895  end do
896  end select
897  end if
898  end if
899  else if( BTEST(domain%fold,SOUTH) .OR. BTEST(domain%fold,WEST) .OR. BTEST(domain%fold,EAST) ) then
900  call mpp_error(FATAL, "MPP_COMPLETE_GROUP_UPDATE: this interface does not support folded_south, " // &
 901  "folded_west or folded_east; contact the developers")
902  endif
903 
904  if(nsend>0) then
905  call mpp_clock_begin(nonblock_group_wait_clock)
906  call mpp_sync_self(check=EVENT_SEND, request=group%request_send(1:nsend) )
907  call mpp_clock_end(nonblock_group_wait_clock)
908  endif
909 
910  if( num_nonblock_group_update == 0) then
 911  nonblock_group_buffer_pos = 0
 912  endif
913 
914 end subroutine MPP_COMPLETE_GROUP_UPDATE_
915 
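! Non-blocking pattern, sketched with assumed caller-side names: start the
! exchange, overlap computation on interior points, then complete before
! touching any halo point.
!
!   call mpp_start_group_update(group, domain, t(1,1,1))
!   ! ... compute on interior points only ...
!   call mpp_complete_group_update(group, domain, t(1,1,1))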
916 subroutine MPP_RESET_GROUP_UPDATE_FIELD_2D_(group, field)
917  type(mpp_group_update_type), intent(inout) :: group
918  MPP_TYPE_, intent(in) :: field(:,:)
919 
920  group%reset_index_s = group%reset_index_s + 1
921 
922  if(group%reset_index_s > group%nscalar) &
923  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_: group%reset_index_s > group%nscalar")
924  if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. group%ksize_s .NE. 1) &
925  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_: size of field does not match the size stored in group")
926 
927  group%addrs_s(group%reset_index_s) = LOC(field)
928 
929 end subroutine MPP_RESET_GROUP_UPDATE_FIELD_2D_
930 
931 subroutine MPP_RESET_GROUP_UPDATE_FIELD_3D_(group, field)
932  type(mpp_group_update_type), intent(inout) :: group
933  MPP_TYPE_, intent(in) :: field(:,:,:)
934 
935  group%reset_index_s = group%reset_index_s + 1
936 
937  if(group%reset_index_s > group%nscalar) &
938  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_: group%reset_index_s > group%nscalar")
939  if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. size(field,3) .NE. group%ksize_s) &
940  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_: size of field does not match the size stored in group")
941 
942  group%addrs_s(group%reset_index_s) = LOC(field)
943 
944 end subroutine MPP_RESET_GROUP_UPDATE_FIELD_3D_
945 
946 subroutine MPP_RESET_GROUP_UPDATE_FIELD_4D_(group, field)
947  type(mpp_group_update_type), intent(inout) :: group
948  MPP_TYPE_, intent(in) :: field(:,:,:,:)
949 
950  group%reset_index_s = group%reset_index_s + 1
951 
952  if(group%reset_index_s > group%nscalar) &
953  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_: group%reset_index_s > group%nscalar")
954  if(size(field,1) .NE. group%isize_s .OR. size(field,2) .NE. group%jsize_s .OR. &
955  size(field,3)*size(field,4) .NE. group%ksize_s) &
956  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_: size of field does not match the size stored in group")
957 
958  group%addrs_s(group%reset_index_s) = LOC(field)
959 
960 end subroutine MPP_RESET_GROUP_UPDATE_FIELD_4D_
961 
962 
963 subroutine MPP_RESET_GROUP_UPDATE_FIELD_2D_V_(group, fieldx, fieldy)
964  type(mpp_group_update_type), intent(inout) :: group
965  MPP_TYPE_, intent(in) :: fieldx(:,:), fieldy(:,:)
966  integer :: indx
967 
968  group%reset_index_v = group%reset_index_v + 1
969 
970  if(group%reset_index_v > group%nvector) &
971  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: group%reset_index_v > group%nvector")
972  if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. group%ksize_v .NE. 1) &
973  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: size of fieldx does not match the size stored in group")
974  if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y ) &
975  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_2D_V_: size of fieldy does not match the size stored in group")
976 
977  group%addrs_x(group%reset_index_v) = LOC(fieldx)
978  group%addrs_y(group%reset_index_v) = LOC(fieldy)
979 
980 end subroutine MPP_RESET_GROUP_UPDATE_FIELD_2D_V_
981 
982 
983 subroutine MPP_RESET_GROUP_UPDATE_FIELD_3D_V_(group, fieldx, fieldy)
984  type(mpp_group_update_type), intent(inout) :: group
985  MPP_TYPE_, intent(in) :: fieldx(:,:,:), fieldy(:,:,:)
986  integer :: indx
987 
988  group%reset_index_v = group%reset_index_v + 1
989 
990  if(group%reset_index_v > group%nvector) &
991  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: group%reset_index_v > group%nvector")
992  if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. size(fieldx,3) .NE. group%ksize_v) &
993  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: size of fieldx does not match the size stored in group")
994  if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y .OR. size(fieldy,3) .NE. group%ksize_v) &
995  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_3D_V_: size of fieldy does not match the size stored in group")
996 
997  group%addrs_x(group%reset_index_v) = LOC(fieldx)
998  group%addrs_y(group%reset_index_v) = LOC(fieldy)
999 
1000 end subroutine MPP_RESET_GROUP_UPDATE_FIELD_3D_V_
1001 
1002 
1003 subroutine MPP_RESET_GROUP_UPDATE_FIELD_4D_V_(group, fieldx, fieldy)
1004  type(mpp_group_update_type), intent(inout) :: group
1005  MPP_TYPE_, intent(in) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
1006  integer :: indx
1007 
1008  group%reset_index_v = group%reset_index_v + 1
1009 
1010  if(group%reset_index_v > group%nvector) &
1011  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: group%reset_index_v > group%nvector")
1012  if(size(fieldx,1) .NE. group%isize_x .OR. size(fieldx,2) .NE. group%jsize_x .OR. &
1013  size(fieldx,3)*size(fieldx,4) .NE. group%ksize_v) &
1014  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: size of fieldx does not match the size stored in group")
1015  if(size(fieldy,1) .NE. group%isize_y .OR. size(fieldy,2) .NE. group%jsize_y .OR. &
1016  size(fieldy,3)*size(fieldy,4) .NE. group%ksize_v) &
1017  call mpp_error(FATAL, "MPP_RESET_GROUP_UPDATE_FIELD_4D_V_: size of fieldy does not match the size stored in group")
1018 
1019  group%addrs_x(group%reset_index_v) = LOC(fieldx)
1020  group%addrs_y(group%reset_index_v) = LOC(fieldy)
1021 
1022 end subroutine MPP_RESET_GROUP_UPDATE_FIELD_4D_V_
1023 
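! If the group's fields move to new storage between calls (e.g. time-level
! swaps), the group can be re-pointed instead of rebuilt. Fields must be
! reset in the same order they were registered, since reset_index_s/_v
! simply advance through the stored address tables. Sketch with assumed
! caller-side names:
!
!   call mpp_reset_group_update_field(group, t_new)
!   call mpp_reset_group_update_field(group, q_new)
!   call mpp_start_group_update(group, domain, t_new(1,1,1))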
1024 
integer mpp_domains_stack_hwm
real(fp), parameter, public half
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version will
type(ext_fieldtype), dimension(:), pointer, save, private field
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz l_size integer
l_size ! loop over number of fields ke do je do i
integer nonblock_group_unpk_clock
integer group_pack_clock
character(len=1), parameter equal
integer, private je
Definition: fms_io.F90:494
integer group_unpk_clock
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz ksize
integer, parameter recv
integer nonblock_group_send_clock
integer, save, private nk
Definition: oda_core.F90:126
subroutine, public copy(self, rhs)
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT end
integer, parameter set
character(len=256) text
Definition: mpp_io.F90:1051
integer nonblock_group_pack_clock
integer(long), parameter true
integer group_send_clock
integer(long), parameter false
from from_pe
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible MPP_TYPE_
l_size ! loop over number of fields ke do j
integer, parameter, public west
l_size ! loop over number of fields ke do je do ie to je n if(.NOT. d_comm%R_do_buf(list)) cycle from_pe
l_size ! loop over number of fields ke do je do ie to to_pe
integer nonblock_group_buffer_pos
integer, parameter m
character(len=128) version
l_size ! loop over number of fields ke do je do ie to is
type
Definition: c2f.py:15
integer, parameter, public global
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_GLOBAL_FIELD_2D_(domain, local, global, flags, position, tile_count, default_data) type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::local(:,:) MPP_TYPE_, intent(out) ::global(:,:) integer, intent(in), optional ::flags integer, intent(in), optional ::position integer, intent(in), optional ::tile_count MPP_TYPE_, intent(in), optional ::default_data MPP_TYPE_ ::local3D(size(local, 1), size(local, 2), 1) MPP_TYPE_ ::global3D(size(global, 1), size(global, 2), 1) pointer(lptr, local3D) pointer(gptr, global3D) lptr=LOC(local) gptr=LOC(global) call mpp_global_field(domain, local3D, global3D, flags, position, tile_count, default_data) end subroutine MPP_GLOBAL_FIELD_2D_ subroutine MPP_GLOBAL_FIELD_3D_(domain, local, global, flags, position, tile_count, default_data)!get a global field from a local field!local field may be on compute OR data domain type(domain2D), intent(in) ::domain MPP_TYPE_, intent(in) ::local(:,:,:) MPP_TYPE_, intent(out) ::global(:,:,:) integer, intent(in), optional ::flags integer, intent(in), optional ::position integer, intent(in), optional ::tile_count MPP_TYPE_, intent(in), optional ::default_data integer ::ishift, jshift integer ::tile integer ::isize, jsize tile=1;if(PRESENT(tile_count)) tile=tile_count call mpp_get_domain_shift(domain, ishift, jshift, position) ! The alltoallw method requires that local and global be contiguous. ! We presume that `local` is contiguous if it matches the data domain;! `global` is presumed to always be contiguous. ! Ideally we would use the F2015 function IS_CONTIGUOUS() to validate ! contiguity, but it is not yet suppored in many compilers. ! Also worth noting that many of the nD-> conversion also assumes so there many be other issues here isize
integer nonblock_group_wait_clock
integer, private ie
Definition: fms_io.F90:494
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call mpp_error(FATAL, 'MPP_TRANSMIT:you cannot transmit to ANY_PE using MPI.') else if(to_pe.NE.NULL_PE) then !no other valid cases except NULL_PE call mpp_error(FATAL
integer :: error                                    ! Definition: mpp.F90:1310
subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz)
  integer,         intent(in)    :: unit
  type(fieldtype), intent(in)    :: field
  integer,         intent(in)    :: nwords
  MPP_TYPE_,       intent(inout) :: data(nwords)
  integer,         intent(in)    :: start(:), axsiz(:)

  integer(SHORT_KIND) :: i2vals(nwords)
!rab used in conjunction with transfer intrinsic to determine size of a variable
  integer(KIND=1) :: one_byte(8)
  integer :: word_sz
!#ifdef __sgi
  integer(INT_KIND) :: ivals(nwords)
  real(FLOAT_KIND)  :: rvals(nwords)
!#else
!  integer :: ivals(nwords)
!  real    :: rvals(nwords)
!#endif
  real(DOUBLE_KIND) :: r8vals(nwords)
  pointer( ptr1, i2vals )
  pointer( ptr2, ivals )
  pointer( ptr3, rvals )
  pointer( ptr4, r8vals )

  if( mpp_io_stack_size < nwords )call mpp_io_set_stack_size(nwords)
  call mpp_error( FATAL, 'MPP_READ currently requires use_netCDF option' )
end subroutine READ_RECORD_CORE_

subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, &
                        tile_count, start_in, axsiz_in)
!routine that is finally called by all mpp_read routines to perform the read
!a non-netCDF record contains:
!      field ID
!      a set of 4 coordinates (is:ie,js:je) giving the data subdomain
!      a timelevel and a timestamp (=NULLTIME if field is static)
!      3D real data (stored as 1D)
!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above
!in a global direct access file, record position on PE is given by %record.
!Treatment of timestamp:
!   We assume that static fields have been passed without a timestamp.
!   Here that is converted into a timestamp of NULLTIME.
!   For non-netCDF fields, field is treated no differently, but is written
!   with a timestamp of NULLTIME. There is no check in the code to prevent
!   the user from repeatedly writing a static field.
  integer,         intent(in)           :: unit, nwords
  type(fieldtype), intent(in)           :: field
  MPP_TYPE_,       intent(inout)        :: data(nwords)
  integer,         intent(in), optional :: time_level
  type(domain2D),  intent(in), optional :: domain
  integer,         intent(in), optional :: position, tile_count
  integer,         intent(in), optional :: start_in(:), axsiz_in(:)

  integer, dimension(size(field%axes(:))) :: start, axsiz
  integer :: tlevel !, subdomain(4)
  integer :: i, error, is, ie, js, je, isg, ieg, jsg, jeg
  type(domain2d), pointer :: io_domain => NULL()

  if( PRESENT(start_in) .AND. PRESENT(axsiz_in) )then
      ! ... (listing truncated in source; the data domain and compute domain
      !      must refer to the subdomain being passed) ...
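! A minimal sketch of building the start/axsiz vectors that READ_RECORD_
! uses to address an (x,y,z,t) hyperslab: offsets are relative to the global
! domain, one time record at a time. The routine name and the nz/tlevel
! arguments are illustrative assumptions, not FMS API.
subroutine index_vectors_example(domain, nz, tlevel, start, axsiz)
  use mpp_domains_mod, only : domain2D, mpp_get_compute_domain, mpp_get_global_domain
  type(domain2D), intent(in)  :: domain
  integer,        intent(in)  :: nz, tlevel
  integer,        intent(out) :: start(4), axsiz(4)
  integer :: is, ie, js, je, isg, ieg, jsg, jeg

  call mpp_get_compute_domain(domain, is, ie, js, je)
  call mpp_get_global_domain (domain, isg, ieg, jsg, jeg)
  start(1) = is - isg + 1;  axsiz(1) = ie - is + 1   ! x offset and extent
  start(2) = js - jsg + 1;  axsiz(2) = je - js + 1   ! y offset and extent
  start(3) = 1;             axsiz(3) = nz            ! all vertical levels
  start(4) = tlevel;        axsiz(4) = 1             ! a single time record
end subroutine index_vectors_example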
integer :: group_wait_clock
integer, parameter, public :: east
integer :: mpp_domains_stack_size
integer :: group_recv_clock
logical function received(this, seqno)
logical :: complete_group_update_on
subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &
                                    flags, complete, position, extra_halo, name, tile_count)
  MPP_TYPE_,              intent(in)    :: field(:,:)
  type(nest_domain_type), intent(inout) :: nest_domain
  MPP_TYPE_,              intent(inout) :: wbuffer(:,:)
  MPP_TYPE_,              intent(inout) :: ebuffer(:,:)
  MPP_TYPE_,              intent(inout) :: sbuffer(:,:)
  MPP_TYPE_,              intent(inout) :: nbuffer(:,:)
  integer,          intent(in), optional :: flags
  logical,          intent(in), optional :: complete
  integer,          intent(in), optional :: position
  integer,          intent(in), optional :: extra_halo
  character(len=*), intent(in), optional :: name
  integer,          intent(in), optional :: tile_count

  MPP_TYPE_ :: field3D  (size(field,1),  size(field,2),  1)
  MPP_TYPE_ :: wbuffer3D(size(wbuffer,1),size(wbuffer,2),1)
  MPP_TYPE_ :: ebuffer3D(size(ebuffer,1),size(ebuffer,2),1)
  MPP_TYPE_ :: sbuffer3D(size(sbuffer,1),size(sbuffer,2),1)
  MPP_TYPE_ :: nbuffer3D(size(nbuffer,1),size(nbuffer,2),1)
  pointer( ptr,   field3D )
  pointer( ptr_w, wbuffer3D )
  pointer( ptr_e, ebuffer3D )
  pointer( ptr_s, sbuffer3D )
  pointer( ptr_n, nbuffer3D )
  ptr   = LOC(field)
  ptr_w = LOC(wbuffer)
  ptr_e = LOC(ebuffer)
  ptr_s = LOC(sbuffer)
  ptr_n = LOC(nbuffer)

  call mpp_update_nest_fine( field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &
                             flags, complete, position, extra_halo, name, tile_count )
  return
end subroutine MPP_UPDATE_NEST_FINE_2D_

subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &
                                    flags, complete, position, extra_halo, name, tile_count)
  MPP_TYPE_,              intent(in)    :: field(:,:,:)
  type(nest_domain_type), intent(inout) :: nest_domain
  MPP_TYPE_,              intent(inout) :: wbuffer(:,:,:)
  MPP_TYPE_,              intent(inout) :: ebuffer(:,:,:)
  MPP_TYPE_,              intent(inout) :: sbuffer(:,:,:)
  MPP_TYPE_,              intent(inout) :: nbuffer(:,:,:)
  integer,          intent(in), optional :: flags
  logical,          intent(in), optional :: complete
  integer,          intent(in), optional :: position
  integer,          intent(in), optional :: extra_halo
  character(len=*), intent(in), optional :: name
  integer,          intent(in), optional :: tile_count

  MPP_TYPE_ :: d_type
  type(nestSpec), pointer :: update => NULL()
  integer :: tile
  ! ... (remaining declarations and body truncated in source) ...
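! A minimal usage sketch of the nest update above: allocate the four
! directional halo buffers on the fine grid and receive coarse-grid data
! into them. The buffer extents here are placeholder arguments; in practice
! they come from the nest_domain bookkeeping. Keyword arguments are used so
! the sketch does not depend on the positional buffer order.
subroutine nest_fine_example(field, nest_domain, nxy, nz)
  use mpp_domains_mod, only : nest_domain_type, mpp_update_nest_fine
  real,                   intent(in)    :: field(:,:,:)
  type(nest_domain_type), intent(inout) :: nest_domain
  integer,                intent(in)    :: nxy(4,2), nz   ! (w,e,s,n) x/y extents
  real, allocatable :: wbuf(:,:,:), ebuf(:,:,:), sbuf(:,:,:), nbuf(:,:,:)

  allocate( wbuf(nxy(1,1), nxy(1,2), nz), ebuf(nxy(2,1), nxy(2,2), nz) )
  allocate( sbuf(nxy(3,1), nxy(3,2), nz), nbuf(nxy(4,1), nxy(4,2), nz) )
  call mpp_update_nest_fine( field, nest_domain, wbuffer=wbuf, sbuffer=sbuf, &
                             ebuffer=ebuf, nbuffer=nbuf )
end subroutine nest_fine_example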
integer, dimension(:), allocatable :: request_recv  ! Definition: mpp.F90:1320
integer, parameter, public :: north
integer, parameter, public :: cyclic
integer, dimension(:), allocatable :: request_send  ! Definition: mpp.F90:1319
integer, private :: isd                             ! Definition: fms_io.F90:495
integer :: nonblock_group_recv_clock
real :: per  ! Longitude of perihelion with respect to autumnal equinox in NH [degrees]. Definition: astronomy.F90:345
subroutine, public some(xmap, some_arr, grid_id)    ! Definition: xgrid.F90:3421
integer :: num_nonblock_group_update
integer, dimension(:), allocatable :: type_recv     ! Definition: mpp.F90:1322