FV3 Bundle
test_mpp_io.F90
!***********************************************************************
!* GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
#ifdef test_mpp_io
program test
#include <fms_platform.h>

 use mpp_mod,         only : mpp_init, mpp_pe, mpp_npes, mpp_root_pe, mpp_error, mpp_sync_self
 use mpp_mod,         only : fatal, note, mpp_chksum, mpp_debug, mpp_set_stack_size, mpp_clock_sync
 use mpp_mod,         only : mpp_sync, mpp_exit, mpp_clock_begin, mpp_clock_end, mpp_clock_id
 use mpp_domains_mod, only : mpp_define_domains, mpp_domains_set_stack_size, domain1d, mpp_get_global_domain
 use mpp_domains_mod, only : domain2d, mpp_define_layout, mpp_get_domain_components, mpp_define_mosaic
 use mpp_domains_mod, only : mpp_get_memory_domain, mpp_get_compute_domain, mpp_domains_exit
 use mpp_domains_mod, only : center, east, north, corner, mpp_get_data_domain
 use mpp_domains_mod, only : mpp_define_io_domain, mpp_deallocate_domain
 use mpp_io_mod,      only : mpp_io_init, mpp_write_meta, axistype, fieldtype, atttype
 use mpp_io_mod,      only : mpp_rdonly, mpp_open, mpp_overwr, mpp_ascii, mpp_single
 use mpp_io_mod,      only : mpp_netcdf, mpp_multi, mpp_get_atts, mpp_write, mpp_close
 use mpp_io_mod,      only : mpp_get_info, mpp_get_axes, mpp_get_fields, mpp_get_times
 use mpp_io_mod,      only : mpp_read, mpp_io_exit, mpp_append

#ifdef INTERNAL_FILE_NML
 USE mpp_mod, ONLY: input_nml_file
#endif

 implicit none

#ifdef use_netCDF
#include <netcdf.inc>
#endif

 !--- namelist definition
 integer :: nx=128, ny=128, nz=40, nt=2
 integer :: halo=2, stackmax=1500000, stackmaxd=500000
 logical :: debug=.false.
 character(len=64) :: file='test', iospec='-F cachea'
 integer :: layout(2) = (/0,0/)
 integer :: ntiles_x=1, ntiles_y=1   ! The total number of tiles is ntiles_x*ntiles_y and the
                                     ! grid size of each tile is (nx/ntiles_x, ny/ntiles_y).
                                     ! Set ntiles > 1 to test the efficiency of mpp_io.
 integer :: io_layout(2) = (/0,0/)   ! Set io_layout to divide each tile into io_layout(1)*io_layout(2)
                                     ! groups and write out data from the root pe of each group.
 integer :: pack_size = 1

 namelist / test_mpp_io_nml / nx, ny, nz, nt, halo, stackmax, stackmaxd, debug, file, iospec, &
                              ntiles_x, ntiles_y, layout, io_layout
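 ! An illustrative input.nml entry for this test (the values shown are the compiled-in
 ! defaults; any test_mpp_io_nml variable may be overridden the same way):
 !
 !   &test_mpp_io_nml
 !      nx = 128, ny = 128, nz = 40, nt = 2
 !      ntiles_x = 1, ntiles_y = 1
 !      layout = 0, 0      ! for the mosaic tests layout(1)*layout(2) must equal npes
 !      io_layout = 0, 0
 !   /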

 integer :: pe, npes, io_status
 type(domain2D) :: domain

 integer :: tks_per_sec
 integer :: i,j,k, unit=7
 integer :: id_single_tile_mult_file
 integer :: id_mult_tile, id_single_tile_with_group, id_mult_tile_with_group
 logical :: opened
 character(len=64) :: varname

 real :: time
 type(axistype) :: x, y, z, t
 type(fieldtype) :: f
 type(domain1D) :: xdom, ydom
 integer(LONG_KIND) :: rchk, chk
 real(DOUBLE_KIND) :: doubledata = 0.0
 real :: realarray(4)

 call mpp_init()
 pe = mpp_pe()
 npes = mpp_npes()

#ifdef INTERNAL_FILE_NML
 read (input_nml_file, test_mpp_io_nml, iostat=io_status)
#else
 do
    inquire( unit=unit, opened=opened )
    if( .NOT.opened )exit
    unit = unit + 1
    if( unit.EQ.100 )call mpp_error( fatal, 'Unable to locate unit number.' )
 end do
 open( unit=unit, file='input.nml', iostat=io_status)
 read( unit,test_mpp_io_nml, iostat=io_status )
 close(unit)
#endif

 if (io_status > 0) then
    call mpp_error(fatal,'=>test_mpp_io: Error reading input.nml')
 endif


 call system_clock( count_rate=tks_per_sec )
 if( debug )then
    call mpp_io_init(mpp_debug)
 else
    call mpp_io_init()
 end if
 call mpp_set_stack_size(stackmax)
 call mpp_domains_set_stack_size(stackmaxd)

 if( pe.EQ.mpp_root_pe() )then
    print '(a,6i6)', 'npes, nx, ny, nz, nt, halo=', npes, nx, ny, nz, nt, halo
    print *, 'Using NEW domaintypes and calls...'
 end if

 write( file,'(a,i3.3)' )trim(file), npes

! determine the pack_size
 pack_size = size(transfer(doubledata, realarray))
 if( pack_size .NE. 1 .AND. pack_size .NE. 2) call mpp_error(fatal,'test_mpp_io: pack_size should be 1 or 2')
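 ! Illustrative arithmetic: transfer() reports how many default reals are needed to hold one
 ! DOUBLE_KIND value, so a build with 4-byte default reals gives pack_size = 8/4 = 2, while a
 ! build with 8-byte default reals (e.g. compiled with -r8) gives pack_size = 1.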

 call test_netcdf_io_append()

 if(ntiles_x == 1 .and. ntiles_y == 1 .and. io_layout(1) == 1 .and. io_layout(2) == 1) then
    call test_netcdf_io('Simple')
    call test_netcdf_io('Symmetry_T_cell')
    call test_netcdf_io('Symmetry_E_cell')
    call test_netcdf_io('Symmetry_N_cell')
    call test_netcdf_io('Symmetry_C_cell')
    call test_netcdf_io('Symmetry_T_cell_memory')
    call test_netcdf_io('Symmetry_E_cell_memory')
    call test_netcdf_io('Symmetry_N_cell_memory')
    call test_netcdf_io('Symmetry_C_cell_memory')
 else
    if(io_layout(1) <1 .OR. io_layout(2) <1) call mpp_error(fatal, &
         "program test_mpp_io: both elements of test_mpp_io_nml variable io_layout must be positive integers")
    if(ntiles_x <1 .OR. ntiles_y <1) call mpp_error(fatal, &
         "program test_mpp_io: test_mpp_io_nml variables ntiles_x and ntiles_y must be positive integers")
    if(mod(nx, ntiles_x) .NE. 0) call mpp_error(fatal, &
         "program test_mpp_io: nx must be divisible by ntiles_x")
    if(mod(ny, ntiles_y) .NE. 0) call mpp_error(fatal, &
         "program test_mpp_io: ny must be divisible by ntiles_y")
    if(mod(npes, ntiles_x*ntiles_y) .NE. 0) call mpp_error(fatal, &
         "program test_mpp_io: npes must be divisible by ntiles = ntiles_x*ntiles_y")
    if(layout(1) * layout(2) .NE. npes) call mpp_error(fatal, &
         "program test_mpp_io: npes must equal layout(1)*layout(2)" )
    if(mod(layout(1), io_layout(1)) .NE. 0 ) call mpp_error(fatal, &
         "program test_mpp_io: layout(1) must be divisible by io_layout(1)")
    if(mod(layout(2), io_layout(2)) .NE. 0 )call mpp_error(fatal, &
         "program test_mpp_io: layout(2) must be divisible by io_layout(2)")

    id_single_tile_mult_file = mpp_clock_id('Single Tile Multiple File', flags=mpp_clock_sync)
    call mpp_clock_begin(id_single_tile_mult_file)
    call test_netcdf_io_mosaic('Single_tile_mult_file', layout, 1, 1, (/1,1/) )
    call mpp_clock_end(id_single_tile_mult_file)

    if(io_layout(1) >1 .OR. io_layout(2) > 1) then
       id_single_tile_with_group = mpp_clock_id('Single Tile With Group', flags=mpp_clock_sync)
       call mpp_clock_begin(id_single_tile_with_group)
       call test_netcdf_io_mosaic('Single_tile_with_group', layout, 1, 1, io_layout)
       call mpp_clock_end(id_single_tile_with_group)
    endif

    id_mult_tile = mpp_clock_id('Multiple Tile', flags=mpp_clock_sync)
    call mpp_clock_begin(id_mult_tile)
    if(ntiles_x > 1 .OR. ntiles_y > 1) then
       call test_netcdf_io_mosaic('Mult_tile', layout, ntiles_x, ntiles_y, (/1,1/))
    else
       call test_netcdf_io_mosaic('Mult_tile', layout, io_layout(1), io_layout(2), (/1,1/) )
    endif
    call mpp_clock_end(id_mult_tile)

    if( (io_layout(1) >1 .OR. io_layout(2) > 1) .AND. (ntiles_x >1 .OR. ntiles_y > 1) ) then
       id_mult_tile_with_group = mpp_clock_id('Multiple Tile With Group', flags=mpp_clock_sync)
       call mpp_clock_begin(id_mult_tile_with_group)
       call test_netcdf_io_mosaic('Mult_tile_with_group', layout, ntiles_x, ntiles_y, io_layout)
       call mpp_clock_end(id_mult_tile_with_group)
    endif
 endif

 call mpp_io_exit()
 call mpp_domains_exit()
 call mpp_exit()

 contains

 !------------------------------------------------------------------

 subroutine test_netcdf_io(type)
   character(len=*), intent(in) :: type
   integer :: ndim, nvar, natt, ntime
   integer :: is, ie, js, je, isd, ied, jsd, jed, ism, iem, jsm, jem
   integer :: position, msize(2), ioff, joff, nxg, nyg
   logical :: symmetry
   type(atttype), allocatable :: atts(:)
   type(fieldtype), allocatable :: vars(:)
   type(axistype), allocatable :: axes(:)
   real, allocatable :: tstamp(:)
   real, dimension(:,:,:), allocatable :: data, gdata, rdata

   !--- determine the shift and symmetry according to type
   select case(type)
   case('Simple')
      position = center; symmetry = .false.
   case('Symmetry_T_cell', 'Symmetry_T_cell_memory')
      position = center; symmetry = .true.
   case('Symmetry_E_cell', 'Symmetry_E_cell_memory')
      position = east;   symmetry = .true.
   case('Symmetry_N_cell', 'Symmetry_N_cell_memory')
      position = north;  symmetry = .true.
   case('Symmetry_C_cell', 'Symmetry_C_cell_memory')
      position = corner; symmetry = .true.
   case default
      call mpp_error(fatal, "type = "//type//" is not a valid test type")
   end select
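   ! For orientation: on a symmetric domain the global extent returned at position=corner is
   ! (nx+1, ny+1), at east (nx+1, ny) and at north (nx, ny+1), so nxg/nyg queried below (and
   ! the resulting axis lengths) differ between the T/E/N/C cases even though nx and ny do not.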

!define domain decomposition
   call mpp_define_layout( (/1,nx,1,ny/), npes, layout )
   if(index(type,"memory") == 0) then
      call mpp_define_domains( (/1,nx,1,ny/), layout, domain, xhalo=halo, yhalo=halo, symmetry = symmetry )
   else ! on memory domain
      msize(1) = nx/layout(1) + 2*halo + 2
      msize(2) = ny/layout(2) + 2*halo + 2
      call mpp_define_domains( (/1,nx,1,ny/), layout, domain, xhalo=halo, yhalo=halo, symmetry = symmetry, &
                               memory_size = msize )
   end if
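   ! The extra "+ 2" pads the memory domain beyond the data domain, so the field below is
   ! allocated on (ism:iem, jsm:jem) and addressed through the ioff/joff offsets rather than
   ! directly on the data domain.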

   call mpp_get_compute_domain( domain, is,  ie,  js,  je,  position=position )
   call mpp_get_data_domain   ( domain, isd, ied, jsd, jed, position=position )
   call mpp_get_memory_domain ( domain, ism, iem, jsm, jem, position=position )
   call mpp_get_global_domain ( domain, xsize=nxg, ysize=nyg, position=position )
   call mpp_get_domain_components( domain, xdom, ydom )

!define global data array
   allocate( gdata(nxg,nyg,nz) )
   gdata = 0.
   do k = 1,nz
      do j = 1,nyg
         do i = 1,nxg
            gdata(i,j,k) = k + i*1e-3 + j*1e-6
         end do
      end do
   end do

   ioff = ism - isd; joff = jsm - jsd
   allocate( data(ism:iem,jsm:jem,nz) )
   data = 0
   data(is+ioff:ie+ioff,js+joff:je+joff,:) = gdata(is:ie,js:je,:)

!tests

!sequential write: single-threaded formatted: only if small
   if( nx*ny*nz*nt.LT.1000 .AND. index(type,"memory") .NE. 0 )then
      if( pe.EQ.mpp_root_pe() )print *, 'sequential write: single-threaded formatted'
!here the only test is a successful write: please look at the resulting .txt file for verification.
      call mpp_open( unit, trim(file)//'s.txt', action=mpp_overwr, form=mpp_ascii, threading=mpp_single )
      call mpp_write_meta( unit, x, 'X', 'km', 'X distance', domain=xdom, data=(/(i-1.,i=1,nxg)/) )
      call mpp_write_meta( unit, y, 'Y', 'km', 'Y distance', domain=ydom, data=(/(i-1.,i=1,nyg)/) )
      call mpp_write_meta( unit, z, 'Z', 'km', 'Z distance', data=(/(i-1.,i=1,nz)/) )
      call mpp_write_meta( unit, t, 'T', 'sec', 'Time' )
      call mpp_write_meta( unit, f, (/x,y,z,t/), 'Data', 'metres', 'Random data' )
      call mpp_write( unit, x )
      call mpp_write( unit, y )
      call mpp_write( unit, z )
      do i = 0,nt-1
         time = i*10.
         call mpp_write( unit, f, domain, data, time )
      end do
      call mpp_close(unit)
   end if

!netCDF distributed write
   if( pe.EQ.mpp_root_pe() )print *, 'netCDF distributed write'
   call mpp_open( unit, trim(type)//"_"//trim(file)//'d', action=mpp_overwr, &
                  form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi )
   call mpp_write_meta( unit, x, 'X', 'km', 'X distance', 'X', domain=xdom, data=(/(i-1.,i=1,nxg)/) )
   call mpp_write_meta( unit, y, 'Y', 'km', 'Y distance', 'Y', domain=ydom, data=(/(i-1.,i=1,nyg)/) )
   call mpp_write_meta( unit, z, 'Z', 'km', 'Z distance', 'Z', data=(/(i-1.,i=1,nz)/) )
   call mpp_write_meta( unit, t, 'T', 'sec', 'Time', 'T' )
   call mpp_write_meta( unit, f, (/x,y,z,t/), 'Data', 'metres', 'Random data', pack=pack_size )
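   ! pack is interpreted by mpp_io as the packing factor relative to 8-byte reals: pack=1
   ! writes the field as 64-bit values and pack=2 as 32-bit values, which is why pack_size
   ! was restricted to 1 or 2 above.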
   call mpp_write( unit, x )
   call mpp_write( unit, y )
   call mpp_write( unit, z )
   do i = 0,nt-1
      time = i*10.
      call mpp_write( unit, f, domain, data, time )
   end do
   call mpp_close(unit)

!netCDF single-threaded write
   if( pe.EQ.mpp_root_pe() )print *, 'netCDF single-threaded write'
   call mpp_open( unit, trim(type)//"_"//trim(file)//'s', action=mpp_overwr, form=mpp_netcdf, threading=mpp_single )

   call mpp_write_meta( unit, x, 'X', 'km', 'X distance', 'X', domain=xdom, data=(/(i-1.,i=1,nxg)/) )

   call mpp_write_meta( unit, y, 'Y', 'km', 'Y distance', 'Y', domain=ydom, data=(/(i-1.,i=1,nyg)/) )
   call mpp_write_meta( unit, z, 'Z', 'km', 'Z distance', 'Z', data=(/(i-1.,i=1,nz)/) )
   call mpp_write_meta( unit, t, 'T', 'sec', 'Time', 'T' )
   call mpp_write_meta( unit, f, (/x,y,z,t/), 'Data', 'metres', 'Random data', pack=pack_size )

   call mpp_write( unit, x )
   call mpp_write( unit, y )
   call mpp_write( unit, z )

   do i = 0,nt-1
      time = i*10.
      call mpp_write( unit, f, domain, data, time)
   end do
   call mpp_close(unit)
   allocate( rdata(is:ie,js:je,nz) )

!netCDF multi-threaded read
   if( pe.EQ.mpp_root_pe() )print *, 'netCDF multi-threaded read'
   call mpp_sync()
   call mpp_open( unit, trim(type)//"_"//trim(file)//'s', action=mpp_rdonly, &
                  form=mpp_netcdf, threading=mpp_multi, fileset=mpp_single )
   call mpp_get_info( unit, ndim, nvar, natt, ntime )
   allocate( atts(natt) )
   allocate( axes(ndim) )
   allocate( vars(nvar) )
   allocate( tstamp(ntime) )
   call mpp_get_atts ( unit, atts(:) )
   call mpp_get_axes ( unit, axes(:) )
   call mpp_get_fields ( unit, vars(:) )
   call mpp_get_times( unit, tstamp(:) )

   call mpp_get_atts(vars(1),name=varname)

   if( varname.NE.'Data' )call mpp_error( fatal, 'File being read is not the expected one.' )
   call mpp_read( unit, vars(1), domain, rdata, 1 )
   rchk = mpp_chksum(rdata(is:ie,js:je,:))
   chk  = mpp_chksum( data(is+ioff:ie+ioff,js+joff:je+joff,:))
   if( pe.EQ.mpp_root_pe() )print '(a,2z18)', trim(type)//' checksum=', rchk, chk
   if( rchk == chk ) then
      if( pe.EQ.mpp_root_pe() )call mpp_error( note, trim(type)//': single-fileset: data comparison is OK.' )
   else
      call mpp_error( fatal, 'Checksum error on multi-threaded/single-fileset netCDF read for type ' &
                      //trim(type) )
   end if

   deallocate( atts, axes, vars, tstamp )

!netCDF distributed read
   if( pe.EQ.mpp_root_pe() )print *, 'netCDF distributed read'
   call mpp_sync() !wait for previous write to complete
   call mpp_open( unit, trim(type)//"_"//trim(file)//'d', action=mpp_rdonly, &
                  form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi )
   call mpp_get_info( unit, ndim, nvar, natt, ntime )
   allocate( atts(natt) )
   allocate( axes(ndim) )
   allocate( vars(nvar) )
   allocate( tstamp(ntime) )
   call mpp_get_atts ( unit, atts(:) )
   call mpp_get_axes ( unit, axes(:) )
   call mpp_get_fields ( unit, vars(:) )
   call mpp_get_times( unit, tstamp(:) )

   call mpp_get_atts(vars(1),name=varname)
   rdata = 0

   if( varname.NE.'Data' )call mpp_error( fatal, 'File being read is not the expected one.' )

   call mpp_read( unit, vars(1), domain, rdata, 1 )

   rchk = mpp_chksum(rdata(is:ie,js:je,:))
   chk  = mpp_chksum( data(is+ioff:ie+ioff,js+joff:je+joff,:))
   if( pe.EQ.mpp_root_pe() )print '(a,2z18)', trim(type)//' checksum=', rchk, chk
   if( rchk == chk ) then
      if( pe.EQ.mpp_root_pe() )call mpp_error( note, trim(type)//': multi-fileset: data comparison is OK.' )
   else
      call mpp_error( fatal, 'Checksum error on multi-threaded/multi-fileset netCDF read for type ' &
                      //trim(type) )
   end if

   deallocate( atts, axes, vars, tstamp )

   deallocate( rdata, gdata, data)

 end subroutine test_netcdf_io

 subroutine test_netcdf_io_append
   integer :: ndim, nvar, natt, ntime
   integer :: is, ie, js, je, isd, ied, jsd, jed, ism, iem, jsm, jem
   integer :: position, msize(2), ioff, joff, nxg, nyg
   logical :: symmetry
   real :: data
   type(fieldtype), allocatable :: vars(:)

!netCDF single-threaded write
   if( pe.EQ.mpp_root_pe() )print *, 'netCDF single-threaded write'
   call mpp_open( unit, "timestats.nc", action=mpp_overwr, &
                  form=mpp_netcdf, threading=mpp_single, fileset=mpp_single )
   call mpp_write_meta( unit, t, 'T', 'sec', 'Time', 'T' )
   call mpp_write_meta( unit, f, (/t/), 'Data', 'metres', 'Random data', pack=pack_size )
   do i = 0,nt-1
      time = i*1.
      data = i*3.0
      call mpp_write( unit, f, data, time )
   end do
   call mpp_close(unit)

!--- append
   if( pe.EQ.mpp_root_pe() )print *, 'netCDF single-threaded append'
   call mpp_open( unit, "timestats.nc", action=mpp_append, &
                  form=mpp_netcdf, threading=mpp_single, fileset=mpp_single )
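   ! In append mode the file's existing metadata is reused rather than rewritten: the handle
   ! of the 'Data' field is recovered via mpp_get_fields below and additional (time, data)
   ! records are written after the nt records created above.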
   allocate(vars(1))
   if(pe.EQ.mpp_root_pe() ) then
      call mpp_get_info(unit, ndim, nvar, natt, ntime)

      if (nvar /= 1) then
         call mpp_error(fatal, "test_netcdf_io_append: nvar should be 1")
      endif
      call mpp_get_fields(unit,vars(1:nvar))
   endif

   do i = nt,2*nt-1
      time = i*1.
      data = i*3.0
      call mpp_write( unit, vars(1), data, time )
   end do
   call mpp_close(unit)
   deallocate(vars)


 end subroutine test_netcdf_io_append


 subroutine test_netcdf_io_mosaic(type, layout, ntiles_x, ntiles_y, io_layout)
   character(len=*), intent(in) :: type
   integer,          intent(in) :: layout(:)
   integer,          intent(in) :: io_layout(:)
   integer,          intent(in) :: ntiles_x, ntiles_y

   integer :: ndim, nvar, natt, ntime
   integer :: isc, iec, jsc, jec, nlon, nlat, n, i, j
   integer :: my_tile, ncontacts, npes_per_tile, ntiles
   integer, dimension(:), allocatable :: tile1, istart1, iend1, jstart1, jend1
   integer, dimension(:), allocatable :: tile2, istart2, iend2, jstart2, jend2
   integer, dimension(:), allocatable :: pe_start, pe_end
   integer, dimension(:,:), allocatable :: layout2D, global_indices
   character(len=64) :: output_file
   logical :: is_root_pe
   real, dimension(:,:,:), allocatable :: data, rdata
   type(fieldtype), save :: vars(1)
   integer :: id_clock_read, id_clock_write

   ! First get the number of tiles in this mosaic. When there is only one tile, the file is
   ! read/written as a distributed fileset; when there is more than one tile, a single
   ! fileset is used.
   npes = mpp_npes()

   id_clock_read  = mpp_clock_id(trim(type)//" read",  flags=mpp_clock_sync)
   id_clock_write = mpp_clock_id(trim(type)//" write", flags=mpp_clock_sync)

   ncontacts = 0
   ntiles = ntiles_x*ntiles_y

   npes_per_tile = npes/ntiles
   my_tile = mpp_pe()/npes_per_tile + 1
   is_root_pe = .false.
   if(mpp_pe() == (my_tile-1)*npes_per_tile ) is_root_pe = .true.
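   ! Worked example with illustrative numbers: npes = 8 with a 2x2 mosaic gives ntiles = 4
   ! and npes_per_tile = 2, so PEs 0-1 belong to tile 1, PEs 2-3 to tile 2, and so on, with
   ! PEs 0, 2, 4 and 6 acting as the root PEs of their tiles.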

   allocate(layout2d(2,ntiles), global_indices(4,ntiles), pe_start(ntiles), pe_end(ntiles) )
   !--- for simplicity, assume all the tiles have the same size.
   do n = 1, ntiles
      pe_start(n) = (n-1)*npes_per_tile
      pe_end(n)   = n*npes_per_tile-1
   end do
   if(ntiles>1) then
      nlon = nx/ntiles_x
      nlat = ny/ntiles_y
   else
      nlon = nx
      nlat = ny
   endif

   do n = 1, ntiles
      global_indices(:,n) = (/1,nlon,1,nlat/)
      layout2d(1,n) = layout(1)/ntiles_x
      layout2d(2,n) = layout(2)/ntiles_y
   end do

   call mpp_define_mosaic(global_indices, layout2d, domain, ntiles, ncontacts, tile1, tile2, &
                          istart1, iend1, jstart1, jend1, istart2, iend2, jstart2, jend2, pe_start, pe_end, &
                          name = type)
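   ! ncontacts = 0 here, so the contact description arrays (tile1/tile2, istart1 ... jend2)
   ! are never allocated; mpp_define_mosaic is expected not to reference them when no
   ! contacts are defined, and the mosaic is simply a set of disjoint tiles.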
   call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
   call mpp_get_domain_components(domain, xdom, ydom)
   allocate( data (isc:iec,jsc:jec,nz) )
   allocate( rdata(isc:iec,jsc:jec,nz) )
   do k = 1,nz
      do j = jsc, jec
         do i = isc, iec
            data(i,j,k) = k + i*1e-3 + j*1e-6
         enddo
      enddo
   enddo

   !--- netCDF distributed write if ntiles = 1, otherwise single-threaded write
   output_file = type
   select case(type)
   case("Single_tile_single_file")
      call mpp_open( unit, output_file, action=mpp_overwr, form=mpp_netcdf, threading=mpp_single, fileset=mpp_single )
   case("Single_tile_mult_file")
      call mpp_open( unit, output_file, action=mpp_overwr, form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi )
   case("Mult_tile")
      write(output_file, '(a,I4.4)') type//'.tile', my_tile
      call mpp_open( unit, output_file, action=mpp_overwr, form=mpp_netcdf, threading=mpp_single, is_root_pe=is_root_pe )
   case("Single_tile_with_group")
      call mpp_define_io_domain(domain, io_layout)
      call mpp_open( unit, output_file, action=mpp_overwr, form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi, domain=domain)
   case("Mult_tile_with_group")
      write(output_file, '(a,I4.4)') type//'.tile', my_tile
      call mpp_define_io_domain(domain, io_layout)
      call mpp_open( unit, output_file, action=mpp_overwr, form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi, domain=domain)

   case default
      call mpp_error(fatal, "program test_mpp_io: invalid value of type="//type)
   end select
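   ! Rough guide to the resulting files (exact suffixes are added inside mpp_open): with
   ! fileset=mpp_multi each writing PE, or each io_domain root when domain= is supplied,
   ! produces its own numbered file, while the Mult_tile cases produce one file per tile
   ! named <type>.tileNNNN written by that tile's root PE.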

   call mpp_write_meta( unit, x, 'X', 'km', 'X distance', 'X', domain=xdom, data=(/(i-1.,i=1,nlon)/) )
   call mpp_write_meta( unit, y, 'Y', 'km', 'Y distance', 'Y', domain=ydom, data=(/(i-1.,i=1,nlat)/) )
   call mpp_write_meta( unit, z, 'Z', 'km', 'Z distance', 'Z', data=(/(i-1.,i=1,nz)/) )
   call mpp_write_meta( unit, t, 'T', 'sec', 'Time', 'T' )
   call mpp_write_meta( unit, f, (/x,y,z,t/), 'Data', 'metres', 'Random data', pack=pack_size )
   call mpp_write( unit, x )
   call mpp_write( unit, y )
   call mpp_write( unit, z )
   call mpp_clock_begin(id_clock_write)
   do i = 0,nt-1
      time = i*10.
      call mpp_write( unit, f, domain, data, time )
   end do
   call mpp_clock_end(id_clock_write)
   call mpp_close(unit)

   call mpp_sync() !wait for previous write to complete

   select case(type)
   case("Single_tile_single_file")
      call mpp_open( unit, output_file, action=mpp_rdonly, form=mpp_netcdf, threading=mpp_multi, fileset=mpp_single )
   case("Single_tile_mult_file")
      call mpp_open( unit, output_file, action=mpp_rdonly, form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi )
   case("Mult_tile")
      call mpp_open( unit, output_file, action=mpp_rdonly, form=mpp_netcdf, threading=mpp_multi, &
                     fileset=mpp_single, is_root_pe=is_root_pe )
   case("Single_tile_with_group", "Mult_tile_with_group")
      call mpp_open( unit, output_file, action=mpp_rdonly, form=mpp_netcdf, threading=mpp_multi, fileset=mpp_multi, domain=domain)
   case default
      call mpp_error(fatal, "program test_mpp_io: invalid value of type="//type)
   end select

   call mpp_get_info( unit, ndim, nvar, natt, ntime )
   call mpp_get_fields ( unit, vars(:) )
   call mpp_get_atts(vars(1),name=varname)

   if( varname.NE.'Data' )call mpp_error( fatal, 'File being read is not the expected one.' )
   call mpp_clock_begin(id_clock_read)
   do i = 0,nt-1
      call mpp_read( unit, vars(1), domain, rdata, 1 )
   enddo
   call mpp_clock_end(id_clock_read)
   rchk = mpp_chksum(rdata)
   chk  = mpp_chksum( data)
   if( pe.EQ.mpp_root_pe() )print '(a,2z18)', trim(type)//' checksum=', rchk, chk
   if( rchk == chk ) then
      if( pe.EQ.mpp_root_pe() )call mpp_error( note, trim(type)//': data comparison is OK.' )
   else
      call mpp_error( fatal, 'Checksum error on netCDF read for type ' &
                      //trim(type) )
   end if

!  deallocate( vars)

   deallocate( rdata, data)
   call mpp_deallocate_domain(domain)

 end subroutine test_netcdf_io_mosaic

end program test

#else
end module
#endif