FV3 Bundle
fv_nesting_tlm.F90
!***********************************************************************
!*                    GNU General Public License                       *
!* This file is a part of fvGFS.                                       *
!*                                                                     *
!* fvGFS is free software; you can redistribute it and/or modify it    *
!* and are expected to follow the terms of the GNU General Public      *
!* License as published by the Free Software Foundation; either        *
!* version 2 of the License, or (at your option) any later version.    *
!*                                                                     *
!* fvGFS is distributed in the hope that it will be useful, but        *
!* WITHOUT ANY WARRANTY; without even the implied warranty of          *
!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU    *
!* General Public License for more details.                            *
!*                                                                     *
!* For the full text of the GNU General Public License,                *
!* write to: Free Software Foundation, Inc.,                           *
!*           675 Mass Ave, Cambridge, MA 02139, USA.                   *
!* or see:   http://www.gnu.org/licenses/gpl.html                      *
!***********************************************************************

 use fv_sg_nlm_mod,      only: neg_adj3
 use mpp_domains_mod,    only: dgrid_ne, domain2d
 use mpp_mod,            only: mpp_sync_self, mpp_sync, mpp_send, mpp_recv, mpp_error, fatal
 use mpp_domains_mod,    only: mpp_global_sum, bitwise_efp_sum, bitwise_exact_sum
 use fv_mp_nlm_mod,      only: is, ie, js, je, isd, ied, jsd, jed, isc, iec, jsc, jec
 use init_hydro_nlm_mod, only: p_var
 use fv_mapz_nlm_mod,    only: mappm
 use fv_mp_nlm_mod,      only: is_master
 use fv_mp_nlm_mod,      only: mp_reduce_sum

implicit none
 logical :: rf_initialized = .false.
 logical :: bad_range
 real, allocatable :: rf(:), rw(:)
 integer :: kmax=1
 !Arrays for global grid total energy, used for grid nesting
 real, allocatable :: te_2d_coarse(:,:)
 real, allocatable :: dp1_coarse(:,:,:)

 !For nested grid buffers
 !Individual structures are allocated by nested_grid_BC_recv
 type(fv_nest_bc_type_3d), allocatable :: q_buf(:)
!#ifdef USE_COND
 real, dimension(:,:,:), allocatable, target :: dum_west, dum_east, dum_north, dum_south
!#endif

private

CONTAINS
! Differentiation of setup_nested_grid_bcs in forward (tangent) mode:
!   variations of useful results: u v uc vc
!   with respect to varying inputs: u v
!!!! NOTE: Many of the routines here and in boundary.F90 have a lot of
!!!! redundant code, which could be cleaned up and simplified.
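! (Editorial gloss, not from the original source: the generated tangent-linear
! code pairs every active variable x with a perturbation x_tl, and every
! active assignment with its derivative statement, e.g.
!    y    = a*x + b      ! primal statement
!    y_tl = a*x_tl       ! tangent-linear statement
! so perturbations entering through (u_tl, v_tl) are propagated to
! (uc_tl, vc_tl) below.)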
 SUBROUTINE setup_nested_grid_bcs_tlm(npx, npy, npz, zvir, ncnst, u, &
&   u_tl, v, v_tl, w, pt, delp, delz, q, uc, uc_tl, vc, vc_tl, pkz, &
&   nested, inline_q, make_nh, ng, gridstruct, flagstruct, neststruct, &
&   nest_timestep, tracer_nest_timestep, domain, bd, nwat)
  IMPLICIT NONE
  TYPE(fv_grid_bounds_type), INTENT(IN) :: bd
  REAL, INTENT(IN) :: zvir
  INTEGER, INTENT(IN) :: npx, npy, npz
  INTEGER, INTENT(IN) :: ncnst, ng, nwat
  LOGICAL, INTENT(IN) :: inline_q, make_nh, nested
! D grid zonal wind (m/s)
  REAL, DIMENSION(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz), INTENT(INOUT) :: u
  REAL, DIMENSION(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz), INTENT(INOUT) :: u_tl
! D grid meridional wind (m/s)
  REAL, DIMENSION(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz), INTENT(INOUT) :: v
  REAL, DIMENSION(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz), INTENT(INOUT) :: v_tl
! W (m/s)
  REAL, INTENT(INOUT) :: w(bd%isd:, bd%jsd:, :)
! temperature (K)
  REAL, INTENT(INOUT) :: pt(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! pressure thickness (pascal)
  REAL, INTENT(INOUT) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! height thickness (m)
  REAL, INTENT(INOUT) :: delz(bd%isd:, bd%jsd:, :)
! specific humidity and constituents
  REAL, INTENT(INOUT) :: q(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst)
! (uc,vc) mostly used as the C grid winds
  REAL, INTENT(INOUT) :: uc(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz)
  REAL, INTENT(INOUT) :: uc_tl(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz)
  REAL, INTENT(INOUT) :: vc(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz)
  REAL, INTENT(INOUT) :: vc_tl(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz)
! finite-volume mean pk
  REAL, INTENT(INOUT) :: pkz(bd%is:bd%ie, bd%js:bd%je, npz)
  INTEGER, INTENT(INOUT) :: nest_timestep, tracer_nest_timestep
  TYPE(fv_grid_type), INTENT(INOUT) :: gridstruct
  TYPE(fv_flags_type), INTENT(INOUT) :: flagstruct
  TYPE(fv_nest_type), INTENT(INOUT), TARGET :: neststruct
  TYPE(domain2d), INTENT(INOUT) :: domain
  REAL :: divg(bd%isd:bd%ied+1, bd%jsd:bd%jed+1, npz)
  REAL :: ua(bd%isd:bd%ied, bd%jsd:bd%jed)
  REAL :: va(bd%isd:bd%ied, bd%jsd:bd%jed)
  REAL :: pkz_coarse(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
  INTEGER :: i, j, k, n, p, sphum
  LOGICAL :: do_pd
  TYPE(fv_nest_bc_type_3d) :: pkz_bc
!local pointers
  LOGICAL, POINTER :: child_grids(:)
  INTEGER :: is, ie, js, je
  INTEGER :: isd, ied, jsd, jed
  INTRINSIC any
  INTRINSIC ALLOCATED
  INTRINSIC SIZE
  is = bd%is
  ie = bd%ie
  js = bd%js
  je = bd%je
  isd = bd%isd
  ied = bd%ied
  jsd = bd%jsd
  jed = bd%jed
  child_grids => neststruct%child_grids
!If nested, set up nested-grid BCs for time interpolation
!(actually applying the BCs is done in dyn_core)
  nest_timestep = 0
  IF (.NOT.inline_q) tracer_nest_timestep = 0
  IF (neststruct%nested .AND. ((.NOT.neststruct%first_step) .OR. &
&     make_nh)) THEN
    do_pd = .true.
    CALL set_bcs_t0(ncnst, flagstruct%hydrostatic, neststruct)
  ELSE
!On the first timestep the t0 BCs are not initialized and may contain garbage
    do_pd = .false.
  END IF
!compute uc/vc for nested-grid BCs
!!! CLEANUP: if we compute uc/vc here we don't need to do it on the first call of c_sw, right?
  IF (any(neststruct%child_grids)) THEN
    CALL timing_on('COMM_TOTAL')
!!! CLEANUP: could we make this a non-blocking operation?
!!! Is this needed? It is on the initialization step.
    CALL mpp_update_domains_tlm(u, u_tl, v, v_tl, domain, gridtype=&
&       dgrid_ne, complete=.true.)
    CALL timing_off('COMM_TOTAL')
    uc_tl = 0.0
    vc_tl = 0.0
!$OMP parallel do default(none) shared(isd,jsd,ied,jed,is,ie,js,je,npx,npy,npz, &
!$OMP                                  gridstruct,flagstruct,bd,u,v,uc,vc,nested,divg) &
!$OMP                          private(ua,va)
    DO k=1,npz
      CALL d2c_setup_tlm(u(isd, jsd, k), u_tl(isd, jsd, k), &
&         v(isd, jsd, k), v_tl(isd, jsd, k), ua, va, uc(isd, jsd, k), &
&         uc_tl(isd, jsd, k), vc(isd, jsd, k), vc_tl(isd, jsd, k), &
&         flagstruct%nord .GT. 0, isd, ied, jsd, jed, &
&         is, ie, js, je, npx, npy, gridstruct%grid_type, &
&         gridstruct%nested, gridstruct%se_corner, &
&         gridstruct%sw_corner, gridstruct%ne_corner, &
&         gridstruct%nw_corner, gridstruct%rsin_u, gridstruct%rsin_v, &
&         gridstruct%cosa_s, gridstruct%rsin2)
      IF (nested) THEN
        CALL divergence_corner_nest(u(isd, jsd, k), v(isd, jsd, k), &
&           ua, va, divg(isd, jsd, k), gridstruct, flagstruct, bd)
      ELSE
        CALL divergence_corner(u(isd, jsd, k), v(isd, jsd, k), ua, va, &
&           divg(isd, jsd, k), gridstruct, flagstruct, bd)
      END IF
    END DO
  ELSE
    uc_tl = 0.0
    vc_tl = 0.0
  END IF
  IF (flagstruct%hydrostatic) THEN
!$OMP parallel do default(none) shared(npz,is,ie,js,je,pkz,pkz_coarse)
    DO k=1,npz
      DO j=js,je
        DO i=is,ie
          pkz_coarse(i, j, k) = pkz(i, j, k)
        END DO
      END DO
    END DO
  END IF
!! Nested grid: receive from parent grid
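! (Editorial note: the paired integer arguments to nested_grid_BC_recv/_send
! appear to select the grid staggering of each field, consistent with the
! array shapes above: (0,0) cell centers (delp, q, pt, pkz), (0,1) and (1,0)
! the D-grid u/v and C-grid vc/uc points, and (1,1) cell corners (divg).)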
  IF (neststruct%nested) THEN
    IF (.NOT.ALLOCATED(q_buf)) THEN
      ALLOCATE(q_buf(ncnst))
    END IF
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&       delp_buf)
    DO n=1,ncnst
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         q_buf(n))
    END DO
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&       pt_buf)
    IF (flagstruct%hydrostatic) THEN
      CALL allocate_fv_nest_bc_type(pkz_bc, is, ie, js, je, isd, ied, &
&         jsd, jed, npx, npy, npz, ng, 0, 0, 0, .false.)
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         pkz_buf)
    ELSE
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         w_buf)
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         delz_buf)
    END IF
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 1, npz, bd, &
&       u_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 1, npz, bd, &
&       vc_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 1, 0, npz, bd, &
&       v_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 1, 0, npz, bd, &
&       uc_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 1, 1, npz, bd, &
&       divg_buf)
  END IF
!! Coarse grid: send to child grids
  DO p=1,SIZE(child_grids)
    IF (child_grids(p)) THEN
      CALL nested_grid_bc_send(delp, neststruct%nest_domain_all(p), 0, 0)
      DO n=1,ncnst
        CALL nested_grid_bc_send(q(:, :, :, n), neststruct%&
&           nest_domain_all(p), 0, 0)
      END DO
      CALL nested_grid_bc_send(pt, neststruct%nest_domain_all(p), 0, 0)
      IF (flagstruct%hydrostatic) THEN
!Working with PKZ is more complicated since it is only defined on the interior of the grid.
        CALL nested_grid_bc_send(pkz_coarse, neststruct%&
&           nest_domain_all(p), 0, 0)
      ELSE
        CALL nested_grid_bc_send(w, neststruct%nest_domain_all(p), 0, 0)
        CALL nested_grid_bc_send(delz, neststruct%nest_domain_all(p), 0, 0)
      END IF
      CALL nested_grid_bc_send(u, neststruct%nest_domain_all(p), 0, 1)
      CALL nested_grid_bc_send(vc, neststruct%nest_domain_all(p), 0, 1)
      CALL nested_grid_bc_send(v, neststruct%nest_domain_all(p), 1, 0)
      CALL nested_grid_bc_send(uc, neststruct%nest_domain_all(p), 1, 0)
      CALL nested_grid_bc_send(divg, neststruct%nest_domain_all(p), 1, 1)
    END IF
  END DO
!Nested grid: do computations
  IF (nested) THEN
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&       neststruct%delp_bc, delp_buf, do_pd)
    DO n=1,ncnst
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         neststruct%q_bc(n), q_buf(n), do_pd)
    END DO
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&       neststruct%pt_bc, pt_buf)
    sphum = get_tracer_index(model_atmos, 'sphum')
    IF (flagstruct%hydrostatic) THEN
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         pkz_bc, pkz_buf)
      CALL setup_pt_bc(neststruct%pt_bc, pkz_bc, neststruct%q_bc(sphum), &
&         npx, npy, npz, zvir, bd)
    ELSE
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         neststruct%w_bc, w_buf)
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         neststruct%delz_bc, delz_buf)
!Need a negative-definite method?
      CALL setup_pt_nh_bc(neststruct%pt_bc, neststruct%delp_bc, &
&         neststruct%delz_bc, neststruct%q_bc(sphum), &
&         neststruct%q_bc, ncnst, npx, npy, npz, zvir, bd)
    END IF
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, &
&       neststruct%u_bc, u_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, &
&       neststruct%vc_bc, vc_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, &
&       neststruct%v_bc, v_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, &
&       neststruct%uc_bc, uc_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_b, neststruct%wt_b, 1, 1, npx, npy, npz, bd, &
&       neststruct%divg_bc, divg_buf)
  END IF
  IF (neststruct%first_step) THEN
    IF (neststruct%nested) CALL set_bcs_t0(ncnst, flagstruct%&
&       hydrostatic, neststruct)
    neststruct%first_step = .false.
    IF (.NOT.flagstruct%hydrostatic) flagstruct%make_nh = .false.
  ELSE IF (flagstruct%make_nh) THEN
    IF (neststruct%nested) CALL set_nh_bcs_t0(neststruct)
    flagstruct%make_nh = .false.
  END IF
!Unnecessary?
!!$ if ( neststruct%nested .and. .not. neststruct%divg_BC%initialized) then
!!$   neststruct%divg_BC%east_t0  = neststruct%divg_BC%east_t1
!!$   neststruct%divg_BC%west_t0  = neststruct%divg_BC%west_t1
!!$   neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
!!$   neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
!!$   neststruct%divg_BC%initialized = .true.
!!$ endif
  CALL mpp_sync_self()
 END SUBROUTINE setup_nested_grid_bcs_tlm
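! (Editorial note: setup_nested_grid_bcs below is the primal, nonlinear
! routine from which the tangent-linear setup_nested_grid_bcs_tlm above was
! generated; apart from the *_tl perturbation statements, the two are
! closely parallel.)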
!!!! NOTE: Many of the routines here and in boundary.F90 have a lot of
!!!! redundant code, which could be cleaned up and simplified.
 SUBROUTINE setup_nested_grid_bcs(npx, npy, npz, zvir, ncnst, u, v, w, &
&   pt, delp, delz, q, uc, vc, pkz, nested, inline_q, make_nh, ng, &
&   gridstruct, flagstruct, neststruct, nest_timestep, &
&   tracer_nest_timestep, domain, bd, nwat)
  IMPLICIT NONE
  TYPE(fv_grid_bounds_type), INTENT(IN) :: bd
  REAL, INTENT(IN) :: zvir
  INTEGER, INTENT(IN) :: npx, npy, npz
  INTEGER, INTENT(IN) :: ncnst, ng, nwat
  LOGICAL, INTENT(IN) :: inline_q, make_nh, nested
! D grid zonal wind (m/s)
  REAL, DIMENSION(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz), INTENT(INOUT) :: u
! D grid meridional wind (m/s)
  REAL, DIMENSION(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz), INTENT(INOUT) :: v
! W (m/s)
  REAL, INTENT(INOUT) :: w(bd%isd:, bd%jsd:, :)
! temperature (K)
  REAL, INTENT(INOUT) :: pt(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! pressure thickness (pascal)
  REAL, INTENT(INOUT) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! height thickness (m)
  REAL, INTENT(INOUT) :: delz(bd%isd:, bd%jsd:, :)
! specific humidity and constituents
  REAL, INTENT(INOUT) :: q(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst)
! (uc,vc) mostly used as the C grid winds
  REAL, INTENT(INOUT) :: uc(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz)
  REAL, INTENT(INOUT) :: vc(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz)
! finite-volume mean pk
  REAL, INTENT(INOUT) :: pkz(bd%is:bd%ie, bd%js:bd%je, npz)
  INTEGER, INTENT(INOUT) :: nest_timestep, tracer_nest_timestep
  TYPE(fv_grid_type), INTENT(INOUT) :: gridstruct
  TYPE(fv_flags_type), INTENT(INOUT) :: flagstruct
  TYPE(fv_nest_type), INTENT(INOUT), TARGET :: neststruct
  TYPE(domain2d), INTENT(INOUT) :: domain
  REAL :: divg(bd%isd:bd%ied+1, bd%jsd:bd%jed+1, npz)
  REAL :: ua(bd%isd:bd%ied, bd%jsd:bd%jed)
  REAL :: va(bd%isd:bd%ied, bd%jsd:bd%jed)
  REAL :: pkz_coarse(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
  INTEGER :: i, j, k, n, p, sphum
  LOGICAL :: do_pd
  TYPE(fv_nest_bc_type_3d) :: pkz_bc
!local pointers
  LOGICAL, POINTER :: child_grids(:)
  INTEGER :: is, ie, js, je
  INTEGER :: isd, ied, jsd, jed
  INTRINSIC any
  INTRINSIC ALLOCATED
  INTRINSIC SIZE
  is = bd%is
  ie = bd%ie
  js = bd%js
  je = bd%je
  isd = bd%isd
  ied = bd%ied
  jsd = bd%jsd
  jed = bd%jed
  child_grids => neststruct%child_grids
!If nested, set up nested-grid BCs for time interpolation
!(actually applying the BCs is done in dyn_core)
  nest_timestep = 0
  IF (.NOT.inline_q) tracer_nest_timestep = 0
  IF (neststruct%nested .AND. ((.NOT.neststruct%first_step) .OR. &
&     make_nh)) THEN
    do_pd = .true.
    CALL set_bcs_t0(ncnst, flagstruct%hydrostatic, neststruct)
  ELSE
!On the first timestep the t0 BCs are not initialized and may contain garbage
    do_pd = .false.
  END IF
!compute uc/vc for nested-grid BCs
!!! CLEANUP: if we compute uc/vc here we don't need to do it on the first call of c_sw, right?
  IF (any(neststruct%child_grids)) THEN
    CALL timing_on('COMM_TOTAL')
!!! CLEANUP: could we make this a non-blocking operation?
!!! Is this needed? It is on the initialization step.
    CALL mpp_update_domains(u, v, domain, gridtype=dgrid_ne, complete=&
&       .true.)
    CALL timing_off('COMM_TOTAL')
!$OMP parallel do default(none) shared(isd,jsd,ied,jed,is,ie,js,je,npx,npy,npz, &
!$OMP                                  gridstruct,flagstruct,bd,u,v,uc,vc,nested,divg) &
!$OMP                          private(ua,va)
    DO k=1,npz
      CALL d2c_setup(u(isd, jsd, k), v(isd, jsd, k), ua, va, &
&         uc(isd, jsd, k), vc(isd, jsd, k), flagstruct%nord .GT. 0, &
&         isd, ied, jsd, jed, is, ie, js, je, npx, npy, &
&         gridstruct%grid_type, gridstruct%nested, gridstruct%se_corner, &
&         gridstruct%sw_corner, gridstruct%ne_corner, &
&         gridstruct%nw_corner, gridstruct%rsin_u, gridstruct%rsin_v, &
&         gridstruct%cosa_s, gridstruct%rsin2)
      IF (nested) THEN
        CALL divergence_corner_nest(u(isd, jsd, k), v(isd, jsd, k), &
&           ua, va, divg(isd, jsd, k), gridstruct, flagstruct, bd)
      ELSE
        CALL divergence_corner(u(isd, jsd, k), v(isd, jsd, k), ua, va, &
&           divg(isd, jsd, k), gridstruct, flagstruct, bd)
      END IF
    END DO
  END IF
  IF (flagstruct%hydrostatic) THEN
!$OMP parallel do default(none) shared(npz,is,ie,js,je,pkz,pkz_coarse)
    DO k=1,npz
      DO j=js,je
        DO i=is,ie
          pkz_coarse(i, j, k) = pkz(i, j, k)
        END DO
      END DO
    END DO
  END IF
!! Nested grid: receive from parent grid
  IF (neststruct%nested) THEN
    IF (.NOT.ALLOCATED(q_buf)) THEN
      ALLOCATE(q_buf(ncnst))
    END IF
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&       delp_buf)
    DO n=1,ncnst
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         q_buf(n))
    END DO
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&       pt_buf)
    IF (flagstruct%hydrostatic) THEN
      CALL allocate_fv_nest_bc_type(pkz_bc, is, ie, js, je, isd, ied, &
&         jsd, jed, npx, npy, npz, ng, 0, 0, 0, .false.)
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         pkz_buf)
    ELSE
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         w_buf)
      CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 0, npz, bd, &
&         delz_buf)
    END IF
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 1, npz, bd, &
&       u_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 0, 1, npz, bd, &
&       vc_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 1, 0, npz, bd, &
&       v_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 1, 0, npz, bd, &
&       uc_buf)
    CALL nested_grid_bc_recv(neststruct%nest_domain, 1, 1, npz, bd, &
&       divg_buf)
  END IF
!! Coarse grid: send to child grids
  DO p=1,SIZE(child_grids)
    IF (child_grids(p)) THEN
      CALL nested_grid_bc_send(delp, neststruct%nest_domain_all(p), 0, 0)
      DO n=1,ncnst
        CALL nested_grid_bc_send(q(:, :, :, n), neststruct%&
&           nest_domain_all(p), 0, 0)
      END DO
      CALL nested_grid_bc_send(pt, neststruct%nest_domain_all(p), 0, 0)
      IF (flagstruct%hydrostatic) THEN
!Working with PKZ is more complicated since it is only defined on the interior of the grid.
        CALL nested_grid_bc_send(pkz_coarse, neststruct%&
&           nest_domain_all(p), 0, 0)
      ELSE
        CALL nested_grid_bc_send(w, neststruct%nest_domain_all(p), 0, 0)
        CALL nested_grid_bc_send(delz, neststruct%nest_domain_all(p), 0, 0)
      END IF
      CALL nested_grid_bc_send(u, neststruct%nest_domain_all(p), 0, 1)
      CALL nested_grid_bc_send(vc, neststruct%nest_domain_all(p), 0, 1)
      CALL nested_grid_bc_send(v, neststruct%nest_domain_all(p), 1, 0)
      CALL nested_grid_bc_send(uc, neststruct%nest_domain_all(p), 1, 0)
      CALL nested_grid_bc_send(divg, neststruct%nest_domain_all(p), 1, 1)
    END IF
  END DO
!Nested grid: do computations
  IF (nested) THEN
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&       neststruct%delp_bc, delp_buf, do_pd)
    DO n=1,ncnst
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         neststruct%q_bc(n), q_buf(n), do_pd)
    END DO
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&       neststruct%pt_bc, pt_buf)
    sphum = get_tracer_index(model_atmos, 'sphum')
    IF (flagstruct%hydrostatic) THEN
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         pkz_bc, pkz_buf)
      CALL setup_pt_bc(neststruct%pt_bc, pkz_bc, neststruct%q_bc(sphum), &
&         npx, npy, npz, zvir, bd)
    ELSE
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         neststruct%w_bc, w_buf)
      CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&         ind_h, neststruct%wt_h, 0, 0, npx, npy, npz, bd, &
&         neststruct%delz_bc, delz_buf)
!Need a negative-definite method?
      CALL setup_pt_nh_bc(neststruct%pt_bc, neststruct%delp_bc, &
&         neststruct%delz_bc, neststruct%q_bc(sphum), &
&         neststruct%q_bc, ncnst, npx, npy, npz, zvir, bd)
    END IF
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, &
&       neststruct%u_bc, u_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_u, neststruct%wt_u, 0, 1, npx, npy, npz, bd, &
&       neststruct%vc_bc, vc_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, &
&       neststruct%v_bc, v_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_v, neststruct%wt_v, 1, 0, npx, npy, npz, bd, &
&       neststruct%uc_bc, uc_buf)
    CALL nested_grid_bc_save_proc(neststruct%nest_domain, neststruct%&
&       ind_b, neststruct%wt_b, 1, 1, npx, npy, npz, bd, &
&       neststruct%divg_bc, divg_buf)
  END IF
  IF (neststruct%first_step) THEN
    IF (neststruct%nested) CALL set_bcs_t0(ncnst, flagstruct%&
&       hydrostatic, neststruct)
    neststruct%first_step = .false.
    IF (.NOT.flagstruct%hydrostatic) flagstruct%make_nh = .false.
  ELSE IF (flagstruct%make_nh) THEN
    IF (neststruct%nested) CALL set_nh_bcs_t0(neststruct)
    flagstruct%make_nh = .false.
  END IF
!Unnecessary?
!!$ if ( neststruct%nested .and. .not. neststruct%divg_BC%initialized) then
!!$   neststruct%divg_BC%east_t0  = neststruct%divg_BC%east_t1
!!$   neststruct%divg_BC%west_t0  = neststruct%divg_BC%west_t1
!!$   neststruct%divg_BC%north_t0 = neststruct%divg_BC%north_t1
!!$   neststruct%divg_BC%south_t0 = neststruct%divg_BC%south_t1
!!$   neststruct%divg_BC%initialized = .true.
!!$ endif
  CALL mpp_sync_self()
 END SUBROUTINE setup_nested_grid_bcs
 SUBROUTINE setup_pt_bc(pt_bc, pkz_bc, sphum_bc, npx, npy, npz, zvir, bd)
  IMPLICIT NONE
  TYPE(FV_GRID_BOUNDS_TYPE), INTENT(IN) :: bd
  TYPE(FV_NEST_BC_TYPE_3D), INTENT(IN), TARGET :: pkz_bc, sphum_bc
  TYPE(FV_NEST_BC_TYPE_3D), INTENT(INOUT), TARGET :: pt_bc
  INTEGER, INTENT(IN) :: npx, npy, npz
  REAL, INTENT(IN) :: zvir
  REAL, DIMENSION(:, :, :), POINTER :: ptbc, pkzbc, sphumbc
  INTEGER :: i, j, k, istart, iend
  INTEGER :: is, ie, js, je
  INTEGER :: isd, ied, jsd, jed
  is = bd%is
  ie = bd%ie
  js = bd%js
  je = bd%je
  isd = bd%isd
  ied = bd%ied
  jsd = bd%jsd
  jed = bd%jed
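! (Editorial note: on each outer edge of the nested grid, the blocks below
! convert the temperature BC to the hydrostatic form of pt used by the solver,
!    ptBC = ptBC/pkzBC * (1 + zvir*sphumBC)
! i.e. virtual temperature divided by pkz = p**kappa.)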
  IF (is .EQ. 1) THEN
    ptbc => pt_bc%west_t1
    pkzbc => pkz_bc%west_t1
    sphumbc => sphum_bc%west_t1
!$OMP parallel do default(none) shared(npz,jsd,jed,isd,ptBC,pkzBC,zvir,sphumBC)
    DO k=1,npz
      DO j=jsd,jed
        DO i=isd,0
          ptbc(i, j, k) = ptbc(i, j, k)/pkzbc(i, j, k)*(1.+zvir*&
&             sphumbc(i, j, k))
        END DO
      END DO
    END DO
  END IF
  IF (js .EQ. 1) THEN
    ptbc => pt_bc%south_t1
    pkzbc => pkz_bc%south_t1
    sphumbc => sphum_bc%south_t1
    IF (is .EQ. 1) THEN
      istart = is
    ELSE
      istart = isd
    END IF
    IF (ie .EQ. npx - 1) THEN
      iend = ie
    ELSE
      iend = ied
    END IF
!$OMP parallel do default(none) shared(npz,jsd,istart,iend,ptBC,pkzBC,zvir,sphumBC)
    DO k=1,npz
      DO j=jsd,0
        DO i=istart,iend
          ptbc(i, j, k) = ptbc(i, j, k)/pkzbc(i, j, k)*(1.+zvir*&
&             sphumbc(i, j, k))
        END DO
      END DO
    END DO
  END IF
  IF (ie .EQ. npx - 1) THEN
    ptbc => pt_bc%east_t1
    pkzbc => pkz_bc%east_t1
    sphumbc => sphum_bc%east_t1
!$OMP parallel do default(none) shared(npz,jsd,jed,npx,ied,ptBC,pkzBC,zvir,sphumBC)
    DO k=1,npz
      DO j=jsd,jed
        DO i=npx,ied
          ptbc(i, j, k) = ptbc(i, j, k)/pkzbc(i, j, k)*(1.+zvir*&
&             sphumbc(i, j, k))
        END DO
      END DO
    END DO
  END IF
  IF (je .EQ. npy - 1) THEN
    ptbc => pt_bc%north_t1
    pkzbc => pkz_bc%north_t1
    sphumbc => sphum_bc%north_t1
    IF (is .EQ. 1) THEN
      istart = is
    ELSE
      istart = isd
    END IF
    IF (ie .EQ. npx - 1) THEN
      iend = ie
    ELSE
      iend = ied
    END IF
!$OMP parallel do default(none) shared(npz,npy,jed,npx,istart,iend,ptBC,pkzBC,zvir,sphumBC)
    DO k=1,npz
      DO j=npy,jed
        DO i=istart,iend
          ptbc(i, j, k) = ptbc(i, j, k)/pkzbc(i, j, k)*(1.+zvir*&
&             sphumbc(i, j, k))
        END DO
      END DO
    END DO
  END IF
 END SUBROUTINE setup_pt_bc
 SUBROUTINE setup_pt_nh_bc(pt_bc, delp_bc, delz_bc, sphum_bc, q_bc, nq, &
&   npx, npy, npz, zvir, bd)
  IMPLICIT NONE
  TYPE(FV_GRID_BOUNDS_TYPE), INTENT(IN) :: bd
  TYPE(FV_NEST_BC_TYPE_3D), INTENT(IN), TARGET :: delp_bc, delz_bc, &
& sphum_bc
  TYPE(FV_NEST_BC_TYPE_3D), INTENT(INOUT), TARGET :: pt_bc
  INTEGER, INTENT(IN) :: nq
  TYPE(FV_NEST_BC_TYPE_3D), INTENT(IN), TARGET :: q_bc(nq)
  INTEGER, INTENT(IN) :: npx, npy, npz
  REAL, INTENT(IN) :: zvir
! heat capacity of water at 0C
  REAL, PARAMETER :: c_liq=4185.5
! heat capacity of ice at 0C: c=c_ice+7.3*(T-Tice)
  REAL, PARAMETER :: c_ice=1972.
  REAL, PARAMETER :: cv_vap=cp_vapor-rvgas ! 1384.5
  REAL, DIMENSION(:, :, :), POINTER :: ptbc, sphumbc, qconbc, delpbc, &
& delzbc, cappabc
  REAL, DIMENSION(:, :, :), POINTER :: liq_watbc_west, ice_watbc_west, &
& rainwatbc_west, snowwatbc_west, graupelbc_west
  REAL, DIMENSION(:, :, :), POINTER :: liq_watbc_east, ice_watbc_east, &
& rainwatbc_east, snowwatbc_east, graupelbc_east
  REAL, DIMENSION(:, :, :), POINTER :: liq_watbc_north, &
& ice_watbc_north, rainwatbc_north, snowwatbc_north, graupelbc_north
  REAL, DIMENSION(:, :, :), POINTER :: liq_watbc_south, &
& ice_watbc_south, rainwatbc_south, snowwatbc_south, graupelbc_south
  REAL :: dp1, q_liq, q_sol
  REAL, SAVE :: q_con=0.
  REAL :: cvm
  REAL :: pkz
  REAL :: rdg
  REAL :: cv_air
  INTEGER :: i, j, k, istart, iend
  INTEGER :: liq_wat, ice_wat, rainwat, snowwat, graupel
! For GFS Partitioning
  REAL, PARAMETER :: tice=273.16
  REAL, PARAMETER :: t_i0=15.
  INTEGER :: is, ie, js, je
  INTEGER :: isd, ied, jsd, jed
  INTRINSIC ALLOCATED
  INTRINSIC log
  INTRINSIC exp
  REAL :: arg1
  REAL :: arg2
  is = bd%is
  ie = bd%ie
  js = bd%js
  je = bd%je
  isd = bd%isd
  ied = bd%ied
  jsd = bd%jsd
  jed = bd%jed
  rdg = -(rdgas/grav)
  cv_air = cp_air - rdgas
  liq_wat = get_tracer_index(model_atmos, 'liq_wat')
  ice_wat = get_tracer_index(model_atmos, 'ice_wat')
  rainwat = get_tracer_index(model_atmos, 'rainwat')
  snowwat = get_tracer_index(model_atmos, 'snowwat')
  graupel = get_tracer_index(model_atmos, 'graupel')
  IF (is .EQ. 1) THEN
    IF (.NOT.ALLOCATED(dum_west)) THEN
      ALLOCATE(dum_west(isd:0, jsd:jed, npz))
!$OMP parallel do default(none) shared(npz,isd,jsd,jed,dum_West)
      DO k=1,npz
        DO j=jsd,jed
          DO i=isd,0
            dum_west(i, j, k) = 0.
          END DO
        END DO
      END DO
    END IF
  END IF
  IF (js .EQ. 1) THEN
    IF (.NOT.ALLOCATED(dum_south)) THEN
      ALLOCATE(dum_south(isd:ied, jsd:0, npz))
!$OMP parallel do default(none) shared(npz,isd,ied,jsd,dum_South)
      DO k=1,npz
        DO j=jsd,0
          DO i=isd,ied
            dum_south(i, j, k) = 0.
          END DO
        END DO
      END DO
    END IF
  END IF
  IF (ie .EQ. npx - 1) THEN
    IF (.NOT.ALLOCATED(dum_east)) THEN
      ALLOCATE(dum_east(npx:ied, jsd:jed, npz))
!$OMP parallel do default(none) shared(npx,npz,ied,jsd,jed,dum_East)
      DO k=1,npz
        DO j=jsd,jed
          DO i=npx,ied
            dum_east(i, j, k) = 0.
          END DO
        END DO
      END DO
    END IF
  END IF
  IF (je .EQ. npy - 1) THEN
    IF (.NOT.ALLOCATED(dum_north)) THEN
      ALLOCATE(dum_north(isd:ied, npy:jed, npz))
!$OMP parallel do default(none) shared(npy,npz,isd,ied,jed,dum_North)
      DO k=1,npz
        DO j=npy,jed
          DO i=isd,ied
            dum_north(i, j, k) = 0.
          END DO
        END DO
      END DO
    END IF
  END IF
  IF (liq_wat .GT. 0) THEN
    liq_watbc_west => q_bc(liq_wat)%west_t1
    liq_watbc_east => q_bc(liq_wat)%east_t1
    liq_watbc_north => q_bc(liq_wat)%north_t1
    liq_watbc_south => q_bc(liq_wat)%south_t1
  ELSE
    liq_watbc_west => dum_west
    liq_watbc_east => dum_east
    liq_watbc_north => dum_north
    liq_watbc_south => dum_south
  END IF
  IF (ice_wat .GT. 0) THEN
    ice_watbc_west => q_bc(ice_wat)%west_t1
    ice_watbc_east => q_bc(ice_wat)%east_t1
    ice_watbc_north => q_bc(ice_wat)%north_t1
    ice_watbc_south => q_bc(ice_wat)%south_t1
  ELSE
    ice_watbc_west => dum_west
    ice_watbc_east => dum_east
    ice_watbc_north => dum_north
    ice_watbc_south => dum_south
  END IF
  IF (rainwat .GT. 0) THEN
    rainwatbc_west => q_bc(rainwat)%west_t1
    rainwatbc_east => q_bc(rainwat)%east_t1
    rainwatbc_north => q_bc(rainwat)%north_t1
    rainwatbc_south => q_bc(rainwat)%south_t1
  ELSE
    rainwatbc_west => dum_west
    rainwatbc_east => dum_east
    rainwatbc_north => dum_north
    rainwatbc_south => dum_south
  END IF
  IF (snowwat .GT. 0) THEN
    snowwatbc_west => q_bc(snowwat)%west_t1
    snowwatbc_east => q_bc(snowwat)%east_t1
    snowwatbc_north => q_bc(snowwat)%north_t1
    snowwatbc_south => q_bc(snowwat)%south_t1
  ELSE
    snowwatbc_west => dum_west
    snowwatbc_east => dum_east
    snowwatbc_north => dum_north
    snowwatbc_south => dum_south
  END IF
  IF (graupel .GT. 0) THEN
    graupelbc_west => q_bc(graupel)%west_t1
    graupelbc_east => q_bc(graupel)%east_t1
    graupelbc_north => q_bc(graupel)%north_t1
    graupelbc_south => q_bc(graupel)%south_t1
  ELSE
    graupelbc_west => dum_west
    graupelbc_east => dum_east
    graupelbc_north => dum_north
    graupelbc_south => dum_south
  END IF
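! (Editorial note: each edge block below converts the temperature BC to the
! nonhydrostatic form of pt. With rdg = -(rdgas/grav), the quantity
! rdg*delpBC*Tv/delzBC is the pressure from the ideal gas law,
! p = rho*rdgas*Tv with rho = -delp/(grav*delz); pkz = p**kappa is then
! evaluated as exp(kappa*log(p)), and ptBC becomes Tv/pkz.)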
  IF (is .EQ. 1) THEN
    ptbc => pt_bc%west_t1
    sphumbc => sphum_bc%west_t1
    delpbc => delp_bc%west_t1
    delzbc => delz_bc%west_t1
!$OMP parallel do default(none) shared(npz,jsd,jed,isd,zvir,sphumBC,liq_watBC_west, &
!$OMP                                  rainwatBC_west,ice_watBC_west,snowwatBC_west,graupelBC_west,qconBC,cappaBC, &
!$OMP                                  rdg,cv_air,delpBC,delzBC,ptBC) &
!$OMP                          private(dp1,q_con,q_liq,q_sol,cvm,pkz)
    DO k=1,npz
      DO j=jsd,jed
        DO i=isd,0
          dp1 = zvir*sphumbc(i, j, k)
          arg1 = rdg*delpbc(i, j, k)*ptbc(i, j, k)*(1.+dp1)/delzbc(i, j, k)
          arg2 = kappa*log(arg1)
          pkz = exp(arg2)
          ptbc(i, j, k) = ptbc(i, j, k)*(1.+dp1)/pkz
        END DO
      END DO
    END DO
  END IF
  IF (js .EQ. 1) THEN
    ptbc => pt_bc%south_t1
    sphumbc => sphum_bc%south_t1
    delpbc => delp_bc%south_t1
    delzbc => delz_bc%south_t1
    IF (is .EQ. 1) THEN
      istart = is
    ELSE
      istart = isd
    END IF
    IF (ie .EQ. npx - 1) THEN
      iend = ie
    ELSE
      iend = ied
    END IF
!$OMP parallel do default(none) shared(npz,jsd,istart,iend,zvir,sphumBC, &
!$OMP                                  liq_watBC_south,rainwatBC_south,ice_watBC_south, &
!$OMP                                  snowwatBC_south,graupelBC_south,qconBC,cappaBC, &
!$OMP                                  rdg,cv_air,delpBC,delzBC,ptBC) &
!$OMP                          private(dp1,q_con,q_liq,q_sol,cvm,pkz)
    DO k=1,npz
      DO j=jsd,0
        DO i=istart,iend
          dp1 = zvir*sphumbc(i, j, k)
          arg1 = rdg*delpbc(i, j, k)*ptbc(i, j, k)*(1.+dp1)/delzbc(i, j, k)
          arg2 = kappa*log(arg1)
          pkz = exp(arg2)
          ptbc(i, j, k) = ptbc(i, j, k)*(1.+dp1)/pkz
        END DO
      END DO
    END DO
  END IF
  IF (ie .EQ. npx - 1) THEN
    ptbc => pt_bc%east_t1
    sphumbc => sphum_bc%east_t1
    delpbc => delp_bc%east_t1
    delzbc => delz_bc%east_t1
!$OMP parallel do default(none) shared(npz,jsd,jed,npx,ied,zvir,sphumBC, &
!$OMP                                  liq_watBC_east,rainwatBC_east,ice_watBC_east,snowwatBC_east,graupelBC_east,qconBC,cappaBC, &
!$OMP                                  rdg,cv_air,delpBC,delzBC,ptBC) &
!$OMP                          private(dp1,q_con,q_liq,q_sol,cvm,pkz)
    DO k=1,npz
      DO j=jsd,jed
        DO i=npx,ied
          dp1 = zvir*sphumbc(i, j, k)
          arg1 = rdg*delpbc(i, j, k)*ptbc(i, j, k)*(1.+dp1)/delzbc(i, j, k)
          arg2 = kappa*log(arg1)
          pkz = exp(arg2)
          ptbc(i, j, k) = ptbc(i, j, k)*(1.+dp1)/pkz
        END DO
      END DO
    END DO
  END IF
  IF (je .EQ. npy - 1) THEN
    ptbc => pt_bc%north_t1
    sphumbc => sphum_bc%north_t1
    delpbc => delp_bc%north_t1
    delzbc => delz_bc%north_t1
    IF (is .EQ. 1) THEN
      istart = is
    ELSE
      istart = isd
    END IF
    IF (ie .EQ. npx - 1) THEN
      iend = ie
    ELSE
      iend = ied
    END IF
!$OMP parallel do default(none) shared(npz,npy,jed,istart,iend,zvir, &
!$OMP                                  sphumBC,liq_watBC_north,rainwatBC_north,ice_watBC_north,snowwatBC_north,graupelBC_north,qconBC,cappaBC, &
!$OMP                                  rdg,cv_air,delpBC,delzBC,ptBC) &
!$OMP                          private(dp1,q_con,q_liq,q_sol,cvm,pkz)
    DO k=1,npz
      DO j=npy,jed
        DO i=istart,iend
          dp1 = zvir*sphumbc(i, j, k)
          arg1 = rdg*delpbc(i, j, k)*ptbc(i, j, k)*(1.+dp1)/delzbc(i, j, k)
          arg2 = kappa*log(arg1)
          pkz = exp(arg2)
          ptbc(i, j, k) = ptbc(i, j, k)*(1.+dp1)/pkz
        END DO
      END DO
    END DO
  END IF
 END SUBROUTINE setup_pt_nh_bc
 SUBROUTINE set_nh_bcs_t0(neststruct)
  IMPLICIT NONE
  TYPE(FV_NEST_TYPE), INTENT(INOUT) :: neststruct
  neststruct%delz_bc%east_t0 = neststruct%delz_bc%east_t1
  neststruct%delz_bc%west_t0 = neststruct%delz_bc%west_t1
  neststruct%delz_bc%north_t0 = neststruct%delz_bc%north_t1
  neststruct%delz_bc%south_t0 = neststruct%delz_bc%south_t1
  neststruct%w_bc%east_t0 = neststruct%w_bc%east_t1
  neststruct%w_bc%west_t0 = neststruct%w_bc%west_t1
  neststruct%w_bc%north_t0 = neststruct%w_bc%north_t1
  neststruct%w_bc%south_t0 = neststruct%w_bc%south_t1
 END SUBROUTINE set_nh_bcs_t0
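! set_bcs_t0 copies the time-level-t1 boundary conditions into time level t0,
! so that the t0->t1 time interpolation of the BCs starts from valid data
! (on the very first step the t0 fields would otherwise be uninitialized;
! see the note in setup_nested_grid_bcs above).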
 SUBROUTINE set_bcs_t0(ncnst, hydrostatic, neststruct)
  IMPLICIT NONE
  INTEGER, INTENT(IN) :: ncnst
  LOGICAL, INTENT(IN) :: hydrostatic
  TYPE(FV_NEST_TYPE), INTENT(INOUT) :: neststruct
  INTEGER :: n
  neststruct%delp_bc%east_t0 = neststruct%delp_bc%east_t1
  neststruct%delp_bc%west_t0 = neststruct%delp_bc%west_t1
  neststruct%delp_bc%north_t0 = neststruct%delp_bc%north_t1
  neststruct%delp_bc%south_t0 = neststruct%delp_bc%south_t1
  DO n=1,ncnst
    neststruct%q_bc(n)%east_t0 = neststruct%q_bc(n)%east_t1
    neststruct%q_bc(n)%west_t0 = neststruct%q_bc(n)%west_t1
    neststruct%q_bc(n)%north_t0 = neststruct%q_bc(n)%north_t1
    neststruct%q_bc(n)%south_t0 = neststruct%q_bc(n)%south_t1
  END DO
  neststruct%pt_bc%east_t0 = neststruct%pt_bc%east_t1
  neststruct%pt_bc%west_t0 = neststruct%pt_bc%west_t1
  neststruct%pt_bc%north_t0 = neststruct%pt_bc%north_t1
  neststruct%pt_bc%south_t0 = neststruct%pt_bc%south_t1
  IF (.NOT.hydrostatic) CALL set_nh_bcs_t0(neststruct)
  neststruct%u_bc%east_t0 = neststruct%u_bc%east_t1
  neststruct%u_bc%west_t0 = neststruct%u_bc%west_t1
  neststruct%u_bc%north_t0 = neststruct%u_bc%north_t1
  neststruct%u_bc%south_t0 = neststruct%u_bc%south_t1
  neststruct%v_bc%east_t0 = neststruct%v_bc%east_t1
  neststruct%v_bc%west_t0 = neststruct%v_bc%west_t1
  neststruct%v_bc%north_t0 = neststruct%v_bc%north_t1
  neststruct%v_bc%south_t0 = neststruct%v_bc%south_t1
  neststruct%vc_bc%east_t0 = neststruct%vc_bc%east_t1
  neststruct%vc_bc%west_t0 = neststruct%vc_bc%west_t1
  neststruct%vc_bc%north_t0 = neststruct%vc_bc%north_t1
  neststruct%vc_bc%south_t0 = neststruct%vc_bc%south_t1
  neststruct%uc_bc%east_t0 = neststruct%uc_bc%east_t1
  neststruct%uc_bc%west_t0 = neststruct%uc_bc%west_t1
  neststruct%uc_bc%north_t0 = neststruct%uc_bc%north_t1
  neststruct%uc_bc%south_t0 = neststruct%uc_bc%south_t1
  neststruct%divg_bc%east_t0 = neststruct%divg_bc%east_t1
  neststruct%divg_bc%west_t0 = neststruct%divg_bc%west_t1
  neststruct%divg_bc%north_t0 = neststruct%divg_bc%north_t1
  neststruct%divg_bc%south_t0 = neststruct%divg_bc%south_t1
 END SUBROUTINE set_bcs_t0
!! nestupdate types
!! 1 - Interpolation update on all variables
!! 2 - Conserving update (over areas on cell-centered variables,
!!     over faces on winds) on all variables
!! 3 - Interpolation update on winds only
!! 4 - Interpolation update on all variables except delp (mass conserving)
!! 5 - Remap interpolating update, delp not updated
!! 6 - Remap conserving update, delp not updated
!! 7 - Remap conserving update, delp and q not updated
!! 8 - Remap conserving update, only winds updated
!! Note that nestupdate > 3 will not update delp.
!! A "remap update" remaps updated variables from the nested grid's
!! vertical coordinate to that of the coarse grid. When delp is not
!! updated (nestbctype >= 3) the vertical coordinates differ on the
!! two grids, because the surface pressure will be different on the
!! two grids.
!! Note: "conserving updates" do not guarantee global conservation
!! unless flux nested-grid BCs are specified, or unless a quantity is
!! not updated at all. (Flux nested-grid BCs have not been implemented.)
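! (Editorial gloss: a "conserving update" replaces each coarse-grid cell with
! the area-weighted mean of the fine-grid cells it contains, e.g. for a
! cell-centered field q,
!    q_coarse = sum(q_fine(i)*area_fine(i)) / sum(area_fine(i))
! so the area integral of q over the update region is preserved on a level.)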
 SUBROUTINE twoway_nesting(atm, ngrids, grids_on_this_pe, zvir)
  IMPLICIT NONE
  INTEGER, INTENT(IN) :: ngrids
  TYPE(fv_atmos_type), INTENT(INOUT) :: atm(ngrids)
  LOGICAL, INTENT(IN) :: grids_on_this_pe(ngrids)
  REAL, INTENT(IN) :: zvir
  INTEGER :: n, p, sphum
! ngrids > 1
  IF (ngrids .GT. 1) THEN
!loop backwards to allow information to propagate from finest to coarsest grids
    DO n=ngrids,2,-1
!two-way updating
      IF (atm(n)%neststruct%twowaynest) THEN
        IF (grids_on_this_pe(n) .OR. grids_on_this_pe(atm(n)%&
&           parent_grid%grid_number)) THEN
          sphum = get_tracer_index(model_atmos, 'sphum')
          CALL twoway_nest_update(atm(n)%npx, atm(n)%npy, atm(n)%npz, &
&             zvir, atm(n)%ncnst, sphum, atm(n)%u, atm(n)%v, atm(n)%w, &
&             atm(n)%omga, atm(n)%pt, atm(n)%delp, atm(n)%q, &
&             atm(n)%uc, atm(n)%vc, atm(n)%pkz, atm(n)%delz, &
&             atm(n)%ps, atm(n)%ptop, atm(n)%gridstruct, &
&             atm(n)%flagstruct, atm(n)%neststruct, &
&             atm(n)%parent_grid, atm(n)%bd, .false.)
        END IF
      END IF
    END DO
!NOTE: these routines need to be used with any grid which has been updated to, not just the coarsest grid.
    DO n=1,ngrids
      IF (atm(n)%neststruct%parent_of_twoway .AND. grids_on_this_pe(n)) &
&       CALL after_twoway_nest_update(atm(n)%npx, atm(n)%npy, &
&           atm(n)%npz, atm(n)%ng, atm(n)%ncnst, atm(n)%u, atm(n)%v, &
&           atm(n)%w, atm(n)%delz, atm(n)%pt, atm(n)%delp, atm(n)%q, &
&           atm(n)%ps, atm(n)%pe, atm(n)%pk, atm(n)%peln, &
&           atm(n)%pkz, atm(n)%phis, atm(n)%ua, atm(n)%va, &
&           atm(n)%ptop, atm(n)%gridstruct, atm(n)%flagstruct, &
&           atm(n)%domain, atm(n)%bd)
    END DO
  END IF
 END SUBROUTINE twoway_nesting
!!!CLEANUP: this routine assumes that the PARENT GRID has pt = (regular) temperature,
!!!not potential temperature, which may cause problems when updating if this is not the case.
 SUBROUTINE twoway_nest_update(npx, npy, npz, zvir, ncnst, sphum, u, v, &
&   w, omga, pt, delp, q, uc, vc, pkz, delz, ps, ptop, gridstruct, &
&   flagstruct, neststruct, parent_grid, bd, conv_theta_in)
  IMPLICIT NONE
  REAL, INTENT(IN) :: zvir, ptop
  INTEGER, INTENT(IN) :: npx, npy, npz
  INTEGER, INTENT(IN) :: ncnst, sphum
  LOGICAL, INTENT(IN), OPTIONAL :: conv_theta_in
  TYPE(FV_GRID_BOUNDS_TYPE), INTENT(IN) :: bd
! D grid zonal wind (m/s)
  REAL, DIMENSION(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz), INTENT(INOUT) :: u
! D grid meridional wind (m/s)
  REAL, DIMENSION(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz), INTENT(INOUT) :: v
! W (m/s)
  REAL, INTENT(INOUT) :: w(bd%isd:, bd%jsd:, :)
! Vertical pressure velocity (pa/s)
  REAL, INTENT(INOUT) :: omga(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! temperature (K)
  REAL, INTENT(INOUT) :: pt(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! pressure thickness (pascal)
  REAL, INTENT(INOUT) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
! specific humidity and constituents
  REAL, INTENT(INOUT) :: q(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst)
! (uc,vc) C grid winds
  REAL, INTENT(INOUT) :: uc(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz)
  REAL, INTENT(INOUT) :: vc(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz)
! finite-volume mean pk
  REAL, INTENT(INOUT) :: pkz(bd%is:bd%ie, bd%js:bd%je, npz)
! delta-height (m); non-hydrostatic only
  REAL, INTENT(INOUT) :: delz(bd%isd:, bd%jsd:, :)
! Surface pressure (pascal)
  REAL, INTENT(INOUT) :: ps(bd%isd:bd%ied, bd%jsd:bd%jed)
  TYPE(FV_GRID_TYPE), INTENT(INOUT) :: gridstruct
  TYPE(FV_FLAGS_TYPE), INTENT(INOUT) :: flagstruct
  TYPE(FV_NEST_TYPE), INTENT(INOUT) :: neststruct
  TYPE(FV_ATMOS_TYPE), INTENT(INOUT) :: parent_grid
  REAL, ALLOCATABLE :: t_nest(:, :, :), ps0(:, :)
  INTEGER :: i, j, k, n
  INTEGER :: isd_p, ied_p, jsd_p, jed_p, isc_p, iec_p, jsc_p, jec_p
  INTEGER :: isg, ieg, jsg, jeg, npx_p, npy_p
  INTEGER :: istart, iend
  REAL :: qmass_b, qmass_a
  REAL, SAVE :: fix=1.
  LOGICAL :: used
  LOGICAL, SAVE :: conv_theta=.true.
  REAL :: qdp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
  REAL, ALLOCATABLE :: qdp_coarse(:, :, :)
  REAL(kind=f_p), ALLOCATABLE :: q_diff(:, :, :)
  REAL :: l_sum_b(npz), l_sum_a(npz)
  INTEGER :: upoff
  INTEGER :: is, ie, js, je
  INTEGER :: isd, ied, jsd, jed
  INTEGER :: isu, ieu, jsu, jeu
  INTRINSIC PRESENT
  INTRINSIC ALLOCATED
  is = bd%is
  ie = bd%ie
  js = bd%js
  je = bd%je
  isd = bd%isd
  ied = bd%ied
  jsd = bd%jsd
  jed = bd%jed
  isu = neststruct%isu
  ieu = neststruct%ieu
  jsu = neststruct%jsu
  jeu = neststruct%jeu
  upoff = neststruct%upoff
!We update actual temperature, not theta.
!If pt is actual temperature, set conv_theta to .false.
  IF (PRESENT(conv_theta_in)) conv_theta = conv_theta_in
  IF (.NOT.neststruct%parent_proc .AND. (.NOT.neststruct%child_proc)) THEN
    RETURN
  ELSE
    CALL mpp_get_data_domain(parent_grid%domain, isd_p, ied_p, jsd_p, &
&       jed_p)
    CALL mpp_get_compute_domain(parent_grid%domain, isc_p, iec_p, &
&       jsc_p, jec_p)
!delp/ps
    IF (neststruct%nestupdate .LT. 3) THEN
      CALL update_coarse_grid(parent_grid%delp, delp, neststruct%&
&         nest_domain, neststruct%ind_update_h, gridstruct%dx, &
&         gridstruct%dy, gridstruct%area, isd_p, ied_p, jsd_p, jed_p, &
&         isd, ied, jsd, jed, neststruct%isu, neststruct%ieu, &
&         neststruct%jsu, neststruct%jeu, npx, npy, npz, 0, 0, &
&         neststruct%refinement, neststruct%nestupdate, upoff, 0, &
&         neststruct%parent_proc, neststruct%child_proc, parent_grid)
!self
      CALL mpp_sync()
    END IF
!if (neststruct%nestupdate /= 3 .and. neststruct%nestbctype /= 3) then
    IF (neststruct%nestupdate .NE. 3 .AND. neststruct%nestupdate .NE. &
&       7 .AND. neststruct%nestupdate .NE. 8) THEN
      ALLOCATE(qdp_coarse(isd_p:ied_p, jsd_p:jed_p, npz))
      IF (parent_grid%flagstruct%nwat .GT. 0) THEN
        ALLOCATE(q_diff(isd_p:ied_p, jsd_p:jed_p, npz))
        q_diff = 0.
      END IF
      DO n=1,parent_grid%flagstruct%nwat
        qdp_coarse = 0.
        IF (neststruct%child_proc) THEN
          DO k=1,npz
            DO j=jsd,jed
              DO i=isd,ied
                qdp(i, j, k) = q(i, j, k, n)*delp(i, j, k)
              END DO
            END DO
          END DO
        ELSE
          qdp = 0.
        END IF
        IF (neststruct%parent_proc) THEN
!Add up ONLY the region being replaced by the nested grid
          DO k=1,npz
            DO j=jsu,jeu
              DO i=isu,ieu
                qdp_coarse(i, j, k) = parent_grid%q(i, j, k, n)*&
&                   parent_grid%delp(i, j, k)
              END DO
            END DO
          END DO
          CALL level_sum(qdp_coarse, parent_grid%gridstruct%area, &
&             parent_grid%domain, parent_grid%bd, npz, l_sum_b)
        ELSE
          qdp_coarse = 0.
        END IF
        IF (neststruct%parent_proc) THEN
          IF (n .LE. parent_grid%flagstruct%nwat) THEN
            DO k=1,npz
              DO j=jsu,jeu
                DO i=isu,ieu
                  q_diff(i, j, k) = q_diff(i, j, k) - qdp_coarse(i, j, k)
                END DO
              END DO
            END DO
          END IF
        END IF
        CALL update_coarse_grid(qdp_coarse, qdp, neststruct%&
&           nest_domain, neststruct%ind_update_h, gridstruct%dx, &
&           gridstruct%dy, gridstruct%area, isd_p, ied_p, jsd_p, jed_p, &
&           isd, ied, jsd, jed, neststruct%isu, neststruct%ieu, &
&           neststruct%jsu, neststruct%jeu, npx, npy, npz, 0, 0, &
&           neststruct%refinement, neststruct%nestupdate, upoff, 0, &
&           neststruct%parent_proc, neststruct%child_proc, parent_grid)
!self
        CALL mpp_sync()
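!Normalization mass fixer (see the loop below): rescale the updated tracer
!mass so each level's area-integrated mass matches its pre-update value,
!fix = l_sum_b(k)/l_sum_a(k). Note parent_grid%q temporarily holds qdp
!(tracer mass) until it is divided by the adjusted delp further down.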
        IF (neststruct%parent_proc) THEN
          CALL level_sum(qdp_coarse, parent_grid%gridstruct%area, &
&             parent_grid%domain, parent_grid%bd, npz, l_sum_a)
          DO k=1,npz
            IF (l_sum_a(k) .GT. 0.) THEN
              fix = l_sum_b(k)/l_sum_a(k)
              DO j=jsu,jeu
                DO i=isu,ieu
!Normalization mass fixer
                  parent_grid%q(i, j, k, n) = qdp_coarse(i, j, k)*fix
                END DO
              END DO
            END IF
          END DO
          IF (n .EQ. 1) sphum_ll_fix = 1. - fix
        END IF
        IF (neststruct%parent_proc) THEN
          IF (n .LE. parent_grid%flagstruct%nwat) THEN
            DO k=1,npz
              DO j=jsu,jeu
                DO i=isu,ieu
                  q_diff(i, j, k) = q_diff(i, j, k) + &
&                     parent_grid%q(i, j, k, n)
                END DO
              END DO
            END DO
          END IF
        END IF
      END DO
      IF (neststruct%parent_proc) THEN
        IF (parent_grid%flagstruct%nwat .GT. 0) THEN
          DO k=1,npz
            DO j=jsu,jeu
              DO i=isu,ieu
                parent_grid%delp(i, j, k) = parent_grid%delp(i, j, k) &
&                   + q_diff(i, j, k)
              END DO
            END DO
          END DO
        END IF
        DO n=1,parent_grid%flagstruct%nwat
          DO k=1,npz
            DO j=jsu,jeu
              DO i=isu,ieu
                parent_grid%q(i, j, k, n) = parent_grid%q(i, j, k, n)/&
&                   parent_grid%delp(i, j, k)
              END DO
            END DO
          END DO
        END DO
      END IF
      DEALLOCATE(qdp_coarse)
      IF (ALLOCATED(q_diff)) THEN
        DEALLOCATE(q_diff)
      END IF
    END IF
!Neststruct%nestupdate /= 3
    IF (neststruct%nestupdate .NE. 3 .AND. neststruct%nestupdate .NE. &
&       8) THEN
!conv_theta
      IF (conv_theta) THEN
        IF (neststruct%child_proc) THEN
!pt is potential temperature on the nested grid, but actual
!temperature on the coarse grid. Compute actual temperature
!on the nested grid, then gather.
          ALLOCATE(t_nest(isd:ied, jsd:jed, 1:npz))
!$OMP parallel do default(none) shared(npz,js,je,is,ie,t_nest,pt,pkz,zvir,q,sphum)
          DO k=1,npz
            DO j=js,je
              DO i=is,ie
                t_nest(i, j, k) = pt(i, j, k)*pkz(i, j, k)/(1.+zvir*q(&
&                   i, j, k, sphum))
              END DO
            END DO
          END DO
        END IF
        CALL update_coarse_grid(parent_grid%pt, t_nest, neststruct%&
&           nest_domain, neststruct%ind_update_h, gridstruct%dx, &
&           gridstruct%dy, gridstruct%area, isd_p, ied_p, jsd_p, jed_p, &
&           isd, ied, jsd, jed, neststruct%isu, neststruct%ieu, &
&           neststruct%jsu, neststruct%jeu, npx, npy, npz, 0, 0, &
&           neststruct%refinement, neststruct%nestupdate, upoff, 0, &
&           neststruct%parent_proc, neststruct%child_proc, parent_grid)
!t_nest must remain allocated until update_coarse_grid has gathered it
        IF (neststruct%child_proc) DEALLOCATE(t_nest)
      ELSE
        CALL update_coarse_grid(parent_grid%pt, pt, neststruct%&
&           nest_domain, neststruct%ind_update_h, gridstruct%dx, &
&           gridstruct%dy, gridstruct%area, isd_p, ied_p, jsd_p, jed_p, &
&           isd, ied, jsd, jed, neststruct%isu, neststruct%ieu, &
&           neststruct%jsu, neststruct%jeu, npx, npy, npz, 0, 0, &
&           neststruct%refinement, neststruct%nestupdate, upoff, 0, &
&           neststruct%parent_proc, neststruct%child_proc, parent_grid)
      END IF
!self
      CALL mpp_sync()
      IF (.NOT.flagstruct%hydrostatic) THEN
        CALL update_coarse_grid(parent_grid%w, w, neststruct%&
&           nest_domain, neststruct%ind_update_h, gridstruct%dx, &
&           gridstruct%dy, gridstruct%area, isd_p, ied_p, jsd_p, jed_p, &
&           isd, ied, jsd, jed, neststruct%isu, neststruct%ieu, &
&           neststruct%jsu, neststruct%jeu, npx, npy, npz, 0, 0, &
&           neststruct%refinement, neststruct%nestupdate, upoff, 0, &
&           neststruct%parent_proc, neststruct%child_proc, parent_grid)
!Updating for delz not yet implemented; may be problematic
!!$ call update_coarse_grid(parent_grid%delz, delz, neststruct%nest_domain, &
!!$      neststruct%ind_update_h, &
!!$      isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, jed, npz, 0, 0, &
!!$      neststruct%refinement, neststruct%nestupdate, upoff, 0, neststruct%parent_proc, neststruct%child_proc)
!self
        CALL mpp_sync()
      END IF
    END IF
    CALL update_coarse_grid(parent_grid%u, u, neststruct%nest_domain, &
&       neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, &
&       gridstruct%area, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, &
&       jed, neststruct%isu, neststruct%ieu, neststruct%jsu, &
&       neststruct%jeu, npx, npy, npz, 0, 1, neststruct%refinement, &
&       neststruct%nestupdate, upoff, 0, neststruct%parent_proc, &
&       neststruct%child_proc, parent_grid)
    CALL update_coarse_grid(parent_grid%v, v, neststruct%nest_domain, &
&       neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, &
&       gridstruct%area, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, &
&       jed, neststruct%isu, neststruct%ieu, neststruct%jsu, &
&       neststruct%jeu, npx, npy, npz, 1, 0, neststruct%refinement, &
&       neststruct%nestupdate, upoff, 0, neststruct%parent_proc, &
&       neststruct%child_proc, parent_grid)
!self
    CALL mpp_sync()
    IF (neststruct%nestupdate .GE. 5 .AND. npz .GT. 4) THEN
!Use PS0 from the nested grid, NOT the full delp. Also we assume the same number of levels on both grids.
!PS0 should be initially set to ps so that this routine does NOTHING outside of the update region
!Re-compute nested (AND COARSE) grid ps
      ALLOCATE(ps0(isd_p:ied_p, jsd_p:jed_p))
      IF (neststruct%parent_proc) THEN
        parent_grid%ps = parent_grid%ptop
!This loop appears to cause problems with OMP
!$OMP parallel do default(none) shared(npz,jsd_p,jed_p,isd_p,ied_p,parent_grid)
        DO j=jsd_p,jed_p
          DO k=1,npz
            DO i=isd_p,ied_p
              parent_grid%ps(i, j) = parent_grid%ps(i, j) + &
&                 parent_grid%delp(i, j, k)
            END DO
          END DO
        END DO
        ps0 = parent_grid%ps
      END IF
      IF (neststruct%child_proc) THEN
        ps = ptop
!$OMP parallel do default(none) shared(npz,jsd,jed,isd,ied,ps,delp)
        DO j=jsd,jed
          DO k=1,npz
            DO i=isd,ied
              ps(i, j) = ps(i, j) + delp(i, j, k)
            END DO
          END DO
        END DO
      END IF
      CALL update_coarse_grid(ps0, ps, neststruct%nest_domain, &
&         neststruct%ind_update_h, gridstruct%dx, gridstruct%dy, &
&         gridstruct%area, isd_p, ied_p, jsd_p, jed_p, isd, ied, jsd, &
&         jed, neststruct%isu, neststruct%ieu, neststruct%jsu, &
&         neststruct%jeu, npx, npy, 0, 0, neststruct%refinement, &
&         neststruct%nestupdate, upoff, 0, neststruct%parent_proc, &
&         neststruct%child_proc, parent_grid)
!!! The mpp version of update_coarse_grid does not return a consistent value of ps
!!! across PEs, as it does not go into the haloes of a given coarse-grid PE. This
!!! update_domains call takes care of the problem.
      IF (neststruct%parent_proc) THEN
        CALL mpp_update_domains(parent_grid%ps, parent_grid%domain, &
&           complete=.true.)
        CALL mpp_update_domains(ps0, parent_grid%domain, complete=.true.)
      END IF
!self
      CALL mpp_sync()
      IF (parent_grid%tile .EQ. neststruct%parent_tile) THEN
!neststruct%parent_proc
        IF (neststruct%parent_proc) THEN
!comment out the if statement to always remap theta instead of t in the remap-update.
!(In LtE typically we use remap_t = .true.: remapping t is better (except in
!idealized simulations with a background uniform theta) since near the top
!boundary theta is exponential, which is hard to accurately interpolate with a spline.)
          IF (parent_grid%flagstruct%remap_option .NE. 0) THEN
!$OMP parallel do default(none) shared(npz,jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum)
            DO k=1,npz
              DO j=jsc_p,jec_p
                DO i=isc_p,iec_p
                  parent_grid%pt(i, j, k) = parent_grid%pt(i, j, k)/&
&                     parent_grid%pkz(i, j, k)*(1.+zvir*&
&                     parent_grid%q(i, j, k, sphum))
                END DO
              END DO
            END DO
          END IF
          CALL update_remap_tqw(npz, parent_grid%ak, parent_grid%bk, &
&             parent_grid%ps, parent_grid%delp, parent_grid%pt, &
&             parent_grid%q, parent_grid%w, &
&             parent_grid%flagstruct%hydrostatic, npz, ps0, zvir, &
&             parent_grid%ptop, ncnst, parent_grid%flagstruct%kord_tm, &
&             parent_grid%flagstruct%kord_tr, &
&             parent_grid%flagstruct%kord_wz, isc_p, iec_p, jsc_p, &
&             jec_p, isd_p, ied_p, jsd_p, jed_p, .false.)
!neststruct%nestupdate < 7)
          IF (parent_grid%flagstruct%remap_option .NE. 0) THEN
!$OMP parallel do default(none) shared(npz,jsc_p,jec_p,isc_p,iec_p,parent_grid,zvir,sphum)
            DO k=1,npz
              DO j=jsc_p,jec_p
                DO i=isc_p,iec_p
                  parent_grid%pt(i, j, k) = parent_grid%pt(i, j, k)*&
&                     parent_grid%pkz(i, j, k)/(1.+zvir*&
&                     parent_grid%q(i, j, k, sphum))
                END DO
              END DO
            END DO
          END IF
          CALL update_remap_uv(npz, parent_grid%ak, parent_grid%bk, &
&             parent_grid%ps, parent_grid%u, parent_grid%v, npz, ps0, &
&             parent_grid%flagstruct%kord_mt, isc_p, iec_p, jsc_p, &
&             jec_p, isd_p, ied_p, jsd_p, jed_p, parent_grid%ptop)
        END IF
      END IF
      IF (ALLOCATED(ps0)) THEN
        DEALLOCATE(ps0)
      END IF
    END IF
  END IF
 END SUBROUTINE twoway_nest_update
1479  SUBROUTINE level_sum(q, area, domain, bd, npz, l_sum)
1480  IMPLICIT NONE
1481 ! L_sum(k) = mpp_global_sum(domain, qA, flags=BITWISE_EXACT_SUM)
1482 ! L_sum(k) = mpp_global_sum(domain, qA, flags=BITWISE_EFP_SUM) ! doesn't work??
1483  INTEGER, INTENT(IN) :: npz
1484  TYPE(FV_GRID_BOUNDS_TYPE), INTENT(IN) :: bd
1485  REAL, INTENT(IN) :: area(bd%isd:bd%ied, bd%jsd:bd%jed)
1486  REAL, INTENT(IN) :: q(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1487  REAL, INTENT(OUT) :: l_sum(npz)
1488  TYPE(DOMAIN2D), INTENT(IN) :: domain
1489  INTEGER :: i, j, k, n
1490 !(bd%is:bd%ie, bd%js:bd%je)
1491  REAL :: qa
1492  DO k=1,npz
1493  qa = 0.
1494  DO j=bd%js,bd%je
1495  DO i=bd%is,bd%ie
1496 !qA(i,j) = q(i,j,k)*area(i,j)
1497  qa = qa + q(i, j, k)*area(i, j)
1498  END DO
1499  END DO
1500  CALL mp_reduce_sum(qa)
1501  l_sum(k) = qa
1502  END DO
1503  END SUBROUTINE level_sum
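!A minimal usage sketch for level_sum (hypothetical caller; the Atm
!derived-type fields are assumptions, not part of this module):
! real :: mass(npz)
! call level_sum(Atm%delp, Atm%gridstruct%area, Atm%domain, bd, npz, mass)
! !mass(k) then holds the global area-weighted integral of delp on level k.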
1504  SUBROUTINE after_twoway_nest_update(npx, npy, npz, ng, ncnst, u, v, w&
1505 & , delz, pt, delp, q, ps, pe, pk, peln, pkz, phis, ua, va, ptop, &
1506 & gridstruct, flagstruct, domain, bd)
1507  IMPLICIT NONE
1508  TYPE(FV_GRID_BOUNDS_TYPE), INTENT(IN) :: bd
1509  REAL, INTENT(IN) :: ptop
1510  INTEGER, INTENT(IN) :: ng, npx, npy, npz
1511  INTEGER, INTENT(IN) :: ncnst
1512 ! D grid zonal wind (m/s)
1513  REAL, DIMENSION(bd%isd:bd%ied, bd%jsd:bd%jed+1, npz), INTENT(INOUT) &
1514 & :: u
1515 ! D grid meridional wind (m/s)
1516  REAL, DIMENSION(bd%isd:bd%ied+1, bd%jsd:bd%jed, npz), INTENT(INOUT) &
1517 & :: v
1518 ! W (m/s)
1519  REAL, INTENT(INOUT) :: w(bd%isd:, bd%jsd:, :)
1520 ! temperature (K)
1521  REAL, INTENT(INOUT) :: pt(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1522 ! pressure thickness (pascal)
1523  REAL, INTENT(INOUT) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1524 ! specific humidity and constituents
1525  REAL, INTENT(INOUT) :: q(bd%isd:bd%ied, bd%jsd:bd%jed, npz, ncnst)
1526 ! delta-height (m); non-hydrostatic only
1527  REAL, INTENT(INOUT) :: delz(bd%isd:, bd%jsd:, :)
1528 !-----------------------------------------------------------------------
1529 ! Auxiliary pressure arrays:
1530 ! The 5 vars below can be re-computed from delp and ptop.
1531 !-----------------------------------------------------------------------
1532 ! dyn_aux:
1533 ! Surface pressure (pascal)
1534  REAL, INTENT(INOUT) :: ps(bd%isd:bd%ied, bd%jsd:bd%jed)
1535 ! edge pressure (pascal)
1536  REAL, INTENT(INOUT) :: pe(bd%is-1:bd%ie+1, npz+1, bd%js-1:bd%je+1)
1537 ! pe**cappa
1538  REAL, INTENT(INOUT) :: pk(bd%is:bd%ie, bd%js:bd%je, npz+1)
1539 ! ln(pe)
1540  REAL, INTENT(INOUT) :: peln(bd%is:bd%ie, npz+1, bd%js:bd%je)
1541 ! finite-volume mean pk
1542  REAL, INTENT(INOUT) :: pkz(bd%is:bd%ie, bd%js:bd%je, npz)
1543 !-----------------------------------------------------------------------
1544 ! Others:
1545 !-----------------------------------------------------------------------
1546 ! Surface geopotential (g*Z_surf)
1547  REAL, INTENT(INOUT) :: phis(bd%isd:bd%ied, bd%jsd:bd%jed)
1548  REAL, DIMENSION(bd%isd:bd%ied, bd%jsd:bd%jed, npz), INTENT(INOUT) ::&
1549 & ua, va
1550  TYPE(FV_GRID_TYPE), INTENT(IN) :: gridstruct
1551  TYPE(FV_FLAGS_TYPE), INTENT(IN) :: flagstruct
1552  TYPE(DOMAIN2D), INTENT(INOUT) :: domain
1553  LOGICAL :: bad_range
1554  INTEGER :: is, ie, js, je
1555  INTEGER :: isd, ied, jsd, jed
1556  is = bd%is
1557  ie = bd%ie
1558  js = bd%js
1559  je = bd%je
1560  isd = bd%isd
1561  ied = bd%ied
1562  jsd = bd%jsd
1563  jed = bd%jed
1564  CALL cubed_to_latlon(u, v, ua, va, gridstruct, npx, npy, npz, 1, &
1565 & gridstruct%grid_type, domain, gridstruct%nested, &
1566 & flagstruct%c2l_ord, bd)
1567 !To get the coarse-grid pkz, etc. right after a two-way update so
1568 !that they are consistent across a restart:
1569 !(should only be called after doing such an update)
1570 !! CLEANUP: move to twoway_nest_update??
1571 !mountain argument not used
1572  CALL p_var(npz, is, ie, js, je, ptop, ptop_min, delp, delz, pt, ps, &
1573 & pe, peln, pk, pkz, kappa, q, ng, flagstruct%ncnst, gridstruct%&
1574 & area_64, 0., .false., .false., flagstruct%moist_phys, &
1575 & flagstruct%hydrostatic, flagstruct%nwat, domain, .false.)
1576  IF (flagstruct%range_warn) THEN
1577  CALL range_check('TA update', pt, is, ie, js, je, ng, npz, &
1578 & gridstruct%agrid, 130., 350., bad_range)
1579  CALL range_check('UA update', ua, is, ie, js, je, ng, npz, &
1580 & gridstruct%agrid, -220., 250., bad_range)
1581  CALL range_check('VA update', va, is, ie, js, je, ng, npz, &
1582 & gridstruct%agrid, -220., 220., bad_range)
1583  IF (.NOT.flagstruct%hydrostatic) CALL range_check('W update', w, &
1584 & is, ie, js, je, ng, &
1585 & npz, gridstruct%agrid&
1586 & , -50., 100., &
1587 & bad_range)
1588  END IF
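!The range_check calls above only diagnose values outside the stated
!physical bounds (e.g. temperatures outside 130-350 K), reporting via
!bad_range; they do not modify the fields.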
1589  END SUBROUTINE after_twoway_nest_update
1590 !Routines for remapping (interpolated) nested-grid data to the coarse grid's vertical coordinate.
1591 !This does not yet do anything for the tracers.
1592  SUBROUTINE update_remap_tqw(npz, ak, bk, ps, delp, t, q, w, &
1593 & hydrostatic, kmd, ps0, zvir, ptop, nq, kord_tm, kord_tr, kord_wz, is&
1594 & , ie, js, je, isd, ied, jsd, jed, do_q)
1595  IMPLICIT NONE
1596  INTEGER, INTENT(IN) :: npz, kmd, nq, kord_tm, kord_tr, kord_wz
1597  REAL, INTENT(IN) :: zvir, ptop
1598  REAL, INTENT(IN) :: ak(npz+1), bk(npz+1)
1599  INTEGER, INTENT(IN) :: is, ie, js, je, isd, ied, jsd, jed
1600  REAL, DIMENSION(isd:ied, jsd:jed), INTENT(IN) :: ps0
1601  REAL, DIMENSION(isd:ied, jsd:jed), INTENT(IN) :: ps
1602  REAL, DIMENSION(isd:ied, jsd:jed, npz), INTENT(IN) :: delp
1603  REAL, DIMENSION(isd:ied, jsd:jed, npz), INTENT(INOUT) :: t, w
1604  REAL, DIMENSION(isd:ied, jsd:jed, npz, nq), INTENT(INOUT) :: q
1605  LOGICAL, INTENT(IN) :: hydrostatic, do_q
1606 ! local:
1607  REAL, DIMENSION(is:ie, kmd) :: tp, qp
1608  REAL, DIMENSION(is:ie, kmd+1) :: pe0, pn0
1609  REAL, DIMENSION(is:ie, npz) :: qn1
1610  REAL, DIMENSION(is:ie, npz+1) :: pe1, pn1
1611  INTEGER :: i, j, k, iq
1612  INTRINSIC log
1613  INTRINSIC abs
1614  INTEGER :: abs0
1615 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps0,q,npz,ptop,do_q,&
1616 !$OMP t,w,ps,nq,hydrostatic,kord_tm,kord_tr,kord_wz) &
1617 !$OMP private(pe0,pn0,pe1,pn1,qp,tp,qn1,abs0)
1618  DO j=js,je
1619  DO k=1,kmd+1
1620  DO i=is,ie
1621  pe0(i, k) = ak(k) + bk(k)*ps0(i, j)
1622  pn0(i, k) = log(pe0(i, k))
1623  END DO
1624  END DO
1625  DO k=1,npz+1
1626  DO i=is,ie
1627  pe1(i, k) = ak(k) + bk(k)*ps(i, j)
1628  pn1(i, k) = log(pe1(i, k))
1629  END DO
1630  END DO
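!pe0/pn0 are the edge pressures of the coordinate the data currently
!lives on (built from ps0) and pe1/pn1 those of the target coordinate
!(built from ps); both follow the hybrid rule pe(k) = ak(k) + bk(k)*ps.
!Temperature is remapped in log-pressure (pn) below, w in pressure (pe).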
1631  IF (do_q) THEN
1632  DO iq=1,nq
1633  DO k=1,kmd
1634  DO i=is,ie
1635  qp(i, k) = q(i, j, k, iq)
1636  END DO
1637  END DO
1638  CALL mappm(kmd, pe0, qp, npz, pe1, qn1, is, ie, 0, kord_tr, &
1639 & ptop)
1640  DO k=1,npz
1641  DO i=is,ie
1642  q(i, j, k, iq) = qn1(i, k)
1643  END DO
1644  END DO
1645  END DO
1646  END IF
1647  DO k=1,kmd
1648  DO i=is,ie
1649  tp(i, k) = t(i, j, k)
1650  END DO
1651  END DO
1652  IF (kord_tm .GE. 0) THEN
1653  abs0 = kord_tm
1654  ELSE
1655  abs0 = -kord_tm
1656  END IF
1657 !Remap T using logp
1658  CALL mappm(kmd, pn0, tp, npz, pn1, qn1, is, ie, 1, abs0, ptop)
1659  DO k=1,npz
1660  DO i=is,ie
1661  t(i, j, k) = qn1(i, k)
1662  END DO
1663  END DO
1664  IF (.NOT.hydrostatic) THEN
1665  DO k=1,kmd
1666  DO i=is,ie
1667  tp(i, k) = w(i, j, k)
1668  END DO
1669  END DO
1670 !Remap w using p
1671 !Using iv == -1 instead of -2
1672  CALL mappm(kmd, pe0, tp, npz, pe1, qn1, is, ie, -1, kord_wz, &
1673 & ptop)
1674  DO k=1,npz
1675  DO i=is,ie
1676  w(i, j, k) = qn1(i, k)
1677  END DO
1678  END DO
1679  END IF
1680  END DO
1681  END SUBROUTINE update_remap_tqw
1682 !remap_uv as-is remaps only A-grid velocities. The routine below was written to handle the staggered D-grid winds.
1683  SUBROUTINE update_remap_uv(npz, ak, bk, ps, u, v, kmd, ps0, kord_mt, &
1684 & is, ie, js, je, isd, ied, jsd, jed, ptop)
1685  IMPLICIT NONE
1686  INTEGER, INTENT(IN) :: npz
1687  REAL, INTENT(IN) :: ak(npz+1), bk(npz+1)
1688  INTEGER, INTENT(IN) :: is, ie, js, je, isd, ied, jsd, jed
1689  REAL, INTENT(IN) :: ps(isd:ied, jsd:jed)
1690  REAL, DIMENSION(isd:ied, jsd:jed+1, npz), INTENT(INOUT) :: u
1691  REAL, DIMENSION(isd:ied+1, jsd:jed, npz), INTENT(INOUT) :: v
1692 !
1693  INTEGER, INTENT(IN) :: kmd, kord_mt
1694  REAL, INTENT(IN) :: ptop
1695  REAL, INTENT(IN) :: ps0(isd:ied, jsd:jed)
1696 !
1697 ! local:
1698  REAL, DIMENSION(is:ie+1, kmd+1) :: pe0
1699  REAL, DIMENSION(is:ie+1, npz+1) :: pe1
1700  REAL, DIMENSION(is:ie+1, kmd) :: qt
1701  REAL, DIMENSION(is:ie+1, npz) :: qn1
1702  INTEGER :: i, j, k
1703 !------
1704 ! map u
1705 !------
1706 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps,ps0,npz,u,ptop,kord_mt) &
1707 !$OMP private(pe0,pe1,qt,qn1)
1708  DO j=js,je+1
1709 !------
1710 ! Data
1711 !------
1712  DO k=1,kmd+1
1713  DO i=is,ie
1714  pe0(i, k) = ak(k) + bk(k)*0.5*(ps0(i, j)+ps0(i, j-1))
1715  END DO
1716  END DO
1717 !------
1718 ! Model
1719 !------
1720  DO k=1,npz+1
1721  DO i=is,ie
1722  pe1(i, k) = ak(k) + bk(k)*0.5*(ps(i, j)+ps(i, j-1))
1723  END DO
1724  END DO
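!For the j-staggered u points the surface pressures are averaged in j,
!0.5*(ps(i,j)+ps(i,j-1)), to place the edge pressures on the u staggering;
!the v map below averages in i instead.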
1725 !------
1726 !Do map
1727 !------
1728  qt = 0.
1729  DO k=1,kmd
1730  DO i=is,ie
1731  qt(i, k) = u(i, j, k)
1732  END DO
1733  END DO
1734  qn1 = 0.
1735  CALL mappm(kmd, pe0(is:ie, :), qt(is:ie, :), npz, pe1(is:ie, :), &
1736 & qn1(is:ie, :), is, ie, -1, kord_mt, ptop)
1737  DO k=1,npz
1738  DO i=is,ie
1739  u(i, j, k) = qn1(i, k)
1740  END DO
1741  END DO
1742  END DO
1743 !------
1744 ! map v
1745 !------
1746 !$OMP parallel do default(none) shared(js,je,kmd,is,ie,ak,bk,ps,ps0,npz,v,ptop,kord_mt) &
1747 !$OMP private(pe0,pe1,qt,qn1)
1748  DO j=js,je
1749 !------
1750 ! Data
1751 !------
1752  DO k=1,kmd+1
1753  DO i=is,ie+1
1754  pe0(i, k) = ak(k) + bk(k)*0.5*(ps0(i, j)+ps0(i-1, j))
1755  END DO
1756  END DO
1757 !------
1758 ! Model
1759 !------
1760  DO k=1,npz+1
1761  DO i=is,ie+1
1762  pe1(i, k) = ak(k) + bk(k)*0.5*(ps(i, j)+ps(i-1, j))
1763  END DO
1764  END DO
1765 !------
1766 !Do map
1767 !------
1768  qt = 0.
1769  DO k=1,kmd
1770  DO i=is,ie+1
1771  qt(i, k) = v(i, j, k)
1772  END DO
1773  END DO
1774  qn1 = 0.
1775  CALL mappm(kmd, pe0(is:ie+1, :), qt(is:ie+1, :), npz, pe1(is:ie+1&
1776 & , :), qn1(is:ie+1, :), is, ie + 1, -1, kord_mt, ptop)
1777  DO k=1,npz
1778  DO i=is,ie+1
1779  v(i, j, k) = qn1(i, k)
1780  END DO
1781  END DO
1782  END DO
1783  END SUBROUTINE update_remap_uv
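!A self-contained sketch (hypothetical ak/bk/ps values, not from this
!model) of the hybrid-coordinate edge-pressure rule both remap routines
!rely on:
! real :: ak(3) = (/ 0., 5000., 0. /), bk(3) = (/ 0., 0.5, 1. /)
! real :: ps = 1.e5, pe(3)
! integer :: k
! do k=1,3
!   pe(k) = ak(k) + bk(k)*ps   !pe = 0 Pa, 55000 Pa, 100000 Pa
! end do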
1784 
1785 end module fv_nesting_tlm_mod