FV3 Bundle
dyn_core_nlm.F90
1 !***********************************************************************
2 !* GNU General Public License *
3 !* This file is a part of fvGFS. *
4 !* *
5 !* fvGFS is free software; you can redistribute it and/or modify it *
6 !* and are expected to follow the terms of the GNU General Public *
7 !* License as published by the Free Software Foundation; either *
8 !* version 2 of the License, or (at your option) any later version. *
9 !* *
10 !* fvGFS is distributed in the hope that it will be useful, but *
11 !* WITHOUT ANY WARRANTY; without even the implied warranty of *
12 !* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
13 !* General Public License for more details. *
14 !* *
15 !* For the full text of the GNU General Public License, *
16 !* write to: Free Software Foundation, Inc., *
17 !* 675 Mass Ave, Cambridge, MA 02139, USA. *
18 !* or see: http://www.gnu.org/licenses/gpl.html *
19 !***********************************************************************
21 
22  use platform_mod, only: r8_kind
23  use constants_mod, only: rdgas, radius, cp_air, pi
24  use mpp_mod, only: mpp_pe
25  use mpp_domains_mod, only: cgrid_ne, dgrid_ne, mpp_get_boundary, mpp_update_domains, &
26  domain2d
27  use mpp_parameter_mod, only: corner
28  use fv_mp_nlm_mod, only: is_master
29  use fv_mp_nlm_mod, only: start_group_halo_update, complete_group_halo_update
30  use fv_mp_nlm_mod, only: group_halo_update_type
31  use sw_core_nlm_mod, only: c_sw, d_sw
33  use nh_core_nlm_mod, only: riem_solver3, riem_solver_c, update_dz_c, update_dz_d, nest_halo_nh
34  use tp_core_nlm_mod, only: copy_corners
37 #ifdef ROT3
39 #endif
40 #if defined (ADA_NUDGE)
41  use fv_ada_nudge_mod, only: breed_slp_inline_ada
42 #else
44 #endif
45  use diag_manager_mod, only: send_data
48 
50 
51 #ifdef SW_DYNAMICS
53 #endif
54 
55 implicit none
56 private
57 
59 
60  real :: ptk, peln1, rgrav
61  real :: d3_damp
62  real, allocatable, dimension(:,:,:) :: ut, vt, crx, cry, xfx, yfx, divgd, &
63  zh, du, dv, pkc, delpc, pk3, ptc, gz
64 ! real, parameter:: delt_max = 1.e-1 ! Max dissipative heating/cooling rate
65  ! 6 deg per 10-min
66  real(kind=R_GRID), parameter :: cnst_0p20=0.20d0
67 
68  real, allocatable :: rf(:)
69  logical:: rff_initialized = .false.
70  integer :: kmax=1
71 
72 contains
73 
74 !-----------------------------------------------------------------------
75 ! dyn_core :: FV Lagrangian dynamics driver
76 !-----------------------------------------------------------------------
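! Overview: dyn_core advances the state through one large (remapping) time step
! bdt using n_split acoustic sub-steps. Each sub-step performs a C-grid
! half-step (c_sw), the D-grid forward step (d_sw), the semi-implicit vertical
! solver (riem_solver3) when non-hydrostatic, and the pressure-gradient update
! of the D-grid winds.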
77 
78  subroutine dyn_core(npx, npy, npz, ng, sphum, nq, bdt, n_split, zvir, cp, akap, cappa, grav, hydrostatic, &
79  u, v, w, delz, pt, q, delp, pe, pk, phis, ws, omga, ptop, pfull, ua, va, &
80  uc, vc, mfx, mfy, cx, cy, pkz, peln, q_con, ak, bk, dpx, &
81  ks, gridstruct, flagstruct, neststruct, idiag, bd, domain, &
82  init_step, i_pack, end_step, time_total)
83  integer, intent(IN) :: npx
84  integer, intent(IN) :: npy
85  integer, intent(IN) :: npz
86  integer, intent(IN) :: ng, nq, sphum
87  integer, intent(IN) :: n_split
88  real , intent(IN) :: bdt
89  real , intent(IN) :: zvir, cp, akap, grav
90  real , intent(IN) :: ptop
91  logical, intent(IN) :: hydrostatic
92  logical, intent(IN) :: init_step, end_step
93  real, intent(in) :: pfull(npz)
94  real, intent(in), dimension(npz+1) :: ak, bk
95  integer, intent(IN) :: ks
96  type(group_halo_update_type), intent(inout) :: i_pack(*)
97  type(fv_grid_bounds_type), intent(IN) :: bd
98  real, intent(inout), dimension(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz):: u ! D grid zonal wind (m/s)
99  real, intent(inout), dimension(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz):: v ! D grid meridional wind (m/s)
100  real, intent(inout) :: w( bd%isd:,bd%jsd:,1:) ! vertical vel. (m/s)
101  real, intent(inout) :: delz(bd%isd:,bd%jsd:,1:) ! delta-height (m, negative)
102  real, intent(inout) :: cappa(bd%isd:,bd%jsd:,1:) ! moist kappa
103  real, intent(inout) :: pt( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! temperature (K)
104  real, intent(inout) :: delp(bd%isd:bd%ied ,bd%jsd:bd%jed ,npz) ! pressure thickness (pascal)
105  real, intent(inout) :: q( bd%isd:bd%ied ,bd%jsd:bd%jed ,npz, nq) !
106  real, intent(in), optional:: time_total ! total time (seconds) since start
107 
108 !-----------------------------------------------------------------------
109 ! Auxiliary pressure arrays:
110 ! The 5 vars below can be re-computed from delp and ptop.
111 !-----------------------------------------------------------------------
112 ! dyn_aux:
113  real, intent(inout):: phis(bd%isd:bd%ied,bd%jsd:bd%jed) ! Surface geopotential (g*Z_surf)
114  real, intent(inout):: pe(bd%is-1:bd%ie+1, npz+1,bd%js-1:bd%je+1) ! edge pressure (pascal)
115  real, intent(inout):: peln(bd%is:bd%ie,npz+1,bd%js:bd%je) ! ln(pe)
116  real, intent(inout):: pk(bd%is:bd%ie,bd%js:bd%je, npz+1) ! pe**kappa
117  real(kind=8), intent(inout) :: dpx(bd%is:bd%ie,bd%js:bd%je)
118 
119 !-----------------------------------------------------------------------
120 ! Others:
121  real, parameter:: near0 = 1.e-8
122 #ifdef OVERLOAD_R4
123  real, parameter:: huge_r = 1.e8
124 #else
125  real, parameter:: huge_r = 1.e40
126 #endif
127 !-----------------------------------------------------------------------
128  real, intent(out ):: ws(bd%is:bd%ie,bd%js:bd%je) ! w at surface
129  real, intent(inout):: omga(bd%isd:bd%ied,bd%jsd:bd%jed,npz) ! Vertical pressure velocity (pa/s)
130  real, intent(inout):: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz) ! (uc, vc) are mostly used as the C grid winds
131  real, intent(inout):: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
132  real, intent(inout), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: ua, va
133  real, intent(inout):: q_con(bd%isd:, bd%jsd:, 1:)
134 
135 ! The Flux capacitors: accumulated Mass flux arrays
136  real, intent(inout):: mfx(bd%is:bd%ie+1, bd%js:bd%je, npz)
137  real, intent(inout):: mfy(bd%is:bd%ie , bd%js:bd%je+1, npz)
138 ! Accumulated Courant number arrays
139  real, intent(inout):: cx(bd%is:bd%ie+1, bd%jsd:bd%jed, npz)
140  real, intent(inout):: cy(bd%isd:bd%ied ,bd%js:bd%je+1, npz)
141  real, intent(inout),dimension(bd%is:bd%ie,bd%js:bd%je,npz):: pkz
142 
143  type(fv_grid_type), intent(INOUT), target :: gridstruct
144  type(fv_flags_type), intent(IN), target :: flagstruct
145  type(fv_nest_type), intent(INOUT) :: neststruct
146  type(fv_diag_type), intent(IN) :: idiag
147  type(domain2d), intent(INOUT) :: domain
148 
149  real, allocatable, dimension(:,:,:):: pem, heat_source
150 ! Auto 1D & 2D arrays:
151  real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: ws3, z_rat
152  real:: dp_ref(npz)
153  real:: zs(bd%isd:bd%ied,bd%jsd:bd%jed) ! surface height (m)
154  real:: om2d(bd%is:bd%ie,npz)
155  real wbuffer(npy+2,npz)
156  real ebuffer(npy+2,npz)
157  real nbuffer(npx+2,npz)
158  real sbuffer(npx+2,npz)
159 ! ---- For external mode:
160  real divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
161  real wk(bd%isd:bd%ied,bd%jsd:bd%jed)
162  real fz(bd%is: bd%ie+1,bd%js: bd%je+1)
163  real heat_s(bd%is:bd%ie,bd%js:bd%je)
164  real damp_vt(npz+1)
165  integer nord_v(npz+1)
166 !-------------------------------------
167  integer :: hord_m, hord_v, hord_t, hord_p
168  integer :: nord_k, nord_w, nord_t
169  integer :: ms
170 !---------------------------------------
171  integer :: i,j,k, it, iq, n_con, nf_ke
172  integer :: iep1, jep1
173  real :: beta, beta_d, d_con_k, damp_w, damp_t, kgb, cv_air
174  real :: dt, dt2, rdt
175  real :: d2_divg
176  real :: k1k, rdg, dtmp, delt
177  logical :: last_step, remap_step
178  logical used
179  real :: split_timestep_bc
180 
181  integer :: is, ie, js, je
182  integer :: isd, ied, jsd, jed
183 
184  is = bd%is
185  ie = bd%ie
186  js = bd%js
187  je = bd%je
188  isd = bd%isd
189  ied = bd%ied
190  jsd = bd%jsd
191  jed = bd%jed
192 
193 #ifdef SW_DYNAMICS
194  peln1 = 0.
195 #else
196  peln1 = log(ptop)
197 #endif
198  ptk = ptop ** akap
199  dt = bdt / real(n_split)
200  dt2 = 0.5*dt
201  rdt = 1.0/dt
202  ms = max(1, flagstruct%m_split/2)
203  beta = flagstruct%beta
204  rdg = -rdgas / grav
205  cv_air = cp_air - rdgas
206 
207 ! Indexes:
208  iep1 = ie + 1
209  jep1 = je + 1
210 
211  if ( .not.hydrostatic ) then
212 
213  rgrav = 1.0/grav
214  k1k = akap / (1.-akap) ! rg/Cv=0.4
215 
216 !$OMP parallel do default(none) shared(npz,dp_ref,ak,bk)
217  do k=1,npz
218  dp_ref(k) = (ak(k+1)-ak(k)) + (bk(k+1)-bk(k))*1.e5
219  enddo
220 
221 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,zs,phis,rgrav)
222  do j=jsd,jed
223  do i=isd,ied
224  zs(i,j) = phis(i,j) * rgrav
225  enddo
226  enddo
227  endif
228 
229  if ( init_step ) then ! Start of the big dynamic time stepping
230 
231  allocate( gz(isd:ied, jsd:jed ,npz+1) )
232  call init_ijk_mem(isd,ied, jsd,jed, npz+1, gz, huge_r)
233  allocate( pkc(isd:ied, jsd:jed ,npz+1) )
234  allocate( ptc(isd:ied, jsd:jed ,npz ) )
235  allocate( crx(is :ie+1, jsd:jed, npz) )
236  allocate( xfx(is :ie+1, jsd:jed, npz) )
237  allocate( cry(isd:ied, js :je+1, npz) )
238  allocate( yfx(isd:ied, js :je+1, npz) )
239  allocate( divgd(isd:ied+1,jsd:jed+1,npz) )
240  allocate( delpc(isd:ied, jsd:jed ,npz ) )
241 ! call init_ijk_mem(isd,ied, jsd,jed, npz, delpc, 0.)
242  allocate( ut(isd:ied, jsd:jed, npz) )
243 ! call init_ijk_mem(isd,ied, jsd,jed, npz, ut, 0.)
244  allocate( vt(isd:ied, jsd:jed, npz) )
245 ! call init_ijk_mem(isd,ied, jsd,jed, npz, vt, 0.)
246 
247  if ( .not. hydrostatic ) then
248  allocate( zh(isd:ied, jsd:jed, npz+1) )
249 ! call init_ijk_mem(isd,ied, jsd,jed, npz+1, zh, huge_r )
250  allocate ( pk3(isd:ied,jsd:jed,npz+1) )
251  call init_ijk_mem(isd,ied, jsd,jed, npz+1, pk3, huge_r )
252  endif
253  if ( beta > near0 ) then
254  allocate( du(isd:ied, jsd:jed+1,npz) )
255  call init_ijk_mem(isd,ied, jsd,jed+1, npz, du, 0.)
256  allocate( dv(isd:ied+1,jsd:jed, npz) )
257  call init_ijk_mem(isd,ied+1, jsd,jed , npz, dv, 0.)
258  endif
259  endif ! end init_step
260 
261 ! Empty the "flux capacitors"
262  call init_ijk_mem(is, ie+1, js, je, npz, mfx, 0.)
263  call init_ijk_mem(is, ie , js, je+1, npz, mfy, 0.)
264  call init_ijk_mem(is, ie+1, jsd, jed, npz, cx, 0.)
265  call init_ijk_mem(isd, ied, js, je+1, npz, cy, 0.)
266 
267  if ( flagstruct%d_con > 1.0e-5 ) then
268  allocate( heat_source(isd:ied, jsd:jed, npz) )
269  call init_ijk_mem(isd, ied, jsd, jed, npz, heat_source, 0.)
270  endif
271 
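! n_con below is the number of layers, counted from the model top, in which the
! dissipative-heating correction is applied after the acoustic loop: all npz
! layers when KE conversion or vorticity damping is active, otherwise only the
! sponge layers selected via d2_bg_k1/d2_bg_k2.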
272  if ( flagstruct%convert_ke .or. flagstruct%vtdm4> 1.e-4 ) then
273  n_con = npz
274  else
275  if ( flagstruct%d2_bg_k1 < 1.e-3 ) then
276  n_con = 0
277  else
278  if ( flagstruct%d2_bg_k2 < 1.e-3 ) then
279  n_con = 1
280  else
281  n_con = 2
282  endif
283  endif
284  endif
285 
286 
287 
288 !-----------------------------------------------------
289  do it=1,n_split
290 !-----------------------------------------------------
291 #ifdef ROT3
292  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
293 #endif
294  if ( flagstruct%breed_vortex_inline .or. it==n_split ) then
295  remap_step = .true.
296  else
297  remap_step = .false.
298  endif
299 
300  if ( flagstruct%fv_debug ) then
301  if(is_master()) write(*,*) 'n_split loop, it=', it
302  if ( .not. flagstruct%hydrostatic ) &
303  call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
304  call prt_mxm('PT', pt, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
305  endif
306 
307  if (gridstruct%nested) then
308  !First split timestep has split_timestep_BC = n_split*k_split
309  ! to do time-extrapolation on BCs.
310  split_timestep_bc = real(n_split*flagstruct%k_split+neststruct%nest_timestep)
311  endif
312 
313  if ( nq > 0 ) then
314  call timing_on('COMM_TOTAL')
315  call timing_on('COMM_TRACER')
316  if ( flagstruct%inline_q ) then
317  call start_group_halo_update(i_pack(10), q, domain)
318  endif
319  call timing_off('COMM_TRACER')
320  call timing_off('COMM_TOTAL')
321  endif
322 
323  if ( .not. hydrostatic ) then
324  call timing_on('COMM_TOTAL')
325  call start_group_halo_update(i_pack(7), w, domain)
326  call timing_off('COMM_TOTAL')
327 
328  if ( it==1 ) then
329  if (gridstruct%nested) then
330 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,gz,zs,delz)
331  do j=jsd,jed
332  do i=isd,ied
333  gz(i,j,npz+1) = zs(i,j)
334  enddo
335  do k=npz,1,-1
336  do i=isd,ied
337  gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)
338  enddo
339  enddo
340  enddo
341  else
342 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,zs,delz)
343  do j=js,je
344  do i=is,ie
345  gz(i,j,npz+1) = zs(i,j)
346  enddo
347  do k=npz,1,-1
348  do i=is,ie
349  gz(i,j,k) = gz(i,j,k+1) - delz(i,j,k)
350  enddo
351  enddo
352  enddo
353  endif
354  call timing_on('COMM_TOTAL')
355  call start_group_halo_update(i_pack(5), gz, domain)
356  call timing_off('COMM_TOTAL')
357  endif
358 
359  endif
360 
361 
362 #ifdef SW_DYNAMICS
363  if (test_case>1) then
364 #ifdef USE_OLD
365  if (test_case==9) call case9_forcing1(phis, time_total)
366 #endif
367 #endif
368 
369  if ( it==1 ) then
370  call timing_on('COMM_TOTAL')
371  call complete_group_halo_update(i_pack(1), domain)
372  call timing_off('COMM_TOTAL')
373  beta_d = 0.
374  else
375  beta_d = beta
376  endif
377 
378  if ( it==n_split .and. end_step ) then
379  if ( flagstruct%use_old_omega ) then
380  allocate ( pem(is-1:ie+1,npz+1,js-1:je+1) )
381 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pem,delp,ptop)
382  do j=js-1,je+1
383  do i=is-1,ie+1
384  pem(i,1,j) = ptop
385  enddo
386  do k=1,npz
387  do i=is-1,ie+1
388  pem(i,k+1,j) = pem(i,k,j) + delp(i,j,k)
389  enddo
390  enddo
391  enddo
392  endif
393  last_step = .true.
394  else
395  last_step = .false.
396  endif
397 
398  call timing_on('COMM_TOTAL')
399  call complete_group_halo_update(i_pack(8), domain)
400  if( .not. hydrostatic ) &
401  call complete_group_halo_update(i_pack(7), domain)
402  call timing_off('COMM_TOTAL')
403 
404  call timing_on('c_sw')
405 !$OMP parallel do default(none) shared(npz,isd,jsd,delpc,delp,ptc,pt,u,v,w,uc,vc,ua,va, &
406 !$OMP omga,ut,vt,divgd,flagstruct,dt2,hydrostatic,bd, &
407 !$OMP gridstruct)
408  do k=1,npz
409  call c_sw(delpc(isd,jsd,k), delp(isd,jsd,k), ptc(isd,jsd,k), &
410  pt(isd,jsd,k), u(isd,jsd,k), v(isd,jsd,k), &
411  w(isd:,jsd:,k), uc(isd,jsd,k), vc(isd,jsd,k), &
412  ua(isd,jsd,k), va(isd,jsd,k), omga(isd,jsd,k), &
413  ut(isd,jsd,k), vt(isd,jsd,k), divgd(isd,jsd,k), &
414  flagstruct%nord, dt2, hydrostatic, .true., bd, &
415  gridstruct, flagstruct)
416  enddo
417  call timing_off('c_sw')
418  if ( flagstruct%nord > 0 ) then
419  call timing_on('COMM_TOTAL')
420  call start_group_halo_update(i_pack(3), divgd, domain, position=corner)
421  call timing_off('COMM_TOTAL')
422  endif
423 
424  if (gridstruct%nested) then
 425  call nested_grid_bc_apply_intt(delpc, &
 426  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
 427  neststruct%delp_bc, bctype=neststruct%nestbctype)
428 #ifndef SW_DYNAMICS
 429  call nested_grid_bc_apply_intt(ptc, &
 430  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
 431  neststruct%pt_bc, bctype=neststruct%nestbctype )
432 #endif
433  endif
434  if ( hydrostatic ) then
435  call geopk(ptop, pe, peln, delpc, pkc, gz, phis, ptc, q_con, pkz, npz, akap, .true., &
436  gridstruct%nested, .false., npx, npy, flagstruct%a2b_ord, bd)
437  else
438 #ifndef SW_DYNAMICS
439  if ( it == 1 ) then
440 
441  call timing_on('COMM_TOTAL')
442  call complete_group_halo_update(i_pack(5), domain)
443  call timing_off('COMM_TOTAL')
444 
445 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz)
446  do k=1,npz+1
447  do j=jsd,jed
448  do i=isd,ied
449 ! Save edge heights for update_dz_d
450  zh(i,j,k) = gz(i,j,k)
451  enddo
452  enddo
453  enddo
454 
455  else
456 !$OMP parallel do default(none) shared(isd,ied,jsd,jed,npz,zh,gz)
457  do k=1, npz+1
458  do j=jsd,jed
459  do i=isd,ied
460  gz(i,j,k) = zh(i,j,k)
461  enddo
462  enddo
463  enddo
464  endif
465  call timing_on('UPDATE_DZ_C')
466  call update_dz_c(is, ie, js, je, npz, ng, dt2, dp_ref, zs, gridstruct%area, ut, vt, gz, ws3, &
467  npx, npy, gridstruct%sw_corner, gridstruct%se_corner, &
468  gridstruct%ne_corner, gridstruct%nw_corner, bd, gridstruct%grid_type)
469  call timing_off('UPDATE_DZ_C')
470 
471  call timing_on('Riem_Solver')
472  call riem_solver_c( ms, dt2, is, ie, js, je, npz, ng, &
473  akap, cappa, cp, ptop, phis, omga, ptc, &
474  q_con, delpc, gz, pkc, ws3, flagstruct%p_fac, &
475  flagstruct%a_imp, flagstruct%scale_z )
476  call timing_off('Riem_Solver')
477 
478  if (gridstruct%nested) then
479  call nested_grid_bc_apply_intt(delz, &
480  0, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
481  neststruct%delz_bc, bctype=neststruct%nestbctype )
482 
483 
484  !Compute gz/pkc
485  !NOTE: nominally only need to compute quantities one out in the halo for p_grad_c
486  !(instead of entire halo)
487  call nest_halo_nh(ptop, grav, akap, cp, delpc, delz, ptc, phis, &
488 #ifdef USE_COND
489  q_con, &
490 #ifdef MOIST_CAPPA
491  cappa, &
492 #endif
493 #endif
494  pkc, gz, pk3, &
495  npx, npy, npz, gridstruct%nested, .false., .false., .false., bd)
496 
497  endif
498 
499 #endif SW_DYNAMICS
500 
501  endif ! end hydro check
502 
503  call p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, gridstruct%rdxc, gridstruct%rdyc, hydrostatic)
504 
505  call timing_on('COMM_TOTAL')
506  call start_group_halo_update(i_pack(9), uc, vc, domain, gridtype=cgrid_ne)
507  call timing_off('COMM_TOTAL')
508 #ifdef SW_DYNAMICS
509 #ifdef USE_OLD
510  if (test_case==9) call case9_forcing2(phis)
511 #endif
512  endif !test_case>1
513 #endif
514 
515  call timing_on('COMM_TOTAL')
516  if (flagstruct%inline_q .and. nq>0) call complete_group_halo_update(i_pack(10), domain)
517  if (flagstruct%nord > 0) call complete_group_halo_update(i_pack(3), domain)
518  call complete_group_halo_update(i_pack(9), domain)
519 
520  call timing_off('COMM_TOTAL')
521  if (gridstruct%nested) then
522  !On a nested grid we have to do SOMETHING with uc and vc in
523  ! the boundary halo, particularly at the corners of the
524  ! domain and of each processor element. We must either
525  ! apply an interpolated BC, or extrapolate into the
526  ! boundary halo
527  ! NOTE:
528  !The update_domains calls for uc and vc need to go BEFORE the BCs to ensure cross-restart
529  !bitwise-consistent solutions when doing the spatial extrapolation; should not make a
530  !difference for interpolated BCs from the coarse grid.
531 
532 
533  call nested_grid_bc_apply_intt(vc, &
534  0, 1, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
535  neststruct%vc_bc, bctype=neststruct%nestbctype )
536  call nested_grid_bc_apply_intt(uc, &
537  1, 0, npx, npy, npz, bd, split_timestep_bc+0.5, real(n_split*flagstruct%k_split), &
538  neststruct%uc_bc, bctype=neststruct%nestbctype )
539 
540  !QUESTION: What to do with divgd in nested halo?
 541  call nested_grid_bc_apply_intt(divgd, &
 542  1, 1, npx, npy, npz, bd, split_timestep_bc, real(n_split*flagstruct%k_split), &
 543  neststruct%divg_bc, bctype=neststruct%nestbctype )
544 !!$ if (is == 1 .and. js == 1) then
545 !!$ do j=jsd,5
546 !!$ write(mpp_pe()+2000,*) j, divg(isd:5,j,1)
547 !!$ endif
548 
549  end if
550 
551  if ( gridstruct%nested .and. flagstruct%inline_q ) then
552  do iq=1,nq
553  call nested_grid_bc_apply_intt(q(isd:ied,jsd:jed,:,iq), &
554  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
555  neststruct%q_bc(iq), bctype=neststruct%nestbctype )
556  end do
557  endif
558 
559  call timing_on('d_sw')
560 !$OMP parallel do default(none) shared(npz,flagstruct,nord_v,pfull,damp_vt,hydrostatic,last_step, &
561 !$OMP is,ie,js,je,isd,ied,jsd,jed,omga,delp,gridstruct,npx,npy, &
562 !$OMP ng,zh,vt,ptc,pt,u,v,w,uc,vc,ua,va,divgd,mfx,mfy,cx,cy, &
563 !$OMP crx,cry,xfx,yfx,q_con,zvir,sphum,nq,q,dt,bd,rdt,iep1,jep1, &
564 !$OMP heat_source) &
565 !$OMP private(nord_k, nord_w, nord_t, damp_w, damp_t, d2_divg, &
566 !$OMP d_con_k,kgb, hord_m, hord_v, hord_t, hord_p, wk, heat_s, z_rat)
567  do k=1,npz
568  hord_m = flagstruct%hord_mt
569  hord_t = flagstruct%hord_tm
570  hord_v = flagstruct%hord_vt
571  hord_p = flagstruct%hord_dp
572  nord_k = flagstruct%nord
573 
574 ! if ( k==npz ) then
575  kgb = flagstruct%ke_bg
576 ! else
577 ! kgb = 0.
578 ! endif
579 
580  nord_v(k) = min(2, flagstruct%nord)
581 ! d2_divg = min(0.20, flagstruct%d2_bg*(1.-3.*tanh(0.1*log(pfull(k)/pfull(npz)))))
582  d2_divg = min(0.20, flagstruct%d2_bg)
583 
584  if ( flagstruct%do_vort_damp ) then
585  damp_vt(k) = flagstruct%vtdm4 ! for delp, delz, and vorticity
586  else
587  damp_vt(k) = 0.
588  endif
589 
590  nord_w = nord_v(k)
591  nord_t = nord_v(k)
592  damp_w = damp_vt(k)
593  damp_t = damp_vt(k)
594  d_con_k = flagstruct%d_con
595 
596  if ( npz==1 .or. flagstruct%n_sponge<0 ) then
597  d2_divg = flagstruct%d2_bg
598  else
599 ! Sponge layers with del-2 damping on divergence, vorticity, w, z, and air mass (delp).
600 ! no special damping of potential temperature in sponge layers
601  if ( k==1 ) then
602 ! Divergence damping:
603  nord_k=0; d2_divg = max(0.01, flagstruct%d2_bg, flagstruct%d2_bg_k1)
604 ! Vertical velocity:
605  nord_w=0; damp_w = d2_divg
606  if ( flagstruct%do_vort_damp ) then
607 ! damping on delp and vorticity:
608  nord_v(k)=0;
609 #ifndef HIWPP
610  damp_vt(k) = 0.5*d2_divg
611 #endif
612  endif
613  d_con_k = 0.
614  elseif ( k==max(2,flagstruct%n_sponge-1) .and. flagstruct%d2_bg_k2>0.01 ) then
615  nord_k=0; d2_divg = max(flagstruct%d2_bg, flagstruct%d2_bg_k2)
616  nord_w=0; damp_w = d2_divg
617  if ( flagstruct%do_vort_damp ) then
618  nord_v(k)=0;
619 #ifndef HIWPP
620  damp_vt(k) = 0.5*d2_divg
621 #endif
622  endif
623  d_con_k = 0.
624  elseif ( k==max(3,flagstruct%n_sponge) .and. flagstruct%d2_bg_k2>0.05 ) then
625  nord_k=0; d2_divg = max(flagstruct%d2_bg, 0.2*flagstruct%d2_bg_k2)
626  nord_w=0; damp_w = d2_divg
627  d_con_k = 0.
628  endif
629  endif
630 
631  if( hydrostatic .and. (.not.flagstruct%use_old_omega) .and. last_step ) then
632 ! Save delp; the horizontal "convergence" is averaged to the cell center after d_sw
633  do j=js,je
634  do i=is,ie
635  omga(i,j,k) = delp(i,j,k)
636  enddo
637  enddo
638  endif
639 
640 !--- external mode divergence damping ---
641  if ( flagstruct%d_ext > 0. ) &
642  call a2b_ord2(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, &
643  ie, js, je, ng, .false.)
644 
645  if ( .not.hydrostatic .and. flagstruct%do_f3d ) then
646 ! Correction factor for 3D Coriolis force
647  do j=jsd,jed
648  do i=isd,ied
649  z_rat(i,j) = 1. + (zh(i,j,k)+zh(i,j,k+1))/radius
650  enddo
651  enddo
652  endif
653  call d_sw(vt(isd,jsd,k), delp(isd,jsd,k), ptc(isd,jsd,k), pt(isd,jsd,k), &
654  u(isd,jsd,k), v(isd,jsd,k), w(isd:,jsd:,k), uc(isd,jsd,k), &
655  vc(isd,jsd,k), ua(isd,jsd,k), va(isd,jsd,k), divgd(isd,jsd,k), &
656  mfx(is, js, k), mfy(is, js, k), cx(is, jsd,k), cy(isd,js, k), &
657  crx(is, jsd,k), cry(isd,js, k), xfx(is, jsd,k), yfx(isd,js, k), &
658 #ifdef USE_COND
659  q_con(isd:,jsd:,k), z_rat(isd,jsd), &
660 #else
661  q_con(isd:,jsd:,1), z_rat(isd,jsd), &
662 #endif
663  kgb, heat_s, dpx, zvir, sphum, nq, q, k, npz, flagstruct%inline_q, dt, &
664  flagstruct%hord_tr, hord_m, hord_v, hord_t, hord_p, &
665  nord_k, nord_v(k), nord_w, nord_t, flagstruct%dddmp, d2_divg, flagstruct%d4_bg, &
666  damp_vt(k), damp_w, damp_t, d_con_k, hydrostatic, gridstruct, flagstruct, bd)
667 
668  if( hydrostatic .and. (.not.flagstruct%use_old_omega) .and. last_step ) then
669 ! Average horizontal "convergence" to cell center
670  do j=js,je
671  do i=is,ie
672  omga(i,j,k) = omga(i,j,k)*(xfx(i,j,k)-xfx(i+1,j,k)+yfx(i,j,k)-yfx(i,j+1,k))*gridstruct%rarea(i,j)*rdt
673  enddo
674  enddo
675  endif
676 
677  if ( flagstruct%d_ext > 0. ) then
678  do j=js,jep1
679  do i=is,iep1
680  ptc(i,j,k) = wk(i,j) ! delp at cell corners
681  enddo
682  enddo
683  endif
684  if ( flagstruct%d_con > 1.0e-5 ) then
685 ! Accumulate the heat source from energy dissipation in d_sw
686  do j=js,je
687  do i=is,ie
688  heat_source(i,j,k) = heat_source(i,j,k) + heat_s(i,j)
689  enddo
690  enddo
691  endif
692  enddo ! end openMP k-loop
693 
694  call timing_off('d_sw')
695 
696  if( flagstruct%fill_dp ) call mix_dp(hydrostatic, w, delp, pt, npz, ak, bk, .false., flagstruct%fv_debug, bd)
697 
698  call timing_on('COMM_TOTAL')
699  call start_group_halo_update(i_pack(1), delp, domain, complete=.false.)
700  call start_group_halo_update(i_pack(1), pt, domain, complete=.true.)
701 #ifdef USE_COND
702  call start_group_halo_update(i_pack(11), q_con, domain)
703 #endif
704  call timing_off('COMM_TOTAL')
705 
706  if ( flagstruct%d_ext > 0. ) then
707  d2_divg = flagstruct%d_ext * gridstruct%da_min_c
708 !$OMP parallel do default(none) shared(is,iep1,js,jep1,npz,wk,ptc,divg2,vt,d2_divg)
709  do j=js,jep1
710  do i=is,iep1
711  wk(i,j) = ptc(i,j,1)
712  divg2(i,j) = wk(i,j)*vt(i,j,1)
713  enddo
714  do k=2,npz
715  do i=is,iep1
716  wk(i,j) = wk(i,j) + ptc(i,j,k)
717  divg2(i,j) = divg2(i,j) + ptc(i,j,k)*vt(i,j,k)
718  enddo
719  enddo
720  do i=is,iep1
721  divg2(i,j) = d2_divg*divg2(i,j)/wk(i,j)
722  enddo
723  enddo
724  else
725  divg2(:,:) = 0.
726  endif
727 
728  call timing_on('COMM_TOTAL')
729  call complete_group_halo_update(i_pack(1), domain)
730 #ifdef USE_COND
731  call complete_group_halo_update(i_pack(11), domain)
732 #endif
733  call timing_off('COMM_TOTAL')
734  if ( flagstruct%fv_debug ) then
735  if ( .not. flagstruct%hydrostatic ) &
736  call prt_mxm('delz', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
737  endif
738 
739  !Want to move this block into the hydro/nonhydro branch above and merge the two if structures
740  if (gridstruct%nested) then
741  call nested_grid_bc_apply_intt(delp, &
742  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
743  neststruct%delp_bc, bctype=neststruct%nestbctype )
744 
745 #ifndef SW_DYNAMICS
746 
747  call nested_grid_bc_apply_intt(pt, &
748  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
749  neststruct%pt_bc, bctype=neststruct%nestbctype )
750 
751 #ifdef USE_COND
752  call nested_grid_bc_apply_intt(q_con, &
753  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
754  neststruct%q_con_bc, bctype=neststruct%nestbctype )
755 #endif
756 
757 #endif
758 
759  end if
760  if ( hydrostatic ) then
761  call geopk(ptop, pe, peln, delp, pkc, gz, phis, pt, q_con, pkz, npz, akap, .false., &
762  gridstruct%nested, .true., npx, npy, flagstruct%a2b_ord, bd)
763  else
764 #ifndef SW_DYNAMICS
765  call timing_on('UPDATE_DZ')
766  call update_dz_d(nord_v, damp_vt, flagstruct%hord_tm, is, ie, js, je, npz, ng, npx, npy, gridstruct%area, &
767  gridstruct%rarea, dp_ref, zs, zh, crx, cry, xfx, yfx, delz, ws, rdt, gridstruct, bd)
768  call timing_off('UPDATE_DZ')
769  if ( flagstruct%fv_debug ) then
770  if ( .not. flagstruct%hydrostatic ) &
771  call prt_mxm('delz updated', delz, is, ie, js, je, ng, npz, 1., gridstruct%area_64, domain)
772  endif
773 
774  if (idiag%id_ws>0 .and. last_step) then
775 ! call prt_maxmin('WS', ws, is, ie, js, je, 0, 1, 1., master)
776  used=send_data(idiag%id_ws, ws, fv_time)
777  endif
778 
779 
780 
781 
782 
783  call timing_on('Riem_Solver')
784  call riem_solver3(flagstruct%m_split, dt, is, ie, js, je, npz, ng, &
785  isd, ied, jsd, jed, &
786  akap, cappa, cp, ptop, zs, q_con, w, delz, pt, delp, zh, &
787  pe, pkc, pk3, pk, peln, ws, &
788  flagstruct%scale_z, flagstruct%p_fac, flagstruct%a_imp, &
789  flagstruct%use_logp, remap_step, beta<-0.1)
790  call timing_off('Riem_Solver')
791  call timing_on('COMM_TOTAL')
792  if ( gridstruct%square_domain ) then
793  call start_group_halo_update(i_pack(4), zh , domain)
794  call start_group_halo_update(i_pack(5), pkc, domain, whalo=2, ehalo=2, shalo=2, nhalo=2)
795  else
796  call start_group_halo_update(i_pack(4), zh , domain, complete=.false.)
797  call start_group_halo_update(i_pack(4), pkc, domain, complete=.true.)
798  endif
799  call timing_off('COMM_TOTAL')
800  if ( remap_step ) &
801  call pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp)
802 
803  if ( flagstruct%use_logp ) then
804  call pln_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pk3, delp)
805  else
806  call pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp)
807  endif
808  if (gridstruct%nested) then
809  call nested_grid_bc_apply_intt(delz, &
810  0, 0, npx, npy, npz, bd, split_timestep_bc+1., real(n_split*flagstruct%k_split), &
811  neststruct%delz_bc, bctype=neststruct%nestbctype )
812 
813  !Compute gz/pkc/pk3; note that now pkc should be nonhydro pert'n pressure
814  call nest_halo_nh(ptop, grav, akap, cp, delp, delz, pt, phis, &
815 #ifdef USE_COND
816  q_con, &
817 #ifdef MOIST_CAPPA
818  cappa, &
819 #endif
820 #endif
821  pkc, gz, pk3, npx, npy, npz, gridstruct%nested, .true., .true., .true., bd)
822 
823  endif
824  call timing_on('COMM_TOTAL')
825  call complete_group_halo_update(i_pack(4), domain)
826  call timing_off('COMM_TOTAL')
827 !$OMP parallel do default(none) shared(is,ie,js,je,npz,gz,zh,grav)
828  do k=1,npz+1
829  do j=js-2,je+2
830  do i=is-2,ie+2
831  gz(i,j,k) = zh(i,j,k)*grav
832  enddo
833  enddo
834  enddo
835  if ( gridstruct%square_domain ) then
836  call timing_on('COMM_TOTAL')
837  call complete_group_halo_update(i_pack(5), domain)
838  call timing_off('COMM_TOTAL')
839  endif
840 #endif SW_DYNAMICS
841  endif ! end hydro check
842 
843 #ifdef SW_DYNAMICS
844  if (test_case > 1) then
845 #else
846  if ( remap_step .and. hydrostatic ) then
847 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pk,pkc)
848  do k=1,npz+1
849  do j=js,je
850  do i=is,ie
851  pk(i,j,k) = pkc(i,j,k)
852  enddo
853  enddo
854  enddo
855  endif
856 #endif
857 
858 !----------------------------
859 ! Compute pressure gradient:
860 !----------------------------
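! Hydrostatic: grad1_p_update (time-split, beta > 0) or one_grad_p.
! Non-hydrostatic: split_p_grad (beta > 0), one_grad_p (beta < -0.1),
! or nh_p_grad otherwise.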
861  call timing_on('PG_D')
862  if ( hydrostatic ) then
863  if ( beta > 0. ) then
864  call grad1_p_update(divg2, u, v, pkc, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta_d, flagstruct%a2b_ord)
865  else
866  call one_grad_p(u, v, pkc, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, ptop, hydrostatic, flagstruct%a2b_ord, flagstruct%d_ext)
867  endif
868 
869  else
870 
871 
872  if ( beta > 0. ) then
873  call split_p_grad( u, v, pkc, gz, delp, pk3, beta_d, dt, ng, gridstruct, bd, npx, npy, npz, flagstruct%use_logp)
874  elseif ( beta < -0.1 ) then
875  call one_grad_p(u, v, pkc, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, ptop, hydrostatic, flagstruct%a2b_ord, flagstruct%d_ext)
876  else
877  call nh_p_grad(u, v, pkc, gz, delp, pk3, dt, ng, gridstruct, bd, npx, npy, npz, flagstruct%use_logp)
878  endif
879 
880 #ifdef ROT3
881  if ( flagstruct%do_f3d ) then
882 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ua,gridstruct,w,va,isd,ied,jsd,jed)
883  do k=1,npz
884  do j=js,je
885  do i=is,ie
886  ua(i,j,k) = -gridstruct%w00(i,j)*w(i,j,k)
887  enddo
888  enddo
889  do j=jsd,jed
890  do i=isd,ied
891  va(i,j,k) = 0.
892  enddo
893  enddo
894  enddo
895  call mpp_update_domains(ua, domain, complete=.true.)
896  call update_dwinds_phys(is, ie, js, je, isd, ied, jsd, jed, dt, ua, va, u, v, gridstruct, npx, npy, npz, domain)
897  endif
898 #endif
899  endif
900  call timing_off('PG_D')
901 
902 ! Inline Rayleigh friction here?
903 #ifdef USE_SUPER_RAY
904  if( flagstruct%tau > 0. ) &
905  call rayleigh_fast(abs(dt), npx, npy, npz, pfull, flagstruct%tau, u, v, w, ptop, hydrostatic, flagstruct%rf_cutoff, bd)
906 #endif
907 
908 !-------------------------------------------------------------------------------------------------------
909  if ( flagstruct%breed_vortex_inline ) then
910  if ( .not. hydrostatic ) then
911 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pkz,cappa,rdg,delp,delz,pt,k1k)
912  do k=1,npz
913  do j=js,je
914  do i=is,ie
915 ! Note: pt at this stage is Theta_m
916 #ifdef MOIST_CAPPA
917  pkz(i,j,k) = exp(cappa(i,j,k)/(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
918 #else
919  pkz(i,j,k) = exp( k1k*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
920 #endif
921  enddo
922  enddo
923  enddo
924  endif
925 #if defined (ADA_NUDGE)
926  call breed_slp_inline_ada( it, dt, npz, ak, bk, phis, pe, pk, peln, pkz, &
927  delp, u, v, pt, q, flagstruct%nwat, zvir, gridstruct, ks, domain, bd )
928 #else
929  call breed_slp_inline( it, dt, npz, ak, bk, phis, pe, pk, peln, pkz, delp, u, v, pt, q, &
930  flagstruct%nwat, zvir, gridstruct, ks, domain, bd, hydrostatic )
931 #endif
932  endif
933 !-------------------------------------------------------------------------------------------------------
934 
935  call timing_on('COMM_TOTAL')
936  if( it==n_split .and. gridstruct%grid_type<4 .and. .not. gridstruct%nested) then
937 ! Prevent accumulation of rounding errors at overlapped domain edges:
938  call mpp_get_boundary(u, v, domain, ebuffery=ebuffer, &
939  nbufferx=nbuffer, gridtype=dgrid_ne )
940 !$OMP parallel do default(none) shared(is,ie,js,je,npz,u,nbuffer,v,ebuffer)
941  do k=1,npz
942  do i=is,ie
943  u(i,je+1,k) = nbuffer(i-is+1,k)
944  enddo
945  do j=js,je
946  v(ie+1,j,k) = ebuffer(j-js+1,k)
947  enddo
948  enddo
949 
950  endif
951 
952 #ifndef ROT3
953  if ( it/=n_split) &
954  call start_group_halo_update(i_pack(8), u, v, domain, gridtype=dgrid_ne)
955 #endif
956  call timing_off('COMM_TOTAL')
957 
958 #ifdef SW_DYNAMICS
959  endif
960 #endif
961  if ( gridstruct%nested ) then
962  neststruct%nest_timestep = neststruct%nest_timestep + 1
963  endif
964 
965 #ifdef SW_DYNAMICS
966 #else
967  if ( hydrostatic .and. last_step ) then
968  if ( flagstruct%use_old_omega ) then
969 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga,pe,pem,rdt)
970  do k=1,npz
971  do j=js,je
972  do i=is,ie
973  omga(i,j,k) = (pe(i,k+1,j) - pem(i,k+1,j)) * rdt
974  enddo
975  enddo
976  enddo
977 !------------------------------
978 ! Compute the "advective term"
979 !------------------------------
980  call adv_pe(ua, va, pem, omga, gridstruct, bd, npx, npy, npz, ng)
981  else
982 !$OMP parallel do default(none) shared(is,ie,js,je,npz,omga) private(om2d)
983  do j=js,je
984  do k=1,npz
985  do i=is,ie
986  om2d(i,k) = omga(i,j,k)
987  enddo
988  enddo
989  do k=2,npz
990  do i=is,ie
991  om2d(i,k) = om2d(i,k-1) + omga(i,j,k)
992  enddo
993  enddo
994  do k=2,npz
995  do i=is,ie
996  omga(i,j,k) = om2d(i,k)
997  enddo
998  enddo
999  enddo
1000  endif
1001  if (idiag%id_ws>0 .and. hydrostatic) then
1002 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ws,delz,delp,omga)
1003  do j=js,je
1004  do i=is,ie
1005  ws(i,j) = delz(i,j,npz)/delp(i,j,npz) * omga(i,j,npz)
1006  enddo
1007  enddo
1008  used=send_data(idiag%id_ws, ws, fv_time)
1009  endif
1010  endif
1011 #endif
1012 
1013  if (gridstruct%nested) then
1014 
1015 
1016 
1017 #ifndef SW_DYNAMICS
1018  if (.not. hydrostatic) then
1019  call nested_grid_bc_apply_intt(w, &
1020  0, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1021  neststruct%w_bc, bctype=neststruct%nestbctype )
1022  end if
1023 #endif SW_DYNAMICS
1024  call nested_grid_bc_apply_intt(u, &
1025  0, 1, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1026  neststruct%u_bc, bctype=neststruct%nestbctype )
1027  call nested_grid_bc_apply_intt(v, &
1028  1, 0, npx, npy, npz, bd, split_timestep_bc+1, real(n_split*flagstruct%k_split), &
1029  neststruct%v_bc, bctype=neststruct%nestbctype )
1030 
1031  end if
1032 
1033 !-----------------------------------------------------
1034  enddo ! time split loop
1035 !-----------------------------------------------------
1036  if ( nq > 0 .and. .not. flagstruct%inline_q ) then
1037  call timing_on('COMM_TOTAL')
1038  call timing_on('COMM_TRACER')
1039  call start_group_halo_update(i_pack(10), q, domain)
1040  call timing_off('COMM_TRACER')
1041  call timing_off('COMM_TOTAL')
1042  endif
1043 
1044  if ( flagstruct%fv_debug ) then
1045  if(is_master()) write(*,*) 'End of n_split loop'
1046  endif
1047 
1048 
1049  if ( n_con/=0 .and. flagstruct%d_con > 1.e-5 ) then
1050  nf_ke = min(3, flagstruct%nord+1)
1051  call del2_cubed(heat_source, cnst_0p20*gridstruct%da_min, gridstruct, domain, npx, npy, npz, nf_ke, bd)
1052 
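! The heat source accumulated in d_sw (kinetic energy lost to damping), smoothed
! above by del2_cubed, is converted below into a potential-temperature increment;
! the increment is capped at |bdt|*delt_max except in the top two layers of the
! hydrostatic branch.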
1053 ! Note: pt here is cp*(Virtual_Temperature/pkz)
1054  if ( hydrostatic ) then
1055 !
1056 ! del(Cp*T) = - del(KE)
1057 !
1058 !$OMP parallel do default(none) shared(flagstruct,is,ie,js,je,n_con,pt,heat_source,delp,pkz,bdt) &
1059 !$OMP private(dtmp)
1060  do j=js,je
1061  do k=1,n_con ! n_con is usually less than 3;
1062  if ( k<3 ) then
1063  do i=is,ie
1064  pt(i,j,k) = pt(i,j,k) + heat_source(i,j,k)/(cp_air*delp(i,j,k)*pkz(i,j,k))
1065  enddo
1066  else
1067  do i=is,ie
1068  dtmp = heat_source(i,j,k) / (cp_air*delp(i,j,k))
1069  pt(i,j,k) = pt(i,j,k) + sign(min(abs(bdt)*flagstruct%delt_max,abs(dtmp)), dtmp)/pkz(i,j,k)
1070  enddo
1071  endif
1072  enddo
1073  enddo
1074  else
1075 !$OMP parallel do default(none) shared(flagstruct,is,ie,js,je,n_con,pkz,cappa,rdg,delp,delz,pt, &
1076 !$OMP heat_source,k1k,cv_air,bdt) &
1077 !$OMP private(dtmp, delt)
1078  do k=1,n_con
1079  delt = abs(bdt*flagstruct%delt_max)
1080 ! Sponge layers:
1081 ! if ( k == 1 ) delt = 2.0*delt
1082 ! if ( k == 2 ) delt = 1.5*delt
1083  do j=js,je
1084  do i=is,ie
1085 #ifdef MOIST_CAPPA
1086  pkz(i,j,k) = exp( cappa(i,j,k)/(1.-cappa(i,j,k))*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1087 #else
1088  pkz(i,j,k) = exp( k1k*log(rdg*delp(i,j,k)/delz(i,j,k)*pt(i,j,k)) )
1089 #endif
1090  dtmp = heat_source(i,j,k) / (cv_air*delp(i,j,k))
1091  pt(i,j,k) = pt(i,j,k) + sign(min(delt, abs(dtmp)),dtmp) / pkz(i,j,k)
1092  enddo
1093  enddo
1094  enddo
1095  endif
1096 
1097  endif
1098  if (allocated(heat_source)) deallocate( heat_source ) !If ncon == 0 but d_con > 1.e-5, this would not be deallocated in earlier versions of the code
1099 
1100 
1101  if ( end_step ) then
1102  deallocate( gz )
1103  deallocate( ptc )
1104  deallocate( crx )
1105  deallocate( xfx )
1106  deallocate( cry )
1107  deallocate( yfx )
1108  deallocate( divgd )
1109  deallocate( pkc )
1110  deallocate( delpc )
1111 
1112  if( allocated(ut)) deallocate( ut )
1113  if( allocated(vt)) deallocate( vt )
1114  if ( allocated (du) ) deallocate( du )
1115  if ( allocated (dv) ) deallocate( dv )
1116  if ( .not. hydrostatic ) then
1117  deallocate( zh )
1118  if( allocated(pk3) ) deallocate ( pk3 )
1119  endif
1120 
1121  endif
1122  if( allocated(pem) ) deallocate ( pem )
1123 
1124 end subroutine dyn_core
1125 
1126 subroutine pk3_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, akap, pk3, delp)
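! Fill pk3 = pe**kappa in a two-cell-deep halo ring around the compute domain,
! integrating delp downward from ptop (i-direction edge strips first, then
! j-direction strips over the extended i range).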
1127 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1128 real, intent(in):: ptop, akap
1129 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1130 real, intent(inout), dimension(isd:ied,jsd:jed,npz+1):: pk3
1131 ! Local:
1132 real:: pei(isd:ied)
1133 real:: pej(jsd:jed)
1134 integer:: i,j,k
1135 
1136 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,npz,ptop,delp,pk3,akap) &
1137 !$OMP private(pei)
1138  do j=js,je
1139  pei(is-2) = ptop
1140  pei(is-1) = ptop
1141  do k=1,npz
1142  pei(is-2) = pei(is-2) + delp(is-2,j,k)
1143  pei(is-1) = pei(is-1) + delp(is-1,j,k)
1144  pk3(is-2,j,k+1) = exp(akap*log(pei(is-2)))
1145  pk3(is-1,j,k+1) = exp(akap*log(pei(is-1)))
1146  enddo
1147  pei(ie+1) = ptop
1148  pei(ie+2) = ptop
1149  do k=1,npz
1150  pei(ie+1) = pei(ie+1) + delp(ie+1,j,k)
1151  pei(ie+2) = pei(ie+2) + delp(ie+2,j,k)
1152  pk3(ie+1,j,k+1) = exp(akap*log(pei(ie+1)))
1153  pk3(ie+2,j,k+1) = exp(akap*log(pei(ie+2)))
1154  enddo
1155  enddo
1156 
1157 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ptop,delp,pk3,akap) &
1158 !$OMP private(pej)
1159  do i=is-2,ie+2
1160  pej(js-2) = ptop
1161  pej(js-1) = ptop
1162  do k=1,npz
1163  pej(js-2) = pej(js-2) + delp(i,js-2,k)
1164  pej(js-1) = pej(js-1) + delp(i,js-1,k)
1165  pk3(i,js-2,k+1) = exp(akap*log(pej(js-2)))
1166  pk3(i,js-1,k+1) = exp(akap*log(pej(js-1)))
1167  enddo
1168  pej(je+1) = ptop
1169  pej(je+2) = ptop
1170  do k=1,npz
1171  pej(je+1) = pej(je+1) + delp(i,je+1,k)
1172  pej(je+2) = pej(je+2) + delp(i,je+2,k)
1173  pk3(i,je+1,k+1) = exp(akap*log(pej(je+1)))
1174  pk3(i,je+2,k+1) = exp(akap*log(pej(je+2)))
1175  enddo
1176  enddo
1177 
1178 end subroutine pk3_halo
1179 
1180 subroutine pln_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pk3, delp)
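! Same halo fill as pk3_halo, but storing pk3 = log(pe) for the use_logp option.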
1181 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1182 real, intent(in):: ptop
1183 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1184 real, intent(inout), dimension(isd:ied,jsd:jed,npz+1):: pk3
1185 ! Local:
1186 real:: pet
1187 integer:: i,j,k
1188 
1189 !$OMP parallel do default(none) shared(is,ie,js,je,isd,ied,npz,ptop,delp,pk3) &
1190 !$OMP private(pet)
1191  do j=js,je
1192  do i=is-2,is-1
1193  pet = ptop
1194  do k=1,npz
1195  pet = pet + delp(i,j,k)
1196  pk3(i,j,k+1) = log(pet)
1197  enddo
1198  enddo
1199  do i=ie+1,ie+2
1200  pet = ptop
1201  do k=1,npz
1202  pet = pet + delp(i,j,k)
1203  pk3(i,j,k+1) = log(pet)
1204  enddo
1205  enddo
1206  enddo
1207 
1208 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ptop,delp,pk3) &
1209 !$OMP private(pet)
1210  do i=is-2,ie+2
1211  do j=js-2,js-1
1212  pet = ptop
1213  do k=1,npz
1214  pet = pet + delp(i,j,k)
1215  pk3(i,j,k+1) = log(pet)
1216  enddo
1217  enddo
1218  do j=je+1,je+2
1219  pet = ptop
1220  do k=1,npz
1221  pet = pet + delp(i,j,k)
1222  pk3(i,j,k+1) = log(pet)
1223  enddo
1224  enddo
1225  enddo
1226 
1227 end subroutine pln_halo
1228 
1229 subroutine pe_halo(is, ie, js, je, isd, ied, jsd, jed, npz, ptop, pe, delp)
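! Fill the edge pressure pe in a one-cell halo ring (i = is-1, ie+1 and
! j = js-1, je+1) by integrating delp downward from ptop.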
1230 integer, intent(in):: is, ie, js, je, isd, ied, jsd, jed, npz
1231 real, intent(in):: ptop
1232 real, intent(in ), dimension(isd:ied,jsd:jed,npz):: delp
1233 real, intent(inout), dimension(is-1:ie+1,npz+1,js-1:je+1):: pe
1234 ! Local:
1235 integer:: i,j,k
1236 
1237 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ptop)
1238  do j=js,je
1239  pe(is-1,1,j) = ptop
1240  pe(ie+1,1,j) = ptop
1241  do k=1,npz
1242  pe(is-1,k+1,j) = pe(is-1,k,j) + delp(is-1,j,k)
1243  pe(ie+1,k+1,j) = pe(ie+1,k,j) + delp(ie+1,j,k)
1244  enddo
1245  enddo
1246 
1247 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pe,delp,ptop)
1248  do i=is-1,ie+1
1249  pe(i,1,js-1) = ptop
1250  pe(i,1,je+1) = ptop
1251  do k=1,npz
1252  pe(i,k+1,js-1) = pe(i,k,js-1) + delp(i,js-1,k)
1253  pe(i,k+1,je+1) = pe(i,k,je+1) + delp(i,je+1,k)
1254  enddo
1255  enddo
1256 
1257 end subroutine pe_halo
1258 
1259 
1260 subroutine adv_pe(ua, va, pem, om, gridstruct, bd, npx, npy, npz, ng)
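! Add the advective term V . grad(pe) to the hydrostatic omega diagnostic, using
! the layer-bottom edge pressures (pem) and winds averaged to the layer bottom
! (lowest-layer winds at the surface).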
1262 integer, intent(in) :: npx, npy, npz, ng
1263 type(fv_grid_bounds_type), intent(IN) :: bd
1264 ! Contra-variant wind components:
1265 real, intent(in), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,npz):: ua, va
1266 ! Pressure at edges:
1267 real, intent(in) :: pem(bd%is-1:bd%ie+1,1:npz+1,bd%js-1:bd%je+1)
1268 real, intent(inout) :: om(bd%isd:bd%ied,bd%jsd:bd%jed,npz)
1269 type(fv_grid_type), intent(INOUT), target :: gridstruct
1270 
1271 ! Local:
1272 real, dimension(bd%is:bd%ie,bd%js:bd%je):: up, vp
1273 real v3(3,bd%is:bd%ie,bd%js:bd%je)
1274 
1275 real pin(bd%isd:bd%ied,bd%jsd:bd%jed)
1276 real pb(bd%isd:bd%ied,bd%jsd:bd%jed)
1277 
1278 real grad(3,bd%is:bd%ie,bd%js:bd%je)
1279 real pdx(3,bd%is:bd%ie,bd%js:bd%je+1)
1280 real pdy(3,bd%is:bd%ie+1,bd%js:bd%je)
1281 
1282 integer :: i,j,k, n
1283 integer :: is, ie, js, je
1284 
1285  is = bd%is
1286  ie = bd%ie
1287  js = bd%js
1288  je = bd%je
1289 
1290 !$OMP parallel do default(none) shared(is,ie,js,je,npz,ua,va,gridstruct,pem,npx,npy,ng,om) &
1291 !$OMP private(n, pdx, pdy, pin, pb, up, vp, grad, v3)
1292 do k=1,npz
1293  if ( k==npz ) then
1294  do j=js,je
1295  do i=is,ie
1296  up(i,j) = ua(i,j,npz)
1297  vp(i,j) = va(i,j,npz)
1298  enddo
1299  enddo
1300  else
1301  do j=js,je
1302  do i=is,ie
1303  up(i,j) = 0.5*(ua(i,j,k)+ua(i,j,k+1))
1304  vp(i,j) = 0.5*(va(i,j,k)+va(i,j,k+1))
1305  enddo
1306  enddo
1307  endif
1308 
1309  ! Compute the 3D Cartesian wind vector:
1310  do j=js,je
1311  do i=is,ie
1312  do n=1,3
1313  v3(n,i,j) = up(i,j)*gridstruct%ec1(n,i,j) + vp(i,j)*gridstruct%ec2(n,i,j)
1314  enddo
1315  enddo
1316  enddo
1317 
1318  do j=js-1,je+1
1319  do i=is-1,ie+1
1320  pin(i,j) = pem(i,k+1,j)
1321  enddo
1322  enddo
1323 
1324  ! Compute pe at 4 cell corners:
1325  call a2b_ord2(pin, pb, gridstruct, npx, npy, is, ie, js, je, ng)
1326 
1327 
1328  do j=js,je+1
1329  do i=is,ie
1330  do n=1,3
1331  pdx(n,i,j) = (pb(i,j)+pb(i+1,j))*gridstruct%dx(i,j)*gridstruct%en1(n,i,j)
1332  enddo
1333  enddo
1334  enddo
1335  do j=js,je
1336  do i=is,ie+1
1337  do n=1,3
1338  pdy(n,i,j) = (pb(i,j)+pb(i,j+1))*gridstruct%dy(i,j)*gridstruct%en2(n,i,j)
1339  enddo
1340  enddo
1341  enddo
1342 
1343  ! Compute grad (pe) by Green's theorem
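  ! (pdx and pdy hold the pressure integrated along each cell edge times the
  ! edge normal vectors en1/en2; differencing them around the cell and scaling
  ! by 0.5*rarea below yields the cell-mean pressure gradient.)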
1344  do j=js,je
1345  do i=is,ie
1346  do n=1,3
1347  grad(n,i,j) = pdx(n,i,j+1) - pdx(n,i,j) - pdy(n,i,j) + pdy(n,i+1,j)
1348  enddo
1349  enddo
1350  enddo
1351 
1352  ! Compute inner product: V3 * grad (pe)
1353  do j=js,je
1354  do i=is,ie
1355  om(i,j,k) = om(i,j,k) + 0.5*gridstruct%rarea(i,j)*(v3(1,i,j)*grad(1,i,j) + &
1356  v3(2,i,j)*grad(2,i,j) + v3(3,i,j)*grad(3,i,j))
1357  enddo
1358  enddo
1359 enddo
1360 
1361 end subroutine adv_pe
1362 
1363 
1364 
1365 
1366 subroutine p_grad_c(dt2, npz, delpc, pkc, gz, uc, vc, bd, rdxc, rdyc, hydrostatic)
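! Apply the pressure-gradient force to the provisional C-grid winds (uc, vc)
! over the half time step dt2, in finite-volume (corner-based) form.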
1368 integer, intent(in):: npz
1369 real, intent(in):: dt2
1370 type(fv_grid_bounds_type), intent(IN) :: bd
1371 real, intent(in), dimension(bd%isd:, bd%jsd: ,: ):: delpc
1372 ! pkc is pe**cappa if hydrostatic
1373 ! pkc is full pressure if non-hydrostatic
1374 real, intent(in), dimension(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1):: pkc, gz
1375 real, intent(inout):: uc(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1376 real, intent(inout):: vc(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1377 real, intent(IN) :: rdxc(bd%isd:bd%ied+1,bd%jsd:bd%jed+1)
1378 real, intent(IN) :: rdyc(bd%isd:bd%ied ,bd%jsd:bd%jed)
1379 logical, intent(in):: hydrostatic
1380 ! Local:
1381 real:: wk(bd%is-1:bd%ie+1,bd%js-1:bd%je+1)
1382 integer:: i,j,k
1383 
1384 integer :: is, ie, js, je
1385 
1386  is = bd%is
1387  ie = bd%ie
1388  js = bd%js
1389  je = bd%je
1390 
1391 !$OMP parallel do default(none) shared(is,ie,js,je,npz,hydrostatic,pkc,delpc,uc,dt2,rdxc,gz,vc,rdyc) &
1392 !$OMP private(wk)
1393 do k=1,npz
1394 
1395  if ( hydrostatic ) then
1396  do j=js-1,je+1
1397  do i=is-1,ie+1
1398  wk(i,j) = pkc(i,j,k+1) - pkc(i,j,k)
1399  enddo
1400  enddo
1401  else
1402  do j=js-1,je+1
1403  do i=is-1,ie+1
1404  wk(i,j) = delpc(i,j,k)
1405  enddo
1406  enddo
1407  endif
1408 
1409  do j=js,je
1410  do i=is,ie+1
1411  uc(i,j,k) = uc(i,j,k) + dt2*rdxc(i,j) / (wk(i-1,j)+wk(i,j)) * &
1412  ( (gz(i-1,j,k+1)-gz(i,j,k ))*(pkc(i,j,k+1)-pkc(i-1,j,k)) &
1413  + (gz(i-1,j,k) - gz(i,j,k+1))*(pkc(i-1,j,k+1)-pkc(i,j,k)) )
1414  enddo
1415  enddo
1416  do j=js,je+1
1417  do i=is,ie
1418  vc(i,j,k) = vc(i,j,k) + dt2*rdyc(i,j) / (wk(i,j-1)+wk(i,j)) * &
1419  ( (gz(i,j-1,k+1)-gz(i,j,k ))*(pkc(i,j,k+1)-pkc(i,j-1,k)) &
1420  + (gz(i,j-1,k) - gz(i,j,k+1))*(pkc(i,j-1,k+1)-pkc(i,j,k)) )
1421  enddo
1422  enddo
1423 enddo
1424 
1425 end subroutine p_grad_c
1426 
1427 
1428 subroutine nh_p_grad(u, v, pp, gz, delp, pk, dt, ng, gridstruct, bd, npx, npy, npz, use_logp)
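! Non-hydrostatic pressure-gradient update of the D-grid winds: the hydrostatic
! part is computed from pk (p**kappa) and the non-hydrostatic part from the
! perturbation pressure pp, both interpolated to cell corners with a2b_ord4.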
1429 integer, intent(IN) :: ng, npx, npy, npz
1430 real, intent(IN) :: dt
1431 logical, intent(in) :: use_logp
1432 type(fv_grid_bounds_type), intent(IN) :: bd
1433 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1434 real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! perturbation pressure
1435 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! p**kappa
1436 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! g * h
1437 real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz)
1438 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz)
1439  type(fv_grid_type), intent(INOUT), target :: gridstruct
1440 ! Local:
1441 real wk1(bd%isd:bd%ied, bd%jsd:bd%jed)
1442 real wk(bd%is: bd%ie+1,bd%js: bd%je+1)
1443 real du1, dv1, top_value
1444 integer i,j,k
1445 integer :: is, ie, js, je
1446 integer :: isd, ied, jsd, jed
1447 
1448  is = bd%is
1449  ie = bd%ie
1450  js = bd%js
1451  je = bd%je
1452  isd = bd%isd
1453  ied = bd%ied
1454  jsd = bd%jsd
1455  jed = bd%jed
1456 
1457 if ( use_logp ) then
1458  top_value = peln1
1459 else
1460  top_value = ptk
1461 endif
1462 
1463 !Remember that not all compilers set pp to zero by default
1464 !$OMP parallel do default(none) shared(is,ie,js,je,pp,pk,top_value)
1465 do j=js,je+1
1466  do i=is,ie+1
1467  pp(i,j,1) = 0.
1468  pk(i,j,1) = top_value
1469  enddo
1470 enddo
1471 
1472 !$OMP parallel do default(none) shared(isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) &
1473 !$OMP private(wk1)
1474 do k=1,npz+1
1475  if ( k/=1 ) then
1476  call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1477  call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1478  endif
1479  call a2b_ord4( gz(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1480 enddo
1481 
1482 !$OMP parallel do default(none) shared(is,ie,js,je,npz,delp,gridstruct,npx,npy,ng,isd,jsd, &
1483 !$OMP pk,dt,gz,u,pp,v) &
1484 !$OMP private(wk1, wk, du1, dv1)
1485 do k=1,npz
1486  call a2b_ord4(delp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng)
1487  do j=js,je+1
1488  do i=is,ie+1
1489  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1490  enddo
1491  enddo
1492  do j=js,je+1
1493  do i=is,ie
1494  ! hydrostatic contributions from past time-step already added in the "beta" part
1495  ! Current gradient from "hydrostatic" components:
1496  du1 = dt / (wk(i,j)+wk(i+1,j)) * &
1497  ( (gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) + &
1498  (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)) )
1499 #ifdef GAS_HYDRO_P
1500  du1 = (1.-0.5*(q_con(i,j-1,k)+q_con(i,j,k)))*du1
1501 #endif
1502  ! Non-hydrostatic contribution
1503  u(i,j,k) = (u(i,j,k) + du1 + dt/(wk1(i,j)+wk1(i+1,j)) * &
1504  ((gz(i,j,k+1)-gz(i+1,j,k))*(pp(i+1,j,k+1)-pp(i,j,k)) &
1505  + (gz(i,j,k)-gz(i+1,j,k+1))*(pp(i,j,k+1)-pp(i+1,j,k))))*gridstruct%rdx(i,j)
1506  enddo
1507  enddo
1508  do j=js,je
1509  do i=is,ie+1
1510  ! Current gradient from "hydrostatic" components:
1511  dv1 = dt / (wk(i,j)+wk(i,j+1)) * &
1512  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) + &
1513  (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1514 #ifdef GAS_HYDRO_P
1515  dv1 = (1.-0.5*(q_con(i-1,j,k)+q_con(i,j,k)))*dv1
1516 #endif
1517  ! Non-hydrostatic contribution
1518  v(i,j,k) = (v(i,j,k) + dv1 + dt/(wk1(i,j)+wk1(i,j+1)) * &
1519  ((gz(i,j,k+1)-gz(i,j+1,k))*(pp(i,j+1,k+1)-pp(i,j,k)) &
1520  + (gz(i,j,k)-gz(i,j+1,k+1))*(pp(i,j,k+1)-pp(i,j+1,k))))*gridstruct%rdy(i,j)
1521  enddo
1522  enddo
1523 
1524 enddo ! end k-loop
1525 end subroutine nh_p_grad
1526 
1527 
1528 subroutine split_p_grad( u, v, pp, gz, delp, pk, beta, dt, ng, gridstruct, bd, npx, npy, npz, use_logp)
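! As nh_p_grad, but with the hydrostatic increment time-split: the increment
! saved from the previous acoustic step (du, dv) is applied with weight beta,
! and the newly computed increment with weight alpha = 1 - beta.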
1529 integer, intent(IN) :: ng, npx, npy, npz
1530 real, intent(IN) :: beta, dt
1531 logical, intent(in):: use_logp
1532 type(fv_grid_bounds_type), intent(IN) :: bd
1533 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed, npz)
1534 real, intent(inout) :: pp(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! perturbation pressure
1535 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! p**kappa
1536 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed, npz+1) ! g * h
1537 ! real, intent(inout) :: du(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1538 ! real, intent(inout) :: dv(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1539 real, intent(inout) :: u(bd%isd:bd%ied, bd%jsd:bd%jed+1,npz)
1540 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed, npz)
1541 type(fv_grid_type), intent(INOUT), target :: gridstruct
1542 ! Local:
1543 real wk1(bd%isd:bd%ied, bd%jsd:bd%jed)
1544 real wk(bd%is: bd%ie+1,bd%js: bd%je+1)
1545 real alpha, top_value
1546 integer i,j,k
1547 integer :: is, ie, js, je
1548 integer :: isd, ied, jsd, jed
1549 
1550  is = bd%is
1551  ie = bd%ie
1552  js = bd%js
1553  je = bd%je
1554  isd = bd%isd
1555  ied = bd%ied
1556  jsd = bd%jsd
1557  jed = bd%jed
1558 
1559 if ( use_logp ) then
1560  top_value = peln1
1561 else
1562  top_value = ptk
1563 endif
1564 
1565 alpha = 1. - beta
1566 
1567 !$OMP parallel do default(none) shared(is,ie,js,je,pp,pk,top_value)
1568 do j=js,je+1
1569  do i=is,ie+1
1570  pp(i,j,1) = 0.
1571  pk(i,j,1) = top_value
1572  enddo
1573 enddo
1574 
1575 !$OMP parallel do default(none) shared(isd,jsd,npz,pp,gridstruct,npx,npy,is,ie,js,je,ng,pk,gz) &
1576 !$OMP private(wk1)
1577 do k=1,npz+1
1578  if ( k/=1 ) then
1579  call a2b_ord4(pp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1580  call a2b_ord4(pk(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1581  endif
1582  call a2b_ord4( gz(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1583 enddo
1584 
1585 !$OMP parallel do default(none) shared(is,ie,js,je,isd,jsd,npz,delp,gridstruct,npx,npy,ng, &
1586 !$OMP pk,u,beta,du,dt,gz,alpha,pp,v,dv) &
1587 !$OMP private(wk1, wk)
1588 do k=1,npz
1589  call a2b_ord4(delp(isd,jsd,k), wk1, gridstruct, npx, npy, is, ie, js, je, ng)
1590 
1591  do j=js,je+1
1592  do i=is,ie+1
1593  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1594  enddo
1595  enddo
1596 
1597  do j=js,je+1
1598  do i=is,ie
1599  u(i,j,k) = u(i,j,k) + beta*du(i,j,k)
1600  ! hydrostatic contributions from past time-step already added in the "beta" part
1601  ! Current gradient from "hydrostatic" components:
1602  !---------------------------------------------------------------------------------
1603  du(i,j,k) = dt / (wk(i,j)+wk(i+1,j)) * &
1604  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) + &
1605  (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)))
1606 #ifdef GAS_HYDRO_P
1607  du(i,j,k) = (1.-0.5*(q_con(i,j-1,k)+q_con(i,j,k)))*du(i,j,k)
1608 #endif
1609  !---------------------------------------------------------------------------------
1610  ! Non-hydrostatic contribution
1611  u(i,j,k) = (u(i,j,k) + alpha*du(i,j,k) + dt/(wk1(i,j)+wk1(i+1,j)) * &
1612  ((gz(i,j,k+1)-gz(i+1,j,k))*(pp(i+1,j,k+1)-pp(i,j,k)) &
1613  + (gz(i,j,k)-gz(i+1,j,k+1))*(pp(i,j,k+1)-pp(i+1,j,k))))*gridstruct%rdx(i,j)
1614  enddo
1615  enddo
1616  do j=js,je
1617  do i=is,ie+1
1618  v(i,j,k) = v(i,j,k) + beta*dv(i,j,k)
1619  ! Current gradient from "hydrostatic" components:
1620  !---------------------------------------------------------------------------------
1621  dv(i,j,k) = dt / (wk(i,j)+wk(i,j+1)) * &
1622  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) + &
1623  (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1624 #ifdef GAS_HYDRO_P
1625  dv(i,j,k) = (1.-0.5*(q_con(i-1,j,k)+q_con(i,j,k)))*dv(i,j,k)
1626 #endif
1627  !---------------------------------------------------------------------------------
1628  ! Non-hydrostatic contribution
1629  v(i,j,k) = (v(i,j,k) + alpha*dv(i,j,k) + dt/(wk1(i,j)+wk1(i,j+1)) * &
1630  ((gz(i,j,k+1)-gz(i,j+1,k))*(pp(i,j+1,k+1)-pp(i,j,k)) &
1631  + (gz(i,j,k)-gz(i,j+1,k+1))*(pp(i,j,k+1)-pp(i,j+1,k))))*gridstruct%rdy(i,j)
1632  enddo
1633  enddo
1634 
1635 enddo ! end k-loop
1636 
1637 
1638 end subroutine split_p_grad
1639 
1640 
1641 
1642 subroutine one_grad_p(u, v, pk, gz, divg2, delp, dt, ng, gridstruct, bd, npx, npy, npz, &
1643  ptop, hydrostatic, a2b_ord, d_ext)
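! Single (non-split) pressure-gradient update of the D-grid winds, used when
! hydrostatic with beta <= 0 or non-hydrostatic with beta < -0.1; also folds in
! the external-mode divergence-damping increment divg2.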
1645 integer, intent(IN) :: ng, npx, npy, npz, a2b_ord
1646 real, intent(IN) :: dt, ptop, d_ext
1647 logical, intent(in) :: hydrostatic
1648 type(fv_grid_bounds_type), intent(IN) :: bd
1649 real, intent(in) :: divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
1650 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1651 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1652 real, intent(inout) :: delp(bd%isd:bd%ied, bd%jsd:bd%jed ,npz)
1653 real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1654 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1655 type(fv_grid_type), intent(INOUT), target :: gridstruct
1656 ! Local:
1657 real, dimension(bd%isd:bd%ied,bd%jsd:bd%jed):: wk
1658 real:: wk1(bd%is:bd%ie+1,bd%js:bd%je+1)
1659 real:: wk2(bd%is:bd%ie,bd%js:bd%je+1)
1660 real top_value
1661 integer i,j,k
1662 
1663 integer :: is, ie, js, je
1664 integer :: isd, ied, jsd, jed
1665 
1666  is = bd%is
1667  ie = bd%ie
1668  js = bd%js
1669  je = bd%je
1670  isd = bd%isd
1671  ied = bd%ied
1672  jsd = bd%jsd
1673  jed = bd%jed
1674 
1675 if ( hydrostatic ) then
1676  ! pk is pe**kappa if hydrostatic
1677  top_value = ptk
1678 else
1679  ! pk is full pressure if non-hydrostatic
1680  top_value = ptop
1681 endif
1682 
1683 !$OMP parallel do default(none) shared(is,ie,js,je,pk,top_value)
1684 do j=js,je+1
1685  do i=is,ie+1
1686  pk(i,j,1) = top_value
1687  enddo
1688 enddo
1689 
1690 !$OMP parallel do default(none) shared(npz,isd,jsd,pk,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
1691 !$OMP private(wk)
1692 do k=2,npz+1
1693  if ( a2b_ord==4 ) then
1694  call a2b_ord4(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1695  else
1696  call a2b_ord2(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1697  endif
1698 enddo
1699 
1700 !$OMP parallel do default(none) shared(npz,isd,jsd,gz,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
1701 !$OMP private(wk)
1702 do k=1,npz+1
1703  if ( a2b_ord==4 ) then
1704  call a2b_ord4( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1705  else
1706  call a2b_ord2( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1707  endif
1708 enddo
1709 
1710 if ( d_ext > 0. ) then
1711 
1712  !$OMP parallel do default(none) shared(is,ie,js,je,wk2,divg2)
1713  do j=js,je+1
1714  do i=is,ie
1715  wk2(i,j) = divg2(i,j)-divg2(i+1,j)
1716  enddo
1717  enddo
1718 
1719  !$OMP parallel do default(none) shared(is,ie,js,je,wk1,divg2)
1720  do j=js,je
1721  do i=is,ie+1
1722  wk1(i,j) = divg2(i,j)-divg2(i,j+1)
1723  enddo
1724  enddo
1725 
1726 else
1727 
1728  !$OMP parallel do default(none) shared(is,ie,js,je,wk1,wk2)
1729  do j=js,je+1
1730  do i=is,ie
1731  wk2(i,j) = 0.
1732  enddo
1733  do i=is,ie+1
1734  wk1(i,j) = 0.
1735  enddo
1736  enddo
1737 
1738 endif
1739 
1740 !$OMP parallel do default(none) shared(is,ie,js,je,npz,pk,delp,hydrostatic,a2b_ord,gridstruct, &
1741 !$OMP npx,npy,isd,jsd,ng,u,v,wk2,dt,gz,wk1) &
1742 !$OMP private(wk)
1743 do k=1,npz
1744 
1745  if ( hydrostatic ) then
1746  do j=js,je+1
1747  do i=is,ie+1
1748  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1749  enddo
1750  enddo
1751  else
1752  if ( a2b_ord==4 ) then
1753  call a2b_ord4(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng)
1754  else
1755  call a2b_ord2(delp(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng)
1756  endif
1757  endif
1758 
1759  do j=js,je+1
1760  do i=is,ie
1761  u(i,j,k) = gridstruct%rdx(i,j)*(wk2(i,j)+u(i,j,k) + dt/(wk(i,j)+wk(i+1,j)) * &
1762  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) &
1763  + (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k))))
1764  enddo
1765  enddo
1766  do j=js,je
1767  do i=is,ie+1
1768  v(i,j,k) = gridstruct%rdy(i,j)*(wk1(i,j)+v(i,j,k) + dt/(wk(i,j)+wk(i,j+1)) * &
1769  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) &
1770  + (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k))))
1771  enddo
1772  enddo
1773 enddo ! end k-loop
1774 
1775 end subroutine one_grad_p
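!----------------------------------------------------------------------
! A minimal sketch of how the external-mode damping enters one_grad_p:
! the corner-point (B-grid) potential divg2 is differenced along each
! cell edge to give the wk2 (u-point) and wk1 (v-point) increments that
! are added to the winds before the rdx/rdy metric factors.  The toy
! routine below repeats that differencing on a stand-alone unit-based
! array; the routine and argument names are hypothetical and it is not
! called anywhere in this module.
 subroutine divg2_increment_sketch(nx, ny, divg2_c, du_ext, dv_ext)
   integer, intent(in)  :: nx, ny
   real,    intent(in)  :: divg2_c(nx+1,ny+1)  ! corner (B-grid) damping potential
   real,    intent(out) :: du_ext(nx,  ny+1)   ! increment at u points (analogue of wk2)
   real,    intent(out) :: dv_ext(nx+1,ny  )   ! increment at v points (analogue of wk1)
   integer :: i, j
   do j=1,ny+1
      do i=1,nx
         du_ext(i,j) = divg2_c(i,j) - divg2_c(i+1,j)
      enddo
   enddo
   do j=1,ny
      do i=1,nx+1
         dv_ext(i,j) = divg2_c(i,j) - divg2_c(i,j+1)
      enddo
   enddo
 end subroutine divg2_increment_sketch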
1776 
1777 
1778 subroutine grad1_p_update(divg2, u, v, pk, gz, dt, ng, gridstruct, bd, npx, npy, npz, ptop, beta, a2b_ord)
1780 integer, intent(in) :: ng, npx, npy, npz, a2b_ord
1781 real, intent(in) :: dt, ptop, beta
1782 type(fv_grid_bounds_type), intent(IN) :: bd
1783 real, intent(in):: divg2(bd%is:bd%ie+1,bd%js:bd%je+1)
1784 real, intent(inout) :: pk(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1785 real, intent(inout) :: gz(bd%isd:bd%ied, bd%jsd:bd%jed ,npz+1)
1786 real, intent(inout) :: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz)
1787 real, intent(inout) :: v(bd%isd:bd%ied+1,bd%jsd:bd%jed ,npz)
1788 type(fv_grid_type), intent(INOUT), target :: gridstruct
1789 
1790 ! Local:
1791 real:: wk(bd%isd:bd%ied,bd%jsd:bd%jed)
1792 real top_value, alpha
1793 integer i,j,k
1794 
1795  integer :: is, ie, js, je
1796  integer :: isd, ied, jsd, jed
1797 
1798  is = bd%is
1799  ie = bd%ie
1800  js = bd%js
1801  je = bd%je
1802  isd = bd%isd
1803  ied = bd%ied
1804  jsd = bd%jsd
1805  jed = bd%jed
1806 
1807 alpha = 1. - beta
1808 
1809 ! pk is pe**kappa if hydrostatic
1810 top_value = ptk
1811 
1812 !$OMP parallel do default(none) shared(is,ie,js,je,pk,top_value)
1813 do j=js,je+1
1814  do i=is,ie+1
1815  pk(i,j,1) = top_value
1816  enddo
1817 enddo
1818 !$OMP parallel do default(none) shared(npz,isd,jsd,pk,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
1819 !$OMP private(wk)
1820 do k=2,npz+1
1821  if ( a2b_ord==4 ) then
1822  call a2b_ord4(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1823  else
1824  call a2b_ord2(pk(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1825  endif
1826 enddo
1827 
1828 !$OMP parallel do default(none) shared(npz,isd,jsd,gz,gridstruct,npx,npy,is,ie,js,je,ng,a2b_ord) &
1829 !$OMP private(wk)
1830 do k=1,npz+1
1831  if ( a2b_ord==4 ) then
1832  call a2b_ord4( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1833  else
1834  call a2b_ord2( gz(isd,jsd,k), wk, gridstruct, npx, npy, is, ie, js, je, ng, .true.)
1835  endif
1836 enddo
1837 
1838 !$OMP parallel do default(none) shared(npz,is,ie,js,je,pk,u,beta,gz,divg2,alpha, &
1839 !$OMP gridstruct,v,dt,du,dv) &
1840 !$OMP private(wk)
1841 do k=1,npz
1842 
1843  do j=js,je+1
1844  do i=is,ie+1
1845  wk(i,j) = pk(i,j,k+1) - pk(i,j,k)
1846  enddo
1847  enddo
1848 
1849  do j=js,je+1
1850  do i=is,ie
1851  u(i,j,k) = u(i,j,k) + beta*du(i,j,k)
1852  du(i,j,k) = dt/(wk(i,j)+wk(i+1,j)) * &
1853  ((gz(i,j,k+1)-gz(i+1,j,k))*(pk(i+1,j,k+1)-pk(i,j,k)) &
1854  + (gz(i,j,k)-gz(i+1,j,k+1))*(pk(i,j,k+1)-pk(i+1,j,k)))
1855  u(i,j,k) = (u(i,j,k) + divg2(i,j)-divg2(i+1,j) + alpha*du(i,j,k))*gridstruct%rdx(i,j)
1856  enddo
1857  enddo
1858  do j=js,je
1859  do i=is,ie+1
1860  v(i,j,k) = v(i,j,k) + beta*dv(i,j,k)
1861  dv(i,j,k) = dt/(wk(i,j)+wk(i,j+1)) * &
1862  ((gz(i,j,k+1)-gz(i,j+1,k))*(pk(i,j+1,k+1)-pk(i,j,k)) &
1863  + (gz(i,j,k)-gz(i,j+1,k+1))*(pk(i,j,k+1)-pk(i,j+1,k)))
1864  v(i,j,k) = (v(i,j,k) + divg2(i,j)-divg2(i,j+1) + alpha*dv(i,j,k))*gridstruct%rdy(i,j)
1865  enddo
1866  enddo
1867 enddo ! end k-loop
1868 
1869 end subroutine grad1_p_update
1870 
1871 
1872 subroutine mix_dp(hydrostatic, w, delp, pt, km, ak, bk, CG, fv_debug, bd)
1873 integer, intent(IN) :: km
1874 real , intent(IN) :: ak(km+1), bk(km+1)
1875 type(fv_grid_bounds_type), intent(IN) :: bd
1876 real, intent(INOUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp
1877 real, intent(INOUT), dimension(bd%isd:,bd%jsd:,1:):: w
1878 logical, intent(IN) :: hydrostatic, CG, fv_debug
1879 ! Local:
1880 real dp, dpmin
1881 integer i, j, k, ip
1882 integer ifirst, ilast
1883 integer jfirst, jlast
1884 
1885 integer :: is, ie, js, je
1886 integer :: isd, ied, jsd, jed
1887 
1888  is = bd%is
1889  ie = bd%ie
1890  js = bd%js
1891  je = bd%je
1892  isd = bd%isd
1893  ied = bd%ied
1894  jsd = bd%jsd
1895  jed = bd%jed
1896 
1897 
1898 if ( cg ) then
1899  ifirst = is-1; ilast = ie+1
1900  jfirst = js-1; jlast = je+1
1901 else
1902  ifirst = is; ilast = ie
1903  jfirst = js; jlast = je
1904 endif
1905 
1906 
1907 !$OMP parallel do default(none) shared(jfirst,jlast,km,ifirst,ilast,delp,ak,bk,pt, &
1908 !$OMP hydrostatic,w,fv_debug) &
1909 !$OMP private(ip, dpmin, dp)
1910 do 1000 j=jfirst,jlast
1911 
1912  ip = 0
1913 
1914  do k=1, km-1
1915  dpmin = 0.01 * ( ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5 )
1916  do i=ifirst, ilast
1917  if(delp(i,j,k) < dpmin) then
1918  if (fv_debug) write(*,*) 'Mix_dp: ', i, j, k, mpp_pe(), delp(i,j,k), pt(i,j,k)
1919  ! Remap from below and mix pt
1920  dp = dpmin - delp(i,j,k)
1921  pt(i,j,k) = (pt(i,j,k)*delp(i,j,k) + pt(i,j,k+1)*dp) / dpmin
1922  if ( .not.hydrostatic ) w(i,j,k) = (w(i,j,k)*delp(i,j,k) + w(i,j,k+1)*dp) / dpmin
1923  delp(i,j,k) = dpmin
1924  delp(i,j,k+1) = delp(i,j,k+1) - dp
1925  ip = ip + 1
1926  endif
1927  enddo
1928  enddo
1929 
1930  ! Bottom (k=km):
1931  dpmin = 0.01 * ( ak(km+1)-ak(km) + (bk(km+1)-bk(km))*1.e5 )
1932  do i=ifirst, ilast
1933  if(delp(i,j,km) < dpmin) then
1934  if (fv_debug) write(*,*) 'Mix_dp: ', i, j, km, mpp_pe(), delp(i,j,km), pt(i,j,km)
1935  ! Remap from above and mix pt
1936  dp = dpmin - delp(i,j,km)
1937  pt(i,j,km) = (pt(i,j,km)*delp(i,j,km) + pt(i,j,km-1)*dp)/dpmin
1938  if ( .not.hydrostatic ) w(i,j,km) = (w(i,j,km)*delp(i,j,km) + w(i,j,km-1)*dp) / dpmin
1939  delp(i,j,km) = dpmin
1940  delp(i,j,km-1) = delp(i,j,km-1) - dp
1941  ip = ip + 1
1942  endif
1943  enddo
1944  if ( fv_debug .and. ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip
1945  ! if ( ip/=0 ) write(*,*) 'Warning: Mix_dp', mpp_pe(), j, ip
1946 1000 continue
1947 
1948  end subroutine mix_dp
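!----------------------------------------------------------------------
! A minimal single-column sketch of the mix_dp logic above: each layer is
! held to a floor of 1% of its nominal sigma-pressure spacing (evaluated
! with a 1000-hPa reference surface pressure, the 1.e5 factor), and a
! too-thin layer borrows mass from the layer below while pt is mixed
! mass-weighted so that sum(pt*delp) is conserved.  Interior layers only;
! the routine name is hypothetical and it is not called in this module.
 subroutine mix_dp_column_sketch(km, ak, bk, delp, pt)
   integer, intent(in)    :: km
   real,    intent(in)    :: ak(km+1), bk(km+1)
   real,    intent(inout) :: delp(km), pt(km)
   real    :: dpmin, dp
   integer :: k
   do k=1,km-1
      dpmin = 0.01 * ( ak(k+1)-ak(k) + (bk(k+1)-bk(k))*1.e5 )
      if ( delp(k) < dpmin ) then
         dp = dpmin - delp(k)                        ! mass borrowed from layer k+1
         pt(k)     = ( pt(k)*delp(k) + pt(k+1)*dp ) / dpmin
         delp(k)   = dpmin
         delp(k+1) = delp(k+1) - dp
      endif
   enddo
 end subroutine mix_dp_column_sketch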
1949 
1950 
1951  subroutine geopk(ptop, pe, peln, delp, pk, gz, hs, pt, q_con, pkz, km, akap, CG, nested, computehalo, npx, npy, a2b_ord, bd)
1953  integer, intent(IN) :: km, npx, npy, a2b_ord
1954  real , intent(IN) :: akap, ptop
1955  type(fv_grid_bounds_type), intent(IN) :: bd
1956  real , intent(IN) :: hs(bd%isd:bd%ied,bd%jsd:bd%jed)
1957  real, intent(IN), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km):: pt, delp
1958  real, intent(IN), dimension(bd%isd:,bd%jsd:,1:):: q_con
1959  logical, intent(IN) :: CG, nested, computehalo
1960  ! !OUTPUT PARAMETERS
1961  real, intent(OUT), dimension(bd%isd:bd%ied,bd%jsd:bd%jed,km+1):: gz, pk
1962  real, intent(OUT) :: pe(bd%is-1:bd%ie+1,km+1,bd%js-1:bd%je+1)
1963  real, intent(out) :: peln(bd%is:bd%ie,km+1,bd%js:bd%je) ! ln(pe)
1964  real, intent(out) :: pkz(bd%is:bd%ie,bd%js:bd%je,km)
1965  ! !DESCRIPTION:
1966  ! Calculates the geopotential (gz) and the pressure raised to the power kappa (pk = pe**kappa).
1967  ! Local:
1968  real peg(bd%isd:bd%ied,km+1)
1969  real pkg(bd%isd:bd%ied,km+1)
1970  real(kind=8) p1d(bd%isd:bd%ied)
1971  real(kind=8) g1d(bd%isd:bd%ied)
1972  real logp(bd%isd:bd%ied)
1973  integer i, j, k
1974  integer ifirst, ilast
1975  integer jfirst, jlast
1976 
1977  integer :: is, ie, js, je
1978  integer :: isd, ied, jsd, jed
1979 
1980  is = bd%is
1981  ie = bd%ie
1982  js = bd%js
1983  je = bd%je
1984  isd = bd%isd
1985  ied = bd%ied
1986  jsd = bd%jsd
1987  jed = bd%jed
1988 
1989  if ( (.not. cg .and. a2b_ord==4) .or. (nested .and. .not. cg) ) then ! D-Grid
1990  ifirst = is-2; ilast = ie+2
1991  jfirst = js-2; jlast = je+2
1992  else
1993  ifirst = is-1; ilast = ie+1
1994  jfirst = js-1; jlast = je+1
1995  endif
1996 
1997  if (nested .and. computehalo) then
1998  if (is == 1) ifirst = isd
1999  if (ie == npx-1) ilast = ied
2000  if (js == 1) jfirst = jsd
2001  if (je == npy-1) jlast = jed
2002  end if
2003 
2004 !$OMP parallel do default(none) shared(jfirst,jlast,ifirst,ilast,pk,km,gz,hs,ptop,ptk, &
2005 !$OMP js,je,is,ie,peln,peln1,pe,delp,akap,pt,CG,pkz,q_con) &
2006 !$OMP private(peg, pkg, p1d, g1d, logp)
2007  do 2000 j=jfirst,jlast
2008 
2009  do i=ifirst, ilast
2010  p1d(i) = ptop
2011  pk(i,j,1) = ptk
2012  g1d(i) = hs(i,j)
2013  gz(i,j,km+1) = hs(i,j)
2014 #ifdef USE_COND
2015  peg(i,1) = ptop
2016  pkg(i,1) = ptk
2017 #endif
2018  enddo
2019 
2020 #ifndef SW_DYNAMICS
2021  if( j>=js .and. j<=je) then
2022  do i=is,ie
2023  peln(i,1,j) = peln1
2024  enddo
2025  endif
2026 #endif
2027 
2028  if( j>(js-2) .and. j<(je+2) ) then
2029  do i=max(ifirst,is-1), min(ilast,ie+1)
2030  pe(i,1,j) = ptop
2031  enddo
2032  endif
2033 
2034  ! Top down
2035  do k=2,km+1
2036  do i=ifirst, ilast
2037  p1d(i) = p1d(i) + delp(i,j,k-1)
2038  logp(i) = log(p1d(i))
2039  pk(i,j,k) = exp( akap*logp(i) )
2040 #ifdef USE_COND
2041  peg(i,k) = peg(i,k-1) + delp(i,j,k-1)*(1.-q_con(i,j,k-1))
2042  pkg(i,k) = exp( akap*log(peg(i,k)) )
2043 #endif
2044  enddo
2045 
2046  if( j>(js-2) .and. j<(je+2) ) then
2047  do i=max(ifirst,is-1), min(ilast,ie+1)
2048  pe(i,k,j) = p1d(i)
2049  enddo
2050  if( j>=js .and. j<=je) then
2051  do i=is,ie
2052  peln(i,k,j) = logp(i)
2053  enddo
2054  endif
2055  endif
2056 
2057  enddo
2058 
2059  ! Bottom up
2060  do k=km,1,-1
2061  do i=ifirst, ilast
2062 #ifdef SW_DYNAMICS
2063  g1d(i) = g1d(i) + pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2064 #else
2065 #ifdef USE_COND
2066  g1d(i) = g1d(i) + cp_air*pt(i,j,k)*(pkg(i,k+1)-pkg(i,k))
2067 #else
2068  g1d(i) = g1d(i) + cp_air*pt(i,j,k)*(pk(i,j,k+1)-pk(i,j,k))
2069 #endif
2070 #endif
2071  gz(i,j,k) = g1d(i)
2072  enddo
2073  enddo
2074 
2075  if ( .not. cg .and. j .ge. js .and. j .le. je ) then
2076  do k=1,km
2077  do i=is,ie
2078  pkz(i,j,k) = (pk(i,j,k+1)-pk(i,j,k))/(akap*(peln(i,k+1,j)-peln(i,k,j)))
2079  enddo
2080  enddo
2081  endif
2082 
2083 2000 continue
2084  end subroutine geopk
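!----------------------------------------------------------------------
! A minimal single-column sketch of the three steps in geopk above:
! interface pressure (pe, pk = pe**kappa, peln) accumulated top-down,
! geopotential integrated bottom-up from the surface height with the
! hydrostatic relation d(gz) = -cp_air*pt*d(pk), and pkz formed as the
! layer-mean p**kappa consistent with that relation.  The routine name
! is hypothetical and it is not called anywhere in this module.
 subroutine geopk_column_sketch(km, akap, ptop, hs, pt, delp, pe, pk, peln, gz, pkz)
   integer, intent(in)  :: km
   real,    intent(in)  :: akap, ptop, hs     ! kappa, model-top pressure, surface geopotential
   real,    intent(in)  :: pt(km), delp(km)   ! (virtual) potential temperature, layer thickness
   real,    intent(out) :: pe(km+1), pk(km+1), peln(km+1), gz(km+1), pkz(km)
   integer :: k
   ! Top down: accumulate interface pressure and its kappa power
   pe(1) = ptop;  peln(1) = log(ptop);  pk(1) = ptop**akap
   do k=2,km+1
      pe(k)   = pe(k-1) + delp(k-1)
      peln(k) = log(pe(k))
      pk(k)   = exp( akap*peln(k) )
   enddo
   ! Bottom up: hydrostatic integration of the geopotential
   gz(km+1) = hs
   do k=km,1,-1
      gz(k) = gz(k+1) + cp_air*pt(k)*(pk(k+1)-pk(k))
   enddo
   ! Layer-mean p**kappa
   do k=1,km
      pkz(k) = (pk(k+1)-pk(k)) / (akap*(peln(k+1)-peln(k)))
   enddo
 end subroutine geopk_column_sketch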
2085 
2086 
2087  subroutine del2_cubed(q, cd, gridstruct, domain, npx, npy, km, nmax, bd)
2088  !---------------------------------------------------------------
2089  ! This routine is for filtering the omega field for the physics
2090  !---------------------------------------------------------------
2091  integer, intent(in):: npx, npy, km, nmax
2092  real(kind=R_GRID), intent(in):: cd ! cd = K * da_min; 0 < K < 0.25
2093  type(fv_grid_bounds_type), intent(IN) :: bd
2094  real, intent(inout):: q(bd%isd:bd%ied,bd%jsd:bd%jed,km)
2095  type(fv_grid_type), intent(IN), target :: gridstruct
2096  type(domain2d), intent(INOUT) :: domain
2097  real, parameter:: r3 = 1./3.
2098  real :: fx(bd%isd:bd%ied+1,bd%jsd:bd%jed), fy(bd%isd:bd%ied,bd%jsd:bd%jed+1)
2099  real :: q2(bd%isd:bd%ied,bd%jsd:bd%jed)
2100  integer i,j,k, n, nt, ntimes
2101  integer :: is, ie, js, je
2102  integer :: isd, ied, jsd, jed
2103 
2104  !Local routine pointers
2105 ! real, pointer, dimension(:,:) :: rarea
2106 ! real, pointer, dimension(:,:) :: del6_u, del6_v
2107 ! logical, pointer :: sw_corner, se_corner, ne_corner, nw_corner
2108 
2109  is = bd%is
2110  ie = bd%ie
2111  js = bd%js
2112  je = bd%je
2113  isd = bd%isd
2114  ied = bd%ied
2115  jsd = bd%jsd
2116  jed = bd%jed
2117 
2118 ! rarea => gridstruct%rarea
2119 ! del6_u => gridstruct%del6_u
2120 ! del6_v => gridstruct%del6_v
2121 
2122 ! sw_corner => gridstruct%sw_corner
2123 ! nw_corner => gridstruct%nw_corner
2124 ! se_corner => gridstruct%se_corner
2125 ! ne_corner => gridstruct%ne_corner
2126 
2127  ntimes = min(3, nmax)
2128 
2129  call timing_on('COMM_TOTAL')
2130  call mpp_update_domains(q, domain, complete=.true.)
2131  call timing_off('COMM_TOTAL')
2132 
2133 
2134  do n=1,ntimes
2135  nt = ntimes - n
2136 
2137 !$OMP parallel do default(none) shared(km,q,is,ie,js,je,npx,npy, &
2138 !$OMP nt,isd,jsd,gridstruct,bd, &
2139 !$OMP cd) &
2140 !$OMP private(fx, fy)
2141  do k=1,km
2142 
2143  if ( gridstruct%sw_corner ) then
2144  q(1,1,k) = (q(1,1,k)+q(0,1,k)+q(1,0,k)) * r3
2145  q(0,1,k) = q(1,1,k)
2146  q(1,0,k) = q(1,1,k)
2147  endif
2148  if ( gridstruct%se_corner ) then
2149  q(ie, 1,k) = (q(ie,1,k)+q(npx,1,k)+q(ie,0,k)) * r3
2150  q(npx,1,k) = q(ie,1,k)
2151  q(ie, 0,k) = q(ie,1,k)
2152  endif
2153  if ( gridstruct%ne_corner ) then
2154  q(ie, je,k) = (q(ie,je,k)+q(npx,je,k)+q(ie,npy,k)) * r3
2155  q(npx,je,k) = q(ie,je,k)
2156  q(ie,npy,k) = q(ie,je,k)
2157  endif
2158  if ( gridstruct%nw_corner ) then
2159  q(1, je,k) = (q(1,je,k)+q(0,je,k)+q(1,npy,k)) * r3
2160  q(0, je,k) = q(1,je,k)
2161  q(1,npy,k) = q(1,je,k)
2162  endif
2163 
2164  if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 1, gridstruct%nested, bd, &
2165  gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner )
2166  do j=js-nt,je+nt
2167  do i=is-nt,ie+1+nt
2168 #ifdef USE_SG
2169  fx(i,j) = gridstruct%dy(i,j)*gridstruct%sina_u(i,j)*(q(i-1,j,k)-q(i,j,k))*gridstruct%rdxc(i,j)
2170 #else
2171  fx(i,j) = gridstruct%del6_v(i,j)*(q(i-1,j,k)-q(i,j,k))
2172 #endif
2173  enddo
2174  enddo
2175 
2176  if(nt>0) call copy_corners(q(isd,jsd,k), npx, npy, 2, gridstruct%nested, bd, &
2177  gridstruct%sw_corner, gridstruct%se_corner, gridstruct%nw_corner, gridstruct%ne_corner)
2178  do j=js-nt,je+1+nt
2179  do i=is-nt,ie+nt
2180 #ifdef USE_SG
2181  fy(i,j) = gridstruct%dx(i,j)*gridstruct%sina_v(i,j)*(q(i,j-1,k)-q(i,j,k))*gridstruct%rdyc(i,j)
2182 #else
2183  fy(i,j) = gridstruct%del6_u(i,j)*(q(i,j-1,k)-q(i,j,k))
2184 #endif
2185  enddo
2186  enddo
2187 
2188  do j=js-nt,je+nt
2189  do i=is-nt,ie+nt
2190  q(i,j,k) = q(i,j,k) + cd*gridstruct%rarea(i,j)*(fx(i,j)-fx(i+1,j)+fy(i,j)-fy(i,j+1))
2191  enddo
2192  enddo
2193  enddo
2194  enddo
2195 
2196  end subroutine del2_cubed
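!----------------------------------------------------------------------
! A minimal sketch of one del2_cubed pass: a flux-form Laplacian filter,
! q <- q + cd*rarea*(fx(i)-fx(i+1)+fy(j)-fy(j+1)), built from face
! fluxes; the corner averaging and copy_corners calls above only patch
! the stencil across cube edges.  The doubly periodic, unit-grid version
! below shows the same flux-divergence update for a 2-D field; the
! routine name is hypothetical and it is not called in this module.
 subroutine del2_periodic_sketch(nx, ny, cd, q)
   integer, intent(in)    :: nx, ny
   real,    intent(in)    :: cd               ! diffusion coefficient (area units)
   real,    intent(inout) :: q(nx,ny)
   real    :: fx(nx+1,ny), fy(nx,ny+1)
   integer :: i, j, im, jm
   ! Face fluxes: difference of the two adjacent cell values (dx=dy=area=1)
   do j=1,ny
      do i=1,nx+1
         im = i-1; if (im == 0) im = nx       ! periodic in x
         fx(i,j) = q(im,j) - q(mod(i-1,nx)+1,j)
      enddo
   enddo
   do j=1,ny+1
      jm = j-1; if (jm == 0) jm = ny          ! periodic in y
      do i=1,nx
         fy(i,j) = q(i,jm) - q(i,mod(j-1,ny)+1)
      enddo
   enddo
   ! Flux divergence, same form as the q(i,j,k) update in del2_cubed
   do j=1,ny
      do i=1,nx
         q(i,j) = q(i,j) + cd*( fx(i,j)-fx(i+1,j) + fy(i,j)-fy(i,j+1) )
      enddo
   enddo
 end subroutine del2_periodic_sketch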
2197 
2198  subroutine init_ijk_mem(i1, i2, j1, j2, km, array, var)
2199  integer, intent(in):: i1, i2, j1, j2, km
2200  real, intent(inout):: array(i1:i2,j1:j2,km)
2201  real, intent(in):: var
2202  integer:: i, j, k
2203 
2204 !$OMP parallel do default(none) shared(i1,i2,j1,j2,km,array,var)
2205  do k=1,km
2206  do j=j1,j2
2207  do i=i1,i2
2208  array(i,j,k) = var
2209  enddo
2210  enddo
2211  enddo
2212 
2213  end subroutine init_ijk_mem
2214 
2215 
2216  subroutine rayleigh_fast(dt, npx, npy, npz, pfull, tau, u, v, w, &
2217  ptop, hydrostatic, rf_cutoff, bd)
2218 ! Simple "inline" version of the Rayleigh friction
2219  real, intent(in):: dt
2220  real, intent(in):: tau ! time scale (days)
2221  real, intent(in):: ptop, rf_cutoff
2222  real, intent(in), dimension(npz):: pfull
2223  integer, intent(in):: npx, npy, npz
2224  logical, intent(in):: hydrostatic
2225  type(fv_grid_bounds_type), intent(IN) :: bd
2226  real, intent(inout):: u(bd%isd:bd%ied ,bd%jsd:bd%jed+1,npz) ! D grid zonal wind (m/s)
2227  real, intent(inout):: v(bd%isd:bd%ied+1,bd%jsd:bd%jed,npz) ! D grid meridional wind (m/s)
2228  real, intent(inout):: w(bd%isd: ,bd%jsd: ,1: ) ! cell center vertical wind (m/s)
2229 !
2230  real(kind=R_GRID):: rff(npz)
2231  real, parameter:: sday = 86400.
2232  real:: tau0
2233  integer i, j, k
2234 
2235  integer :: is, ie, js, je
2236  integer :: isd, ied, jsd, jed
2237 
2238  is = bd%is
2239  ie = bd%ie
2240  js = bd%js
2241  je = bd%je
2242  isd = bd%isd
2243  ied = bd%ied
2244  jsd = bd%jsd
2245  jed = bd%jed
2246 
2247  if ( .not. rff_initialized ) then
2248  tau0 = tau * sday
2249  allocate( rf(npz) )
2250  rf(:) = 1.
2251 
2252  if( is_master() ) write(6,*) 'Fast Rayleigh friction E-folding time (days):'
2253  do k=1, npz
2254  if ( pfull(k) < rf_cutoff ) then
2255  rff(k) = dt/tau0*sin(0.5*pi*log(rf_cutoff/pfull(k))/log(rf_cutoff/ptop))**2
2256 ! Convert the damping rate rff(k) into the implicit relaxation factor rf(k) = 1/(1+rff(k))
2257  if( is_master() ) write(6,*) k, 0.01*pfull(k), dt/(rff(k)*sday)
2258  kmax = k
2259  rff(k) = 1.d0 / (1.0d0+rff(k))
2260  rf(k) = rff(k)
2261  else
2262  exit
2263  endif
2264  enddo
2265  rff_initialized = .true.
2266  endif
2267 
2268 !$OMP parallel do default(none) shared(is,ie,js,je,kmax,pfull,rf_cutoff,w,rf,u,v,hydrostatic)
2269  do k=1,kmax
2270  if ( pfull(k) < rf_cutoff ) then
2271  do j=js,je+1
2272  do i=is,ie
2273  u(i,j,k) = rf(k)*u(i,j,k)
2274  enddo
2275  enddo
2276  do j=js,je
2277  do i=is,ie+1
2278  v(i,j,k) = rf(k)*v(i,j,k)
2279  enddo
2280  enddo
2281  if ( .not. hydrostatic ) then
2282  do j=js,je
2283  do i=is,ie
2284  w(i,j,k) = rf(k)*w(i,j,k)
2285  enddo
2286  enddo
2287  endif
2288  endif
2289  enddo
2290 
2291  end subroutine rayleigh_fast
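!----------------------------------------------------------------------
! A minimal sketch of the damping profile used by rayleigh_fast above:
! the winds are multiplied each step by rf(k) = 1/(1+c(k)), where
! c(k) = (dt/tau0)*sin(0.5*pi*log(rf_cutoff/pfull)/log(rf_cutoff/ptop))**2
! ramps from 0 at the cutoff pressure to dt/tau0 at the model top.  The
! helper below just tabulates that profile; the routine name is
! hypothetical and it is not called anywhere in this module.
 subroutine rayleigh_profile_sketch(npz, dt, tau_days, ptop, rf_cutoff, pfull, rf_out)
   integer, intent(in)  :: npz
   real,    intent(in)  :: dt               ! acoustic sub-step (s)
   real,    intent(in)  :: tau_days         ! e-folding time scale at the model top (days)
   real,    intent(in)  :: ptop, rf_cutoff  ! model-top and cutoff pressures (Pa)
   real,    intent(in)  :: pfull(npz)       ! layer-mean pressures (Pa)
   real,    intent(out) :: rf_out(npz)      ! multiplicative damping factor per layer
   real    :: tau0, c
   integer :: k
   tau0 = tau_days * 86400.
   rf_out(:) = 1.                           ! no damping at or below the cutoff pressure
   do k=1,npz
      if ( pfull(k) < rf_cutoff ) then
         c = dt/tau0 * sin( 0.5*pi*log(rf_cutoff/pfull(k)) / &
                            log(rf_cutoff/ptop) )**2
         rf_out(k) = 1. / (1. + c)
      endif
   enddo
 end subroutine rayleigh_profile_sketch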
2292 
2293 
2294 end module dyn_core_nlm_mod