#include <stdio.h>    /* used only by the demo sketches below */
#include <stdlib.h>
#include <math.h>
/* These two local headers are assumed to provide error_handler(),
   nearest_index(), MAXXGRID and the create_xgrid_*() prototypes. */
#include "mosaic_util.h"
#include "create_xgrid.h"

/* cubic_spline_sp: shape-preserving (monotone) piecewise-cubic interpolation
   of data1 on grid1 onto the points of grid2. */
void cubic_spline_sp(int size1, int size2, const double *grid1, const double *grid2,
                     const double *data1, double *data2)
{
  double *delta=NULL, *dh=NULL, *d=NULL, *b=NULL, *c=NULL;
  double s, w1, w2, p;
  int i, k, n, klo, kmax;
  for(i=1; i<size1; i++) {
    if( grid1[i] <= grid1[i-1] )
      error_handler("cubic_spline_sp: grid1 is not monotonic increasing");
  }
  for(i=0; i<size2; i++) {
    if( grid2[i] < grid1[0] || grid2[i] > grid1[size1-1])
      error_handler("cubic_spline_sp: grid2 lies outside grid1");
  }
  if(size1 < 2)
    error_handler("cubic_spline_sp: the size of input grid should be at least 2");
  if(size1 == 2) {  /* two points: reduce to linear interpolation and return */
    p = (data1[1]-data1[0])/(grid1[1]-grid1[0]);
    for(i=0; i<size2; i++) data2[i] = p*(grid2[i] - grid1[0]) + data1[0];
    return;
  }
  delta = (double *)malloc((size1-1)*sizeof(double));
  dh    = (double *)malloc((size1-1)*sizeof(double));
  d     = (double *)malloc(size1*sizeof(double));

  /* interval widths and secant (first divided-difference) slopes */
  for(k=0; k<size1-1; k++) {
    dh[k]    = grid1[k+1]-grid1[k];
    delta[k] = (data1[k+1]-data1[k])/dh[k];
  }
  /* interior slopes: weighted harmonic mean of the two adjacent secant
     slopes; zeroed across a local extremum so monotonicity is preserved */
  for(k=1; k<size1-1; k++) {
    if( delta[k]*delta[k-1] > 0.0 ) {
      w1 = 2.0*dh[k] + dh[k-1];
      w2 = dh[k] + 2.0*dh[k-1];
      d[k] = (w1+w2)/(w1/delta[k-1]+w2/delta[k]);
    }
    else {
      d[k] = 0.0;   /* elided in the excerpt; a zero slope at extrema is the
                       standard shape-preserving choice */
    }
  }
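  /* Note: with w1 = 2*h_k + h_{k-1} and w2 = h_k + 2*h_{k-1}, the formula
         d_k = (w1 + w2) / (w1/delta_{k-1} + w2/delta_k)
     is the PCHIP-style (Fritsch-Carlson) weighted harmonic mean. It always
     lies between the two neighboring secant slopes when they share a sign,
     so the resulting cubic cannot overshoot the data. */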
  /* boundary slopes: one-sided three-point estimates, limited so the
     interpolant stays shape-preserving near the ends (the limiter bodies
     are reconstructed from the symmetric structure of the two tests) */
  d[0] = ((2.0*dh[0] + dh[1])*delta[0] - dh[0]*delta[1])/(dh[0]+dh[1]);
  if( d[0]*delta[0] < 0.0 ) {
    d[0] = 0.0;
  }
  else if ( delta[0]*delta[1] < 0.0 && fabs(d[0]) > fabs(3.0*delta[0])) {
    d[0] = 3.0*delta[0];
  }

  kmax = size1-1;
  d[kmax] = ((2.0*dh[kmax-1] + dh[kmax-2])*delta[kmax-1]
             - dh[kmax-1]*delta[kmax-2])/(dh[kmax-1]+dh[kmax-2]);
  if( d[kmax]*delta[kmax-1] < 0.0 ) {
    d[kmax] = 0.0;
  }
  else if ( delta[kmax-1]*delta[kmax-2] < 0.0 && fabs(d[kmax]) > fabs(3.0*delta[kmax-1])) {
    d[kmax] = 3.0*delta[kmax-1];
  }
  /* remaining polynomial coefficients on each interval:
     f(x) = data1[k] + s*d[k] + s^2*c[k] + s^3*b[k],  with s = x - grid1[k] */
  b = (double *)malloc((size1-1)*sizeof(double));
  c = (double *)malloc((size1-1)*sizeof(double));
  for (k=0; k<size1-1; k++) {
    c[k] = (3.0*delta[k]-2.0*d[k]-d[k+1])/dh[k];
    b[k] = (d[k]-2.0*delta[k]+d[k+1])/(dh[k]*dh[k]);
  }
  /* evaluate the piecewise cubic at each target point; the bracketing logic
     is reconstructed around nearest_index() */
  for(k=0; k<size2; k++) {
    n = nearest_index(grid2[k], grid1, size1);
    if (grid1[n] < grid2[k])
      klo = n;
    else
      klo = (n == 0) ? 0 : n-1;
    s = grid2[k] - grid1[klo];
    data2[k] = data1[klo]+s*(d[klo]+s*(c[klo]+s*b[klo]));
  }

  free(delta); free(dh); free(d); free(b); free(c);
}
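/* Minimal usage sketch (not part of the original source): interpolate
   y = x^2 sampled at five points onto a finer grid. Because the scheme is
   shape-preserving, every output value stays within the range of the data. */
static void demo_cubic_spline_sp(void)
{
  double grid1[5] = {0., 1., 2., 3., 4.};
  double data1[5] = {0., 1., 4., 9., 16.};
  double grid2[9] = {0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4.};
  double data2[9];
  int i;

  cubic_spline_sp(5, 9, grid1, grid2, data1, data2);
  for(i=0; i<9; i++) printf("x = %4.1f  y = %8.4f\n", grid2[i], data2[i]);
}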
/* cubic_spline: classical cubic-spline interpolation of data1 on grid1 onto
   grid2, with prescribed first derivatives yp1 and ypn at the two ends. */
void cubic_spline(int size1, int size2, const double *grid1, const double *grid2,
                  const double *data1, double *data2, double yp1, double ypn )
{
  double *y2=NULL, *u=NULL;
  double p, sig, qn, un, h, a, b;
  int i, k, n, klo, khi;
  for(i=1; i<size1; i++) {
    if( grid1[i] <= grid1[i-1] )
      error_handler("cubic_spline: grid1 is not monotonic increasing");
  }
  for(i=0; i<size2; i++) {
    if( grid2[i] < grid1[0] || grid2[i] > grid1[size1-1])
      error_handler("cubic_spline: grid2 lies outside grid1");
  }
  if(size1 < 2)
    error_handler("cubic_spline: the size of input grid should be at least 2");
  if(size1 == 2) {  /* two points: reduce to linear interpolation and return */
    p = (data1[1]-data1[0])/(grid1[1]-grid1[0]);
    for(i=0; i<size2; i++) data2[i] = p*(grid2[i] - grid1[0]) + data1[0];
    return;
  }
  y2 = (double *)malloc(size1*sizeof(double));
  u  = (double *)malloc(size1*sizeof(double));

  /* lower boundary condition, clamped to slope yp1; the Numerical-Recipes
     convention (assumed here) treats yp1 > 0.99e30 as a natural spline */
  if (yp1 > 0.99e30) {
    y2[0] = 0.;
    u[0]  = 0.;
  }
  else {
    y2[0] = -0.5;
    u[0]=(3./(grid1[1]-grid1[0]))*((data1[1]-data1[0])/(grid1[1]-grid1[0])-yp1);
  }
  /* forward sweep of the tridiagonal system for the second derivatives y2;
     the p and y2[i] updates are the standard sweep, elided in the excerpt */
  for(i=1; i<size1-1; i++) {
    sig = (grid1[i]-grid1[i-1])/(grid1[i+1]-grid1[i-1]);
    p = sig*y2[i-1]+2.;
    y2[i] = (sig-1.)/p;
    u[i]=(6.*((data1[i+1]-data1[i])/(grid1[i+1]-grid1[i])-(data1[i]-data1[i-1])
              /(grid1[i]-grid1[i-1]))/(grid1[i+1]-grid1[i-1])-sig*u[i-1])/p;
  }
  /* upper boundary condition, clamped to slope ypn (same natural-spline
     convention assumed for ypn > 0.99e30) */
  if (ypn > 0.99e30) { qn = 0.; un = 0.; }
  else {
    qn = 0.5;
    un=(3./(grid1[size1-1]-grid1[size1-2]))*(ypn-(data1[size1-1]-data1[size1-2])/(grid1[size1-1]-grid1[size1-2]));
  }
  y2[size1-1]=(un-qn*u[size1-2])/(qn*y2[size1-2]+1.);
  for(k=size1-2; k>=0; k--) y2[k] = y2[k]*y2[k+1]+u[k];
  /* evaluate the spline at each target point; bracketing reconstructed
     around nearest_index(), as in cubic_spline_sp() above */
  for(k=0; k<size2; k++) {
    n = nearest_index(grid2[k], grid1, size1);
    if (grid1[n] < grid2[k])
      klo = n;
    else
      klo = (n == 0) ? 0 : n-1;
    khi = klo+1;
    h = grid1[khi]-grid1[klo];
    a = (grid1[khi] - grid2[k])/h;
    b = (grid2[k] - grid1[klo])/h;
    data2[k] = a*data1[klo] + b*data1[khi]
             + ((pow(a,3.0)-a)*y2[klo] + (pow(b,3.0)-b)*y2[khi])*(pow(h,2.0))/6.;
  }

  free(y2); free(u);
}
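/* Minimal usage sketch (not part of the original source): with the exact
   endpoint slopes of y = x^2 (0 at x=0, 8 at x=4), the clamped spline
   reproduces the parabola to rounding error. */
static void demo_cubic_spline(void)
{
  double grid1[5] = {0., 1., 2., 3., 4.};
  double data1[5] = {0., 1., 4., 9., 16.};
  double grid2[3] = {0.5, 1.5, 2.5};
  double data2[3];
  int i;

  cubic_spline(5, 3, grid1, grid2, data1, data2, 0.0, 8.0);
  for(i=0; i<3; i++)
    printf("x = %3.1f  y = %8.4f\n", grid2[i], data2[i]);  /* ~0.25, 2.25, 6.25 */
}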
/* conserve_interp: first-order conservative remapping from a source grid to a
   destination grid. Grid arrays hold cell-corner coordinates; mask_src marks
   valid source cells. */
void conserve_interp(int nx_src, int ny_src, int nx_dst, int ny_dst,
                     const double *x_src, const double *y_src,
                     const double *x_dst, const double *y_dst,
                     const double *mask_src, const double *data_src, double *data_dst )
{
  int n, nxgrid;
  int *xgrid_i1, *xgrid_j1, *xgrid_i2, *xgrid_j2;
  double *xgrid_area, *dst_area, *area_frac;
  xgrid_i1   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_j1   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_i2   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_j2   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_area = (double *)malloc(MAXXGRID*sizeof(double));
  dst_area   = (double *)malloc(nx_dst*ny_dst*sizeof(double));
  /* build the exchange grid; the call is completed from the
     create_xgrid_2dx2d_order1 prototype */
  nxgrid = create_xgrid_2dx2d_order1(&nx_src, &ny_src, &nx_dst, &ny_dst,
                                     x_src, y_src, x_dst, y_dst, mask_src,
                                     xgrid_i1, xgrid_j1, xgrid_i2, xgrid_j2, xgrid_area);
  for(n=0; n<nx_dst*ny_dst; n++) dst_area[n] = 0;
  for(n=0; n<nxgrid; n++)
    dst_area[xgrid_j2[n]*nx_dst+xgrid_i2[n]] += xgrid_area[n];
  area_frac = (double *)malloc(nxgrid*sizeof(double));
  for(n=0; n<nxgrid; n++)
    area_frac[n] = xgrid_area[n]/dst_area[xgrid_j2[n]*nx_dst+xgrid_i2[n]];
  for(n=0; n<nx_dst*ny_dst; n++) {
    data_dst[n] = 0.0;
  }
  /* area-weighted accumulation of source data onto destination cells */
  for(n=0; n<nxgrid; n++) {
    data_dst[xgrid_j2[n]*nx_dst+xgrid_i2[n]] +=
      data_src[xgrid_j1[n]*nx_src+xgrid_i1[n]]*area_frac[n];
  }

  free(xgrid_i1); free(xgrid_j1); free(xgrid_i2); free(xgrid_j2);
  free(xgrid_area); free(dst_area); free(area_frac);
}
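/* Minimal usage sketch (not part of the original source): remap a 2x2 source
   field onto a single destination cell covering the same small patch near the
   equator (corner coordinates in radians, row-major, sized (nx+1)*(ny+1)).
   The result is the area-weighted mean of the four source values, ~2.5. */
static void demo_conserve_interp(void)
{
  double x_src[9], y_src[9], x_dst[4], y_dst[4];
  double mask_src[4] = {1., 1., 1., 1.};
  double data_src[4] = {1., 2., 3., 4.};
  double data_dst[1];
  int i, j;

  for(j=0; j<3; j++) for(i=0; i<3; i++) {
    x_src[j*3+i] = 0.01*i;   /* corner longitudes */
    y_src[j*3+i] = 0.01*j;   /* corner latitudes  */
  }
  for(j=0; j<2; j++) for(i=0; i<2; i++) {
    x_dst[j*2+i] = 0.02*i;
    y_dst[j*2+i] = 0.02*j;
  }
  conserve_interp(2, 2, 1, 1, x_src, y_src, x_dst, y_dst, mask_src, data_src, data_dst);
  printf("area-weighted mean = %g\n", data_dst[0]);
}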
/* conserve_interp_great_circle: same first-order conservative remapping, but
   the exchange grid is built with great-circle cell edges. */
void conserve_interp_great_circle(int nx_src, int ny_src, int nx_dst, int ny_dst,
                                  const double *x_src, const double *y_src,
                                  const double *x_dst, const double *y_dst,
                                  const double *mask_src, const double *data_src, double *data_dst )
{
  int n, nxgrid;
  int *xgrid_i1, *xgrid_j1, *xgrid_i2, *xgrid_j2;
  double *xgrid_area, *dst_area, *area_frac, *xgrid_di, *xgrid_dj;
  xgrid_i1   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_j1   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_i2   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_j2   = (int *)malloc(MAXXGRID*sizeof(int));
  xgrid_area = (double *)malloc(MAXXGRID*sizeof(double));
  xgrid_di   = (double *)malloc(MAXXGRID*sizeof(double));
  xgrid_dj   = (double *)malloc(MAXXGRID*sizeof(double));
  dst_area   = (double *)malloc(nx_dst*ny_dst*sizeof(double));
  /* exchange-grid call completed from the create_xgrid_great_circle prototype;
     the last two arguments receive the exchange-cell centroids */
  nxgrid = create_xgrid_great_circle(&nx_src, &ny_src, &nx_dst, &ny_dst,
                                     x_src, y_src, x_dst, y_dst, mask_src,
                                     xgrid_i1, xgrid_j1, xgrid_i2, xgrid_j2,
                                     xgrid_area, xgrid_di, xgrid_dj);
  for(n=0; n<nx_dst*ny_dst; n++) dst_area[n] = 0;
  for(n=0; n<nxgrid; n++)
    dst_area[xgrid_j2[n]*nx_dst+xgrid_i2[n]] += xgrid_area[n];
  area_frac = (double *)malloc(nxgrid*sizeof(double));
  for(n=0; n<nxgrid; n++)
    area_frac[n] = xgrid_area[n]/dst_area[xgrid_j2[n]*nx_dst+xgrid_i2[n]];
  for(n=0; n<nx_dst*ny_dst; n++) {
    data_dst[n] = 0.0;
  }
  for(n=0; n<nxgrid; n++) {
    data_dst[xgrid_j2[n]*nx_dst+xgrid_i2[n]] +=
      data_src[xgrid_j1[n]*nx_src+xgrid_i1[n]]*area_frac[n];
  }

  free(xgrid_i1); free(xgrid_j1); free(xgrid_i2); free(xgrid_j2);
  free(xgrid_area); free(xgrid_di); free(xgrid_dj);
  free(dst_area); free(area_frac);
}
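/* Note: apart from the exchange-grid construction, this routine is identical
   to conserve_interp() above; demo_conserve_interp() carries over unchanged
   with conserve_interp_great_circle() substituted as the entry point. */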
/* linear_vertical_interp: level-by-level linear interpolation of a 3-D field
   (nx*ny points per level) from vertical grid1 (nk1 levels) to grid2 (nk2
   levels). Data are laid out level by level, each level holding nx*ny values. */
void linear_vertical_interp(int nx, int ny, int nk1, int nk2, const double *grid1,
                            const double *grid2, double *data1, double *data2)
{
  double w;
  int n1, n2, i, n, k, l;
  for(k=1; k<nk1; k++) {
    if(grid1[k] <= grid1[k-1]) error_handler("interp.c: grid1 not monotonic");
  }
  for(k=1; k<nk2; k++) {
    if(grid2[k] <= grid2[k-1]) error_handler("interp.c: grid2 not monotonic");
  }
  if (grid1[0] > grid2[0])         error_handler("interp.c: grid2 lies outside grid1");
  if (grid1[nk1-1] < grid2[nk2-1]) error_handler("interp.c: grid2 lies outside grid1");
  for(k=0; k<nk2; k++) {
    n = nearest_index(grid2[k], grid1, nk1);   /* bracketing reconstructed */
    if (grid1[n] < grid2[k]) {
      /* target level lies between grid1[n] and grid1[n+1] */
      w = (grid2[k]-grid1[n])/(grid1[n+1]-grid1[n]);
      for(l=0; l<nx*ny; l++) {
        n1 = n*nx*ny + l;
        n2 = (n+1)*nx*ny + l;
        data2[k*nx*ny+l] = (1.-w)*data1[n1] + w*data1[n2];
      }
    }
    else if (n > 0) {
      /* target level lies between grid1[n-1] and grid1[n] */
      w = (grid2[k]-grid1[n-1])/(grid1[n]-grid1[n-1]);
      for(l=0; l<nx*ny; l++) {
        n1 = (n-1)*nx*ny + l;
        n2 = n*nx*ny + l;
        data2[k*nx*ny+l] = (1.-w)*data1[n1] + w*data1[n2];
      }
    }
    else {
      /* grid2[k] coincides with the first source level */
      for(l=0; l<nx*ny; l++) data2[k*nx*ny+l] = data1[l];
    }
  }
}
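/* Minimal usage sketch (not part of the original source): a 2x2 field on
   three source levels, with the field equal to 100 * level index, is moved
   to two intermediate target levels; expect 50 and 150. */
static void demo_linear_vertical_interp(void)
{
  double grid1[3] = {0., 10., 20.};
  double grid2[2] = {5., 15.};
  double data1[12], data2[8];
  int k, l;

  for(k=0; k<3; k++)
    for(l=0; l<4; l++) data1[k*4+l] = 100.*k;
  linear_vertical_interp(2, 2, 3, 2, grid1, grid2, data1, data2);
  for(k=0; k<2; k++) printf("level %g -> %g\n", grid2[k], data2[k*4]);
}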