!***********************************************************************
!*                   GNU Lesser General Public License
!*
!* This file is part of the GFDL Flexible Modeling System (FMS).
!*
!* FMS is free software: you can redistribute it and/or modify it under
!* the terms of the GNU Lesser General Public License as published by
!* the Free Software Foundation, either version 3 of the License, or (at
!* your option) any later version.
!*
!* FMS is distributed in the hope that it will be useful, but WITHOUT
!* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
!* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
!* for more details.
!*
!* You should have received a copy of the GNU Lesser General Public
!* License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!        ROUTINES TO INITIALIZE/FINALIZE MPP MODULE: mpp_init, mpp_exit      !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! subroutine mpp_init( flags, in, out, err, log )
!   integer, optional, intent(in) :: flags, in, out, err, log
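! Illustrative call sequence (a sketch, not part of the interface above): if the
! caller has already started MPI itself, a communicator must be supplied through
! localcomm; 'my_comm', 'color', 'key' and 'ierr' are caller-side placeholder names.
!   call MPI_COMM_SPLIT( MPI_COMM_WORLD, color, key, my_comm, ierr )
!   call mpp_init( localcomm=my_comm )  !FATAL below if MPI is initialized and localcomm is absent
!   ...
!   call mpp_exit()                     !reports clock statistics at the end of the run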
subroutine mpp_init( flags, localcomm )
  integer, optional, intent(in) :: flags
  integer, optional, intent(in) :: localcomm
  logical :: opened, existed
  character(len=5) :: this_pe
  type(mpp_type), pointer :: dtype
  call MPI_INITIALIZED( opened, error ) !in case called from another MPI package
  if(opened .and. .NOT. PRESENT(localcomm)) call mpp_error( FATAL, 'MPP_INIT: communicator is required' )
  if( .NOT.opened ) then
    call SHMEM_BARRIER_ALL()
    call SHPALLOC( p_pSync, SHMEM_BARRIER_SYNC_SIZE, error, -1 )
    call SHMEM_BARRIER_ALL()
  !PEsets: make defaults illegal
  peset(:)%log2stride = -1
  !0=single-PE, initialized so that count returns 1
  allocate( peset(0)%list(1) )
  clock0 = mpp_clock_id( 'Total runtime', flags=MPP_CLOCK_SYNC )
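  !Illustrative use of the same clock API as 'Total runtime' above; the clock
  !name and 'id' are placeholders chosen here, not names from this file:
  !  id = mpp_clock_id( 'my_section', flags=MPP_CLOCK_SYNC )
  !  call mpp_clock_begin(id)
  !  ... timed work ...
  !  call mpp_clock_end(id)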
  ! Create the bytestream (default) mpp_datatype
  ! Initialize datatype list with mpp_byte
  if( PRESENT(flags) )then
    debug = flags.EQ.MPP_DEBUG
  call mpp_init_logfile()
#ifdef INTERNAL_FILE_NML
    inquire( unit_nml, OPENED=opened )
    if( .NOT.opened )exit
  open( unit_nml, file='input.nml', iostat=io_status )
  read( unit_nml, mpp_nml, iostat=io_status )
  if (io_status > 0) then
    call mpp_error(FATAL,'=>mpp_init: Error reading input.nml')
       "mpp_mod: mpp_nml variable sync_all_clocks is set to .true., all clocks are synchronized in mpp_clock_begin.")
  ! non-root pe messages written to other location than stdout()
  if(opened) call mpp_error(FATAL,'Unit 9 is already in use (etc_unit) in mpp_comm_mpi')
  inquire( file=etcfile, exist=existed )
  ! max_request is set to the maximum of npes * REQUEST_MULTIPLY (default is 20) and MAX_REQUEST_MIN (default is 10000)
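  !i.e., a sketch of the intent, using the names from the comment above:
  !  max_request = max( npes*REQUEST_MULTIPLY, MAX_REQUEST_MIN )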
  !if optional argument logunit=stdout, write messages to stdout instead.
  !if specifying non-defaults, you must specify units not yet in use.
  !    if( PRESENT(in) )then
  !        inquire( unit=in, opened=opened )
  !        if( opened )call mpp_error( FATAL, 'MPP_INIT: unable to open stdin.' )
  !    if( PRESENT(out) )then
  !        inquire( unit=out, opened=opened )
  !        if( opened )call mpp_error( FATAL, 'MPP_INIT: unable to open stdout.' )
  !    if( PRESENT(err) )then
  !        inquire( unit=err, opened=opened )
  !        if( opened )call mpp_error( FATAL, 'MPP_INIT: unable to open stderr.' )
  !    if( PRESENT(log) )then
  !        inquire( unit=log, opened=opened )
  !        if( opened .AND. log.NE.out_unit )call mpp_error( FATAL, 'MPP_INIT: unable to open stdlog.' )
  !!log_unit can be written to only from root_pe, all others write to stdout
  !        if( opened )call mpp_error( FATAL, 'MPP_INIT: specified unit for stdlog already in use.' )
  iunit = stdlog()  ! workaround for lf95.
  write( iunit,'(/a)' )'MPP module '
  write( iunit,'(a,i6)' )'MPP started with NPES=', npes
  write( iunit,'(a)' )'Using MPI library for message passing...'
  write( iunit,'(a,es12.4,a,i10,a)' ) &
  write( iunit,'(a,es12.4,a,i20,a)' ) &
  write( iunit,'(/a)' )'MPP Parameter module '
  write( iunit,'(/a)' )'MPP Data module '
  call mpp_clock_begin(clock0)
end subroutine mpp_init
!#######################################################################
  !to be called at the end of a run
subroutine mpp_exit()
  real :: t, tmin, tmax, tavg, tstd
  real :: m, mmin, mmax, mavg, mstd, t_total
  type(mpp_type), pointer :: dtype

  call mpp_set_current_pelist()
  call mpp_clock_end(clock0)
  call sum_clock_data; call dump_clock_summary
  write( out_unit,'(/a,i6,a)' ) 'Tabulating mpp_clock statistics across ', npes, ' PEs...'
  write( out_unit,'(a)' )' ... see mpp_clock.out.#### for details on individual PEs.'
  write( out_unit,'(/32x,a)' ) '    tmin     tmax     tavg     tstd  tfrac grain pemin pemax'
  !times between mpp_clock ticks
      tmax = t; call mpp_max(tmax)
      tavg = t; call mpp_sum(tavg); tavg = tavg/mpp_npes()
      tstd = (t-tavg)**2; call mpp_sum(tstd); tstd = sqrt( tstd/mpp_npes() )
           '    tmin     tmax     tavg     tstd     mmin     mmax     mavg     mstd  mavg/tavg'
  !messages: bytelengths and times
    do j = 1,MAX_EVENT_TYPES
        mmin = m; call mpp_min(mmin)
        mmax = m; call mpp_max(mmax)
        mavg = m; call mpp_sum(mavg); mavg = mavg/mpp_npes()
        mstd = (m-mavg)**2; call mpp_sum(mstd); mstd = sqrt( mstd/mpp_npes() )
        tmax = t; call mpp_max(tmax)
        tavg = t; call mpp_sum(tavg); tavg = tavg/mpp_npes()
        tstd = (t-tavg)**2; call mpp_sum(tstd); tstd = sqrt( tstd/mpp_npes() )
             tmin, tmax, tavg, tstd, mmin, mmax, mavg, mstd, mavg/tavg
  ! Clear derived data types (skipping list head, mpp_byte)
  do while (.not. associated(dtype, datatypes%tail))
      dtype%counter = 1   ! Force deallocation
      call mpp_type_free(dtype)
  call mpp_set_current_pelist()
end subroutine mpp_exit
!#######################################################################
subroutine mpp_malloc( ptr, newlen, len )
  pointer( ptr, dummy )
  !use existing allocation if it is enough
  if( newlen.LE.len )return
  call SHMEM_BARRIER_ALL()
  !if the pointer is already allocated, deallocate
  !  if( len.NE.0 )call SHPDEALLC( ptr, error_8, -1 ) !BWA: error_8 instead of error, see PV 682618 (fixed in mpt.1.3.0.1)
  if( len.NE.0 )call SHPDEALLC( ptr, error, -1 )
  !allocate new length: assume that the array is KIND=8
  call SHPALLOC( ptr, newlen, error, -1 )
  call SHMEM_BARRIER_ALL()
  call mpp_error( FATAL, 'mpp_malloc: requires use_MPI_SMA' )
end subroutine mpp_malloc
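!Illustrative call sequence (SHMEM/use_MPI_SMA builds only; 'ptr', 'dummy',
!'words_needed' and 'cur_len' are caller-side placeholder names):
!  real(8) :: dummy(1)
!  pointer( ptr, dummy )
!  call mpp_malloc( ptr, words_needed, cur_len )  !re-allocates only when words_needed exceeds cur_len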
!#######################################################################
  !--- routine to perform GSM allocations
subroutine mpp_gsm_malloc( ptr, len )
  integer(KIND=MPI_ADDRESS_KIND), intent(in) :: len
  !argument ptr is a cray pointer, points to a dummy argument in this routine
  pointer( ptr, dummy )
end subroutine mpp_gsm_malloc
!#######################################################################
  !--- routine to free GSM allocations
subroutine mpp_gsm_free( ptr )
  !argument ptr is a cray pointer, points to a dummy argument in this routine
  pointer( ptr, dummy )
end subroutine mpp_gsm_free
!#######################################################################
  !set the mpp_stack variable to be at least n LONG words long
subroutine mpp_set_stack_size(n)
  if( .NOT.allocated(mpp_stack) )then
    allocate( mpp_stack(n) )
  write( text,'(i8)' )n
end subroutine mpp_set_stack_size
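!Illustrative usage (the size is an arbitrary example value): make sure mpp_stack
!can hold at least 1000000 LONG words before a communication call that needs it:
!  call mpp_set_stack_size( 1000000 )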
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!               BASIC MESSAGE PASSING ROUTINE: mpp_transmit                  !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  character(len=*), intent(inout) :: data(:)
  if( mpp_pe() == mpp_root_pe() ) then
  call mpp_error( FATAL, 'mpp_broadcast_text: broadcasting from invalid PE.' )
  if( mpp_npes().GT.1 ) call MPI_BCAST( data, length*size(data(:)), MPI_CHARACTER, from_rank, peset(n)%id, error )
end subroutine mpp_broadcast_char
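!Illustrative use through the generic mpp_broadcast interface (a sketch; 'text'
!and its dimensions are caller-side choices):
!  character(len=128) :: text(16)
!  call mpp_broadcast( text, len(text(1)), mpp_root_pe() )  !root PE's copy overwrites all other PEs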
453 #define MPP_TRANSMIT_ mpp_transmit_real8 454 #undef MPP_TRANSMIT_SCALAR_ 455 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_real8_scalar 456 #undef MPP_TRANSMIT_2D_ 457 #define MPP_TRANSMIT_2D_ mpp_transmit_real8_2d 458 #undef MPP_TRANSMIT_3D_ 459 #define MPP_TRANSMIT_3D_ mpp_transmit_real8_3d 460 #undef MPP_TRANSMIT_4D_ 461 #define MPP_TRANSMIT_4D_ mpp_transmit_real8_4d 462 #undef MPP_TRANSMIT_5D_ 463 #define MPP_TRANSMIT_5D_ mpp_transmit_real8_5d 465 #define MPP_RECV_ mpp_recv_real8 466 #undef MPP_RECV_SCALAR_ 467 #define MPP_RECV_SCALAR_ mpp_recv_real8_scalar 469 #define MPP_RECV_2D_ mpp_recv_real8_2d 471 #define MPP_RECV_3D_ mpp_recv_real8_3d 473 #define MPP_RECV_4D_ mpp_recv_real8_4d 475 #define MPP_RECV_5D_ mpp_recv_real8_5d 477 #define MPP_SEND_ mpp_send_real8 478 #undef MPP_SEND_SCALAR_ 479 #define MPP_SEND_SCALAR_ mpp_send_real8_scalar 481 #define MPP_SEND_2D_ mpp_send_real8_2d 483 #define MPP_SEND_3D_ mpp_send_real8_3d 485 #define MPP_SEND_4D_ mpp_send_real8_4d 487 #define MPP_SEND_5D_ mpp_send_real8_5d 488 #undef MPP_BROADCAST_ 489 #define MPP_BROADCAST_ mpp_broadcast_real8 490 #undef MPP_BROADCAST_SCALAR_ 491 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_real8_scalar 492 #undef MPP_BROADCAST_2D_ 493 #define MPP_BROADCAST_2D_ mpp_broadcast_real8_2d 494 #undef MPP_BROADCAST_3D_ 495 #define MPP_BROADCAST_3D_ mpp_broadcast_real8_3d 496 #undef MPP_BROADCAST_4D_ 497 #define MPP_BROADCAST_4D_ mpp_broadcast_real8_4d 498 #undef MPP_BROADCAST_5D_ 499 #define MPP_BROADCAST_5D_ mpp_broadcast_real8_5d 501 #define MPP_TYPE_ real(DOUBLE_KIND) 502 #undef MPP_TYPE_BYTELEN_ 503 #define MPP_TYPE_BYTELEN_ 8 505 #define MPI_TYPE_ MPI_REAL8 510 #define MPP_TRANSMIT_ mpp_transmit_cmplx8 511 #undef MPP_TRANSMIT_SCALAR_ 512 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_cmplx8_scalar 513 #undef MPP_TRANSMIT_2D_ 514 #define MPP_TRANSMIT_2D_ mpp_transmit_cmplx8_2d 515 #undef MPP_TRANSMIT_3D_ 516 #define MPP_TRANSMIT_3D_ mpp_transmit_cmplx8_3d 517 #undef MPP_TRANSMIT_4D_ 518 #define MPP_TRANSMIT_4D_ mpp_transmit_cmplx8_4d 519 #undef MPP_TRANSMIT_5D_ 520 #define MPP_TRANSMIT_5D_ mpp_transmit_cmplx8_5d 522 #define MPP_RECV_ mpp_recv_cmplx8 523 #undef MPP_RECV_SCALAR_ 524 #define MPP_RECV_SCALAR_ mpp_recv_cmplx8_scalar 526 #define MPP_RECV_2D_ mpp_recv_cmplx8_2d 528 #define MPP_RECV_3D_ mpp_recv_cmplx8_3d 530 #define MPP_RECV_4D_ mpp_recv_cmplx8_4d 532 #define MPP_RECV_5D_ mpp_recv_cmplx8_5d 534 #define MPP_SEND_ mpp_send_cmplx8 535 #undef MPP_SEND_SCALAR_ 536 #define MPP_SEND_SCALAR_ mpp_send_cmplx8_scalar 538 #define MPP_SEND_2D_ mpp_send_cmplx8_2d 540 #define MPP_SEND_3D_ mpp_send_cmplx8_3d 542 #define MPP_SEND_4D_ mpp_send_cmplx8_4d 544 #define MPP_SEND_5D_ mpp_send_cmplx8_5d 545 #undef MPP_BROADCAST_ 546 #define MPP_BROADCAST_ mpp_broadcast_cmplx8 547 #undef MPP_BROADCAST_SCALAR_ 548 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_cmplx8_scalar 549 #undef MPP_BROADCAST_2D_ 550 #define MPP_BROADCAST_2D_ mpp_broadcast_cmplx8_2d 551 #undef MPP_BROADCAST_3D_ 552 #define MPP_BROADCAST_3D_ mpp_broadcast_cmplx8_3d 553 #undef MPP_BROADCAST_4D_ 554 #define MPP_BROADCAST_4D_ mpp_broadcast_cmplx8_4d 555 #undef MPP_BROADCAST_5D_ 556 #define MPP_BROADCAST_5D_ mpp_broadcast_cmplx8_5d 558 #define MPP_TYPE_ complex(DOUBLE_KIND) 559 #undef MPP_TYPE_BYTELEN_ 560 #define MPP_TYPE_BYTELEN_ 16 562 #define MPI_TYPE_ MPI_DOUBLE_COMPLEX 567 #define MPP_TRANSMIT_ mpp_transmit_real4 568 #undef MPP_TRANSMIT_SCALAR_ 569 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_real4_scalar 570 #undef MPP_TRANSMIT_2D_ 571 #define MPP_TRANSMIT_2D_ 
mpp_transmit_real4_2d 572 #undef MPP_TRANSMIT_3D_ 573 #define MPP_TRANSMIT_3D_ mpp_transmit_real4_3d 574 #undef MPP_TRANSMIT_4D_ 575 #define MPP_TRANSMIT_4D_ mpp_transmit_real4_4d 576 #undef MPP_TRANSMIT_5D_ 577 #define MPP_TRANSMIT_5D_ mpp_transmit_real4_5d 579 #define MPP_RECV_ mpp_recv_real4 580 #undef MPP_RECV_SCALAR_ 581 #define MPP_RECV_SCALAR_ mpp_recv_real4_scalar 583 #define MPP_RECV_2D_ mpp_recv_real4_2d 585 #define MPP_RECV_3D_ mpp_recv_real4_3d 587 #define MPP_RECV_4D_ mpp_recv_real4_4d 589 #define MPP_RECV_5D_ mpp_recv_real4_5d 591 #define MPP_SEND_ mpp_send_real4 592 #undef MPP_SEND_SCALAR_ 593 #define MPP_SEND_SCALAR_ mpp_send_real4_scalar 595 #define MPP_SEND_2D_ mpp_send_real4_2d 597 #define MPP_SEND_3D_ mpp_send_real4_3d 599 #define MPP_SEND_4D_ mpp_send_real4_4d 601 #define MPP_SEND_5D_ mpp_send_real4_5d 602 #undef MPP_BROADCAST_ 603 #define MPP_BROADCAST_ mpp_broadcast_real4 604 #undef MPP_BROADCAST_SCALAR_ 605 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_real4_scalar 606 #undef MPP_BROADCAST_2D_ 607 #define MPP_BROADCAST_2D_ mpp_broadcast_real4_2d 608 #undef MPP_BROADCAST_3D_ 609 #define MPP_BROADCAST_3D_ mpp_broadcast_real4_3d 610 #undef MPP_BROADCAST_4D_ 611 #define MPP_BROADCAST_4D_ mpp_broadcast_real4_4d 612 #undef MPP_BROADCAST_5D_ 613 #define MPP_BROADCAST_5D_ mpp_broadcast_real4_5d 615 #define MPP_TYPE_ real(FLOAT_KIND) 616 #undef MPP_TYPE_BYTELEN_ 617 #define MPP_TYPE_BYTELEN_ 4 619 #define MPI_TYPE_ MPI_REAL4 624 #define MPP_TRANSMIT_ mpp_transmit_cmplx4 625 #undef MPP_TRANSMIT_SCALAR_ 626 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_cmplx4_scalar 627 #undef MPP_TRANSMIT_2D_ 628 #define MPP_TRANSMIT_2D_ mpp_transmit_cmplx4_2d 629 #undef MPP_TRANSMIT_3D_ 630 #define MPP_TRANSMIT_3D_ mpp_transmit_cmplx4_3d 631 #undef MPP_TRANSMIT_4D_ 632 #define MPP_TRANSMIT_4D_ mpp_transmit_cmplx4_4d 633 #undef MPP_TRANSMIT_5D_ 634 #define MPP_TRANSMIT_5D_ mpp_transmit_cmplx4_5d 636 #define MPP_RECV_ mpp_recv_cmplx4 637 #undef MPP_RECV_SCALAR_ 638 #define MPP_RECV_SCALAR_ mpp_recv_cmplx4_scalar 640 #define MPP_RECV_2D_ mpp_recv_cmplx4_2d 642 #define MPP_RECV_3D_ mpp_recv_cmplx4_3d 644 #define MPP_RECV_4D_ mpp_recv_cmplx4_4d 646 #define MPP_RECV_5D_ mpp_recv_cmplx4_5d 648 #define MPP_SEND_ mpp_send_cmplx4 649 #undef MPP_SEND_SCALAR_ 650 #define MPP_SEND_SCALAR_ mpp_send_cmplx4_scalar 652 #define MPP_SEND_2D_ mpp_send_cmplx4_2d 654 #define MPP_SEND_3D_ mpp_send_cmplx4_3d 656 #define MPP_SEND_4D_ mpp_send_cmplx4_4d 658 #define MPP_SEND_5D_ mpp_send_cmplx4_5d 659 #undef MPP_BROADCAST_ 660 #define MPP_BROADCAST_ mpp_broadcast_cmplx4 661 #undef MPP_BROADCAST_SCALAR_ 662 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_cmplx4_scalar 663 #undef MPP_BROADCAST_2D_ 664 #define MPP_BROADCAST_2D_ mpp_broadcast_cmplx4_2d 665 #undef MPP_BROADCAST_3D_ 666 #define MPP_BROADCAST_3D_ mpp_broadcast_cmplx4_3d 667 #undef MPP_BROADCAST_4D_ 668 #define MPP_BROADCAST_4D_ mpp_broadcast_cmplx4_4d 669 #undef MPP_BROADCAST_5D_ 670 #define MPP_BROADCAST_5D_ mpp_broadcast_cmplx4_5d 672 #define MPP_TYPE_ complex(FLOAT_KIND) 673 #undef MPP_TYPE_BYTELEN_ 674 #define MPP_TYPE_BYTELEN_ 8 676 #define MPI_TYPE_ MPI_COMPLEX 680 #ifndef no_8byte_integers 682 #define MPP_TRANSMIT_ mpp_transmit_int8 683 #undef MPP_TRANSMIT_SCALAR_ 684 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_int8_scalar 685 #undef MPP_TRANSMIT_2D_ 686 #define MPP_TRANSMIT_2D_ mpp_transmit_int8_2d 687 #undef MPP_TRANSMIT_3D_ 688 #define MPP_TRANSMIT_3D_ mpp_transmit_int8_3d 689 #undef MPP_TRANSMIT_4D_ 690 #define MPP_TRANSMIT_4D_ mpp_transmit_int8_4d 691 #undef 
MPP_TRANSMIT_5D_ 692 #define MPP_TRANSMIT_5D_ mpp_transmit_int8_5d 694 #define MPP_RECV_ mpp_recv_int8 695 #undef MPP_RECV_SCALAR_ 696 #define MPP_RECV_SCALAR_ mpp_recv_int8_scalar 698 #define MPP_RECV_2D_ mpp_recv_int8_2d 700 #define MPP_RECV_3D_ mpp_recv_int8_3d 702 #define MPP_RECV_4D_ mpp_recv_int8_4d 704 #define MPP_RECV_5D_ mpp_recv_int8_5d 706 #define MPP_SEND_ mpp_send_int8 707 #undef MPP_SEND_SCALAR_ 708 #define MPP_SEND_SCALAR_ mpp_send_int8_scalar 710 #define MPP_SEND_2D_ mpp_send_int8_2d 712 #define MPP_SEND_3D_ mpp_send_int8_3d 714 #define MPP_SEND_4D_ mpp_send_int8_4d 716 #define MPP_SEND_5D_ mpp_send_int8_5d 717 #undef MPP_BROADCAST_ 718 #define MPP_BROADCAST_ mpp_broadcast_int8 719 #undef MPP_BROADCAST_SCALAR_ 720 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_int8_scalar 721 #undef MPP_BROADCAST_2D_ 722 #define MPP_BROADCAST_2D_ mpp_broadcast_int8_2d 723 #undef MPP_BROADCAST_3D_ 724 #define MPP_BROADCAST_3D_ mpp_broadcast_int8_3d 725 #undef MPP_BROADCAST_4D_ 726 #define MPP_BROADCAST_4D_ mpp_broadcast_int8_4d 727 #undef MPP_BROADCAST_5D_ 728 #define MPP_BROADCAST_5D_ mpp_broadcast_int8_5d 730 #define MPP_TYPE_ integer(LONG_KIND) 731 #undef MPP_TYPE_BYTELEN_ 732 #define MPP_TYPE_BYTELEN_ 8 734 #define MPI_TYPE_ MPI_INTEGER8 739 #define MPP_TRANSMIT_ mpp_transmit_int4 740 #undef MPP_TRANSMIT_SCALAR_ 741 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_int4_scalar 742 #undef MPP_TRANSMIT_2D_ 743 #define MPP_TRANSMIT_2D_ mpp_transmit_int4_2d 744 #undef MPP_TRANSMIT_3D_ 745 #define MPP_TRANSMIT_3D_ mpp_transmit_int4_3d 746 #undef MPP_TRANSMIT_4D_ 747 #define MPP_TRANSMIT_4D_ mpp_transmit_int4_4d 748 #undef MPP_TRANSMIT_5D_ 749 #define MPP_TRANSMIT_5D_ mpp_transmit_int4_5d 751 #define MPP_RECV_ mpp_recv_int4 752 #undef MPP_RECV_SCALAR_ 753 #define MPP_RECV_SCALAR_ mpp_recv_int4_scalar 755 #define MPP_RECV_2D_ mpp_recv_int4_2d 757 #define MPP_RECV_3D_ mpp_recv_int4_3d 759 #define MPP_RECV_4D_ mpp_recv_int4_4d 761 #define MPP_RECV_5D_ mpp_recv_int4_5d 763 #define MPP_SEND_ mpp_send_int4 764 #undef MPP_SEND_SCALAR_ 765 #define MPP_SEND_SCALAR_ mpp_send_int4_scalar 767 #define MPP_SEND_2D_ mpp_send_int4_2d 769 #define MPP_SEND_3D_ mpp_send_int4_3d 771 #define MPP_SEND_4D_ mpp_send_int4_4d 773 #define MPP_SEND_5D_ mpp_send_int4_5d 774 #undef MPP_BROADCAST_ 775 #define MPP_BROADCAST_ mpp_broadcast_int4 776 #undef MPP_BROADCAST_SCALAR_ 777 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_int4_scalar 778 #undef MPP_BROADCAST_2D_ 779 #define MPP_BROADCAST_2D_ mpp_broadcast_int4_2d 780 #undef MPP_BROADCAST_3D_ 781 #define MPP_BROADCAST_3D_ mpp_broadcast_int4_3d 782 #undef MPP_BROADCAST_4D_ 783 #define MPP_BROADCAST_4D_ mpp_broadcast_int4_4d 784 #undef MPP_BROADCAST_5D_ 785 #define MPP_BROADCAST_5D_ mpp_broadcast_int4_5d 787 #define MPP_TYPE_ integer(INT_KIND) 788 #undef MPP_TYPE_BYTELEN_ 789 #define MPP_TYPE_BYTELEN_ 4 791 #define MPI_TYPE_ MPI_INTEGER4 794 #ifndef no_8byte_integers 796 #define MPP_TRANSMIT_ mpp_transmit_logical8 797 #undef MPP_TRANSMIT_SCALAR_ 798 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_logical8_scalar 799 #undef MPP_TRANSMIT_2D_ 800 #define MPP_TRANSMIT_2D_ mpp_transmit_logical8_2d 801 #undef MPP_TRANSMIT_3D_ 802 #define MPP_TRANSMIT_3D_ mpp_transmit_logical8_3d 803 #undef MPP_TRANSMIT_4D_ 804 #define MPP_TRANSMIT_4D_ mpp_transmit_logical8_4d 805 #undef MPP_TRANSMIT_5D_ 806 #define MPP_TRANSMIT_5D_ mpp_transmit_logical8_5d 808 #define MPP_RECV_ mpp_recv_logical8 809 #undef MPP_RECV_SCALAR_ 810 #define MPP_RECV_SCALAR_ mpp_recv_logical8_scalar 812 #define MPP_RECV_2D_ 
mpp_recv_logical8_2d 814 #define MPP_RECV_3D_ mpp_recv_logical8_3d 816 #define MPP_RECV_4D_ mpp_recv_logical8_4d 818 #define MPP_RECV_5D_ mpp_recv_logical8_5d 820 #define MPP_SEND_ mpp_send_logical8 821 #undef MPP_SEND_SCALAR_ 822 #define MPP_SEND_SCALAR_ mpp_send_logical8_scalar 824 #define MPP_SEND_2D_ mpp_send_logical8_2d 826 #define MPP_SEND_3D_ mpp_send_logical8_3d 828 #define MPP_SEND_4D_ mpp_send_logical8_4d 830 #define MPP_SEND_5D_ mpp_send_logical8_5d 831 #undef MPP_BROADCAST_ 832 #define MPP_BROADCAST_ mpp_broadcast_logical8 833 #undef MPP_BROADCAST_SCALAR_ 834 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_logical8_scalar 835 #undef MPP_BROADCAST_2D_ 836 #define MPP_BROADCAST_2D_ mpp_broadcast_logical8_2d 837 #undef MPP_BROADCAST_3D_ 838 #define MPP_BROADCAST_3D_ mpp_broadcast_logical8_3d 839 #undef MPP_BROADCAST_4D_ 840 #define MPP_BROADCAST_4D_ mpp_broadcast_logical8_4d 841 #undef MPP_BROADCAST_5D_ 842 #define MPP_BROADCAST_5D_ mpp_broadcast_logical8_5d 844 #define MPP_TYPE_ logical(LONG_KIND) 845 #undef MPP_TYPE_BYTELEN_ 846 #define MPP_TYPE_BYTELEN_ 8 848 #define MPI_TYPE_ MPI_INTEGER8 853 #define MPP_TRANSMIT_ mpp_transmit_logical4 854 #undef MPP_TRANSMIT_SCALAR_ 855 #define MPP_TRANSMIT_SCALAR_ mpp_transmit_logical4_scalar 856 #undef MPP_TRANSMIT_2D_ 857 #define MPP_TRANSMIT_2D_ mpp_transmit_logical4_2d 858 #undef MPP_TRANSMIT_3D_ 859 #define MPP_TRANSMIT_3D_ mpp_transmit_logical4_3d 860 #undef MPP_TRANSMIT_4D_ 861 #define MPP_TRANSMIT_4D_ mpp_transmit_logical4_4d 862 #undef MPP_TRANSMIT_5D_ 863 #define MPP_TRANSMIT_5D_ mpp_transmit_logical4_5d 865 #define MPP_RECV_ mpp_recv_logical4 866 #undef MPP_RECV_SCALAR_ 867 #define MPP_RECV_SCALAR_ mpp_recv_logical4_scalar 869 #define MPP_RECV_2D_ mpp_recv_logical4_2d 871 #define MPP_RECV_3D_ mpp_recv_logical4_3d 873 #define MPP_RECV_4D_ mpp_recv_logical4_4d 875 #define MPP_RECV_5D_ mpp_recv_logical4_5d 877 #define MPP_SEND_ mpp_send_logical4 878 #undef MPP_SEND_SCALAR_ 879 #define MPP_SEND_SCALAR_ mpp_send_logical4_scalar 881 #define MPP_SEND_2D_ mpp_send_logical4_2d 883 #define MPP_SEND_3D_ mpp_send_logical4_3d 885 #define MPP_SEND_4D_ mpp_send_logical4_4d 887 #define MPP_SEND_5D_ mpp_send_logical4_5d 888 #undef MPP_BROADCAST_ 889 #define MPP_BROADCAST_ mpp_broadcast_logical4 890 #undef MPP_BROADCAST_SCALAR_ 891 #define MPP_BROADCAST_SCALAR_ mpp_broadcast_logical4_scalar 892 #undef MPP_BROADCAST_2D_ 893 #define MPP_BROADCAST_2D_ mpp_broadcast_logical4_2d 894 #undef MPP_BROADCAST_3D_ 895 #define MPP_BROADCAST_3D_ mpp_broadcast_logical4_3d 896 #undef MPP_BROADCAST_4D_ 897 #define MPP_BROADCAST_4D_ mpp_broadcast_logical4_4d 898 #undef MPP_BROADCAST_5D_ 899 #define MPP_BROADCAST_5D_ mpp_broadcast_logical4_5d 901 #define MPP_TYPE_ logical(INT_KIND) 902 #undef MPP_TYPE_BYTELEN_ 903 #define MPP_TYPE_BYTELEN_ 4 905 #define MPI_TYPE_ MPI_INTEGER4 908 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!            GLOBAL REDUCTION ROUTINES: mpp_max, mpp_sum, mpp_min            !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
913 #undef MPP_REDUCE_0D_ 914 #define MPP_REDUCE_0D_ mpp_max_real8_0d 915 #undef MPP_REDUCE_1D_ 916 #define MPP_REDUCE_1D_ mpp_max_real8_1d 918 #define MPP_TYPE_ real(DOUBLE_KIND) 919 #undef MPP_TYPE_BYTELEN_ 920 #define MPP_TYPE_BYTELEN_ 8 922 #define MPI_TYPE_ MPI_REAL8 924 #define MPI_REDUCE_ MPI_MAX 928 #undef MPP_REDUCE_0D_ 929 #define MPP_REDUCE_0D_ mpp_max_real4_0d 930 #undef MPP_REDUCE_1D_ 931 #define MPP_REDUCE_1D_ mpp_max_real4_1d 933 #define MPP_TYPE_ real(FLOAT_KIND) 934 #undef MPP_TYPE_BYTELEN_ 935 #define MPP_TYPE_BYTELEN_ 4 937 #define MPI_TYPE_ MPI_REAL4 939 #define MPI_REDUCE_ MPI_MAX 943 #ifndef no_8byte_integers 944 #undef MPP_REDUCE_0D_ 945 #define MPP_REDUCE_0D_ mpp_max_int8_0d 946 #undef MPP_REDUCE_1D_ 947 #define MPP_REDUCE_1D_ mpp_max_int8_1d 949 #define MPP_TYPE_ integer(LONG_KIND) 950 #undef MPP_TYPE_BYTELEN_ 951 #define MPP_TYPE_BYTELEN_ 8 953 #define MPI_TYPE_ MPI_INTEGER8 955 #define MPI_REDUCE_ MPI_MAX 959 #undef MPP_REDUCE_0D_ 960 #define MPP_REDUCE_0D_ mpp_max_int4_0d 961 #undef MPP_REDUCE_1D_ 962 #define MPP_REDUCE_1D_ mpp_max_int4_1d 964 #define MPP_TYPE_ integer(INT_KIND) 965 #undef MPP_TYPE_BYTELEN_ 966 #define MPP_TYPE_BYTELEN_ 4 968 #define MPI_TYPE_ MPI_INTEGER4 970 #define MPI_REDUCE_ MPI_MAX 973 #undef MPP_REDUCE_0D_ 974 #define MPP_REDUCE_0D_ mpp_min_real8_0d 975 #undef MPP_REDUCE_1D_ 976 #define MPP_REDUCE_1D_ mpp_min_real8_1d 978 #define MPP_TYPE_ real(DOUBLE_KIND) 979 #undef MPP_TYPE_BYTELEN_ 980 #define MPP_TYPE_BYTELEN_ 8 982 #define MPI_TYPE_ MPI_REAL8 984 #define MPI_REDUCE_ MPI_MIN 988 #undef MPP_REDUCE_0D_ 989 #define MPP_REDUCE_0D_ mpp_min_real4_0d 990 #undef MPP_REDUCE_1D_ 991 #define MPP_REDUCE_1D_ mpp_min_real4_1d 993 #define MPP_TYPE_ real(FLOAT_KIND) 994 #undef MPP_TYPE_BYTELEN_ 995 #define MPP_TYPE_BYTELEN_ 4 997 #define MPI_TYPE_ MPI_REAL4 999 #define MPI_REDUCE_ MPI_MIN 1003 #ifndef no_8byte_integers 1004 #undef MPP_REDUCE_0D_ 1005 #define MPP_REDUCE_0D_ mpp_min_int8_0d 1006 #undef MPP_REDUCE_1D_ 1007 #define MPP_REDUCE_1D_ mpp_min_int8_1d 1009 #define MPP_TYPE_ integer(LONG_KIND) 1010 #undef MPP_TYPE_BYTELEN_ 1011 #define MPP_TYPE_BYTELEN_ 8 1013 #define MPI_TYPE_ MPI_INTEGER8 1015 #define MPI_REDUCE_ MPI_MIN 1019 #undef MPP_REDUCE_0D_ 1020 #define MPP_REDUCE_0D_ mpp_min_int4_0d 1021 #undef MPP_REDUCE_1D_ 1022 #define MPP_REDUCE_1D_ mpp_min_int4_1d 1024 #define MPP_TYPE_ integer(INT_KIND) 1025 #undef MPP_TYPE_BYTELEN_ 1026 #define MPP_TYPE_BYTELEN_ 4 1028 #define MPI_TYPE_ MPI_INTEGER4 1030 #define MPI_REDUCE_ MPI_MIN 1034 #define MPP_SUM_ mpp_sum_real8 1035 #undef MPP_SUM_SCALAR_ 1036 #define MPP_SUM_SCALAR_ mpp_sum_real8_scalar 1038 #define MPP_SUM_2D_ mpp_sum_real8_2d 1040 #define MPP_SUM_3D_ mpp_sum_real8_3d 1042 #define MPP_SUM_4D_ mpp_sum_real8_4d 1044 #define MPP_SUM_5D_ mpp_sum_real8_5d 1046 #define MPP_TYPE_ real(DOUBLE_KIND) 1048 #define MPI_TYPE_ MPI_REAL8 1049 #undef MPP_TYPE_BYTELEN_ 1050 #define MPP_TYPE_BYTELEN_ 8 1055 #define MPP_SUM_ mpp_sum_cmplx8 1056 #undef MPP_SUM_SCALAR_ 1057 #define MPP_SUM_SCALAR_ mpp_sum_cmplx8_scalar 1059 #define MPP_SUM_2D_ mpp_sum_cmplx8_2d 1061 #define MPP_SUM_3D_ mpp_sum_cmplx8_3d 1063 #define MPP_SUM_4D_ mpp_sum_cmplx8_4d 1065 #define MPP_SUM_5D_ mpp_sum_cmplx8_5d 1067 #define MPP_TYPE_ complex(DOUBLE_KIND) 1069 #define MPI_TYPE_ MPI_DOUBLE_COMPLEX 1070 #undef MPP_TYPE_BYTELEN_ 1071 #define MPP_TYPE_BYTELEN_ 16 1077 #define MPP_SUM_ mpp_sum_real4 1078 #undef MPP_SUM_SCALAR_ 1079 #define MPP_SUM_SCALAR_ mpp_sum_real4_scalar 1081 #define MPP_SUM_2D_ mpp_sum_real4_2d 1083 #define 
MPP_SUM_3D_ mpp_sum_real4_3d 1085 #define MPP_SUM_4D_ mpp_sum_real4_4d 1087 #define MPP_SUM_5D_ mpp_sum_real4_5d 1089 #define MPP_TYPE_ real(FLOAT_KIND) 1091 #define MPI_TYPE_ MPI_REAL4 1092 #undef MPP_TYPE_BYTELEN_ 1093 #define MPP_TYPE_BYTELEN_ 4 1099 #define MPP_SUM_ mpp_sum_cmplx4 1100 #undef MPP_SUM_SCALAR_ 1101 #define MPP_SUM_SCALAR_ mpp_sum_cmplx4_scalar 1103 #define MPP_SUM_2D_ mpp_sum_cmplx4_2d 1105 #define MPP_SUM_3D_ mpp_sum_cmplx4_3d 1107 #define MPP_SUM_4D_ mpp_sum_cmplx4_4d 1109 #define MPP_SUM_5D_ mpp_sum_cmplx4_5d 1111 #define MPP_TYPE_ complex(FLOAT_KIND) 1113 #define MPI_TYPE_ MPI_COMPLEX 1114 #undef MPP_TYPE_BYTELEN_ 1115 #define MPP_TYPE_BYTELEN_ 8 1119 #ifndef no_8byte_integers 1121 #define MPP_SUM_ mpp_sum_int8 1122 #undef MPP_SUM_SCALAR_ 1123 #define MPP_SUM_SCALAR_ mpp_sum_int8_scalar 1125 #define MPP_SUM_2D_ mpp_sum_int8_2d 1127 #define MPP_SUM_3D_ mpp_sum_int8_3d 1129 #define MPP_SUM_4D_ mpp_sum_int8_4d 1131 #define MPP_SUM_5D_ mpp_sum_int8_5d 1133 #define MPP_TYPE_ integer(LONG_KIND) 1135 #define MPI_TYPE_ MPI_INTEGER8 1136 #undef MPP_TYPE_BYTELEN_ 1137 #define MPP_TYPE_BYTELEN_ 8 1142 #define MPP_SUM_ mpp_sum_int4 1143 #undef MPP_SUM_SCALAR_ 1144 #define MPP_SUM_SCALAR_ mpp_sum_int4_scalar 1146 #define MPP_SUM_2D_ mpp_sum_int4_2d 1148 #define MPP_SUM_3D_ mpp_sum_int4_3d 1150 #define MPP_SUM_4D_ mpp_sum_int4_4d 1152 #define MPP_SUM_5D_ mpp_sum_int4_5d 1154 #define MPP_TYPE_ integer(INT_KIND) 1156 #define MPI_TYPE_ MPI_INTEGER4 1157 #undef MPP_TYPE_BYTELEN_ 1158 #define MPP_TYPE_BYTELEN_ 4 1160 !--------------------------------
1162 #define MPP_SUM_AD_ mpp_sum_real8_ad 1163 #undef MPP_SUM_SCALAR_AD_ 1164 #define MPP_SUM_SCALAR_AD_ mpp_sum_real8_scalar_ad 1165 #undef MPP_SUM_2D_AD_ 1166 #define MPP_SUM_2D_AD_ mpp_sum_real8_2d_ad 1167 #undef MPP_SUM_3D_AD_ 1168 #define MPP_SUM_3D_AD_ mpp_sum_real8_3d_ad 1169 #undef MPP_SUM_4D_AD_ 1170 #define MPP_SUM_4D_AD_ mpp_sum_real8_4d_ad 1171 #undef MPP_SUM_5D_AD_ 1172 #define MPP_SUM_5D_AD_ mpp_sum_real8_5d_ad 1174 #define MPP_TYPE_ real(DOUBLE_KIND) 1176 #define MPI_TYPE_ MPI_REAL8 1177 #undef MPP_TYPE_BYTELEN_ 1178 #define MPP_TYPE_BYTELEN_ 8 1183 #define MPP_SUM_AD_ mpp_sum_cmplx8_ad 1184 #undef MPP_SUM_SCALAR_AD_ 1185 #define MPP_SUM_SCALAR_AD_ mpp_sum_cmplx8_scalar_ad 1186 #undef MPP_SUM_2D_AD_ 1187 #define MPP_SUM_2D_AD_ mpp_sum_cmplx8_2d_ad 1188 #undef MPP_SUM_3D_AD_ 1189 #define MPP_SUM_3D_AD_ mpp_sum_cmplx8_3d_ad 1190 #undef MPP_SUM_4D_AD_ 1191 #define MPP_SUM_4D_AD_ mpp_sum_cmplx8_4d_ad 1192 #undef MPP_SUM_5D_AD_ 1193 #define MPP_SUM_5D_AD_ mpp_sum_cmplx8_5d_ad 1195 #define MPP_TYPE_ complex(DOUBLE_KIND) 1197 #define MPI_TYPE_ MPI_DOUBLE_COMPLEX 1198 #undef MPP_TYPE_BYTELEN_ 1199 #define MPP_TYPE_BYTELEN_ 16 1205 #define MPP_SUM_AD_ mpp_sum_real4_ad 1206 #undef MPP_SUM_SCALAR_AD_ 1207 #define MPP_SUM_SCALAR_AD_ mpp_sum_real4_scalar_ad 1208 #undef MPP_SUM_2D_AD_ 1209 #define MPP_SUM_2D_AD_ mpp_sum_real4_2d_ad 1210 #undef MPP_SUM_3D_AD_ 1211 #define MPP_SUM_3D_AD_ mpp_sum_real4_3d_ad 1212 #undef MPP_SUM_4D_AD_ 1213 #define MPP_SUM_4D_AD_ mpp_sum_real4_4d_ad 1214 #undef MPP_SUM_5D_AD_ 1215 #define MPP_SUM_5D_AD_ mpp_sum_real4_5d_ad 1217 #define MPP_TYPE_ real(FLOAT_KIND) 1219 #define MPI_TYPE_ MPI_REAL4 1220 #undef MPP_TYPE_BYTELEN_ 1221 #define MPP_TYPE_BYTELEN_ 4 1227 #define MPP_SUM_AD_ mpp_sum_cmplx4_ad 1228 #undef MPP_SUM_SCALAR_AD_ 1229 #define MPP_SUM_SCALAR_AD_ mpp_sum_cmplx4_scalar_ad 1230 #undef MPP_SUM_2D_AD_ 1231 #define MPP_SUM_2D_AD_ mpp_sum_cmplx4_2d_ad 1232 #undef MPP_SUM_3D_AD_ 1233 #define MPP_SUM_3D_AD_ mpp_sum_cmplx4_3d_ad 1234 #undef MPP_SUM_4D_AD_ 1235 #define MPP_SUM_4D_AD_ mpp_sum_cmplx4_4d_ad 1236 #undef MPP_SUM_5D_AD_ 1237 #define MPP_SUM_5D_AD_ mpp_sum_cmplx4_5d_ad 1239 #define MPP_TYPE_ complex(FLOAT_KIND) 1241 #define MPI_TYPE_ MPI_COMPLEX 1242 #undef MPP_TYPE_BYTELEN_ 1243 #define MPP_TYPE_BYTELEN_ 8 1247 #ifndef no_8byte_integers 1249 #define MPP_SUM_AD_ mpp_sum_int8_ad 1250 #undef MPP_SUM_SCALAR_AD_ 1251 #define MPP_SUM_SCALAR_AD_ mpp_sum_int8_scalar_ad 1252 #undef MPP_SUM_2D_AD_ 1253 #define MPP_SUM_2D_AD_ mpp_sum_int8_2d_ad 1254 #undef MPP_SUM_3D_AD_ 1255 #define MPP_SUM_3D_AD_ mpp_sum_int8_3d_ad 1256 #undef MPP_SUM_4D_AD_ 1257 #define MPP_SUM_4D_AD_ mpp_sum_int8_4d_ad 1258 #undef MPP_SUM_5D_AD_ 1259 #define MPP_SUM_5D_AD_ mpp_sum_int8_5d_ad 1261 #define MPP_TYPE_ integer(LONG_KIND) 1263 #define MPI_TYPE_ MPI_INTEGER8 1264 #undef MPP_TYPE_BYTELEN_ 1265 #define MPP_TYPE_BYTELEN_ 8 1270 #define MPP_SUM_AD_ mpp_sum_int4_ad 1271 #undef MPP_SUM_SCALAR_AD_ 1272 #define MPP_SUM_SCALAR_AD_ mpp_sum_int4_scalar_ad 1273 #undef MPP_SUM_2D_AD_ 1274 #define MPP_SUM_2D_AD_ mpp_sum_int4_2d_ad 1275 #undef MPP_SUM_3D_AD_ 1276 #define MPP_SUM_3D_AD_ mpp_sum_int4_3d_ad 1277 #undef MPP_SUM_4D_AD_ 1278 #define MPP_SUM_4D_AD_ mpp_sum_int4_4d_ad 1279 #undef MPP_SUM_5D_AD_ 1280 #define MPP_SUM_5D_AD_ mpp_sum_int4_5d_ad 1282 #define MPP_TYPE_ integer(INT_KIND) 1284 #define MPI_TYPE_ MPI_INTEGER4 1285 #undef MPP_TYPE_BYTELEN_ 1286 #define MPP_TYPE_BYTELEN_ 4 1289 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!                 SCATTER AND GATHER ROUTINES: mpp_alltoall                  !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1295 #undef MPP_ALLTOALL_ 1296 #undef MPP_ALLTOALLV_ 1297 #undef MPP_ALLTOALLW_ 1299 #undef MPP_TYPE_BYTELEN_ 1301 #define MPP_ALLTOALL_ mpp_alltoall_int4 1302 #define MPP_ALLTOALLV_ mpp_alltoall_int4_v 1303 #define MPP_ALLTOALLW_ mpp_alltoall_int4_w 1304 #define MPP_TYPE_ integer(INT_KIND) 1305 #define MPP_TYPE_BYTELEN_ 4 1306 #define MPI_TYPE_ MPI_INTEGER4 1309 #undef MPP_ALLTOALL_ 1310 #undef MPP_ALLTOALLV_ 1311 #undef MPP_ALLTOALLW_ 1313 #undef MPP_TYPE_BYTELEN_ 1315 #define MPP_ALLTOALL_ mpp_alltoall_int8 1316 #define MPP_ALLTOALLV_ mpp_alltoall_int8_v 1317 #define MPP_ALLTOALLW_ mpp_alltoall_int8_w 1318 #define MPP_TYPE_ integer(LONG_KIND) 1319 #define MPP_TYPE_BYTELEN_ 8 1320 #define MPI_TYPE_ MPI_INTEGER8 1323 #undef MPP_ALLTOALL_ 1324 #undef MPP_ALLTOALLV_ 1325 #undef MPP_ALLTOALLW_ 1327 #undef MPP_TYPE_BYTELEN_ 1329 #define MPP_ALLTOALL_ mpp_alltoall_real4 1330 #define MPP_ALLTOALLV_ mpp_alltoall_real4_v 1331 #define MPP_ALLTOALLW_ mpp_alltoall_real4_w 1332 #define MPP_TYPE_ real(FLOAT_KIND) 1333 #define MPP_TYPE_BYTELEN_ 4 1334 #define MPI_TYPE_ MPI_REAL4 1337 #undef MPP_ALLTOALL_ 1338 #undef MPP_ALLTOALLV_ 1339 #undef MPP_ALLTOALLW_ 1341 #undef MPP_TYPE_BYTELEN_ 1343 #define MPP_ALLTOALL_ mpp_alltoall_real8 1344 #define MPP_ALLTOALLV_ mpp_alltoall_real8_v 1345 #define MPP_ALLTOALLW_ mpp_alltoall_real8_w 1346 #define MPP_TYPE_ real(DOUBLE_KIND) 1347 #define MPP_TYPE_BYTELEN_ 8 1348 #define MPI_TYPE_ MPI_REAL8 1351 #undef MPP_ALLTOALL_ 1352 #undef MPP_ALLTOALLV_ 1353 #undef MPP_ALLTOALLW_ 1355 #undef MPP_TYPE_BYTELEN_ 1357 #define MPP_ALLTOALL_ mpp_alltoall_logical4 1358 #define MPP_ALLTOALLV_ mpp_alltoall_logical4_v 1359 #define MPP_ALLTOALLW_ mpp_alltoall_logical4_w 1360 #define MPP_TYPE_ logical(INT_KIND) 1361 #define MPP_TYPE_BYTELEN_ 4 1362 #define MPI_TYPE_ MPI_INTEGER4 1365 #undef MPP_ALLTOALL_ 1366 #undef MPP_ALLTOALLV_ 1367 #undef MPP_ALLTOALLW_ 1369 #undef MPP_TYPE_BYTELEN_ 1371 #define MPP_ALLTOALL_ mpp_alltoall_logical8 1372 #define MPP_ALLTOALLV_ mpp_alltoall_logical8_v 1373 #define MPP_ALLTOALLW_ mpp_alltoall_logical8_w 1374 #define MPP_TYPE_ logical(LONG_KIND) 1375 #define MPP_TYPE_BYTELEN_ 8 1376 #define MPI_TYPE_ MPI_INTEGER8 1379 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!            DATA TRANSFER TYPES: mpp_type_create, mpp_type_free             !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1385 #define MPP_TYPE_CREATE_ mpp_type_create_int4 1386 #define MPP_TYPE_ integer(INT_KIND) 1387 #define MPI_TYPE_ MPI_INTEGER4 1390 #define MPP_TYPE_CREATE_ mpp_type_create_int8 1391 #define MPP_TYPE_ integer(LONG_KIND) 1392 #define MPI_TYPE_ MPI_INTEGER8 1395 #define MPP_TYPE_CREATE_ mpp_type_create_real4 1396 #define MPP_TYPE_ real(FLOAT_KIND) 1397 #define MPI_TYPE_ MPI_REAL4 1400 #define MPP_TYPE_CREATE_ mpp_type_create_real8 1401 #define MPP_TYPE_ real(DOUBLE_KIND) 1402 #define MPI_TYPE_ MPI_REAL8 1405 #define MPP_TYPE_CREATE_ mpp_type_create_logical4 1406 #define MPP_TYPE_ logical(INT_KIND) 1407 #define MPI_TYPE_ MPI_INTEGER4 1410 #define MPP_TYPE_CREATE_ mpp_type_create_logical8 1411 #define MPP_TYPE_ logical(LONG_KIND) 1412 #define MPI_TYPE_ MPI_INTEGER8 1415 ! Clear preprocessor flags
#undef MPP_TYPE_CREATE_

! NOTE: This should probably not take a pointer, but for now we do this.
subroutine mpp_type_free(dtype)
  type(mpp_type), pointer, intent(inout) :: dtype

  call mpp_error(FATAL, 'MPP_TYPE_FREE: You must first call mpp_init.')
  call mpp_error(NOTE, 'MPP_TYPE_FREE: using MPI_Type_free...')

  ! Decrement the reference counter
  dtype%counter = dtype%counter - 1

  if (dtype%counter < 1) then
    ! De-register the datatype in MPI runtime
    call MPI_Type_free(dtype%id, error)
    dtype%prev%next => dtype%next
    ! Remove from memory
    if (allocated(dtype%sizes)) deallocate(dtype%sizes)
    if (allocated(dtype%subsizes)) deallocate(dtype%subsizes)
    if (allocated(dtype%starts)) deallocate(dtype%starts)
  call increment_current_clock(EVENT_TYPE_FREE, MPP_TYPE_BYTELEN_)
end subroutine mpp_type_free
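!Illustrative pairing with mpp_type_create (a sketch: the argument names
!'subsizes' and 'starts' are assumptions, not taken from this file):
!  type(mpp_type), pointer :: dtype
!  call mpp_type_create( field, subsizes, starts, dtype )
!  ... use dtype in subsequent mpp communication calls ...
!  call mpp_type_free( dtype )  !decrements the reference counter; the MPI type is freed when it reaches zero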
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version will
*f90 *************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine MPP_UPDATE_NEST_FINE_2D_(field, nest_domain, wbuffer, ebuffer, sbuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::field3D(size(field, 1), size(field, 2), 1) MPP_TYPE_ ::wbuffer3D(size(wbuffer, 1), size(wbuffer, 2), 1) MPP_TYPE_ ::ebuffer3D(size(ebuffer, 1), size(ebuffer, 2), 1) MPP_TYPE_ ::sbuffer3D(size(sbuffer, 1), size(sbuffer, 2), 1) MPP_TYPE_ ::nbuffer3D(size(nbuffer, 1), size(nbuffer, 2), 1) pointer(ptr, field3D) pointer(ptr_w, wbuffer3D) pointer(ptr_e, ebuffer3D) pointer(ptr_s, sbuffer3D) pointer(ptr_n, nbuffer3D) ptr=LOC(field) ptr_w=LOC(wbuffer) ptr_e=LOC(ebuffer) ptr_s=LOC(sbuffer) ptr_n=LOC(nbuffer) call mpp_update_nest_fine(field3D, nest_domain, wbuffer3D, ebuffer3D, sbuffer3D, nbuffer3D, &flags, complete, position, extra_halo, name, tile_count) returnend subroutine MPP_UPDATE_NEST_FINE_2D_subroutine MPP_UPDATE_NEST_FINE_3D_(field, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, &flags, complete, position, extra_halo, name, tile_count) MPP_TYPE_, intent(in) ::field(:,:,:) type(nest_domain_type), intent(inout) ::nest_domain MPP_TYPE_, intent(inout) ::wbuffer(:,:,:) MPP_TYPE_, intent(inout) ::ebuffer(:,:,:) MPP_TYPE_, intent(inout) ::sbuffer(:,:,:) MPP_TYPE_, intent(inout) ::nbuffer(:,:,:) integer, intent(in), optional ::flags logical, intent(in), optional ::complete integer, intent(in), optional ::position integer, intent(in), optional ::extra_halo character(len= *), intent(in), optional ::name integer, intent(in), optional ::tile_count MPP_TYPE_ ::d_type type(nestSpec), pointer ::update=> set_mismatch integer ::tile update_position nbuffersz l_size integer
l_size ! loop over number of fields ke do je do i
l_size ! loop over number of fields ke do je do ie to PE
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> unit
integer(long_kind) max_ticks
integer, parameter, public long
integer current_peset_num
subroutine, public copy(self, rhs)
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT end
character(len=32) units
No description.
character(len=32) etcfile
integer, parameter, public single
l_size ! loop over number of fields ke do j
l_size ! loop over number of fields ke do je do ie to je n if(.NOT. d_comm%R_do_buf(list)) cycle from_pe
logical module_is_initialized
character(len=input_str_length), dimension(:), allocatable, target, public input_nml_file
character(len=128) version
l_size ! loop over number of fields ke do je do ie to is
type(communicator), dimension(:), allocatable peset
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete error call increase mpp_nml request_multiply call MPP_TRANSMIT get_len end if return end subroutine MPP_TRANSMIT_ ! MPP_BROADCAST ! subroutine but that doesn t allow !broadcast to a subset of PEs This version and mpp_transmit will remain !backward compatible intent(inout) MPP_BROADCAST length
subroutine, private initialize
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this ! since that attempts to gather all data on PE start
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call mpp_error(FATAL, 'MPP_TRANSMIT:you cannot transmit to ANY_PE using MPI.') else if(to_pe.NE.NULL_PE) then !no other valid cases except NULL_PE call mpp_error(FATAL
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************subroutine READ_RECORD_CORE_(unit, field, nwords, data, start, axsiz) integer, intent(in) ::unit type(fieldtype), intent(in) ::field integer, intent(in) ::nwords MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in) ::start(:), axsiz(:) integer(SHORT_KIND) ::i2vals(nwords)!rab used in conjunction with transfer intrinsic to determine size of a variable integer(KIND=1) ::one_byte(8) integer ::word_sz!#ifdef __sgi integer(INT_KIND) ::ivals(nwords) real(FLOAT_KIND) ::rvals(nwords)!#else! integer ::ivals(nwords)! real ::rvals(nwords)!#endif real(DOUBLE_KIND) ::r8vals(nwords) pointer(ptr1, i2vals) pointer(ptr2, ivals) pointer(ptr3, rvals) pointer(ptr4, r8vals) if(mpp_io_stack_size< nwords) call mpp_io_set_stack_size(nwords) call mpp_error(FATAL, 'MPP_READ currently requires use_netCDF option') end subroutine READ_RECORD_CORE_ subroutine READ_RECORD_(unit, field, nwords, data, time_level, domain, position, tile_count, start_in, axsiz_in)!routine that is finally called by all mpp_read routines to perform the read!a non-netCDF record contains:! field ID! a set of 4 coordinates(is:ie, js:je) giving the data subdomain! a timelevel and a timestamp(=NULLTIME if field is static)! 3D real data(stored as 1D)!if you are using direct access I/O, the RECL argument to OPEN must be large enough for the above!in a global direct access file, record position on PE is given by %record.!Treatment of timestamp:! We assume that static fields have been passed without a timestamp.! Here that is converted into a timestamp of NULLTIME.! For non-netCDF fields, field is treated no differently, but is written! with a timestamp of NULLTIME. There is no check in the code to prevent! the user from repeatedly writing a static field. integer, intent(in) ::unit, nwords type(fieldtype), intent(in) ::field MPP_TYPE_, intent(inout) ::data(nwords) integer, intent(in), optional ::time_level type(domain2D), intent(in), optional ::domain integer, intent(in), optional ::position, tile_count integer, intent(in), optional ::start_in(:), axsiz_in(:) integer, dimension(size(field%axes(:))) ::start, axsiz integer ::tlevel !, subdomain(4) integer ::i, error, is, ie, js, je, isg, ieg, jsg, jeg type(domain2d), pointer ::io_domain=> tlevel if(PRESENT(start_in) .AND. PRESENT(axsiz_in)) then if(size(start(! the data domain and compute domain must refer to the subdomain being passed ! In this case
integer, parameter, public :: down
integer :: current_peset_max
logical :: etc_unit_is_stderr
type(mpp_type_list) :: datatypes
character(len=32) :: configfile
logical function received(this, seqno)
type(field_def), target, save :: root
integer, dimension(:), allocatable :: request_recv
integer, dimension(:), allocatable :: size_recv
real(r8), dimension(cast_m, cast_n) :: t
integer, dimension(:), allocatable :: pelist
integer, dimension(:), allocatable :: request_send
integer(long_kind) :: ticks_per_sec
integer(long_kind) :: start_tick
type(mpp_type), target, public :: mpp_byte
type(clock), dimension(max_clocks), save :: clocks
integer, dimension(:), allocatable :: type_recv