FV3 Bundle
threadloc.c
Go to the documentation of this file.
1 /***********************************************************************
2  * GNU Lesser General Public License
3  *
4  * This file is part of the GFDL Flexible Modeling System (FMS).
5  *
6  * FMS is free software: you can redistribute it and/or modify it under
7  * the terms of the GNU Lesser General Public License as published by
8  * the Free Software Foundation, either version 3 of the License, or (at
9  * your option) any later version.
10  *
11  * FMS is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14  * for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FMS. If not, see <http://www.gnu.org/licenses/>.
18  **********************************************************************/
19 /* Fortran-callable routine for returning the MLD ("brick"?) where
20  this thread/process is located. */
21 #include <stdio.h>
22 #include <unistd.h>
23 #ifdef use_libMPI
24 #include <mpi.h>
25 #endif
26 int pe, npes;
27 
28 #ifdef __sgi
29 #include <sys/pmo.h>
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 
33 extern pmo_handle_t *mpi_sgi_mld;
34 extern int mpi_sgi_dsm_ppm;
35 
36 int find_nodenum(int mynodedev);
37 
/* Fortran-callable (trailing-underscore) query of the MLD/node this
 * thread is running on.  Strategy: allocate a large automatic array,
 * touch it so the OS commits pages locally (first-touch placement),
 * then ask IRIX which node device backs the first page and translate
 * that device to a node number.
 * Returns: the hardware node number of this thread's memory.
 * NOTE(review): the ~4 MB automatic array assumes a generous stack
 * limit on the target SGI systems — confirm before reuse elsewhere. */
int mld_id_() {
/* pmo_handle_t mymld; */
/* int mynodedev; */
/* int mymemorynode; */
#define SIZE 1000000
 int array[SIZE];
 pm_pginfo_t pginfo_buf;
 int thisdev, thisnode;

 bzero( array, sizeof(array) ); /* zero to force allocation */

 /* Query placement of the first page of the freshly-touched array. */
 __pm_get_page_info( array, 1, &pginfo_buf, 1 );
 thisdev = pginfo_buf.node_dev;
 /* Translate the node device inode into an /hw/nodenum index. */
 thisnode = find_nodenum(thisdev);
 return thisnode;
}
54 
/* Translate a node device id (an inode number from pm_pginfo_t.node_dev)
 * into its hardware node index by scanning /hw/nodenum/0, /hw/nodenum/1, ...
 * Returns: the matching node index, or -1 if no /hw/nodenum/<i> entry
 * matches (or the hierarchy does not exist).
 *
 * Fix: the original never checked stat()'s return value, so once the
 * node entries were exhausted it compared against stale/indeterminate
 * sbuf contents and looped forever.  A failing stat() now terminates
 * the scan.  snprintf replaces sprintf to bound the path write. */
int find_nodenum(int mynodedev) {
 int i;
 struct stat sbuf;
 char buff[80];
 for (i=0; ;i++) {
  snprintf(buff, sizeof(buff), "/hw/nodenum/%d", i);
  if (stat(buff, &sbuf) != 0)
   return -1; /* ran past the last node entry: no match */
  if (sbuf.st_ino == mynodedev)
   return(i);
 }
}
67 #else
/* Portability stub: platforms without SGI memory-locality domains
 * always report node 0. */
int mld_id_() {
  return 0;
}
71 #endif /* sgi */
72 
73 #ifdef test_threadloc
74 void main(int argc, char **argv) {
75  MPI_Init( &argc, &argv );
76  MPI_Comm_rank( MPI_COMM_WORLD, &pe );
77  MPI_Comm_size( MPI_COMM_WORLD, &npes );
78 #ifdef _OPENMP
79 #pragma omp parallel
80  {
81  int thrnum = omp_get_thread_num();
82  printf( "pe=%d thrnum=%d mld=%d\n", pe, thrnum, mld_id_() );
83  }
84 #endif
85  printf( "pe=%d mld=%d\n", pe, mld_id_() );
86  MPI_Finalize();
87 }
88 #endif
l_size ! loop over number of fields ke do je do i
int pe
Definition: threadloc.c:26
int npes
Definition: threadloc.c:26
int mld_id_()
Definition: threadloc.c:68
************************************************************************GNU Lesser General Public License **This file is part of the GFDL Flexible Modeling System(FMS). ! *! *FMS is free software without even the implied warranty of MERCHANTABILITY or *FITNESS FOR A PARTICULAR PURPOSE See the GNU General Public License *for more details **You should have received a copy of the GNU Lesser General Public *License along with FMS If see< http:! ***********************************************************************!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! MPP_TRANSMIT !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! subroutine MPP_TRANSMIT_(put_data, put_len, to_pe, get_data, get_len, from_pe, block, tag, recv_request, send_request)!a message-passing routine intended to be reminiscent equally of both MPI and SHMEM!put_data and get_data are contiguous MPP_TYPE_ arrays!at each call, your put_data array is put to to_pe 's get_data! your get_data array is got from from_pe 's put_data!i.e we assume that typically(e.g updating halo regions) each PE performs a put _and_ a get!special PE designations:! NULL_PE:to disable a put or a get(e.g at boundaries)! ANY_PE:if remote PE for the put or get is to be unspecific! 
ALL_PES:broadcast and collect operations(collect not yet implemented)!ideally we would not pass length, but this f77-style call performs better(arrays passed by address, not descriptor)!further, this permits< length > contiguous words from an array of any rank to be passed(avoiding f90 rank conformance check)!caller is responsible for completion checks(mpp_sync_self) before and after integer, intent(in) ::put_len, to_pe, get_len, from_pe MPP_TYPE_, intent(in) ::put_data(*) MPP_TYPE_, intent(out) ::get_data(*) logical, intent(in), optional ::block integer, intent(in), optional ::tag integer, intent(out), optional ::recv_request, send_request logical ::block_comm integer ::i MPP_TYPE_, allocatable, save ::local_data(:) !local copy used by non-parallel code(no SHMEM or MPI) integer ::comm_tag integer ::rsize if(.NOT.module_is_initialized) call mpp_error(FATAL, 'MPP_TRANSMIT:You must first call mpp_init.') if(to_pe.EQ.NULL_PE .AND. from_pe.EQ.NULL_PE) return block_comm=.true. if(PRESENT(block)) block_comm=block if(debug) then call SYSTEM_CLOCK(tick) write(stdout_unit,'(a, i18, a, i6, a, 2i6, 2i8)')&'T=', tick, ' PE=', pe, ' MPP_TRANSMIT begin:to_pe, from_pe, put_len, get_len=', to_pe, from_pe, put_len, get_len end if comm_tag=DEFAULT_TAG if(present(tag)) comm_tag=tag!do put first and then get if(to_pe.GE.0 .AND. to_pe.LT.npes) then!use non-blocking sends if(debug .and.(current_clock.NE.0)) call SYSTEM_CLOCK(start_tick)!z1l:truly non-blocking send.! if(request(to_pe).NE.MPI_REQUEST_NULL) then !only one message from pe-> to_pe in queue *PE waiting for to_pe ! call error else get_len so only do gets but you cannot have a pure get with MPI call a get means do a wait to ensure put on remote PE is complete stat
program main
Definition: xgrid.F90:5439