CLIM_Init_Oasis (OASIS3 main process)  |  prism_init_comp_proto (component model)
Enter the MPI environment:
  OASIS3:  call MPI_INIT(mpi_err)
  Model:   call MPI_INIT(mpi_err)
Get the size of MPI_COMM_WORLD and the rank of the local process:
  OASIS3:  call MPI_COMM_SIZE(MPI_COMM_WORLD, inumproc, mpi_err)
           call MPI_COMM_RANK(MPI_COMM_WORLD, imyrank, mpi_err)
  Model:   call MPI_Comm_Size(MPI_COMM_WORLD, mpi_size, mpi_err)
           call MPI_Comm_Rank(MPI_COMM_WORLD, mpi_rank, mpi_err)
Duplicate MPI_COMM_WORLD into the communicator mpi_comm used for the exchanges below:
  OASIS3:  call MPI_COMM_DUP(MPI_COMM_WORLD, mpi_comm, mpi_err)
  Model:   call MPI_COMM_DUP(MPI_COMM_WORLD, mpi_comm, mpi_err)
Gather all model names and generate a "color" from the model name:
  OASIS3:  call MPI_Allgather(cmynam, CLIM_Clength, MPI_CHARACTER,  &
                              cunames, CLIM_Clength, MPI_CHARACTER, mpi_comm, mpi_err)
  Model:   call MPI_Allgather(cmynam, CLIM_Clength, MPI_CHARACTER,  &
                              cunames, CLIM_Clength, MPI_CHARACTER, mpi_comm, mpi_err)
Split MPI_COMM_WORLD into local, disjoint communicators, one per model:
  OASIS3:  call MPI_COMM_SPLIT(MPI_COMM_WORLD, icolor, ikey, kcomm_local, mpi_err)
  Model:   call MPI_COMM_SPLIT(MPI_COMM_WORLD, icolor, ikey, ig_local_comm, mpi_err)
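The table does not show how the "color" is actually derived from the gathered model names. Below is a minimal sketch of the idea, assuming the color is simply the rank of the first process carrying the same model name; the subroutine itself and that rule are illustrative, while cmynam, cunames, icolor, ikey and ig_local_comm follow the names used above.

  ! Sketch only, not OASIS3 source: derive a "color" from the model name and
  ! split MPI_COMM_WORLD into one disjoint communicator per model.
  SUBROUTINE split_by_model_name (cmynam, mpi_comm, ig_local_comm)
    USE mpi
    IMPLICIT NONE
    CHARACTER(len=*), INTENT(in)  :: cmynam          ! local model name (length CLIM_Clength)
    INTEGER,          INTENT(in)  :: mpi_comm        ! duplicate of MPI_COMM_WORLD
    INTEGER,          INTENT(out) :: ig_local_comm   ! local, per-model communicator
    CHARACTER(len=LEN(cmynam)), ALLOCATABLE :: cunames(:)
    INTEGER :: mpi_size, mpi_rank, mpi_err, icolor, ikey, jn

    CALL MPI_Comm_size (mpi_comm, mpi_size, mpi_err)
    CALL MPI_Comm_rank (mpi_comm, mpi_rank, mpi_err)
    ALLOCATE (cunames(0:mpi_size-1))

    ! every process learns the model name of every other process
    CALL MPI_Allgather (cmynam, LEN(cmynam), MPI_CHARACTER,   &
                        cunames, LEN(cmynam), MPI_CHARACTER,  &
                        mpi_comm, mpi_err)

    ! assumed rule: color = rank of the first process with the same name
    icolor = mpi_rank
    DO jn = 0, mpi_size-1
       IF (cunames(jn) == cmynam) THEN
          icolor = jn
          EXIT
       END IF
    END DO
    ikey = mpi_rank

    ! processes sharing a color end up in the same local communicator
    CALL MPI_Comm_split (MPI_COMM_WORLD, icolor, ikey, ig_local_comm, mpi_err)
    DEALLOCATE (cunames)
  END SUBROUTINE split_by_model_name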
Exchange of the model number:
  OASIS3:  send to each process (involved in the coupling or not) its model number
           "mynummod", obtained by comparing the "color" of that process with the
           list of colors icolmods:
             call MPI_Recv(ibuff, 1, MPI_INTEGER, jn, itagcol, mpi_comm, mpi_status, mpi_err)
             call MPI_Send(mynummod, 1, MPI_INTEGER, jn, itagcol, mpi_comm, mpi_err)
  Model:   get the model number from Oasis (process 0 in the global communicator):
             call MPI_Send(icolor, 1, MPI_INTEGER, 0, itagcol, mpi_comm, mpi_err)
             call MPI_Recv(ibuff, 1, MPI_INTEGER, 0, itagcol, mpi_comm, mpi_status, mpi_err)
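Seen from both ends, this step is a small request/reply loop on process 0. The sketch below is illustrative only: the subroutine and the lookup loop are assumptions, while icolor, icolmods, knmods, mynummod and itagcol follow the names used above.

  ! Sketch only: process 0 (OASIS3) answers each process's "color" with the
  ! matching model number taken from the list icolmods read in the namcouple.
  SUBROUTINE exchange_model_number (mpi_comm, itagcol, icolor, knmods, icolmods, mynummod)
    USE mpi
    IMPLICIT NONE
    INTEGER, INTENT(in)  :: mpi_comm, itagcol, icolor, knmods
    INTEGER, INTENT(in)  :: icolmods(knmods)   ! one color per model
    INTEGER, INTENT(out) :: mynummod           ! model number of the local process
    INTEGER :: mpi_rank, inumproc, mpi_err, jn, jm, ibuff, imod
    INTEGER :: mpi_status(MPI_STATUS_SIZE)

    CALL MPI_Comm_rank (mpi_comm, mpi_rank, mpi_err)
    CALL MPI_Comm_size (mpi_comm, inumproc, mpi_err)

    IF (mpi_rank == 0) THEN        ! OASIS3 main process
       mynummod = 0                ! not meaningful for OASIS3 itself
       DO jn = 1, inumproc-1
          CALL MPI_Recv (ibuff, 1, MPI_INTEGER, jn, itagcol, mpi_comm,  &
                         mpi_status, mpi_err)
          imod = 0                 ! compare the received color with icolmods
          DO jm = 1, knmods
             IF (ibuff == icolmods(jm)) imod = jm
          END DO
          CALL MPI_Send (imod, 1, MPI_INTEGER, jn, itagcol, mpi_comm, mpi_err)
       END DO
    ELSE                           ! component model process
       CALL MPI_Send (icolor, 1, MPI_INTEGER, 0, itagcol, mpi_comm, mpi_err)
       CALL MPI_Recv (mynummod, 1, MPI_INTEGER, 0, itagcol, mpi_comm,  &
                      mpi_status, mpi_err)
    END IF
  END SUBROUTINE exchange_model_number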
Exchange of the information read in the namcouple:
  OASIS3:  send the namcouple information to the model processes:
             call MPI_Send(knmods, 1, MPI_INTEGER, jn, itagcol, mpi_comm, mpi_err)
                 (number of models)
             call MPI_Send(ig_clim_nfield, 1, MPI_INTEGER, jn, itagcol+1, mpi_comm, mpi_err)
                 (total number of fields)
             call MPI_Send(il_clim_maxport, 1, MPI_INTEGER, jn, itagcol+1, mpi_comm, mpi_err)
                 (maximum number of prism_def_var_proto calls made by one component in the
                 coupled model; optional if it is smaller than twice the number of fields
                 exchanged by this OASIS process)
             call MPI_Send(rl_work, iposbuf, MPI_PACKED, jn, itagcol+2, mpi_comm, mpi_err)
  Model:   receive the information from Oasis:
             call MPI_Recv(knmods, 1, MPI_INTEGER, 0, itagcol, mpi_comm, mpi_status, mpi_err)
             call MPI_Recv(ig_clim_nfield, 1, MPI_INTEGER, 0, itagcol+1, mpi_comm, mpi_status, mpi_err)
             call MPI_Recv(il_clim_maxport, 1, MPI_INTEGER, 0, itagcol+1, mpi_comm, mpi_status, mpi_err)
             call MPI_Recv(rl_work, il_size, MPI_PACKED, 0, itagcol+2, mpi_comm, mpi_status, mpi_err)
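The last message of this step is a packed buffer; the table does not show how rl_work is filled or read. Below is a minimal sketch of the MPI_PACKED pattern, assuming the buffer is a plain byte array carrying, for example, one integer and one small real array. What OASIS3 actually packs is more extensive; il_size, the tag offset itagcol+2 and the variable names follow the table, everything else is illustrative.

  ! Sketch only: pack heterogeneous namcouple data into rl_work on the OASIS3
  ! side, send it as MPI_PACKED, and unpack it in the same order on the model side.
  SUBROUTINE send_packed_info (mpi_comm, jn, itagcol)
    USE mpi
    IMPLICIT NONE
    INTEGER, INTENT(in) :: mpi_comm, jn, itagcol
    INTEGER, PARAMETER  :: il_size = 1024      ! buffer size in bytes (illustrative)
    CHARACTER(len=1)    :: rl_work(il_size)    ! declared here as a byte buffer
    INTEGER :: iposbuf, mpi_err, ifields
    REAL    :: rtimes(4)

    ifields = 4                                ! example payload only
    rtimes  = (/ 0., 3600., 7200., 10800. /)

    iposbuf = 0                                ! pack from the start of the buffer
    CALL MPI_Pack (ifields, 1, MPI_INTEGER, rl_work, il_size, iposbuf,  &
                   mpi_comm, mpi_err)
    CALL MPI_Pack (rtimes, 4, MPI_REAL, rl_work, il_size, iposbuf,      &
                   mpi_comm, mpi_err)

    ! iposbuf now holds the number of bytes actually packed
    CALL MPI_Send (rl_work, iposbuf, MPI_PACKED, jn, itagcol+2, mpi_comm, mpi_err)
  END SUBROUTINE send_packed_info

  SUBROUTINE recv_packed_info (mpi_comm, itagcol)
    USE mpi
    IMPLICIT NONE
    INTEGER, INTENT(in) :: mpi_comm, itagcol
    INTEGER, PARAMETER  :: il_size = 1024
    CHARACTER(len=1)    :: rl_work(il_size)
    INTEGER :: iposbuf, mpi_err, ifields
    INTEGER :: mpi_status(MPI_STATUS_SIZE)
    REAL    :: rtimes(4)

    CALL MPI_Recv (rl_work, il_size, MPI_PACKED, 0, itagcol+2, mpi_comm,  &
                   mpi_status, mpi_err)

    iposbuf = 0                                ! unpack in exactly the same order
    CALL MPI_Unpack (rl_work, il_size, iposbuf, ifields, 1, MPI_INTEGER,  &
                     mpi_comm, mpi_err)
    CALL MPI_Unpack (rl_work, il_size, iposbuf, rtimes, 4, MPI_REAL,      &
                     mpi_comm, mpi_err)
  END SUBROUTINE recv_packed_info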
Exchange of the grid-writing flag:
  OASIS3:  send the starting flag to all model processes
           (grids_start = 0: nothing to do; grids_start = 1: .nc grid files to write):
             call MPI_Send(grids_start, ilen, itype, ip, itag, mpi_comm, mpi_err)
  Model:   receive the flag 'grids_start' stating whether or not grid writing is needed:
             call MPI_Recv(grids_start, 1, MPI_INTEGER, 0, itagcol+3, mpi_comm, mpi_status, mpi_err)
CLIM_Init_Oasis (OASIS3 main process)  |  prism_start_grids_writing (component model)

If grids_start == 1, exchange the grid file names and the variable-name suffixes:
  OASIS3:  call MPI_Send(cgrdnam, ilen, itype, idproc, itag, mpi_comm, mpi_err)    (grids.nc)
           call MPI_Send(cmsknam, ilen, itype, idproc, itag, mpi_comm, mpi_err)    (masks.nc)
           call MPI_Send(csurnam, ilen, itype, idproc, itag, mpi_comm, mpi_err)    (areas.nc)
           call MPI_Send(cglonsuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)   (longitudes suffix)
           call MPI_Send(cglatsuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)   (latitudes suffix)
           call MPI_Send(crnlonsuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)  (corner longitudes suffix)
           call MPI_Send(crnlatsuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)  (corner latitudes suffix)
           call MPI_Send(cmsksuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)    (masks suffix)
           call MPI_Send(csursuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)    (areas suffix)
           call MPI_Send(cangsuf, ilen, itype, idproc, itag, mpi_comm, mpi_err)    (angles suffix)
  Model:   call MPI_Recv(cgrdnam, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(cmsknam, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(csurnam, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(cglonsuf, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(cglatsuf, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(crnlonsuf, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(crnlatsuf, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(cmsksuf, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(csursuf, len, type, source, tag, mpi_comm, status, mpi_err)
           call MPI_Recv(cangsuf, len, type, source, tag, mpi_comm, status, mpi_err)
CLIM_Init_Oasis (OASIS3 main process)  |  prism_terminate_grids_writing (component model)

Acknowledge the end of the grid writing:
  OASIS3:  wait until the writing is finished:
             call MPI_Recv(grids_done, ilen, itype, idproc, itag, mpi_comm, mpi_status, mpi_err)
  Model:   once the grid files are written, send grids_done = 1:
             call MPI_Send(grids_done, len, type, dest, tag, mpi_comm, mpi_err)
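Put together, the grid-writing handshake seen from a component model process looks roughly as follows. This is a sketch only: rank 0 of mpi_comm is assumed to be the OASIS3 main process, il_len and the tags other than itagcol+3 are illustrative, and the actual NetCDF writing is left as a comment.

  ! Sketch only: model-side view of the grid-writing handshake described above.
  SUBROUTINE model_side_grids_writing (mpi_comm, itagcol)
    USE mpi
    IMPLICIT NONE
    INTEGER, INTENT(in)   :: mpi_comm, itagcol
    INTEGER, PARAMETER    :: il_len = 8           ! string length (illustrative)
    CHARACTER(len=il_len) :: cgrdnam, cmsknam, csurnam, cglonsuf, cglatsuf,  &
                             crnlonsuf, crnlatsuf, cmsksuf, csursuf, cangsuf
    INTEGER :: grids_start, grids_done, itag, itagfin, mpi_err
    INTEGER :: mpi_status(MPI_STATUS_SIZE)

    itag    = itagcol + 4     ! illustrative tag for the names and suffixes
    itagfin = itagcol + 5     ! illustrative tag for the final acknowledgement

    ! prism_start_grids_writing: learn whether the grid files must be written
    CALL MPI_Recv (grids_start, 1, MPI_INTEGER, 0, itagcol+3, mpi_comm,  &
                   mpi_status, mpi_err)

    IF (grids_start == 1) THEN
       ! receive the file names and variable-name suffixes in the order OASIS3 sends them
       CALL MPI_Recv (cgrdnam,   il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (cmsknam,   il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (csurnam,   il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (cglonsuf,  il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (cglatsuf,  il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (crnlonsuf, il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (crnlatsuf, il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (cmsksuf,   il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (csursuf,   il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)
       CALL MPI_Recv (cangsuf,   il_len, MPI_CHARACTER, 0, itag, mpi_comm, mpi_status, mpi_err)

       ! ... write the grids, masks and areas NetCDF files here ...

       ! prism_terminate_grids_writing: tell OASIS3 the writing is finished
       ! (sent here only when files were written, matching the wait above; assumption)
       grids_done = 1
       CALL MPI_Send (grids_done, 1, MPI_INTEGER, 0, itagfin, mpi_comm, mpi_err)
    END IF
  END SUBROUTINE model_side_grids_writing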