include(dom.inc)

SUBROUTINE slave_return(Gtot, Lbtot, Htot, Q_rtot, Qw, nnodes, &
& nbfaces)

! ================================================================!
!                                                                 !
! slave_return.F: Sends the resulting vectors to the master proc. !
!                                                                 !
! in  : Gtot, Lbtot (nnodes), Htot (nbfaces),                     !
!       Q_rtot (3,nnodes), Qw (3,nbfaces), nnodes, nbfaces        !
! out : none (the results are sent as MPI messages; the master    !
!       accumulates them through master_integrate)                !
!                                                                 !
! author : J. AMAYA (october 2007)                                !
!                                                                 !
! ================================================================!

USE mod_pmm
#ifdef USEPALM
USE palmlib
#endif

IMPLICIT NONE

include 'pmm_constants.h'   ! PMM_HOST and PMM_RETURN

! IN
DOM_INT :: nnodes, nbfaces
DOM_REAL,DIMENSION(3,nnodes) :: Q_rtot
DOM_REAL,DIMENSION(3,nbfaces) :: Qw
DOM_REAL,DIMENSION(nnodes) :: Gtot, Lbtot
DOM_REAL,DIMENSION(nbfaces) :: Htot

! LOCAL
DOM_INT :: ierr   ! MPI error code
DOM_INT :: i      ! loop index for the commented-out checks below

! ----------------!
! Testing results !
! ----------------!
! DO i=1,nnodes
!   print*, " (",pmm_rank,") Lbtot(",i,")=", Lbtot(i)
! ENDDO
! DO i=1,nnodes
!   print*, " (",pmm_rank,") Gtot(",i,")=", Gtot(i)
! ENDDO

! --------------------------------------------------!
! Send the vectors only if this is not the master   !
! process: the master integrates its contribution   !
! directly.                                         !
! --------------------------------------------------!
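! Protocol: each slave first sends its own rank, so that the
! master knows which partition the five buffers that follow
! belong to. All messages are tagged PMM_RETURN.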

IF (pmm_rank.ne.PMM_HOST) THEN
! print*, " (",pmm_rank,") MPI sending results"

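! The two branches below are identical except for the
! communicator: PL_COMM_EXEC when running under PALM,
! MPI_COMM_WORLD otherwise.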
#ifdef USEPALM
CALL MPI_SEND(pmm_rank, 1, MPI_INTEGER, PMM_HOST, PMM_RETURN, &
& PL_COMM_EXEC, ierr)
CALL MPI_SEND(Gtot, nnodes, MPI_DOUBLE_PRECISION, PMM_HOST, &
& PMM_RETURN, PL_COMM_EXEC, ierr)
CALL MPI_SEND(Htot, nbfaces, MPI_DOUBLE_PRECISION, &
& PMM_HOST, PMM_RETURN, PL_COMM_EXEC, ierr)
! Qw is dimensioned (3,nbfaces): send all 3*nbfaces values
CALL MPI_SEND(Qw, 3*nbfaces, MPI_DOUBLE_PRECISION, &
& PMM_HOST, PMM_RETURN, PL_COMM_EXEC, ierr)
CALL MPI_SEND(Lbtot, nnodes, MPI_DOUBLE_PRECISION, PMM_HOST, &
& PMM_RETURN, PL_COMM_EXEC, ierr)
CALL MPI_SEND(Q_rtot, 3*nnodes, MPI_DOUBLE_PRECISION, &
& PMM_HOST, PMM_RETURN, PL_COMM_EXEC, ierr)
#else
CALL MPI_SEND(pmm_rank, 1, MPI_INTEGER, PMM_HOST, PMM_RETURN, &
& MPI_COMM_WORLD, ierr)
CALL MPI_SEND(Gtot, nnodes, MPI_DOUBLE_PRECISION, PMM_HOST, &
& PMM_RETURN, MPI_COMM_WORLD, ierr)
CALL MPI_SEND(Htot, nbfaces, MPI_DOUBLE_PRECISION, &
& PMM_HOST, PMM_RETURN, MPI_COMM_WORLD, ierr)
! Qw is dimensioned (3,nbfaces): send all 3*nbfaces values
CALL MPI_SEND(Qw, 3*nbfaces, MPI_DOUBLE_PRECISION, &
& PMM_HOST, PMM_RETURN, MPI_COMM_WORLD, ierr)
CALL MPI_SEND(Lbtot, nnodes, MPI_DOUBLE_PRECISION, PMM_HOST, &
& PMM_RETURN, MPI_COMM_WORLD, ierr)
CALL MPI_SEND(Q_rtot, 3*nnodes, MPI_DOUBLE_PRECISION, &
& PMM_HOST, PMM_RETURN, MPI_COMM_WORLD, ierr)
#endif

ELSE
! print*, " MASTER (slave_return) >> Lbmax =", MAXVAL(Lbtot)
CALL master_integrate(Gtot, Lbtot, Htot, Q_rtot, Qw, PMM_HOST)
ENDIF

END SUBROUTINE slave_return
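
! ----------------------------------------------------------------!
! Illustration (not part of the original routine): a minimal      !
! sketch of the matching receive loop on the master side, shown   !
! for the plain-MPI branch only. The name master_collect and the  !
! nslaves argument are assumptions, as is the premise that all    !
! slaves share the same nnodes/nbfaces sizes; MPI constants are   !
! assumed available here the same way they are in slave_return.   !
! master_integrate is the accumulation routine called in the      !
! ELSE branch above.                                              !
! ----------------------------------------------------------------!
SUBROUTINE master_collect(Gtot, Lbtot, Htot, Q_rtot, Qw, nnodes, &
& nbfaces, nslaves)

USE mod_pmm

IMPLICIT NONE

include 'pmm_constants.h'

DOM_INT :: nnodes, nbfaces, nslaves
DOM_REAL,DIMENSION(3,nnodes) :: Q_rtot
DOM_REAL,DIMENSION(3,nbfaces) :: Qw
DOM_REAL,DIMENSION(nnodes) :: Gtot, Lbtot
DOM_REAL,DIMENSION(nbfaces) :: Htot

DOM_INT :: ierr, islave, src
DOM_INT,DIMENSION(MPI_STATUS_SIZE) :: stat

! One message group per slave: the sender's rank arrives first,
! then the five buffers, in the order posted by slave_return.
! MPI preserves message order per (source, communicator), so
! receiving from the specific rank src stays correctly matched
! even when several slaves send concurrently.
DO islave = 1, nslaves
  CALL MPI_RECV(src, 1, MPI_INTEGER, MPI_ANY_SOURCE, PMM_RETURN, &
  &             MPI_COMM_WORLD, stat, ierr)
  CALL MPI_RECV(Gtot, nnodes, MPI_DOUBLE_PRECISION, src, &
  &             PMM_RETURN, MPI_COMM_WORLD, stat, ierr)
  CALL MPI_RECV(Htot, nbfaces, MPI_DOUBLE_PRECISION, src, &
  &             PMM_RETURN, MPI_COMM_WORLD, stat, ierr)
  CALL MPI_RECV(Qw, 3*nbfaces, MPI_DOUBLE_PRECISION, src, &
  &             PMM_RETURN, MPI_COMM_WORLD, stat, ierr)
  CALL MPI_RECV(Lbtot, nnodes, MPI_DOUBLE_PRECISION, src, &
  &             PMM_RETURN, MPI_COMM_WORLD, stat, ierr)
  CALL MPI_RECV(Q_rtot, 3*nnodes, MPI_DOUBLE_PRECISION, src, &
  &             PMM_RETURN, MPI_COMM_WORLD, stat, ierr)
  ! Accumulate this slave's contribution
  CALL master_integrate(Gtot, Lbtot, Htot, Q_rtot, Qw, src)
ENDDO

END SUBROUTINE master_collect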