/*============================================================================*
MPI INTERFACES
This module encapsulates functions of the Message Passing Interface (MPI) for
coordination among multiprocessors. All MPI calls should be collected here to
provide a degree of portability and documentation for parallel programs.
*/
#include <stdlib.h>
#include "mpi.h"
int cproc; //Current processor's number.
int nproc; //Total number of processors.
/*----------------------------------------------------------------------------*
1. OPEN MULTIPROCESSOR OPERATIONS
Each process must call this routine before beginning. It initializes
communications, determines the number of processors that are participating, and
assigns a number to the current processor.
ENTRY: No significant conditions.
EXIT: Multiprocessing operations have commenced.
'nproc' contains the total number of processors allocated.
'cproc' contains the number of the current processor, in the range 0 to
'nproc-1'.
*/
int mpOpen()
{
  MPI_Init((int*)0, (char***)0);         //Initialize message processing (MPI-2
                                         //permits null argument pointers when
                                         //the command line is not needed).
  MPI_Comm_rank(MPI_COMM_WORLD, &cproc); //Determine this processor's number.
  MPI_Comm_size(MPI_COMM_WORLD, &nproc); //Determine the number of processors.
  return 0;                              //Return to caller.
}
/*----------------------------------------------------------------------------*
2. CLOSE MULTIPROCESSING OPERATIONS
Each process should call this routine after all multiprocessing is complete,
just before ceasing operations.
ENTRY: All multiprocessing operations are complete and no further MPI calls
        will be made.
EXIT:  Multiprocessing operations have closed; the program may exit.
*/
int mpClose()
{ MPI_Finalize();                        //Terminate all message passing.
  return 0;                              //Return to caller.
}
/*----------------------------------------------------------------------------*
3. SYNCHRONIZE DATA AMONG ALL PROCESSORS
This routine assembles an 'nproc' by 'w' matrix of data from all processors,
where 'nproc' is the number of processors and 'w' is the number of data elements
shared by each processor. It is typically called at the end of each processing
step to synchronize all processors and put them all in a common data state.
Processing is delayed until all processors have called this routine. Therefore,
all processors must call at corresponding points in the cycle or operations will
deadlock.
ENTRY: 'local' is a vector of 'w' data elements (double precision floating
point) that are this processor's contribution to the global data set.
'global' is a 'nproc' by 'w' matrix to receive the values of 'local' from
all processors.
'w' contains the width of 'local' and 'global'.
EXIT: 'global[n]' contains a copy of the contents of 'local' from each
processor 'n', where 'n' ranges from 0 to 'nproc-1'. In particular,
this processor's 'local', passed on entry, is in row 'global[cproc]'.
*/
int mpCommon(double *global, double *local, int w)
{ MPI_Allgather(                         //Gather every processor's 'local'
    local,  w, MPI_DOUBLE,               //vector into the shared 'global'
    global, w, MPI_DOUBLE,               //matrix, blocking until every
    MPI_COMM_WORLD);                     //processor has contributed its row.
  return 0;                              //Return to caller.
}
/*----------------------------------------------------------------------------*
4a. SEND DATA FROM MASTER PROCESSOR TO ALL PROCESSORS
This routine sends data from the master processor, numbered 0, to all
processors allocated, including itself. Data to be sent reside in an 'nproc' by
'w' matrix of data, where 'nproc' is the number of processors and 'w' is the
number of data elements shared with each processor. It is typically called at
the beginning of each processing step to synchronize all processors and give
each processor the data to carry out the next step. The root process must call this routine
and all others must call the companion routine 'mpSubordinateReceive' at
corresponding points in the cycle or operations will deadlock.
ENTRY: 'cproc' is 0.
'global' is a 'nproc' by 'w' matrix containing values to be sent to each
processor 'n' in row 'global[n]'.
'w' contains the width of 'global[i]'.
EXIT: 'global[n]' has been sent to each processor 'n'.
*/
int mpMasterSend(double *global, int w)
{
double *temp = //Allocate a temporary area to receive
(double*) malloc(w*sizeof(double)); //the master's data back from itself.
MPI_Scatter(global, w, MPI_DOUBLE, //Send data from 'global[n]' to each
temp, w, MPI_DOUBLE, //processor 'n'.
0, MPI_COMM_WORLD);
free(temp); //Release the temporary area and
return 0; //return to caller.
}
/*
4b. RECEIVE DATA FROM MASTER PROCESSOR
This routine receives data from the master processor, numbered 0, from its
call to 'mpMasterSend'. All subordinate processors must call this routine at the
proper point in their cycle or operations will deadlock.
ENTRY: 'cproc' is not 0.
'local' is vector of 'w' elements to receive data from the master
processor.
EXIT: 'local' contains the data received.
*/
int mpSubordinateReceive(double *local, int w)
{ MPI_Scatter(                           //Receive this processor's row from
    (double*)0, 0, MPI_DOUBLE,           //processor 0; the send arguments are
    local, w, MPI_DOUBLE,                //ignored at non-root ranks.
    0, MPI_COMM_WORLD);
  return 0;                              //Return to caller.
}
/*----------------------------------------------------------------------------*
5a. RECEIVE DATA FROM SUBORDINATE PROCESSORS
This routine receives data from subordinate processors, whose numbers are
greater than 0. Data are assembled in an 'nproc' by 'w' matrix of data, where
'nproc' is the number of processors and 'w' is the number of data elements
shared with each processor. It is typically called at the end of each processing
step to receive results back from all processors and compute the data for the
next step. The master process must call this routine and all others must call
the companion routine 'mpSubordinateSend' at corresponding points in the cycle
or operations will deadlock.
ENTRY: 'cproc' is 0.
'global[nproc][w]' is an area to receive values for each processor 'n' in
row 'global[n]'.
'global[0]' contains any results from the master processor, to be sent
back to itself.
'w' contains the width of 'global[i]'.
EXIT: 'global[n]' contains the results from each processor 'n'. 'global[0]' is
unchanged.
*/
int mpMasterReceive(double *global, int w)
{ int i;
double *temp = //Allocate a temporary area to receive
(double*) malloc(w*sizeof(double)); //the master's data back from itself.
for(i=0; i