You are on page 1of 13

..

: .., ..

/ . .. - :
-
, 2009. 22 .

.

, ,

.

13


.
-

2009

: ..


1. ......................................................4
2. .....................................................4
3. , ..................4
3.1. . ....................................................... 4
3.2 . ................................... 7
3.3 MPI
, ...... 10
4. .................................................19
5. .............................................22
5. ............................................................22
6. .................................................................22

1.

;

MPI-;


;

MPI.

2.
UNIX , MPI,

.
3. ,
3.1. .
MPI ,

,
,
,
Intel MPI,

Intel.
Open
source

MPI

mpich. mpich
2.
, mpich 2
, , , windows,
Linux.

rpm-based
CentOS.

SSH. SSH

windows

putty
(http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html). Host Name
( IP ) .
cluster410.ssau.ru, (. 1).
unicode SSH
putty Window->Translation
UTF-8, -
KOI8-R.

Linux
. WinSCP
http://winscp.net/eng/docs/lang:ru.
, mpi

,
SSH.
. Linux
,
, , nano vim.
, mcedit, ,
Midnight Commander
mc ( ). mc Far Manager
Windows.
Norton Commander for DOS. NC
mc .

1. ssh .
SSH
Linux ,
shell-,
.
ftp sftp,
WinSCP, ,
5

2. Midnight Commander.
,
. mc

Ctrl+O.

Shift+F4.
,
.

MS Windows.
mc
,
.
.
3.2 .
0. , ,
Hello_mpi.c
1. :
mpicc -o Hello_mpi Hello_mpi.c
mpicc ,
gcc
mpi.
2. :
qsub Hello_mpi.pbs,
, ,
.
MPIPBS, Hello_mpi.pbs.
.
3.
qstat.
[fursov@main mpi]$ qstat
Job id
Name
User
Time Use S Queue
------------------------- ---------------- --------------- -------- - -----
236.main
Hello_mpi fursov
0
R dque
7


. Job id , Name
, Hello_mpi.pbs, Time
, S .
S : R , , E
, C
.
4.

_.o_

_.e_,
(STDOUT)
(STDERR).
.pbs
.pbs .
#PBS -N Hello_mpi
#PBS -l nodes=2:ppn=4
#PBS -l walltime=00:01:00
cd $PBS_O_WORKDIR
mpiexec-pbs -np 8 /home/fursov/mpi/Hello_mpi
:
#PBS -N Hello_mpi

, , 2 (nodes) 4
(ppn). ,
8 .
#PBS -l nodes=2:ppn=4
8


(walltime) ::,

.
#PBS -l walltime=00:01:00

, ,
. ,

walltime
. walltime
-,
60 .
. ,

, ,
.
cd $PBS_O_WORKDIR

3.3 MPI
,
.

()
.
,

, . .3 (

);


.

( MPI_Send
MPI_Recv).

, .
mpiexec-pbs -np 8 /home/fursov/mpi/Hello_mpi
,
.
#PBS -j oe
, pbs
qsub,
:
qdel _.

3. MPI .
10

MPI C
(int, char .),
MPI_INT, MPI_CHAR
.. (. .1).
1. MPI
.
MPI

C
MPI_INT
signed int
MPI_UNSIGNED
Unsigned int
MPI_SHORT
signed int
MPI_LONG
signed long int
MPI_UNSIGNED_SHORT
unsigned int
MPI_UNSIGNED_LONG
unsigned long int
MPI_FLOAT
Float
MPI_DOUBLE
Double
MPI_LONG_DOUBLE
long double
MPI_UNSIGNED_CHAR
unsigned char
MPI_CHAR
signed char
MPI
(, ),
MPI .
MPI-
(
); MPI
MPI_COMM_WORLD
0 size.
MPI
,
, ..
:
Hello world from process i of n

11

i , n .
,
helloworld.c:
#include <stdio.h>
#include "mpi.h"

/*
 * Minimal MPI program: every process prints its own rank and the total
 * number of processes in MPI_COMM_WORLD.
 *
 * Fixed relative to the printed listing: the first #include line was
 * garbled (stdio.h is required for printf), and the obsolete K&R-style
 * parameter declarations were converted to an ANSI prototype.
 */
int main(int argc, char **argv)
{
    int rank, size;

    MPI_Init(&argc, &argv);                     /* start the MPI runtime */
    MPI_Comm_size(MPI_COMM_WORLD, &size);       /* total process count   */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);       /* this process's rank   */
    printf("Hello world from process %d of %d\n",
           rank, size);
    MPI_Finalize();
    return 0;
}

4 MPI.
3 .
, ,
6 MPI,

. MPI
[5].
MPI- ()
MPI (
MPI_Init).
,
, ,
MPI_COMM_WORLD.
.
0
groupsize-1, groupsize .
groupsize ,
.
12

MPI_Init
:
C:
int MPI_Init(int *argc, char ***argv)

FORTRAN:
MPI_INIT(IERROR)
INTEGER IERROR


main,
.
IERROR .
MPI MPI_Finalize.
C:
int MPI_Finalize(void)

MPI-
.

MPI_Comm_size.
C:
int MPI_Comm_size(MPI_Comm comm, int *size)
IN
OUT

comm

- ;

size

-
comm.

(
IN ,
OUT , INOUT - ,
).

comm.


COMM MPI_COMM_WORLD
MPI_COMM_SELF,
MPI.
MPI_Comm_rank.
int MPI_Comm_rank(MPI_Comm comm, int *rank)

comm - ;
rank
- , .
,
. 0..size-1
( size
).
,
helloworld, , 4 ,
:
IN
OUT

Hello
Hello
Hello
Hello

world
world
world
world

from
from
from
from

process
process
process
process

0
3
1
2

of
of
of
of

4
4
4
4

,
MPI,
, ,
()
.
,
0 .
.

13

ring.c:
#include <stdio.h>
#include "mpi.h"

/*
 * Pipeline ("ring") example reconstructed from the scrambled listing:
 * rank 0 reads an integer from stdin and sends it to rank 1; every other
 * rank receives the value from rank-1 and, if it is not the last rank,
 * forwards it to rank+1. Each rank prints the value it got. The loop
 * repeats until a negative value is entered.
 *
 * NOTE(review): assumes at least 2 processes — with size == 1, rank 0
 * sends to a nonexistent rank 1 (same as the original listing).
 */
int main(int argc, char **argv)
{
    int rank, value, size;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    do {
        if (rank == 0) {
            /* Read the next value and start it down the pipeline.
               (scanf's return value is unchecked, as in the original.) */
            scanf("%d", &value);
            MPI_Send(&value, 1, MPI_INT, rank + 1,
                     0, MPI_COMM_WORLD);
        } else {
            /* Receive from the previous rank and forward it, unless we
               are the last rank in the pipeline. */
            MPI_Recv(&value, 1, MPI_INT, rank - 1,
                     0, MPI_COMM_WORLD, &status);
            if (rank < size - 1)
                MPI_Send(&value, 1, MPI_INT, rank + 1,
                         0, MPI_COMM_WORLD);
        }
        printf("Process %d got %d\n", rank, value);
    } while (value >= 0);   /* a negative value terminates every rank */

    MPI_Finalize();
    return 0;
}

6
MPI, .
9.
(MPI_Send, MPI_Recv),
.
MPI_Send.

IN

count datatype
tag dest
comm. buf - ,
, .
count = 1.
MPI_Send,
.
,
MPI_Bcast,
.
, MPI_Send,
, .

int MPI_Send(void* buf, int count,


MPI_Datatype datatype, int
dest, int tag, MPI_Comm
comm)

buf

-
;
IN count
- ;
IN datatype - ;
IN dest
- - ,
comm;
IN tag
- (

);
IN comm
- .
15

OUT

int MPI_Recv(void* buf, int count,


MPI_Datatype datatype, int
source, int tag, MPI_Comm comm,
MPI_Status *status)

buf

-
;
IN count
-
;
IN datatype
- ;
IN source
- -;
IN tag
- ;
IN comm
- ;
OUT status
- .
count datatype
tag source
comm.
MPI_Recv

MPI_ANY_SOURCE (" "),



MPI_ANY_TAG (" ").
-, MPI
,
16


0 32767.
,
MPI_Recv ,
-.

,

(MPI_Probe
MPI_Iprobe),
.
, " ",
MPI_Recv ,



.
ring.c .
Process
-1
Process
Process
Process
Process
Process
Process
Process

0 got 10
0
3
3
2
2
1
1

got
got
got
got
got
got
got

-1
10
-1
10
-1
10
-1


. ,
,
,

,
.
-
:
. MPI ,
.
,
,
17

, . C
MPI_Send/MPI_Recv

.
,
MPI,


MPI.
, ,
[4].
,
,
.

..
( deadlock, );

.
deadlock-
.
1. 1 :
Recv ( 2 )
Send ( 2 )
2 :
Recv ( 1 )
Send ( 1 )
1 deadlock
, ..
, ; -

, ...
.. .
2. 1 :
18

Send ( 2 )
Recv ( 2 )
2 :
Send ( 1 )
Recv ( 1 )

1.
MPI- ring.c,
.

, (
,
) deadlock
. MPI
: MPI_Send,
( MPI_Recv),
,

. MPI_Recv

(..
MPI

). .. (.
)
(
)
.

2.

(MPI_Send MPI_Recv)
MPI_Bcast,

.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
/*
 * Fills x[0..N-1] with pseudo-random values from rand().
 *
 * BUG FIX: the printed listing had for(i = 0; i++; i < N) — the
 * condition and increment were swapped, so the condition `i++`
 * evaluated to 0 on the first pass and the loop body never ran.
 */
void DataInitialization(double *x, int N){
    int i;
    for(i = 0; i < N; i++){
        x[i] = rand();   /* rand() yields an int in [0, RAND_MAX] */
    }
}

4.

MPI-, ,

.

/*
 * Parallel sum example (opening fragment; the rest of main is
 * interleaved with exercise text further down the document).
 */
int main(int argc, char* argv[]){
    int i;
    double x[100], TotalSum = 0.0, ProcSum = 0.0;
    int ProcRank, ProcNum, N=100, k, i1, i2;
    MPI_Status Status;
    /* FIX: in the printed listing a comment marker was fused onto this
       line ("// MPI_Init(&argc,&argv);"), accidentally commenting out
       the required MPI initialization call. */
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&ProcNum);   /* number of processes */
    MPI_Comm_rank(MPI_COMM_WORLD,&ProcRank);  /* this process's rank */
20

19

// Rank 0 generates the input data (the source comment text was lost).
if ( ProcRank == 0 ) DataInitialization(x,N);

// Broadcast the full array from rank 0 to every process.
MPI_Bcast(x, N, MPI_DOUBLE, 0,
MPI_COMM_WORLD);

5.
1.
1. ring.c.

//
// x
// i1 i2

2.
1. .
2.

.
3.

/* Each process sums its contiguous slice x[i1..i2). */
k = N / ProcNum;
i1 = k * ProcRank;
i2 = k * ( ProcRank + 1 );
if ( ProcRank == ProcNum-1 ) i2 = N;   /* last rank takes the remainder */
for ( i = i1; i < i2; i++ )
    ProcSum = ProcSum + x[i];
/* Rank 0 collects the partial sums from every other process. */
if ( ProcRank == 0 ) {
    TotalSum = ProcSum;
    for ( i = 1; i < ProcNum; i++ ) {
        /* FIX: MPI_ANY_SOURCE was split across two lines by the
           line wrap in the printed listing. */
        MPI_Recv(&ProcSum, 1, MPI_DOUBLE, MPI_ANY_SOURCE, 0,
                 MPI_COMM_WORLD, &Status);
        TotalSum = TotalSum + ProcSum;
    }
}
else /* worker ranks send their partial sum to rank 0 */
    MPI_Send(&ProcSum, 1, MPI_DOUBLE, 0, 0,
             MPI_COMM_WORLD);
/* Print the result on rank 0 only. */
if ( ProcRank == 0 )
    printf("\nTotal Sum = %10.2f",TotalSum);
MPI_Finalize();
21

return 0;

5.
:
1.
?
2.
?
3. ?
4. MPI
?
5. MPI
?
6. MPI ?
6.
1. .., ..
. .: -, 2002 .
2. ..
: / .. .- .:
22

- ;
. , 2007. 423 .
3. .. .
I. . . , . 2002, 92 .
4. .. .
: . / .: .., .., ..,
.., ..; . , .
. . -. . 2000. 87 .

23