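/*
 * MPI parallelization of the 3D simulation (interface in seq-mpi.h).
 *
 * The master (rank 0) cuts the field of view into slices of
 * mpi_slice_width planes along x and farms them out to the slave
 * nodes as tasks (tag WORKTAG).  Each slave runs the sequence kernel
 * on its slice and returns the updated magnetization state plus its
 * signal contribution; DIETAG shuts a slave down.  Without HAVE_MPI
 * the task is simply run sequentially.
 */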
#include "seq-mpi.h"
00060
#include <stdlib.h>
00061
00062
#include "simri3d.h"
00063
#include "experience.h"
00064
00065
#ifdef HAVE_MPI
00066
#include <mpi.h>
00067
00068
#ifdef MPI_DEBUG
/* Debug helper: print the min/avg/max of the magnetization state
   (etat) over the slice starting at plane x. */
static void
mpi_context_print_stats (EXPERIENCE3D *expr, int x)
{
  float min1, max1, avg1 = 0;
  long nb1 = 0;
  int ix, iy, iz, i;
  float e;
  int slice_width;

  slice_width = mpi_get_slice_width (expr, x);

  printf ("x=%d, ", x);
  min1 = max1 = expr->magstate[0][x][0][0];
  for (i = 0; i < 3; i++)
    for (ix = x; ix < x + slice_width; ix++)
      for (iy = 0; iy < expr->nfovy; iy++)
        for (iz = 0; iz < expr->nfovz; iz++) {
          e = expr->magstate[i][ix][iy][iz];
          nb1++;
          avg1 += e;
          if (e > max1) max1 = e;
          if (e < min1) min1 = e;
        }
  avg1 = avg1 / nb1;
  printf ("etat->[%.f,%.f,%.f], ", min1, avg1, max1);
}
#endif

/* Receive one slice of the magnetization state (3 components, slice
   starting at plane x) from node src and copy it into expr->magstate. */
static void
mpi_recv_context (EXPERIENCE3D *expr, int x, int src, MPI_Status status)
{
  int ix, iy;
  static int run_nb = 0;
  static int buffer_size = 0;
  static double *buffer;
  double *index;
  int slice_width;
#ifdef MPI_DEBUG
  int myrank;
#endif

#ifdef MPI_DEBUG
  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
#endif

  /* On the first call, allocate a transfer buffer large enough for a
     full-width slice; later calls reuse it. */
  if (run_nb == 0) {
    buffer_size = expr->nfovz * 3 * expr->nfovy * expr->mpi_slice_width;
    buffer = (double *) malloc (buffer_size * sizeof (double));
  }

  /* The last slice may be narrower than mpi_slice_width. */
  slice_width = mpi_get_slice_width (expr, x);
  buffer_size = expr->nfovz * 3 * expr->nfovy * slice_width;

  MPI_Recv (buffer, buffer_size, MPI_DOUBLE,
            src, WORKTAG, MPI_COMM_WORLD, &status);

#ifdef MPI_DEBUG
  /* Stats of the local state before the received slice is applied. */
  printf ("[node %d]: mpi_recv_context from %d : before recv() ",
          myrank, status.MPI_SOURCE);
  mpi_context_print_stats (expr, x);
#endif

  /* Unpack the buffer: for each (ix,iy) column, the three vector
     components are stored back to back along z. */
  index = buffer;
  for (ix = x; ix < x + slice_width; ix++)
    for (iy = 0; iy < expr->nfovy; iy++) {
      memcpy (expr->magstate[0][ix][iy], index, expr->nfovz * sizeof (double));
      index += expr->nfovz;
      memcpy (expr->magstate[1][ix][iy], index, expr->nfovz * sizeof (double));
      index += expr->nfovz;
      memcpy (expr->magstate[2][ix][iy], index, expr->nfovz * sizeof (double));
      index += expr->nfovz;
    }

#ifdef MPI_DEBUG
  printf ("[node %d]: mpi_recv_context from %d : after recv() ",
          myrank, status.MPI_SOURCE);
  mpi_context_print_stats (expr, x);
#endif

  run_nb++;
}
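
/* Note: the context wire format (for each (x,y) column, the three
   magnetization components stored back to back along z, as doubles)
   must stay identical between mpi_send_context() below and the
   unpacking loop above. */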

/* Receive a slave's signal buffer and accumulate it into the master's
   signal array expr->sgn (the three components are summed in place). */
static void
mpi_recv_and_update_signal (EXPERIENCE3D *expr, int src, MPI_Status status)
{
  int ix, iy, iz;
  static int run_nb = 0;
  static int buffer_size = 0;
  static float *buffer;
  float *index;
#ifdef MPI_DEBUG
  int myrank;
#endif

#ifdef MPI_DEBUG
  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
#endif

  /* Allocate the transfer buffer once; the signal size is fixed. */
  if (run_nb == 0) {
    buffer_size = 3 * expr->ntx * expr->nty * expr->ntz;
    buffer = (float *) malloc (buffer_size * sizeof (float));
  }

#ifdef MPI_DEBUG
  printf ("[node %d]: mpi_recv_signal from %d : before recv() ",
          myrank, status.MPI_SOURCE);
#endif
  MPI_Recv (buffer, buffer_size, MPI_FLOAT,
            src, WORKTAG, MPI_COMM_WORLD, &status);

  /* Accumulate: each slave computes the signal of its slice only, so
     the master sums the contributions component by component. */
  index = buffer;
  for (ix = 0; ix < expr->ntx; ix++)
    for (iy = 0; iy < expr->nty; iy++) {
      for (iz = 0; iz < expr->ntz; iz++)
        expr->sgn[0][ix][iy][iz] += *(index++);
      for (iz = 0; iz < expr->ntz; iz++)
        expr->sgn[1][ix][iy][iz] += *(index++);
      for (iz = 0; iz < expr->ntz; iz++)
        expr->sgn[2][ix][iy][iz] += *(index++);
    }

#ifdef MPI_DEBUG
  printf ("[node %d]: mpi_recv_signal from %d : after recv() ",
          myrank, status.MPI_SOURCE);
#endif

  run_nb++;
}

/* Pack one slice of the magnetization state (3 components, slice
   starting at plane x) and send it to node dst.  The packing loop is
   the mirror image of the unpacking loop in mpi_recv_context(). */
static void
mpi_send_context (EXPERIENCE3D *expr, int x, int dst)
{
  int ix, iy;
  static int run_nb = 0;
  static int buffer_size = 0;
  static double *buffer;
  double *index;
  int slice_width;
#ifdef MPI_DEBUG
  int myrank;
#endif

#ifdef MPI_DEBUG
  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
#endif

  /* Allocate the transfer buffer once, sized for a full-width slice. */
  if (run_nb == 0) {
    buffer_size = expr->nfovz * 3 * expr->nfovy * expr->mpi_slice_width;
    buffer = (double *) malloc (buffer_size * sizeof (double));
  }

  slice_width = mpi_get_slice_width (expr, x);
  buffer_size = expr->nfovz * 3 * expr->nfovy * slice_width;

  index = buffer;
  for (ix = x; ix < x + slice_width; ix++)
    for (iy = 0; iy < expr->nfovy; iy++) {
      memcpy (index, expr->magstate[0][ix][iy], expr->nfovz * sizeof (double));
      index += expr->nfovz;
      memcpy (index, expr->magstate[1][ix][iy], expr->nfovz * sizeof (double));
      index += expr->nfovz;
      memcpy (index, expr->magstate[2][ix][iy], expr->nfovz * sizeof (double));
      index += expr->nfovz;
    }

#ifdef MPI_DEBUG
  printf ("[node %d]: mpi_send_context to node %d : ", myrank, dst);
  mpi_context_print_stats (expr, x);
#endif

  MPI_Send (buffer, buffer_size, MPI_DOUBLE, dst, WORKTAG, MPI_COMM_WORLD);
  run_nb++;
}

/* Pack this node's signal contribution and send it to node dst. */
static void
mpi_send_signal (EXPERIENCE3D *expr, int dst)
{
  int ix, iy;
  static int run_nb = 0;
  static int buffer_size = 0;
  static float *buffer;
  float *index;
#ifdef MPI_DEBUG
  int myrank;
#endif

#ifdef MPI_DEBUG
  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
#endif

  /* Allocate the transfer buffer once; the signal size is fixed. */
  if (run_nb == 0) {
    buffer_size = 3 * expr->ntx * expr->nty * expr->ntz;
    buffer = (float *) malloc (buffer_size * sizeof (float));
  }

  index = buffer;
  for (ix = 0; ix < expr->ntx; ix++)
    for (iy = 0; iy < expr->nty; iy++) {
      memcpy (index, expr->sgn[0][ix][iy], expr->ntz * sizeof (float));
      index += expr->ntz;
      memcpy (index, expr->sgn[1][ix][iy], expr->ntz * sizeof (float));
      index += expr->ntz;
      memcpy (index, expr->sgn[2][ix][iy], expr->ntz * sizeof (float));
      index += expr->ntz;
    }

#ifdef MPI_DEBUG
  printf ("[node %d]: mpi_send_signal to node %d : ", myrank, dst);
#endif

  MPI_Send (buffer, buffer_size, MPI_FLOAT, dst, WORKTAG, MPI_COMM_WORLD);
  run_nb++;
}

/* Master side of the job distribution.  The field of view is cut into
   slices of mpi_slice_width planes along x; each slice is a task.  The
   master first gives every slave one task (the start plane plus the
   current magnetization state), then hands out the remaining tasks as
   slaves report back, and finally sends DIETAG to shut the slaves down. */
void mpi_job_master (SEQPARAM *seqparam, EXPERIENCE3D *expr)
{
  MPI_Status status;
  int assigned_node_nb;
  int completed_node_nb;
  int node_nb;
  int max_node_nb;
  int assigned_task_nb;
  int completed_task_nb;
  int died_task_nb = 0;
  int dummy = 0;  /* DIETAG payload; initialized so we never send garbage */
  static short *task_table = NULL;
  int i;
  int value;
#ifdef MPI_DEBUG
  int myrank;
#endif

#ifdef MPI_DEBUG
  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
#endif
  MPI_Comm_size (MPI_COMM_WORLD, &node_nb);

  if (task_table == NULL)
    task_table = (short *) calloc (expr->nfovx, sizeof (short));

  /* Never start more slaves than there are slices. */
  max_node_nb = node_nb - 1;
  if (max_node_nb * expr->mpi_slice_width > expr->nfovx)
    max_node_nb = ceil ((double) expr->nfovx / expr->mpi_slice_width);
#ifdef MPI_DEBUG
  printf ("[node %d]: max_node_nb=%d\n", myrank, max_node_nb);
#endif

  /* Initial round: assign one task to every slave. */
  for (assigned_task_nb = 0, assigned_node_nb = 1;
       assigned_node_nb <= max_node_nb;
       assigned_task_nb += expr->mpi_slice_width, assigned_node_nb++) {
#ifdef MPI_DEBUG
    printf ("[node %d]: assign task %d to node %d\n", myrank,
            assigned_task_nb, assigned_node_nb);
#endif
    MPI_Send (&assigned_task_nb, 1, MPI_INT,
              assigned_node_nb, WORKTAG, MPI_COMM_WORLD);

    /* Send the magnetization state of that slice along with the task. */
    mpi_send_context (expr, assigned_task_nb, assigned_node_nb);
#ifdef MPI_DEBUG
    printf ("[node %d]: sent current etat[] "
            "for task %d to node %d\n",
            myrank, assigned_task_nb, assigned_node_nb);
#endif
    task_table[assigned_task_nb] = assigned_node_nb;
  }

  /* Main loop: collect results and hand out the remaining tasks. */
  for (;;) {
    MPI_Recv (&value, 1, MPI_INT,
              MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

    if (status.MPI_TAG == WORKTAG) {
      /* The slave reports the task number it completed, then sends
         back the updated state and its signal contribution. */
      mpi_recv_context (expr, value, status.MPI_SOURCE, status);
#ifdef MPI_DEBUG
      printf ("[node %d]: received current etat[] "
              "from node %d\n", myrank, status.MPI_SOURCE);
#endif
      mpi_recv_and_update_signal (expr, status.MPI_SOURCE, status);
      completed_task_nb = value;
      completed_node_nb = status.MPI_SOURCE;
#ifdef MPI_DEBUG
      printf ("[node %d]: node %d completed task %d\n",
              myrank, completed_node_nb, completed_task_nb);
#endif

      if (assigned_task_nb < expr->nfovx) {
        /* Tasks remain: reassign the freed slave. */
#ifdef MPI_DEBUG
        printf ("[node %d]: assign task %d to node %d\n",
                myrank, assigned_task_nb, completed_node_nb);
#endif
        MPI_Send (&assigned_task_nb, 1, MPI_INT, completed_node_nb,
                  WORKTAG, MPI_COMM_WORLD);
        mpi_send_context (expr, assigned_task_nb, completed_node_nb);
#ifdef MPI_DEBUG
        printf ("[node %d]: sent current etat[] "
                "for task %d to node %d\n",
                myrank, assigned_task_nb, completed_node_nb);
#endif
        task_table[assigned_task_nb] = completed_node_nb;
        assigned_task_nb += expr->mpi_slice_width;
      } else {
        /* No tasks left: tell this slave to quit. */
#ifdef MPI_DEBUG
        printf ("[node %d]: asking node %d to give up\n",
                myrank, completed_node_nb);
#endif
        MPI_Send (&dummy, 1, MPI_INT, completed_node_nb,
                  DIETAG, MPI_COMM_WORLD);
        died_task_nb++;
        if (died_task_nb == max_node_nb) {
          /* Also dismiss any slaves that never got a task because
             there were fewer slices than nodes. */
          if (died_task_nb < node_nb - 1) {
            for (i = died_task_nb + 1; i < node_nb; i++) {
              MPI_Send (&dummy, 1, MPI_INT, i, DIETAG, MPI_COMM_WORLD);
#ifdef MPI_DEBUG
              printf ("[node %d]: asking node %d to give up\n",
                      myrank, i);
#endif
            }
          }
#ifdef MPI_DEBUG
          printf ("[node %d]: all slave nodes finished, quitting...\n",
                  myrank);
#endif
          return;
        }
      }
    }
  }
}

/* Slave side: loop forever, receiving a task (start plane x0) and the
   matching magnetization state, running the sequence kernel over the
   slice, and sending back the state and the signal.  DIETAG ends the
   loop. */
void mpi_job_slave (SEQPARAM *seqparam, EXPERIENCE3D *expr, task_cb task)
{
  MPI_Status status;
  int x0;
  int k, ix, iy, iz;
#ifdef MPI_DEBUG
  int myrank;
#endif

#ifdef MPI_DEBUG
  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
#endif

  while (1) {
    MPI_Recv (&x0, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

    if (status.MPI_TAG == DIETAG) {
#ifdef MPI_DEBUG
      printf ("[node %d]: quitting\n", myrank);
#endif
      break;
    }
#ifdef MPI_DEBUG
    printf ("[node %d]: received job x=%d\n", myrank, x0);
#endif

    if (status.MPI_TAG == WORKTAG) {
      /* Fetch the state of the assigned slice from the master. */
      mpi_recv_context (expr, x0, 0, status);

      /* Reset the local signal: only this task's contribution is
         reported back; the master does the summing. */
      for (k = 0; k < 3; k++)
        for (ix = 0; ix < expr->ntx; ix++)
          for (iy = 0; iy < expr->nty; iy++)
            for (iz = 0; iz < expr->ntz; iz++)
              expr->sgn[k][ix][iy][iz] = 0.0;
#ifdef MPI_DEBUG
      printf ("[node %d]: received current etat[] "
              "from node %d\n", myrank, status.MPI_SOURCE);
#endif
    }

    expr->mpi_slice_x0 = x0;

    /* Run the actual simulation kernel on the slice. */
    task (seqparam, expr);

    /* Report completion, then return the updated state and signal. */
    MPI_Send (&x0, 1, MPI_INT, 0, WORKTAG, MPI_COMM_WORLD);
    mpi_send_context (expr, x0, 0);
#ifdef MPI_DEBUG
    printf ("[node %d]: sent current etat[] to node %d.\n",
            myrank, 0);
#endif
    mpi_send_signal (expr, 0);
  }
}

/* Entry point: rank 0 acts as the master, all other ranks as slaves. */
void mpi_job (SEQPARAM *seqparam, EXPERIENCE3D *expr, task_cb task)
{
  int myrank;

  MPI_Comm_rank (MPI_COMM_WORLD, &myrank);

  expr->mpi_slice_width = SLICE_WIDTH;

  if (myrank == 0)
    mpi_job_master (seqparam, expr);
  else
    mpi_job_slave (seqparam, expr, task);
}
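
/* Usage sketch (illustrative; my_sequence_kernel is a hypothetical
 * task_cb supplied by the caller).  MPI is assumed to be initialized
 * before mpi_job is called, with the same call made on every rank:
 *
 *   MPI_Init (&argc, &argv);
 *   mpi_job (seqparam, expr, my_sequence_kernel);
 *   MPI_Finalize ();
 */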

/* Width of the slice starting at plane x: mpi_slice_width, clipped at
   the edge of the field of view. */
int mpi_get_slice_width (EXPERIENCE3D *expr, int x)
{
  if (x + expr->mpi_slice_width > expr->nfovx)
    return expr->nfovx - x;
  else
    return expr->mpi_slice_width;
}
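
/* Example: with nfovx = 10 and mpi_slice_width = 4, tasks start at
   x = 0, 4 and 8, and get widths 4, 4 and 2 respectively. */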

#else  /* !HAVE_MPI */

/* Without MPI, run the task sequentially over the whole volume. */
void mpi_job (SEQPARAM *seqparam, EXPERIENCE3D *expr, task_cb task)
{
  task (seqparam, expr);
}

#endif /* HAVE_MPI */