MPI_BARRIER not working

Posted on 2024-10-16 04:11:29

Why is the barrier here not working? If I use it, the program blocks; otherwise I get the output in a weird order:

Number of worker tasks = 4
   sending 1-th element q=0.011000 to task 1
   sending 2-th element q=0.012000 to task 2
   received 1-th element q=0.011000 in task 1
   processed 1-th element q=6.105000 in task 1
   sending 3-th element q=0.013000 to task 3
   received 2-th element q=0.012000 in task 2
   processed 2-th element q=13.320000 in task 2
   sending 4-th element q=0.014000 to task 4
   received 3-th element q=0.013000 in task 3
   processed 3-th element q=21.645000 in task 3
 starting to get data in MASTER
   MASTER received 0-th element q=6.105000 from task 1
   MASTER received 0-th element q=13.320000 from task 2
   received 4-th element q=0.014000 in task 4
   processed 4-th element q=31.080000 in task 4
   MASTER received 0-th element q=21.645000 from task 3
   MASTER received 0-th element q=31.080000 from task 4
 end

Code:

#include "mpi.h"               /* required MPI library */
#include <stdio.h>
#include <math.h>

#define NRRR 16               /* number of rows in matrix A */
#define NLLL 16                 /* number of columns in matrix A */
#define MASTER 0               /* taskid of first task */
#define FROM_MASTER 1          /* setting a message type */
#define FROM_WORKER 2          /* setting a message type */

int main(int argc, char *argv[])
{
    int numtasks,              /* number of tasks in partition */
        taskid,                /* a task identifier */
        numworkers,            /* number of worker tasks */
        source,                /* task id of message source */
        dest,                  /* task id of message destination */
        mtype,                 /* message type */
        i, j,
        rc;                    /* return code */
    double qr[NRRR],
           ql[NLLL],
           element_r[NRRR][3],
           element_l[NLLL][3];


    MPI_Status status;  
    rc = MPI_Init(&argc,&argv);
    rc|= MPI_Comm_size(MPI_COMM_WORLD,&numtasks);   
    rc|= MPI_Comm_rank(MPI_COMM_WORLD,&taskid); 

    if (rc != 0)
        printf ("error initializing MPI and obtaining task ID info\n");

    numworkers = numtasks-1;

    // MASTER
    if (taskid == MASTER)
    {
        printf("\n\n\n\nNumber of worker tasks = %d\n",numworkers);

        // init element_r and element_l
        for(j=0;j<NRRR;j++){
            element_r[j][0]=j;
            element_r[j][1]=j+1;
            element_r[j][2]=j+2;
            qr[j] = j*1e-4+1e-3;
        }

        for(i=0;i<NLLL;i++){
            element_l[i][0]=12000+i;
            element_l[i][1]=12000+i+1;
            element_l[i][2]=12000+i+2;
            ql[i] = i*1e-3 +1e-2 ;
        }

        mtype = FROM_MASTER;
        for (dest=1; dest<=numworkers; dest++)
        {
            printf("   sending %d-th element q=%f to task %d\n",dest,ql[dest],dest);
            MPI_Send(&ql[dest], 1, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);                        
        }


        mtype = FROM_WORKER;
        printf(" starting to get data in MASTER\n");
        for (i=1; i<=numworkers; i++)
        {
            source = i;
            MPI_Recv(&ql[source], 1, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
        } 
        MPI_Barrier(MPI_COMM_WORLD); 
        for (i=1; i<=numworkers; i++)
        {
            source = i;
            printf("   MASTER received %d-th element q=%f from task %d\n",taskid,ql[source],source);            
        } 
        printf(" end\n");

    }

    // WORKER
    if (taskid > MASTER)
    {
        mtype = FROM_MASTER;
        MPI_Recv(ql, 1, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
        printf("   received %d-th element q=%f in task %d\n",taskid,ql[0],taskid);
        ql[0]=ql[0]*555*taskid;
        printf("   processed %d-th element q=%f in task %d\n",taskid,ql[0],taskid);

        mtype = FROM_WORKER;
        MPI_Send(ql, 1, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
    }

    MPI_Finalize();

    return 0;
}


Comments (1)

会发光的星星闪亮亮i 2024-10-23 04:11:29

MPI_Barrier is a collective communication call: it blocks until every process in the communicator passed to it has called it. Since your worker processes never call MPI_Barrier, the master blocks indefinitely.
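
A minimal, self-contained sketch of the fix (the variable names and the fixed array size are illustrative, not taken from your code): move the MPI_Barrier out of the master-only branch so that every rank in MPI_COMM_WORLD reaches it.

/* Sketch: every rank calls MPI_Barrier, so nobody hangs.
   Assumes at most 16 ranks, e.g. mpirun -np 5 ./a.out */
#include <mpi.h>
#include <stdio.h>

#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2

int main(int argc, char *argv[])
{
    int numtasks, taskid, i;
    double q[16];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);

    if (taskid == MASTER) {
        for (i = 0; i < 16; i++) q[i] = i * 1e-3 + 1e-2;

        /* distribute one element to each worker */
        for (i = 1; i < numtasks; i++)
            MPI_Send(&q[i], 1, MPI_DOUBLE, i, FROM_MASTER, MPI_COMM_WORLD);

        /* collect the processed elements */
        for (i = 1; i < numtasks; i++)
            MPI_Recv(&q[i], 1, MPI_DOUBLE, i, FROM_WORKER, MPI_COMM_WORLD, &status);
    } else {
        double v;
        MPI_Recv(&v, 1, MPI_DOUBLE, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
        v = v * 555 * taskid;
        MPI_Send(&v, 1, MPI_DOUBLE, MASTER, FROM_WORKER, MPI_COMM_WORLD);
    }

    /* The barrier sits OUTSIDE the rank-specific branches, so all ranks in
       MPI_COMM_WORLD call it and it completes. With it inside the master
       branch only, the master waits for calls that never happen. */
    MPI_Barrier(MPI_COMM_WORLD);

    if (taskid == MASTER) {
        for (i = 1; i < numtasks; i++)
            printf("   MASTER received q=%f from task %d\n", q[i], i);
        printf(" end\n");
    }

    MPI_Finalize();
    return 0;
}

Note that the blocking MPI_Recv loop already synchronizes the master with each worker, so in this particular exchange the barrier is not strictly needed; it also cannot enforce the ordering of the printf lines, because each rank's stdout is forwarded to the terminal independently by the MPI runtime.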
