MPI_Waitall with non-blocking receives not working


I get a 0xc0000005 error code while waiting for all the receives from the rank != 0 processes. I'm sending a blocking send from each rank != 0 process and want to receive them non-blocking, one per process, inside the for loop. Later I wait for all the receives with MPI_Waitall, but that is where the error occurs.

if (rank == 0)
{
    grids* sudokuList;
    
    sudokuList = initParallel(process_count);

    int* data;
    data = (int*)malloc((process_count - 1) * sizeof(int));
    MPI_Request* recvRequests = (MPI_Request*)malloc((process_count - 1) * sizeof(MPI_Request));
    MPI_Request* sendRequests = (MPI_Request*)malloc((process_count - 1) * sizeof(MPI_Request));
    grids* ptr;
    ptr = sudokuList;

    for (int i = 1; i < process_count; i++)
    {
        MPI_Isend(ptr->grid.sudoku, N * N, MPI_INT, i, 0, MPI_COMM_WORLD, &sendRequests[i - 1]);
        MPI_Irecv(data+i, 1, MPI_INT, i, 2, MPI_COMM_WORLD, &recvRequests[i - 1]);
        ptr = ptr->next;
    }
    if (ptr != NULL)
    {
        lookForSolution(ptr->grid, rank);
        ptr = ptr->next;
    }
    MPI_Status* sendStatuses = (MPI_Status*)malloc((process_count - 1) * sizeof(MPI_Status));
    MPI_Waitall(process_count - 1, sendRequests, sendStatuses);

    // CRITICAL CODE
    // 

    MPI_Status* statuses = (MPI_Status*)malloc((process_count - 1) * sizeof(MPI_Status));
    MPI_Waitall(process_count - 1, recvRequests, statuses);

    free(recvRequests);
    free(sendStatuses);
    free(sendRequests);
    free(data);
    delete_grids(sudokuList);

}
else
{
    lookForSolution(recvGrid(), rank);

    // CRITICAL CODE
    MPI_Send(1, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
}
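To make the intended data flow concrete, here is a minimal, self-contained sketch of the same handshake: rank 0 posts one MPI_Irecv per worker and waits with MPI_Waitall, each worker answers with a blocking MPI_Send of a single int. The sudoku-specific parts are stripped out, and the name `result` is only illustrative; this is a sketch of the pattern, not the original program.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank, process_count;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &process_count);

    if (rank == 0)
    {
        // one result slot and one request per worker: worker rank i maps to index i - 1
        int* data = (int*)malloc((process_count - 1) * sizeof(int));
        MPI_Request* recvRequests =
            (MPI_Request*)malloc((process_count - 1) * sizeof(MPI_Request));

        for (int i = 1; i < process_count; i++)
        {
            // data + (i - 1) stays inside the (process_count - 1)-element buffer
            MPI_Irecv(data + (i - 1), 1, MPI_INT, i, 2, MPI_COMM_WORLD,
                      &recvRequests[i - 1]);
        }

        // MPI_STATUSES_IGNORE avoids allocating a status array that is never read
        MPI_Waitall(process_count - 1, recvRequests, MPI_STATUSES_IGNORE);

        for (int i = 1; i < process_count; i++)
            printf("result from rank %d: %d\n", i, data[i - 1]);

        free(recvRequests);
        free(data);
    }
    else
    {
        // MPI_Send takes the address of a buffer, not a literal value
        int result = 1;
        MPI_Send(&result, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}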
