MPI_Waitall with non-blocking receives fails with 0xc0000005
I get a 0xc0000005 error code when waiting for all receives from the rank != 0 processes. I'm sending a blocking send from each rank != 0 process and want to receive them non-blocking, one per process, in the for loop. Later I wait for all the receives with MPI_Waitall, but that's when the error occurs.
if (rank == 0)
{
    grids* sudokuList;
    sudokuList = initParallel(process_count);
    int* data;
    data = (int*)malloc((process_count - 1) * sizeof(int));
    MPI_Request* recvRequests = (MPI_Request*)malloc((process_count - 1) * sizeof(MPI_Request));
    MPI_Request* sendRequests = (MPI_Request*)malloc((process_count - 1) * sizeof(MPI_Request));
    grids* ptr;
    ptr = sudokuList;
    // Post one non-blocking send (a grid) and one non-blocking
    // receive (the worker's result) per worker rank.
    for (int i = 1; i < process_count; i++)
    {
        MPI_Isend(ptr->grid.sudoku, N * N, MPI_INT, i, 0, MPI_COMM_WORLD, &sendRequests[i - 1]);
        MPI_Irecv(data + i, 1, MPI_INT, i, 2, MPI_COMM_WORLD, &recvRequests[i - 1]);
        ptr = ptr->next;
    }
    // Rank 0 works on a grid of its own while the workers run.
    if (ptr != NULL)
    {
        lookForSolution(ptr->grid, rank);
        ptr = ptr->next;
    }
    MPI_Status* sendStatuses = (MPI_Status*)malloc((process_count - 1) * sizeof(MPI_Status));
    MPI_Waitall(process_count - 1, sendRequests, sendStatuses);
    // CRITICAL CODE
    MPI_Status* statuses = (MPI_Status*)malloc((process_count - 1) * sizeof(MPI_Status));
    MPI_Waitall(process_count - 1, recvRequests, statuses);
    free(recvRequests);
    free(sendStatuses);
    free(sendRequests);
    free(data);
    delete_grids(sudokuList);
}
else
{
    lookForSolution(recvGrid(), rank);
    // CRITICAL CODE
    MPI_Send(1, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
}
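Two likely culprits are visible in the posted code, both at the marked CRITICAL lines, and either one can produce a 0xc0000005 access violation. First, data holds process_count - 1 ints (valid slots 0 .. process_count - 2), but the loop receives into data + i with i running up to process_count - 1, so the last receive lands one element past the end of the buffer; the crash surfaces inside MPI_Waitall because that is where the receive actually completes. Second, MPI_Send takes the address of a buffer, not a value: passing the literal 1 makes MPI read from address 0x1. A sketch of the two drop-in fixes, using the variable names from the question:

    /* Receive worker i's result into slot i - 1, matching recvRequests[i - 1];
     * data + i overruns the (process_count - 1)-element buffer when
     * i == process_count - 1. */
    MPI_Irecv(data + (i - 1), 1, MPI_INT, i, 2, MPI_COMM_WORLD, &recvRequests[i - 1]);

    /* Send the address of an int, not the literal value 1. */
    int result = 1;  /* hypothetical name for the value reported back to rank 0 */
    MPI_Send(&result, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);

As a side note, statuses is allocated but never freed; if the statuses are not inspected, passing MPI_STATUSES_IGNORE to MPI_Waitall avoids the allocation entirely.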
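For reference, here is a minimal self-contained sketch of the same fan-out/fan-in pattern with correct indexing. A toy integer payload stands in for the sudoku grid, and doubling the value stands in for lookForSolution; the outbound sends are blocking so no loop-local buffer outlives its send request.

    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank, process_count;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &process_count);

        if (rank == 0)
        {
            int* data = malloc((process_count - 1) * sizeof(int));
            MPI_Request* recvRequests = malloc((process_count - 1) * sizeof(MPI_Request));
            for (int i = 1; i < process_count; i++)
            {
                int work = i;  /* stand-in for the sudoku grid payload */
                MPI_Send(&work, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                /* slot i - 1 keeps data[] and recvRequests[] in step */
                MPI_Irecv(data + (i - 1), 1, MPI_INT, i, 2, MPI_COMM_WORLD,
                          &recvRequests[i - 1]);
            }
            MPI_Waitall(process_count - 1, recvRequests, MPI_STATUSES_IGNORE);
            for (int i = 1; i < process_count; i++)
                printf("result from rank %d: %d\n", i, data[i - 1]);
            free(recvRequests);
            free(data);
        }
        else
        {
            int work, result;
            MPI_Recv(&work, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            result = work * 2;  /* stand-in for lookForSolution */
            MPI_Send(&result, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
        }
        MPI_Finalize();
        return 0;
    }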