I want to write a parallel code that works on a 3D matrix, where each process has its own sub-matrix, but to do its job each process needs some information about the neighbouring processes' sub-matrices (just the boundary planes). I send this information with point-to-point communication, but I know that for a large matrix this is not a good idea, so I decided to use a derived datatype for the communication. I have a problem with MPI_Type_vector: for example, I have an NX*NY*NZ matrix and I want to send the plane with constant NY to another process, so I wrote these lines to do it:
MPI_Datatype sub;
MPI_Type_vector(NX, NZ, NY*NZ, MPI_DOUBLE, &sub);
MPI_Type_commit(&sub);
but it doesn't work (it cannot send the plane I want). What is wrong? My test code is here:
#include <mpi.h>
#include <iostream>
using namespace std;

int main(int argc, char **argv)
{
    int const IE = 100, JE = 25, KE = 100;
    int size, rank;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Datatype sub;
    MPI_Type_vector(KE, IE, IE+(JE-1)*IE, MPI_DOUBLE, &sub);
    MPI_Type_commit(&sub);

    if (rank == 0) {
        double ***a = new double**[IE];
        for (int i = 0; i < IE; i++) {
            a[i] = new double *[JE];
            for (int j = 0; j < JE; j++) {
                a[i][j] = new double[KE];
            }
        }
        for (int i = 0; i < IE; i++) {
            for (int j = 0; j < JE; j++) {
                for (int k = 0; k < KE; k++) {
                    a[i][j][k] = 2;
                }
            }
        }
        for (int i = 0; i < IE; i++) {
            for (int j = 0; j < JE; j++) {
                a[i][j][0] = 2;
            }
        }
        MPI_Send(&a[0][0][0], 1, sub, 1, 52, MPI_COMM_WORLD);
    }
    if (rank == 1) {
        double ***b = new double**[IE];
        for (int i = 0; i < IE; i++) {
            b[i] = new double *[JE];
            for (int j = 0; j < JE; j++) {
                b[i][j] = new double[KE];
            }
        }
        for (int i = 0; i < IE; i++) {
            for (int j = 0; j < JE; j++) {
                for (int k = 0; k < KE; k++) {
                    b[i][j][k] = 0;
                }
            }
        }
        MPI_Recv(&b[0][0][0], 1, sub, 0, 52, MPI_COMM_WORLD, &status);
        for (int i = 0; i < IE; i++) {
            for (int j = 0; j < JE; j++) {
                for (int k = 0; k < KE; k++) {
                    if (b[i][j][k] > 0) {
                        cout << "b[" << i << "][" << j << "][" << k << "]=" << b[i][j][k] << endl;
                    }
                }
            }
        }
    }
    MPI_Finalize();
}
With a 3d matrix, in general you'd have to use a vector of vectors (because there are two strides involved) - which is possible, but it is much simpler to use MPI_Type_create_subarray(), which just lets you carve out the slab of a multidimensional array you want.
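For example, a subarray type for the plane j = j0 of an IE x JE x KE array might look roughly like this, assuming the array is stored as one contiguous block (j0, dest and tag are placeholder names, not taken from the code in the question):

// sketch only: pick out the plane j = j0 of a contiguous IE x JE x KE array
int j0 = 0, dest = 1, tag = 52;    // example values, not from the code above
int sizes[3]    = {IE, JE, KE};    // dimensions of the full array
int subsizes[3] = {IE,  1, KE};    // one plane: every i and k, a single j
int starts[3]   = { 0, j0,  0};    // where the plane starts
MPI_Datatype plane;
MPI_Type_create_subarray(3, sizes, subsizes, starts, MPI_ORDER_C,
                         MPI_DOUBLE, &plane);
MPI_Type_commit(&plane);
// the offset is baked into the type, so you pass the start of the whole
// contiguous block, not the start of the plane:
MPI_Send(&a[0][0][0], 1, plane, dest, tag, MPI_COMM_WORLD);

The receiver builds its own subarray type with starts[1] set to whichever plane the data should land in, and also passes the start of its block.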
Update: One problem in the code in the question is that the 3d array you allocate isn't contiguous; it's a collection of IE*JE separately allocated 1d arrays, which may or may not be anywhere near each other. So there's no reliable way of extracting a plane of data out of it.
You need to do something like this:
double ***alloc3d(int l, int m, int n) {
    double *data = new double[l*m*n];        // one contiguous block
    double ***array = new double **[l];
    for (int i = 0; i < l; i++) {
        array[i] = new double *[m];
        for (int j = 0; j < m; j++) {
            array[i][j] = &(data[(i*m + j)*n]);
        }
    }
    return array;
}
Then the data is in one big cube, like you'd expect, with an array of pointers pointing into it. This - the fact that C doesn't have real multidimensional arrays - comes up all the time with C + MPI.
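With that allocator, &a[0][0][0] really is the start of one l*m*n block, so a derived datatype can walk through it safely. Freeing it has to mirror the allocation; a rough sketch (free3d is just an illustrative name, not part of the code above):

void free3d(double ***array, int l) {
    delete[] array[0][0];            // the single block of l*m*n doubles
    for (int i = 0; i < l; i++)
        delete[] array[i];           // the array of row pointers for slab i
    delete[] array;                  // the top-level array of slab pointers
}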
Thanks to Jonathan Dursi. Here I want to post the complete code that creates a 3D matrix and uses a derived datatype for the communication (only the plane with constant y is sent from one process to another). I used Jonathan Dursi's alloc3d function posted above.
#include <mpi.h>
#include <iostream>
#include <math.h>
#include <fstream>
#include <vector>
using namespace std;

#define IE 100
#define JE 50
#define KE 100
#define JE_loc 52

double ***alloc3d(int l, int m, int n) {
    double *data = new double[l*m*n];        // one contiguous block
    double ***array = new double **[l];
    for (int i = 0; i < l; i++) {
        array[i] = new double *[m];
        for (int j = 0; j < m; j++) {
            array[i][j] = &(data[(i*m + j)*n]);
        }
    }
    return array;
}

int main(int argc, char **argv)
{
    //////////////////////declaration/////////////////////////////
    int const NFREQS = 100, ia = 7, ja = 7, ka = 7;
    double const pi = 3.14159;
    int i, j, size, rank, k;
    //MPI_Status status[10];
    MPI_Status status;
    MPI_Request request[10];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // one j = const plane of the contiguous IE x JE x KE cube:
    // IE blocks of KE contiguous doubles, separated by a stride of
    // JE*KE doubles between consecutive i
    MPI_Datatype sub;
    MPI_Type_vector(IE, KE, JE*KE, MPI_DOUBLE, &sub);
    MPI_Type_commit(&sub);

    double ***a = alloc3d(IE, JE, KE);
    for (i = 0; i < IE; i++) {
        for (j = 0; j < JE; j++) {
            for (k = 0; k < KE; k++) {
                a[i][j][k] = 0.0;
            }
        }
    }

    if (rank == 0) {
        for (i = 0; i < IE; i++) {
            for (j = 0; j < JE; j++) {
                for (k = 0; k < KE; k++) {
                    a[i][j][k] = 2;
                }
            }
        }
        // send the j = 0 plane of rank 0's cube
        MPI_Send(&a[0][0][0], 1, sub, 1, 52, MPI_COMM_WORLD);
    }
    if (rank == 1) {
        // receive it into the j = JE-1 = 49 plane of rank 1's cube
        MPI_Recv(&a[0][49][0], 1, sub, 0, 52, MPI_COMM_WORLD, &status);
        for (i = 0; i < IE; i++) {
            for (j = 0; j < JE; j++) {
                for (k = 0; k < KE; k++) {
                    if (a[i][j][k] > 0) {
                        cout << "a[" << i << "][" << j << "][" << k << "]=" << a[i][j][k] << endl;
                    }
                }
            }
        }
    }
    MPI_Finalize();
}
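The code above never releases the derived datatype or the allocated cube. A small cleanup sketch that could go just before MPI_Finalize() (free3d is the illustrative deallocator sketched after the answer above, not something this code defines):

MPI_Type_free(&sub);   // release the committed derived datatype
free3d(a, IE);         // free the contiguous data block and the pointer arrays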