I need to pass a matrix or complex matrix type, as defined by the Armadillo C++ Matrix Library, over MPI. What is a good way to go about this? I thought of trying to:

Write the matrix to some sort of array and then send rows/columns of that, with methods to deconstruct/reconstruct the arrays on either side of the MPI_Send/MPI_Recv pair.
Use something like the MPI_BYTE type, as in the sketch below?
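For the second idea I imagine something like this (an untested sketch: it assumes both ranks already agree on the matrix dimensions, and relies on Armadillo storing its elements contiguously behind memptr(); sendRaw/recvRaw are just placeholder names):

#include <mpi.h>
#include <armadillo>

// Sketch: ship the raw bytes of a cx_mat in a single message.
// Assumes sender and receiver already agree on the dimensions.
void sendRaw(const arma::cx_mat &A, int dest, int tag)
{
    // memptr() points at n_elem contiguous std::complex<double> values
    MPI_Send(const_cast<arma::cx_double*>(A.memptr()),
             static_cast<int>(A.n_elem * sizeof(arma::cx_double)), MPI_BYTE,
             dest, tag, MPI_COMM_WORLD);
}

arma::cx_mat recvRaw(int nRows, int nCols, int src, int tag)
{
    arma::cx_mat A(nRows, nCols);
    MPI_Recv(A.memptr(),
             static_cast<int>(A.n_elem * sizeof(arma::cx_double)), MPI_BYTE,
             src, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    return A;
}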
Thanks
So I was trying to implement the array scheme, sending and receiving, with a simple example on one node.
translate.cpp
#include <mpi.h>
#include <armadillo>
#include <vector>
#include <cstdlib>

using namespace std;
using namespace arma;
using std::vector;

class ArmadilloMPI
{
public:
    ArmadilloMPI(int nRows, int nCols)
    {
        this->nRows = nRows;
        this->nCols = nCols;
        realArray = (double **)malloc(nCols * nRows * sizeof(double*));
        imArray = (double **)malloc(nCols * nRows * sizeof(double*));
    }

    ~ArmadilloMPI()
    {
        free(realArray[0]);
        free(realArray);
        free(imArray[0]);
        free(imArray);
    }

    double **realArray;
    double **imArray;
    int nCols;
    int nRows;

    cx_mat matConstructRecv(int src, int tag)
    {
        cx_mat A(nRows, nCols);
        MPI_Recv(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, src, tag, MPI_COMM_WORLD, 0);
        MPI_Recv(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, src, tag, MPI_COMM_WORLD, 0);
        for(int i = 0; i < nRows; ++i)
        {
            for(int j = 0; i < nCols; ++j)
            {
                real(A(i,j)) = *realArray[i * nRows + j];
                imag(A(i,j)) = *imArray[i * nRows + j];
            }
        }
        return A;
    }

    void matDestroySend(cx_mat &A, int dest, int tag)
    {
        for(int i = 0; i < nRows; ++i)
        {
            for(int j = 0; i < nCols; ++j)
            {
                realArray[i * nRows + j] = &real(A(i,j));
                imArray[i * nRows + j] = &imag(A(i,j));
            }
        }
        MPI_Send(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
        MPI_Send(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
    }
};

int main(int argc, char** argv)
{
    MPI::Init(argc, argv);
    int size = MPI::COMM_WORLD.Get_size();
    int rank = MPI::COMM_WORLD.Get_rank();
    cout << "test" << endl;
    vector<cx_mat> world;
    for(int i = 0; i < size; ++i)
    {
        world.push_back(randu<cx_mat>(4,4));
    }
    cx_mat A;
    A = randu<cx_mat>(4,4);
    ArmadilloMPI* armaMPI = new ArmadilloMPI(4,4);
    if(rank == 0)
    {
        for(int i = 1; i < size; i++)
        {
            cout << "A is now " << A << endl;
            A += armaMPI->matConstructRecv(i, 0);
        }
    }
    else
    {
        armaMPI->matDestroySend(world[rank], 1, 0);
    }
    cout << A << endl;
    delete armaMPI;
    MPI::Finalize();
}
But I get a segfault:
*** Process received signal ***
Signal: Segmentation fault: 11 (11)
Signal code: (0)
Failing at address: 0x0
translate(1032,0x7fff747ad310) malloc: *** error for object 0x41434d5f49504d4f: pointer being freed was not allocated
Thoughts?
There are a couple of issues:
In C and C++, arrays and vectors start at 0, not 1, so the following code will fail:
vector<cx_mat> world;
world.resize(1);
world[1] = randu<cx_mat>(4,4); // problem to come!
You can change it to:
vector<cx_mat> world;
world.push_back(randu<cx_mat>(4,4));
Dynamic allocation of a 2D array with contiguous memory: you need one new for the array of doubles, and another new for the array of pointers to double. Then set each pointer to point to the first item of its row.
double *data = new double[nCols * nRows];
realArray = new double*[nRows];
for(int i = 0; i < nRows; i++){
    realArray[i] = &data[i * nCols];
}
You could have guessed this one... Why don't compilers warn about this kind of thing? Because it could make sense elsewhere, but not here. The loop condition tests i instead of j:

for(int j = 0; i < nCols; ++j)
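It should read:

for(int j = 0; j < nCols; ++j)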
Use a different tag for each message to avoid mixing up the real and imaginary parts:
MPI_Send(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
MPI_Send(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag+1, MPI_COMM_WORLD);
The code becomes:
#include <mpi.h>
#include <armadillo>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <ctime>

using namespace std;
using namespace arma;
using std::vector;

class ArmadilloMPI
{
public:
    ArmadilloMPI(int nRows, int nCols)
    {
        this->nRows = nRows;
        this->nCols = nCols;

        // one new for the contiguous data, one new for the row pointers
        double *data = new double[nCols * nRows];
        realArray = new double*[nRows];
        for(int i = 0; i < nRows; i++){
            realArray[i] = &data[i * nCols];
        }
        double *datai = new double[nCols * nRows];
        imArray = new double*[nRows];
        for(int i = 0; i < nRows; i++){
            imArray[i] = &datai[i * nCols];
        }
    }

    ~ArmadilloMPI()
    {
        delete[] realArray[0];  // the contiguous data
        delete[] realArray;     // the row pointers
        delete[] imArray[0];
        delete[] imArray;
    }

    double **realArray;
    double **imArray;
    int nCols;
    int nRows;

    cx_mat matConstructRecv(int tag, int src)
    {
        cx_mat A(nRows, nCols);
        // tags match matDestroySend(): imaginary part on tag+1, real part on tag
        MPI_Recv(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, src, tag + 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, src, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for(int i = 0; i < nRows; ++i)
        {
            for(int j = 0; j < nCols; ++j)
            {
                A(i,j) = cx_double(realArray[i][j], imArray[i][j]);
            }
        }
        return A;
    }

    void matDestroySend(cx_mat &A, int dest, int tag)
    {
        for(int i = 0; i < nRows; ++i)
        {
            for(int j = 0; j < nCols; ++j)
            {
                realArray[i][j] = real(A(i,j));
                imArray[i][j] = imag(A(i,j));
            }
        }
        MPI_Send(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
        MPI_Send(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag + 1, MPI_COMM_WORLD);
    }
};

int main(int argc, char **argv)
{
    int rank;
    int size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand(time(NULL) + rank);

    vector<cx_mat> world;
    world.push_back(randu<cx_mat>(4,4));

    cx_mat A;
    ArmadilloMPI* armaMPI = new ArmadilloMPI(4,4);
    if(rank == 0)
    {
        world[0].print("world[0] on 0:");
        armaMPI->matDestroySend(world[0], 1, 0);
    }
    if(rank == 1){
        A = armaMPI->matConstructRecv(0, 0);
        A.print("A on 1:");
    }
    delete armaMPI;
    MPI_Finalize();
}
To compile:
mpiCC -O2 -o main main.cpp -larmadillo -llapack -lblas -Wall
To run:
mpiexec -np 2 ./main
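A side note: you can avoid the intermediate real/imaginary arrays entirely. Armadillo stores elements contiguously (column-major), so if your MPI implementation provides the MPI-3 MPI_CXX_DOUBLE_COMPLEX datatype, the whole matrix can go in a single message straight from memptr(). A sketch under that assumption (sendWhole/recvWhole are hypothetical names):

#include <mpi.h>
#include <armadillo>

// Sketch: one message per complex matrix, no intermediate copies.
// Requires an MPI 3.x implementation that defines MPI_CXX_DOUBLE_COMPLEX.
void sendWhole(const arma::cx_mat &A, int dest, int tag)
{
    MPI_Send(const_cast<arma::cx_double*>(A.memptr()),
             static_cast<int>(A.n_elem), MPI_CXX_DOUBLE_COMPLEX,
             dest, tag, MPI_COMM_WORLD);
}

arma::cx_mat recvWhole(int nRows, int nCols, int src, int tag)
{
    arma::cx_mat A(nRows, nCols);
    MPI_Recv(A.memptr(), static_cast<int>(A.n_elem), MPI_CXX_DOUBLE_COMPLEX,
             src, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    return A;
}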