OpenMPI: I want to read a file on the root node and send the contents of that file to all other nodes. I have found that MPI_Bcast does that:
int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm)
All the examples that I have found have the count
value already known, but in my case the count value is initially only known on the root. Other examples say that the same call to MPI_Bcast receives the data on the other nodes.
I've added this:
typedef short Descriptor[128];

MPI_Datatype descriptorType;
MPI_Type_contiguous(sizeof(Descriptor), MPI_SHORT, &descriptorType);
MPI_Type_commit(&descriptorType);

if(world_rank == 0) {
    struct stat finfo;
    if(stat(argv[1], &finfo) == 0) {
        querySize = finfo.st_size / sizeof(Descriptor);
    }
    {
        //read binary query
        queryDescriptors = new Descriptor[querySize];
        fstream qFile(argv[1], ios::in | ios::binary);
        qFile.read((char*)queryDescriptors, querySize * sizeof(Descriptor));
        qFile.close();
    }
}

MPI_Bcast((void*)&querySize, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (world_rank != 0)
{
    queryDescriptors = new Descriptor[querySize];
}
MPI_Bcast((void*)queryDescriptors, querySize, descriptorType, 0, MPI_COMM_WORLD);
When I run it like this: mpirun -np 2 ./mpi_hello_world
it works fine, but when I run it with more than 2 processes, I get this:
mpi_hello_world: malloc.c:3096: sYSMALLOc: Assertion `(old_top == (((mbinptr) (((char *) &((av)->bins[((1) - 1) * 2])) - __builtin_offsetof (struct malloc_chunk, fd)))) && old_size == 0) || ((unsigned long) (old_size) >= (unsigned long)((((__builtin_offsetof (struct malloc_chunk, fd_nextsize))+((2 * (sizeof(size_t))) - 1)) & ~((2 * (sizeof(size_t))) - 1))) && ((old_top)->size & 0x1) && ((unsigned long)old_end & pagemask) == 0)' failed.
mpi_hello_world: malloc.c:3096: sYSMALLOc: Assertion `(old_top == (((mbinptr) (((char *) &((av)->bins[((1) - 1) * 2])) - __builtin_offsetof (struct malloc_chunk, fd)))) && old_size == 0) || ((unsigned long) (old_size) >= (unsigned long)((((__builtin_offsetof (struct malloc_chunk, fd_nextsize))+((2 * (sizeof(size_t))) - 1)) & ~((2 * (sizeof(size_t))) - 1))) && ((old_top)->size & 0x1) && ((unsigned long)old_end & pagemask) == 0)' failed.
If qFile.read(...) is not enclosed in an if(rank == 0){...} test, all processes will read the file. And queryDescriptors = new Descriptor[querySize]; should be called after the first MPI_Bcast() on all processes except 0: before that broadcast, querySize is meaningless on those processes.
Process 0 must: read the number of items from the file, allocate the array, read the data, then broadcast the count followed by the data.
The other processes must: receive the count via the first MPI_Bcast(), allocate the array, then receive the data via the second MPI_Bcast().
Here is an example of how to read an array of floats and use dynamic allocation:
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <mpi.h>

using namespace std;

int main (int argc, char *argv[])
{
    int rank;
    int size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if(rank == 0)
    {
        //creating the file
        ofstream myfile;
        myfile.open("example.txt", ios::out | ios::binary);
        int nbitem = 42;
        myfile.write((char*)&nbitem, sizeof(int));

        float a = 0;
        for(int i = 0; i < nbitem; i++){
            myfile.write((char*)&a, sizeof(float));
            a += 2;
        }
        myfile.close();
    }

    //now reading the file
    int nbitemread = 0;
    float* buffer;

    if(rank == 0){
        ifstream file("example.txt", ios::in | ios::binary);
        file.read((char*)&nbitemread, sizeof(int));
        buffer = new float[nbitemread];
        file.read((char*)buffer, nbitemread * sizeof(float));
        file.close();
        //communication
        MPI_Bcast(&nbitemread, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(buffer, nbitemread, MPI_FLOAT, 0, MPI_COMM_WORLD);
    }else{
        MPI_Bcast(&nbitemread, 1, MPI_INT, 0, MPI_COMM_WORLD);
        //nbitemread is meaningful now
        buffer = new float[nbitemread];
        MPI_Bcast(buffer, nbitemread, MPI_FLOAT, 0, MPI_COMM_WORLD);
    }

    //printing...
    cout << "on rank " << rank << " read " << buffer[nbitemread/2] << " at position " << nbitemread/2 << endl;

    delete[] buffer;
    MPI_Finalize();
    return 0;
}
Compile it with mpiCC main.cpp -o main
and run it with mpirun -np 2 main
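Since MPI_Bcast() is a collective call that every rank executes with the same arguments, the two broadcasts can also be written once, outside the if/else. A minimal sketch of that variant, reusing the same variables as above:

if(rank == 0){
    //rank 0 reads nbitemread and fills buffer from the file, as above
}
MPI_Bcast(&nbitemread, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(rank != 0){
    buffer = new float[nbitemread]; //the count is now known on every rank
}
MPI_Bcast(buffer, nbitemread, MPI_FLOAT, 0, MPI_COMM_WORLD);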
Another issue in your code is MPI_Type_contiguous(sizeof(Descriptor), MPI_SHORT, &descriptorType);. Since sizeof(Descriptor) is a size in bytes (128 * sizeof(short) = 256), pairing it with MPI_SHORT builds a datatype twice as large as a Descriptor, so the second broadcast writes past the end of the allocated array, which is what triggers the malloc assertion. It should be MPI_Type_contiguous(sizeof(Descriptor), MPI_CHAR, &descriptorType);
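An equivalent fix, if you prefer to keep MPI_SHORT, is to pass the number of shorts rather than the number of bytes as the count:

typedef short Descriptor[128];
MPI_Datatype descriptorType;
MPI_Type_contiguous(128, MPI_SHORT, &descriptorType); //128 shorts = sizeof(Descriptor) bytes
MPI_Type_commit(&descriptorType);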
Here is a piece of code based on yours that should do the trick:
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <mpi.h>

using namespace std;

int main (int argc, char *argv[])
{
    int world_rank;
    int size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    int querySize = 0;

    typedef short Descriptor[128];
    MPI_Datatype descriptorType;
    MPI_Type_contiguous(sizeof(Descriptor), MPI_CHAR, &descriptorType);
    MPI_Type_commit(&descriptorType);

    Descriptor* queryDescriptors;

    if(world_rank == 0) {
        struct stat finfo;
        if(stat(argv[1], &finfo) == 0) {
            cout << "st_size " << finfo.st_size << " descriptor " << sizeof(Descriptor) << endl;
            querySize = finfo.st_size / sizeof(Descriptor);
            cout << "querySize " << querySize << endl;
        }else{
            cout << "stat error" << endl;
        }
        {
            //read binary query
            queryDescriptors = new Descriptor[querySize];
            fstream qFile(argv[1], ios::in | ios::binary);
            qFile.read((char*)queryDescriptors, querySize * sizeof(Descriptor));
            qFile.close();
        }
    }

    MPI_Bcast((void*)&querySize, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (world_rank != 0)
    {
        queryDescriptors = new Descriptor[querySize];
    }
    MPI_Bcast((void*)queryDescriptors, querySize, descriptorType, 0, MPI_COMM_WORLD);

    cout << "on rank " << world_rank << " read " << queryDescriptors[querySize/2][12] << " at position " << querySize/2 << endl;

    delete[] queryDescriptors;
    MPI_Finalize();
    return 0;
}
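Compile it the same way and run it with the path to your binary file of Descriptors as the argument, for instance mpirun -np 4 ./main query.bin (where query.bin stands for whatever file you were passing as argv[1]).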