#ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
#define DUNE_MPICOLLECTIVECOMMUNICATION_HH

// Use the singleton pattern and template specialization to
// generate an MPI_Op from a C++ binary functor.
template<typename Type, typename BinaryFunction>
class Generic_MPI_Op
{
public:
  static MPI_Op get ()
  {
    if (!op) {
      op = shared_ptr<MPI_Op>(new MPI_Op);
      // Register the static trampoline below as a commutative user-defined reduction.
      MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,
                    true, op.get());
    }
    return *op;
  }

private:
  // Trampoline with the signature MPI expects: apply the functor
  // elementwise and store the result in inout.
  static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
  {
    BinaryFunction func;
    for (int i = 0; i < *len; ++i, ++in, ++inout) {
      Type temp;
      temp = func(*in, *inout);
      *inout = temp;
    }
  }
  Generic_MPI_Op () {}
  Generic_MPI_Op (const Generic_MPI_Op&) {}
  static shared_ptr<MPI_Op> op;
};
template<typename Type, typename BinaryFunction>
shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op;

// Map type/functor pairs that correspond to a predefined MPI
// operation directly onto the built-in handle.
#define ComposeMPIOp(type,func,op) \
  template<> \
  class Generic_MPI_Op<type, func<type> >{ \
  public: \
    static MPI_Op get(){ return op; } \
  private: \
    Generic_MPI_Op () {} \
    Generic_MPI_Op (const Generic_MPI_Op &) {} \
  }

// One instantiation of many in the header:
ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
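Taken together, these pieces let any STL-style binary functor act as an MPI reduction. A minimal sketch of the mapping, assuming an initialized MPI environment (buffer names and the double/std::plus choice are illustrative, not from the header):

  // For pairs covered by a ComposeMPIOp specialization, get() simply
  // returns the predefined handle (here MPI_SUM); an unmapped functor
  // would instead go through MPI_Op_create in the generic template.
  double local = 1.0, global = 0.0;
  MPI_Op sumOp = Dune::Generic_MPI_Op<double, std::plus<double> >::get();
  MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, sumOp, MPI_COMM_WORLD);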
// Constructor of the MPI specialization: determine rank and size,
// but only if MPI has been initialized and the communicator is valid.
CollectiveCommunication (const MPI_Comm& c = MPI_COMM_WORLD)
  : communicator(c)
{
  if (communicator != MPI_COMM_NULL) {
    int initialized = 0;
    MPI_Initialized(&initialized);
    if (!initialized)
      DUNE_THROW(ParallelError,
                 "You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI CollectiveCommunication!");
    MPI_Comm_rank(communicator, &me);
    MPI_Comm_size(communicator, &procs);
  } else {  // no valid communicator: mark as empty
    procs = 0;
    me = -1;
  }
}
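In practice the communicator comes from MPIHelper, which performs the MPI initialization the constructor checks for. A minimal sketch, assuming header paths as in older dune-common releases (variable names are illustrative):

  #include <iostream>
  #include <dune/common/mpihelper.hh>
  #include <dune/common/mpicollectivecommunication.hh>

  int main(int argc, char** argv)
  {
    // Initializes MPI exactly once; must precede any use of the
    // MPI CollectiveCommunication.
    Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
    Dune::CollectiveCommunication<MPI_Comm> cc(helper.getCommunicator());
    if (cc.rank() == 0)
      std::cout << "running on " << cc.size() << " processes\n";
    return 0;
  }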
// The reduction methods all follow the same pattern: forward to
// allreduce with the matching binary functor.
template<typename T>
T sum (T& in) const
{ T out; allreduce<std::plus<T> >(&in,&out,1); return out; }

template<typename T>
int sum (T* inout, int len) const
{ return allreduce<std::plus<T> >(inout,len); }

template<typename T>
T prod (T& in) const
{ T out; allreduce<std::multiplies<T> >(&in,&out,1); return out; }

template<typename T>
int prod (T* inout, int len) const
{ return allreduce<std::multiplies<T> >(inout,len); }

template<typename T>
T min (T& in) const
{ T out; allreduce<Min<T> >(&in,&out,1); return out; }

template<typename T>
int min (T* inout, int len) const
{ return allreduce<Min<T> >(inout,len); }

template<typename T>
T max (T& in) const
{ T out; allreduce<Max<T> >(&in,&out,1); return out; }

template<typename T>
int max (T* inout, int len) const
{ return allreduce<Max<T> >(inout,len); }

int barrier () const
{ return MPI_Barrier(communicator); }
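A short usage sketch for the reductions, assuming cc is the CollectiveCommunication object from above (computeLocalError is a hypothetical helper):

  double localError = computeLocalError();  // hypothetical per-rank value
  double globalError = cc.sum(localError);  // same value on every rank
  double worst = cc.max(localError);        // global maximum
  cc.barrier();                             // synchronize all ranks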
template<typename T>
int gather (T* in, T* out, int len, int root) const
{
  return MPI_Gather(in,len,MPITraits<T>::getType(),
                    out,len,MPITraits<T>::getType(),
                    root,communicator);
}

template<typename T>
int gatherv (T* in, int sendlen, T* out, int* recvlen, int* displ, int root) const
{
  return MPI_Gatherv(in,sendlen,MPITraits<T>::getType(),
                     out,recvlen,displ,MPITraits<T>::getType(),
                     root,communicator);
}

template<typename T>
int scatter (T* send, T* recv, int len, int root) const
{
  return MPI_Scatter(send,len,MPITraits<T>::getType(),
                     recv,len,MPITraits<T>::getType(),
                     root,communicator);
}

template<typename T>
int scatterv (T* send, int* sendlen, int* displ, T* recv, int recvlen, int root) const
{
  return MPI_Scatterv(send,sendlen,displ,MPITraits<T>::getType(),
                      recv,recvlen,MPITraits<T>::getType(),
                      root,communicator);
}

// Allow the wrapper to be passed wherever a raw MPI_Comm is expected.
operator MPI_Comm () const
{ return communicator; }

template<typename T, typename T1>
int allgather (T* sbuf, int count, T1* rbuf) const
{
  return MPI_Allgather(sbuf,count,MPITraits<T>::getType(),
                       rbuf,count,MPITraits<T1>::getType(),
                       communicator);
}

template<typename T>
int allgatherv (T* in, int sendlen, T* out, int* recvlen, int* displ) const
{
  return MPI_Allgatherv(in,sendlen,MPITraits<T>::getType(),
                        out,recvlen,displ,MPITraits<T>::getType(),
                        communicator);
}
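For the variable-length variants the caller supplies per-rank counts and displacements. A scatterv sketch under the assumption that rank r should receive r+1 items (all names illustrative; only the root's send arguments matter):

  int nprocs = cc.size();
  std::vector<int> sendlen(nprocs), displ(nprocs, 0);
  for (int r = 0; r < nprocs; ++r) sendlen[r] = r + 1;
  for (int r = 1; r < nprocs; ++r) displ[r] = displ[r-1] + sendlen[r-1];
  std::vector<double> sendbuf(displ[nprocs-1] + sendlen[nprocs-1], 3.14);
  std::vector<double> recvbuf(cc.rank() + 1);
  cc.scatterv(&sendbuf[0], &sendlen[0], &displ[0],
              &recvbuf[0], static_cast<int>(recvbuf.size()), 0);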
// In-place variant: reduce via a temporary buffer, then copy back.
template<typename BinaryFunction, typename Type>
int allreduce (Type* inout, int len) const
{
  Type* out = new Type[len];
  int ret = allreduce<BinaryFunction>(inout,out,len);
  std::copy(out, out+len, inout);
  delete[] out;
  return ret;
}

template<typename BinaryFunction, typename Type>
int allreduce (Type* in, Type* out, int len) const
{
  return MPI_Allreduce(in, out, len, MPITraits<Type>::getType(),
                       Generic_MPI_Op<Type,BinaryFunction>::get(),
                       communicator);
}

private:
  MPI_Comm communicator;
  int me;      // rank within communicator
  int procs;   // number of processes
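Because allreduce is parameterized on the functor, user-defined reductions work exactly like the built-in ones. A sketch: the first call uses Min from dune-common's binaryfunctions.hh, the second a hypothetical functor (BitOr and the data are illustrative; any default-constructible binary functor with a matching operator() would do):

  // In-place global minimum over an array, same result on every rank.
  double vals[3] = { 1.0, 2.0, 3.0 };          // illustrative data
  cc.allreduce<Dune::Min<double> >(vals, 3);

  // Hypothetical user-defined reduction: bitwise-or of flag words.
  struct BitOr {
    unsigned operator()(const unsigned& a, const unsigned& b) const
    { return a | b; }
  };
  unsigned flags = 0x2u;                       // illustrative local flags
  cc.allreduce<BitOr>(&flags, 1);              // goes through MPI_Op_create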
int rank() const
Return the rank of this process; the value lies between 0 and size()-1.
Definition: mpicollectivecommunication.hh:166
T prod(T &in) const
Compute the product of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:195
int max(T *inout, int len) const
Compute the maximum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:237
T sum(T &in) const
Compute the sum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:179
int prod(T *inout, int len) const
Compute the product of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:204
int allgatherv(T *in, int sendlen, T *out, int *recvlen, int *displ) const
Gathers data of variable length from all tasks and distributes it to all.
Definition: mpicollectivecommunication.hh:310
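A common pattern for the variable-length all-gather, sketched with illustrative names: first exchange the per-rank counts with allgather, then build displacements, then gather the payload.

  int mylen = static_cast<int>(mydata.size());   // illustrative local data
  std::vector<int> recvlen(cc.size());
  cc.allgather(&mylen, 1, &recvlen[0]);          // counts from every rank
  std::vector<int> displ(cc.size(), 0);
  for (int r = 1; r < cc.size(); ++r)
    displ[r] = displ[r-1] + recvlen[r-1];
  std::vector<double> all(displ.back() + recvlen.back());
  cc.allgatherv(&mydata[0], mylen, &all[0], &recvlen[0], &displ[0]);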
Traits classes for mapping types onto MPI_Datatype.
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: mpicollectivecommunication.hh:243
int allreduce(Type *inout, int len) const
Compute a BinaryFunction-reduction over all processes for each component of an array and return the result in every process.
Definition: mpicollectivecommunication.hh:319
int scatterv(T *send, int *sendlen, int *displ, T *recv, int recvlen, int root) const
Scatter arrays of variable length from a root to all other tasks.
Definition: mpicollectivecommunication.hh:286
T min(T &in) const
Compute the minimum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:211
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: mpicollectivecommunication.hh:250
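A broadcast sketch (names are illustrative; readNumberOfSteps is a hypothetical helper): rank 0 determines a value and distributes it.

  int nsteps = 0;
  if (cc.rank() == 0)
    nsteps = readNumberOfSteps();  // hypothetical, runs on the root only
  cc.broadcast(&nsteps, 1, 0);     // now every rank holds the root's value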
This file implements the class shared_ptr (a reference counting pointer), for those systems that don't have it in the standard library.
int allgather(T *sbuf, int count, T1 *rbuf) const
Gathers data from all tasks and distributes it to all.
Definition: mpicollectivecommunication.hh:301
Various helper classes derived from std::binary_function for STL-style functional programming.
int min(T *inout, int len) const
Compute the minimum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:220
int gatherv(T *in, int sendlen, T *out, int *recvlen, int *displ, int root) const
Gather arrays of variable size on the root task.
Definition: mpicollectivecommunication.hh:267
int scatter(T *send, T *recv, int len, int root) const
Scatter an array from a root to all other tasks.
Definition: mpicollectivecommunication.hh:277
A few common exception classes.
int gather(T *in, T *out, int len, int root) const
Gather arrays on the root task.
Definition: mpicollectivecommunication.hh:258
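A fixed-length gather sketch (names illustrative; localResidual is a hypothetical helper): every rank contributes one value, and only the root's receive buffer is meaningful.

  double myval = localResidual();            // hypothetical per-rank value
  std::vector<double> collected(cc.size());  // only used on rank 0
  cc.gather(&myval, &collected[0], 1, 0);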
ComposeMPIOp(char, std::plus, MPI_SUM)
#define DUNE_THROW(E, m)
Definition: exceptions.hh:243
int sum(T *inout, int len) const
Compute the sum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:188
Implements a utility class that provides collective communication methods for sequential programs.
Default exception if an error occurred in the parallel communication of the program.
Definition: exceptions.hh:312
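DUNE_THROW raises such exception types (the constructor above throws ParallelError this way); they can be caught like any other Dune exception. A sketch, assuming MPI was initialized elsewhere:

  try {
    Dune::CollectiveCommunication<MPI_Comm> cc(MPI_COMM_WORLD);
  } catch (Dune::ParallelError& e) {
    // Reached e.g. when MPIHelper::instance() was never called.
    std::cerr << e.what() << std::endl;
  }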
Collective communication interface and sequential default implementation.
Definition: collectivecommunication.hh:72
Generic_MPI_Op
Definition: mpicollectivecommunication.hh:39
T max(T &in) const
Compute the maximum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:228
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: bigunsignedint.hh:30
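MPITraits<T>::getType() is what every method above uses to obtain the MPI_Datatype for T. For a user-defined payload one would specialize it; a hedged sketch (MyPair is illustrative, and the assumption that a contiguous byte type suffices must hold for the actual payload):

  struct MyPair { double value; int index; };  // illustrative payload

  namespace Dune {
    template<>
    struct MPITraits<MyPair> {
      static MPI_Datatype getType() {
        static MPI_Datatype type = MPI_DATATYPE_NULL;
        if (type == MPI_DATATYPE_NULL) {
          // Assumption: treating the struct as raw bytes is sufficient here.
          MPI_Type_contiguous(sizeof(MyPair), MPI_BYTE, &type);
          MPI_Type_commit(&type);
        }
        return type;
      }
    };
  }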
int allreduce(Type *in, Type *out, int len) const
Compute a BinaryFunction-reduction over all processes for each component of an array and return the result in every process.
Definition: mpicollectivecommunication.hh:330
int size() const
Number of processes in the set; the value is greater than 0.
Definition: mpicollectivecommunication.hh:172
Dune namespace.
Definition: alignment.hh:9
CollectiveCommunication(const MPI_Comm &c=MPI_COMM_WORLD)
Instantiation using an MPI communicator.
Definition: mpicollectivecommunication.hh:149