#ifdef _MPI
#include <mpi.h>
using namespace std;

void Reduce(int my_rank, int Master, int mpi_size, function1D<double>& histogram, function2D<dcomplex>& Gd,
	    function2D<double>& AverageProbability, function1D<double>& nlc,
	    function1D<double>& kaver, function2D<double>& susc,
	    function2D<double>& Gtau)
{
  function2D<double> cAverageProbability;
  function1D<double> chistogram;
  function1D<double> cnlc;
  function1D<double> ckaver;
  function2D<dcomplex> cGd;
  function2D<double> cSusc;
  function2D<double> cGtau;
  
  if (my_rank==Master){
    chistogram.resize(histogram.size());
    //    cmom0.resize(mom0.size());
    cnlc.resize(nlc.size());
    cAverageProbability.resize(AverageProbability.fullsize_N(),AverageProbability.fullsize_Nd());
    cGd.resize(Gd.fullsize_N(),Gd.fullsize_Nd());
    cGtau.resize(Gtau.fullsize_N(),Gtau.fullsize_Nd());
    ckaver.resize(kaver.size());
    cSusc.resize(susc.fullsize_N(), susc.fullsize_Nd());
  }
  
  MPI::COMM_WORLD.Reduce(histogram.MemPt(), chistogram.MemPt(), histogram.size(), MPI_DOUBLE, MPI_SUM, Master);

  MPI::COMM_WORLD.Reduce(AverageProbability.MemPt(), cAverageProbability.MemPt(), AverageProbability.fullsize2(), MPI_DOUBLE, MPI_SUM, Master);
  
  MPI::COMM_WORLD.Reduce(nlc.MemPt(), cnlc.MemPt(), nlc.size(), MPI_DOUBLE, MPI_SUM, Master);
  
  MPI::COMM_WORLD.Reduce(kaver.MemPt(), ckaver.MemPt(), kaver.size(), MPI_DOUBLE, MPI_SUM, Master);
  
  MPI::COMM_WORLD.Reduce(Gtau.MemPt(), cGtau.MemPt(), Gtau.fullsize2(), MPI_DOUBLE, MPI_SUM, Master);

  MPI::COMM_WORLD.Reduce(Gd.MemPt(), cGd.MemPt(), Gd.fullsize2()*2, MPI_DOUBLE, MPI_SUM, Master);

  MPI::COMM_WORLD.Reduce(susc.MemPt(), cSusc.MemPt(), susc.fullsize2(), MPI_DOUBLE, MPI_SUM, Master);
  
  if (my_rank==Master){
    histogram = chistogram;
    histogram *= (1./mpi_size);
    AverageProbability = cAverageProbability;
    AverageProbability *= (1./mpi_size);
    nlc = cnlc;
    nlc *= (1./mpi_size);
    kaver = ckaver;
    kaver *= (1./mpi_size);
    Gd = cGd;
    Gd *= (1./mpi_size);
    Gtau = cGtau;
    Gtau *= (1./mpi_size);
    susc = cSusc;
    susc *= (1./mpi_size);
  }
}

void MPI_Init(int argc, char* argv[], int& my_rank, int& mpi_size, int& Master)
{
  MPI::Init(argc, argv);
  my_rank = MPI::COMM_WORLD.Get_rank();
  mpi_size = MPI::COMM_WORLD.Get_size();
  Master = 0;
  
  std::cout << "Hello World! I am " << my_rank << " of " << mpi_size << std::endl;
}

void MPI_finalize()
{MPI::Finalize();}

#else

using namespace std;

// Serial build: there is only one rank, so the caller's accumulators already
// hold the final values and no cross-rank reduction (or averaging) is needed.
void Reduce(int my_rank, int Master, int mpi_size, function1D<double>& histogram, function2D<dcomplex>& Gd,
	    function2D<double>& AverageProbability, function1D<double>& nlc,
	    function1D<double>& kaver, function2D<double>& susc, function2D<double>& Gtau){}

// Serial build: pretend to be a one-process MPI world.
// Sets my_rank = 0, mpi_size = 1 and Master = 0; argc/argv are ignored.
void MPI_Init(int argc, char* argv[], int& my_rank, int& mpi_size, int& Master)
{
  (void)argc;  // command-line arguments are not needed without MPI
  (void)argv;
  Master   = 0;  // rank 0 is trivially the master
  my_rank  = 0;  // this process is the only rank ...
  mpi_size = 1;  // ... in a world of size one
  std::cout<<"Not parallel!"<<std::endl;
}

// Serial build: no MPI runtime to shut down, so this is intentionally a no-op.
void MPI_finalize(){}
#endif
