MPI::Intracomm *MPIProcess::fgCommunicator = 0;
int MPIProcess::fgIndexComm = -1;
MPI::Intracomm *MPIProcess::fgCommunicators[2] = {0};
unsigned int MPIProcess::fgIndicesComm[2] = {0};
 
std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology (" << fgCartSizeX << "x"
          << fgCartSizeY << ")" << std::endl;

std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!"
          << std::endl;
MPI::COMM_WORLD.Abort(-1);
 
std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!"
          << std::endl;
std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
 
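The checks above only report the problem; a minimal sketch of the underlying consistency test follows, assuming (as the warnings state) that the product of the two Cartesian dimensions must equal the number of processes in MPI::COMM_WORLD. The function name and the fallback message are illustrative, not the actual Minuit2 code.

#include <mpi.h>
#include <iostream>

// Illustrative check (not the Minuit2 implementation): accept a dimX x dimY
// grid only if it matches the number of processes; MPI is assumed to be
// initialized already.
bool ValidateCartTopology(unsigned int dimX, unsigned int dimY)
{
   const unsigned int nProcs = MPI::COMM_WORLD.Get_size();
   if (dimX * dimY != nProcs) {
      std::cout << "Warning: " << dimX << "x" << dimY << " doesn't correspond to "
                << nProcs << " procs; using all procs for X." << std::endl;
      return false;
   }
   std::cout << "Info: declare cartesian topology (" << dimX << "x" << dimY << ")" << std::endl;
   return true;
}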
std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
MPI::COMM_WORLD.Abort(-1);
 
std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
 
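The two-slot arrays at the top of the file together with these messages suggest that at most two nested communicator levels are supported. A hedged sketch of that guard; the helper name is an assumption.

#include <iostream>

// Hypothetical guard mirroring the two-level limit: indices 0 and 1 are the
// only valid slots, matching fgCommunicators[2] and fgIndicesComm[2] above.
bool CheckCommIndex(unsigned int indexComm)
{
   if (indexComm > 1) {
      std::cerr << "Error: more than 2 nested MPI calls!" << std::endl;
      return false; // the file above aborts via MPI::COMM_WORLD.Abort(-1)
   }
   return true;
}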
std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
MPI::COMM_WORLD.Abort(-1);
 
 
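The abort above fires when there are more processes than elements to share out. A sketch of the even block partitioning such a check protects, under the assumption that each rank receives floor(n/size) elements plus one extra for the first n % size ranks (for example, 10 elements on 4 ranks gives 3, 3, 2, 2). The helpers below are illustrative, not MPIProcess::NumElements4Job itself.

// Illustrative partitioning helpers (not the Minuit2 implementation):
// rank r owns ElementsForRank(...) consecutive elements starting at StartElement(...).
unsigned int ElementsForRank(unsigned int nElements, unsigned int size, unsigned int rank)
{
   return nElements / size + (rank < nElements % size ? 1 : 0);
}

unsigned int StartElement(unsigned int nElements, unsigned int size, unsigned int rank)
{
   unsigned int start = 0;
   for (unsigned int r = 0; r < rank; r++)
      start += ElementsForRank(nElements, size, r);
   return start;
}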
std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!"
          << std::endl;
std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
 
for (unsigned int i = 0; i < fNelements; i++) {
 
std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
 
 
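The loop above fills only the slice of indices owned by the calling process; the synchronization referred to by the error message then collects all slices on every rank. A minimal sketch of that pattern with the MPI C++ bindings used elsewhere in this file; the buffer names and layout are assumptions.

#include <mpi.h>
#include <vector>

// Every rank contributes its local slice; afterwards each rank holds the full
// vector. counts[r] and offsets[r] describe the slice owned by rank r
// (MPI is assumed to be initialized already).
void AllGatherSlices(const std::vector<double> &local, std::vector<double> &global,
                     const std::vector<int> &counts, const std::vector<int> &offsets)
{
   MPI::COMM_WORLD.Allgatherv(&local[0], static_cast<int>(local.size()), MPI::DOUBLE,
                              &global[0], &counts[0], &offsets[0], MPI::DOUBLE);
}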
std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!"
          << std::endl;
std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
 
for (unsigned int i = 0; i < fNelements; i++) {
 
std::cerr << "Error --> MPIProcess::SyncMatrix: no MPI synchronization is possible!" << std::endl;
 
 
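SyncSymMatrixOffDiagonal only needs to exchange the strictly off-diagonal part of the symmetric matrix, which for an n x n matrix is n*(n-1)/2 values. A hedged sketch of packing those values into a flat buffer before such an exchange; the accessor-style matrix interface is an assumption.

#include <vector>

// Pack the strictly upper triangle of a symmetric n x n matrix into a flat
// buffer of n*(n-1)/2 values; Matrix is anything with an (i, j) accessor.
template <typename Matrix>
std::vector<double> PackOffDiagonal(const Matrix &m, unsigned int n)
{
   std::vector<double> buffer;
   buffer.reserve(n * (n - 1) / 2);
   for (unsigned int i = 0; i < n; i++)
      for (unsigned int j = i + 1; j < n; j++)
         buffer.push_back(m(i, j));
   return buffer;
}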
for (unsigned int i = 1; i < fSize; i++) {
   offsets[i] = nconts[i - 1] + offsets[i - 1];
 
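This prefix sum turns per-rank element counts into receive displacements for the gather: with counts {3, 3, 2, 2}, for example, the offsets come out as {0, 3, 6, 8}. The same computation as a standalone sketch, with names taken from the loop above.

#include <vector>

// offsets[i] is the running sum of the counts of all lower-numbered ranks.
std::vector<int> Offsets(const std::vector<int> &nconts)
{
   std::vector<int> offsets(nconts.size(), 0);
   for (unsigned int i = 1; i < nconts.size(); i++)
      offsets[i] = nconts[i - 1] + offsets[i - 1];
   return offsets;
}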
std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..."
          << std::endl;
std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;
 
std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!"
          << std::endl;
std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
 
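A hedged usage sketch of the configuration these warnings guard: the Cartesian dimensions have to be set before the first MPIProcess object is constructed, and their product has to match the total number of processes. The 4-process 2x2 layout, the element count, and the header path are assumptions for illustration.

#include "Minuit2/MPIProcess.h" // header path as in the ROOT source tree (assumption)

int main()
{
   // Hypothetical 4-process run: request a 2x2 Cartesian grid before any
   // MPIProcess exists, then distribute 100 elements on communicator level 0.
   ROOT::Minuit2::MPIProcess::SetCartDimension(2, 2);
   ROOT::Minuit2::MPIProcess proc(100, 0);

   // This rank's share of the 100 elements.
   unsigned int first = proc.StartElementIndex();
   unsigned int last = proc.EndElementIndex();
   (void)first;
   (void)last;
   return 0;
}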