Loading...
Searching...
No Matches
UPstream Class Reference

Inter-processor communications stream. More...

#include <UPstream.H>

Inheritance diagram for UPstream:
Collaboration diagram for UPstream:

Classes

class  commsStruct
 Structure for communicating between processors. More...
class  commsStructList
 Collection of communication structures. More...
class  communicator
 Wrapper for internally indexed communicator label. Always invokes UPstream::allocateCommunicatorComponents() and UPstream::freeCommunicatorComponents(). More...
class  Communicator
 An opaque wrapper for MPI_Comm with a vendor-independent representation without any <mpi.h> header. More...
class  Request
 An opaque wrapper for MPI_Request with a vendor-independent representation without any <mpi.h> header. More...
class  File
 An opaque wrapper for MPI_File methods without any <mpi.h> header dependency. More...
class  Window
 An opaque wrapper for MPI_Win with a vendor-independent representation and without any <mpi.h> header dependency. More...

Public Types

enum class  commsTypes : char { buffered , scheduled , nonBlocking , blocking = buffered }
 Communications types. More...
enum class  sendModes : char { normal , sync }
 Different MPI-send modes (ignored for commsTypes::buffered). More...
enum class  dataTypes : char {
  Basic_begin , type_byte = Basic_begin , type_int16 , type_int32 ,
  type_int64 , type_uint16 , type_uint32 , type_uint64 ,
  type_float , type_double , type_long_double , Basic_end ,
  User_begin = Basic_end , type_3float = User_begin , type_3double , type_6float ,
  type_6double , type_9float , type_9double , invalid ,
  User_end = invalid , DataTypes_end = invalid
}
 Mapping of some fundamental and aggregate types to MPI data types. More...
enum class  opCodes : char {
  Basic_begin , op_min = Basic_begin , op_max , op_sum ,
  op_prod , op_bool_and , op_bool_or , op_bool_xor ,
  op_bit_and , op_bit_or , op_bit_xor , Basic_end ,
  Extra_begin = Basic_end , op_replace = Extra_begin , op_no_op , invalid ,
  Extra_end = invalid , OpCodes_end = invalid
}
 Mapping of some MPI op codes. More...
enum class  topoControls : int {
  broadcast = 1 , reduce = 2 , gather = 4 , combine = 16 ,
  mapGather = 32 , gatherList = 64
}
 Some bit masks corresponding to topology controls. More...
typedef IntRange< int > rangeType
 Int ranges are used for MPI ranks (processes).

Public Member Functions

 ClassName ("UPstream")
 Declare name of the class and its debug switch.
 UPstream (const commsTypes commsType) noexcept
 Construct for given communication type.
commsTypes commsType () const noexcept
 Get the communications type of the stream.
commsTypes commsType (const commsTypes ct) noexcept
 Set the communications type of the stream.

Static Public Member Functions

static bool usingTopoControl (UPstream::topoControls ctrl) noexcept
 Test for selection of given topology-aware routine.
static constexpr int commGlobal () noexcept
 Communicator for all ranks, irrespective of any local worlds.
static constexpr int commSelf () noexcept
 Communicator within the current rank only.
static int commConstWorld () noexcept
 Communicator for all ranks (respecting any local worlds).
static label commWorld () noexcept
 Communicator for all ranks (respecting any local worlds).
static label commWorld (const label communicator) noexcept
 Set world communicator. Negative values are a no-op.
static label commWarn (const label communicator) noexcept
 Alter communicator debugging setting. Warns for use of any communicator differing from specified. Negative values disable.
static label nComms () noexcept
 Number of currently defined communicators.
static void printCommTree (int communicator, bool linear=false)
 Debugging: print the communication tree.
static int commInterNode () noexcept
 Communicator between nodes/hosts (respects any local worlds).
static int commLocalNode () noexcept
 Communicator within the node/host (respects any local worlds).
static bool hasNodeCommunicators () noexcept
 Both inter-node and local-node communicators have been created.
static bool usingNodeComms (const int communicator)
 True if node topology-aware routines have been enabled, it is running in parallel, the starting point is the world-communicator and it is not an odd corner case (ie, all processes on one node, all processes on different nodes).
static label newCommunicator (const label parent, const labelRange &subRanks, const bool withComponents=true)
 Create new communicator with sub-ranks on the parent communicator.
static label newCommunicator (const label parent, const labelUList &subRanks, const bool withComponents=true)
 Create new communicator with sub-ranks on the parent communicator.
static label dupCommunicator (const label parent)
 Duplicate the parent communicator.
static label splitCommunicator (const label parent, const int colour, const bool two_step=true)
 Allocate a new communicator by splitting the parent communicator on the given colour.
static void freeCommunicator (const label communicator, const bool withComponents=true)
 Free a previously allocated communicator.
static int baseProcNo (label comm, int procID)
 Return physical processor number (i.e. processor number in worldComm) given communicator and processor.
static label procNo (const label comm, const int baseProcID)
 Return processor number in communicator (given physical processor number) (= reverse of baseProcNo).
static label procNo (const label comm, const label currentComm, const int currentProcID)
 Return processor number in communicator (given processor number and communicator).
static void addValidParOptions (HashTable< string > &validParOptions)
 Add the valid option this type of communications library adds/requires on the command line.
static bool init (int &argc, char **&argv, const bool needsThread)
 Initialisation function called from main.
static bool initNull ()
 Special purpose initialisation function.
static void barrier (const int communicator, UPstream::Request *req=nullptr)
 Impose a synchronisation barrier (optionally non-blocking).
static void send_done (const int toProcNo, const int communicator, const int tag=UPstream::msgType()+1970)
 Impose a point-to-point synchronisation barrier by sending a zero-byte "done" message to given rank.
static int wait_done (const int fromProcNo, const int communicator, const int tag=UPstream::msgType()+1970)
 Impose a point-to-point synchronisation barrier by receiving a zero-byte "done" message from given rank.
static std::pair< int, int64_t > probeMessage (const UPstream::commsTypes commsType, const int fromProcNo, const int tag=UPstream::msgType(), const int communicator=worldComm)
 Probe for an incoming message.
static void printNodeCommsControl (Ostream &os)
 Report the node-communication settings.
static void printTopoControl (Ostream &os)
 Report the topology routines settings.
static label nRequests () noexcept
 Number of outstanding requests (on the internal list of requests).
static void resetRequests (const label n)
 Truncate outstanding requests to given length, which is expected to be in the range [0 to nRequests()].
static void addRequest (UPstream::Request &req)
 Transfer the (wrapped) MPI request to the internal global list and invalidate the parameter (ignores null requests).
static void cancelRequest (const label i)
 Non-blocking comms: cancel and free outstanding request. Corresponds to MPI_Cancel() + MPI_Request_free().
static void cancelRequest (UPstream::Request &req)
 Non-blocking comms: cancel and free outstanding request. Corresponds to MPI_Cancel() + MPI_Request_free().
static void cancelRequests (UList< UPstream::Request > &requests)
 Non-blocking comms: cancel and free outstanding requests. Corresponds to MPI_Cancel() + MPI_Request_free().
static void removeRequests (label pos, label len=-1)
 Non-blocking comms: cancel/free outstanding requests (from position onwards) and remove from internal list of requests. Corresponds to MPI_Cancel() + MPI_Request_free().
static void freeRequest (UPstream::Request &req)
 Non-blocking comms: free outstanding request. Corresponds to MPI_Request_free().
static void freeRequests (UList< UPstream::Request > &requests)
 Non-blocking comms: free outstanding requests. Corresponds to MPI_Request_free().
static void waitRequests (label pos, label len=-1)
 Wait until all requests (from position onwards) have finished. Corresponds to MPI_Waitall().
static void waitRequests (UList< UPstream::Request > &requests)
 Wait until all requests have finished. Corresponds to MPI_Waitall().
static bool waitAnyRequest (label pos, label len=-1)
 Wait until any request (from position onwards) has finished. Corresponds to MPI_Waitany().
static bool waitSomeRequests (label pos, label len=-1, DynamicList< int > *indices=nullptr)
 Wait until some requests (from position onwards) have finished. Corresponds to MPI_Waitsome().
static bool waitSomeRequests (UList< UPstream::Request > &requests, DynamicList< int > *indices=nullptr)
 Wait until some requests have finished. Corresponds to MPI_Waitsome().
static int waitAnyRequest (UList< UPstream::Request > &requests)
 Wait until any request has finished and return its index. Corresponds to MPI_Waitany().
static void waitRequest (const label i)
 Wait until request i has finished. Corresponds to MPI_Wait().
static void waitRequest (UPstream::Request &req)
 Wait until specified request has finished. Corresponds to MPI_Wait().
static bool activeRequest (const label i)
 Is request i active (!= MPI_REQUEST_NULL)?
static bool activeRequest (const UPstream::Request &req)
 Is request active (!= MPI_REQUEST_NULL)?
static bool finishedRequest (const label i)
 Non-blocking comms: has request i finished? Corresponds to MPI_Test().
static bool finishedRequest (UPstream::Request &req)
 Non-blocking comms: has request finished? Corresponds to MPI_Test().
static bool finishedRequests (label pos, label len=-1)
 Non-blocking comms: have all requests (from position onwards) finished? Corresponds to MPI_Testall().
static bool finishedRequests (UList< UPstream::Request > &requests)
 Non-blocking comms: have all requests finished? Corresponds to MPI_Testall().
static bool finishedRequestPair (label &req0, label &req1)
 Non-blocking comms: have both requests finished? Corresponds to pair of MPI_Test().
static void waitRequestPair (label &req0, label &req1)
 Non-blocking comms: wait for both requests to finish. Corresponds to pair of MPI_Wait().
static bool parRun (const bool on) noexcept
 Set as parallel run on/off.
static bool & parRun () noexcept
 Test if this a parallel run.
static bool haveThreads () noexcept
 Have support for threads.
static constexpr int masterNo () noexcept
 Relative rank for the master process - is always 0.
static label nProcs (const label communicator=worldComm)
 Number of ranks in parallel run (for given communicator). It is 1 for serial run.
static int myProcNo (const label communicator=worldComm)
 Rank of this process in the communicator (starting from masterNo()). Negative if the process is not a rank in the communicator.
static bool master (const label communicator=worldComm)
 True if process corresponds to the master rank in the communicator.
static bool is_rank (const label communicator=worldComm)
 True if process corresponds to any rank (master or sub-rank) in the given communicator.
static bool is_subrank (const label communicator=worldComm)
 True if process corresponds to a sub-rank in the given communicator.
static bool is_parallel (const label communicator=worldComm)
 True if parallel algorithm or exchange is required.
static int numNodes () noexcept
 The number of shared/host nodes in the (const) world communicator.
static label parent (int communicator)
 The parent communicator.
static List< int > & procID (int communicator)
 The list of ranks within a given communicator.
static bool sameProcs (int communicator1, int communicator2)
 Test for communicator equality.
template<typename T1, typename = std::void_t <std::enable_if_t<std::is_integral_v<T1>>>>
static bool sameProcs (int communicator, const UList< T1 > &procs)
 Test equality of communicator procs with the given list of ranks. Includes a guard for the communicator index.
template<typename T1, typename T2, typename = std::void_t < std::enable_if_t<std::is_integral_v<T1>>, std::enable_if_t<std::is_integral_v<T2>> >>
static bool sameProcs (const UList< T1 > &procs1, const UList< T2 > &procs2)
 Test the equality of two lists of ranks.
static const wordList & allWorlds () noexcept
 All worlds.
static const labelList & worldIDs () noexcept
 The indices into allWorlds for all processes.
static label myWorldID ()
 My worldID.
static const word & myWorld ()
 My world.
static rangeType allProcs (const label communicator=worldComm)
 Range of process indices for all processes.
static rangeType subProcs (const label communicator=worldComm)
 Range of process indices for sub-processes.
static const List< int > & interNode_offsets ()
 Processor offsets corresponding to the inter-node communicator.
static const rangeType & localNode_parentProcs ()
 The range (start/size) of the commLocalNode ranks in terms of the (const) world communicator processors.
static const commsStructList & linearCommunication (int communicator)
 Linear communication schedule (special case) for given communicator.
static const commsStructList & treeCommunication (int communicator)
 Tree communication schedule (standard case) for given communicator.
static const commsStructList & whichCommunication (const int communicator, bool linear=false)
 Communication schedule for all-to-master (proc 0) as linear/tree/none with switching based on UPstream::nProcsSimpleSum, the is_parallel() state and the optional linear parameter.
static int & msgType () noexcept
 Message tag of standard messages.
static int msgType (int val) noexcept
 Set the message tag for standard messages.
static int incrMsgType (int val=1) noexcept
 Increment the message tag for standard messages.
static void shutdown (int errNo=0)
 Shutdown (finalize) MPI as required.
static void abort (int errNo=1)
 Call MPI_Abort with no other checks or cleanup.
static void exit (int errNo=1)
 Shutdown (finalize) MPI as required and exit program with errNo.
static void allToAll (const UList< int32_t > &sendData, UList< int32_t > &recvData, const int communicator=UPstream::worldComm)
 Exchange int32_t data with all ranks in communicator.
static void allToAllConsensus (const UList< int32_t > &sendData, UList< int32_t > &recvData, const int tag, const int communicator=UPstream::worldComm)
 Exchange non-zero int32_t data between ranks [NBX].
static void allToAllConsensus (const Map< int32_t > &sendData, Map< int32_t > &recvData, const int tag, const int communicator=UPstream::worldComm)
 Exchange int32_t data between ranks [NBX].
static Map< int32_t > allToAllConsensus (const Map< int32_t > &sendData, const int tag, const int communicator=UPstream::worldComm)
 Exchange int32_t data between ranks [NBX].
static void allToAll (const UList< int64_t > &sendData, UList< int64_t > &recvData, const int communicator=UPstream::worldComm)
 Exchange int64_t data with all ranks in communicator.
static void allToAllConsensus (const UList< int64_t > &sendData, UList< int64_t > &recvData, const int tag, const int communicator=UPstream::worldComm)
 Exchange non-zero int64_t data between ranks [NBX].
static void allToAllConsensus (const Map< int64_t > &sendData, Map< int64_t > &recvData, const int tag, const int communicator=UPstream::worldComm)
 Exchange int64_t data between ranks [NBX].
static Map< int64_t > allToAllConsensus (const Map< int64_t > &sendData, const int tag, const int communicator=UPstream::worldComm)
 Exchange int64_t data between ranks [NBX].
template<class Type>
static void mpiGather (const Type *sendData, Type *recvData, int count, const int communicator=UPstream::worldComm)
 Receive identically-sized (contiguous) data from all ranks.
template<class Type>
static void mpiScatter (const Type *sendData, Type *recvData, int count, const int communicator=UPstream::worldComm)
 Send identically-sized (contiguous) data to all ranks.
template<class Type>
static void mpiAllGather (Type *allData, int count, const int communicator=UPstream::worldComm)
 Gather/scatter identically-sized data.
template<class Type>
static void mpiGatherv (const Type *sendData, int sendCount, Type *recvData, const UList< int > &recvCounts, const UList< int > &recvOffsets, const int communicator=UPstream::worldComm)
 Receive variable length data from all ranks.
template<class Type>
static void mpiScatterv (const Type *sendData, const UList< int > &sendCounts, const UList< int > &sendOffsets, Type *recvData, int recvCount, const int communicator=UPstream::worldComm)
 Send variable length data to all ranks.
template<class T>
static List< T > allGatherValues (const T &localValue, const int communicator=UPstream::worldComm)
 Allgather individual values into list locations.
template<class T>
static List< T > listGatherValues (const T &localValue, const int communicator=UPstream::worldComm)
 Gather individual values into list locations.
template<class T>
static T listScatterValues (const UList< T > &allValues, const int communicator=UPstream::worldComm)
 Scatter individual values from list locations.
template<class Type>
static bool broadcast (Type *buffer, std::streamsize count, const int communicator, const int root=UPstream::masterNo())
 Broadcast buffer contents (contiguous types) to all ranks (default: from rank=0). The sizes must match on all processes.
template<class Type, unsigned N>
static bool broadcast (FixedList< Type, N > &list, const int communicator, const int root=UPstream::masterNo())
 Broadcast fixed-list content (contiguous types) to all ranks (default: from rank=0). The sizes must match on all processes.
template<class T>
static void mpiReduce (T values[], int count, const UPstream::opCodes opCodeId, const int communicator)
 MPI_Reduce (blocking) for known operators.
template<UPstream::opCodes opCode, class T>
static void mpiReduce (T values[], int count, const int communicator)
 MPI_Reduce (blocking) for known operators.
template<class T>
static void mpiAllReduce (T values[], int count, const UPstream::opCodes opCodeId, const int communicator)
 MPI_Allreduce (blocking) for known operators.
template<UPstream::opCodes opCode, class T>
static void mpiAllReduce (T values[], int count, const int communicator)
 MPI_Allreduce (blocking) for known operators.
template<class T>
static void mpiAllReduce (T values[], int count, const UPstream::opCodes opCodeId, const int communicator, UPstream::Request &req)
 MPI_Iallreduce (non-blocking) for known operators.
template<UPstream::opCodes opCode, class T>
static void mpiAllReduce (T values[], int count, const int communicator, UPstream::Request &req)
 MPI_Iallreduce (non-blocking) for known operators.
template<Foam::UPstream::opCodes opCode, class T>
static void mpiScan (T values[], int count, const int communicator, const bool exclusive=false)
 Inclusive/exclusive scan (in-place).
template<Foam::UPstream::opCodes opCode, class T>
static T mpiScan (const T &localValue, const int communicator, const bool exclusive=false)
 Inclusive/exclusive scan returning the result. In exclusive mode, the degenerate value on rank=0 has no meaning but is treated like non-exclusive mode (ie, original values).
template<class T>
static void mpiScan_min (T values[], int count, const int communicator, const bool exclusive=false)
 Inclusive/exclusive min scan (in-place).
template<class T>
static void mpiExscan_min (T values[], int count, const int communicator)
 Exclusive min scan (in-place).
template<class T>
static T mpiScan_min (const T &value, const int communicator, const bool exclusive=false)
 Inclusive/exclusive min scan returning result.
template<class T>
static T mpiExscan_min (const T &value, const int communicator)
 Exclusive min scan returning result.
template<class T>
static void mpiScan_max (T values[], int count, const int communicator, const bool exclusive=false)
 Inclusive/exclusive max scan (in-place).
template<class T>
static void mpiExscan_max (T values[], int count, const int communicator)
 Exclusive max scan (in-place).
template<class T>
static T mpiScan_max (const T &value, const int communicator, const bool exclusive=false)
 Inclusive/exclusive max scan returning result.
template<class T>
static T mpiExscan_max (const T &value, const int communicator)
 Exclusive max scan returning result.
template<class T>
static void mpiScan_sum (T values[], int count, const int communicator, const bool exclusive=false)
 Inclusive/exclusive sum scan (in-place).
template<class T>
static void mpiExscan_sum (T values[], int count, const int communicator)
 Exclusive sum scan (in-place).
template<class T>
static T mpiScan_sum (const T &value, const int communicator, const bool exclusive=false)
 Inclusive/exclusive sum scan returning result.
template<class T>
static T mpiExscan_sum (const T &value, const int communicator)
 Exclusive sum scan returning result.
static void reduceAnd (bool &value, const int communicator=worldComm)
 Logical (and) reduction (MPI_AllReduce).
static void reduceOr (bool &value, const int communicator=worldComm)
 Logical (or) reduction (MPI_AllReduce).
static int find_first (bool condition, int communicator)
 Locate the first rank for which the condition is true, or -1 if no ranks satisfy the condition.
static int find_last (bool condition, int communicator)
 Locate the last rank for which the condition is true, or -1 if no ranks satisfy the condition.
static label allocateCommunicator (const label parent, const labelRange &subRanks, const bool withComponents=true)
static label allocateCommunicator (const label parent, const labelUList &subRanks, const bool withComponents=true)
static label commInterHost () noexcept
 Communicator between nodes (respects any local worlds).
static label commIntraHost () noexcept
 Communicator within the node (respects any local worlds).
static void waitRequests ()
 Wait for all requests to finish.
template<class Type>
static void gather (const Type *send, int count, Type *recv, const UList< int > &counts, const UList< int > &offsets, const int comm=UPstream::worldComm)
template<class Type>
static void scatter (const Type *send, const UList< int > &counts, const UList< int > &offsets, Type *recv, int count, const int comm=UPstream::worldComm)

Static Public Attributes

static const Enum< commsTypes > commsTypeNames
 Enumerated names for the communication types.
static int nodeCommsControl_
 Use of host/node topology-aware routines.
static int nodeCommsMin_
 Minimum number of nodes before topology-aware routines are enabled.
static int topologyControl_
 Selection of topology-aware routines as a bitmask combination of the topoControls enumerations.
static bool floatTransfer
 Should compact transfer be used in which floats replace doubles reducing the bandwidth requirement at the expense of some loss in accuracy.
static int nProcsSimpleSum
 Number of processors to change from linear to tree communication.
static int nProcsNonblockingExchange
 Number of processors to change to nonBlocking consensual exchange (NBX). Ignored for zero or negative values.
static int nPollProcInterfaces
 Number of polling cycles in processor updates.
static commsTypes defaultCommsType
 Default commsType.
static int maxCommsSize
 Optional maximum message size (bytes).
static int tuning_NBX_
 Tuning parameters for non-blocking exchange (NBX).
static const int mpiBufferSize
 MPI buffer-size (bytes).
static label worldComm
 Communicator for all ranks. May differ from commGlobal() if local worlds are in use.
static label warnComm
 Debugging: warn for use of any communicator differing from warnComm.

Static Protected Member Functions

static bool mpi_broadcast (void *buf, std::streamsize count, const UPstream::dataTypes dataTypeId, const int communicator, const int root=0)
 Broadcast buffer contents to all ranks (default: from rank=0). The sizes must match on all processes.
static void mpi_reduce (void *values, int count, const UPstream::dataTypes dataTypeId, const UPstream::opCodes opCodeId, const int communicator, UPstream::Request *req=nullptr)
 In-place reduction of values with result on rank 0.
static void mpi_allreduce (void *values, int count, const UPstream::dataTypes dataTypeId, const UPstream::opCodes opCodeId, const int communicator, UPstream::Request *req=nullptr)
 In-place reduction of values with same result on all ranks.
static void mpi_scan_reduce (void *values, int count, const UPstream::dataTypes dataTypeId, const UPstream::opCodes opCodeId, const int communicator, const bool exclusive)
 In-place scan/exscan reduction of values.
static bool mpi_send (const UPstream::commsTypes commsType, const void *buf, std::streamsize count, const UPstream::dataTypes dataTypeId, const int toProcNo, const int tag, const int communicator, UPstream::Request *req=nullptr, const UPstream::sendModes sendMode=UPstream::sendModes::normal)
 Send buffer contents of specified data type to given processor.
static std::streamsize mpi_receive (const UPstream::commsTypes commsType, void *buf, std::streamsize count, const UPstream::dataTypes dataTypeId, const int fromProcNo, const int tag, const int communicator, UPstream::Request *req=nullptr)
 Receive buffer contents of specified data type from given processor.
static void mpi_gather (const void *sendData, void *recvData, int count, const UPstream::dataTypes dataTypeId, const int communicator, UPstream::Request *req=nullptr)
 Receive identically-sized (contiguous) data from all ranks, placing the result on rank 0.
static void mpi_scatter (const void *sendData, void *recvData, int count, const UPstream::dataTypes dataTypeId, const int communicator, UPstream::Request *req=nullptr)
 Send identically-sized (contiguous) data from rank 0 to all other ranks.
static void mpi_allgather (void *allData, int count, const UPstream::dataTypes dataTypeId, const int communicator, UPstream::Request *req=nullptr)
 Gather/scatter identically-sized data.
static void mpi_gatherv (const void *sendData, int sendCount, void *recvData, const UList< int > &recvCounts, const UList< int > &recvOffsets, const UPstream::dataTypes dataTypeId, const int communicator)
 Receive variable length data from all ranks, placing the result on rank 0. (caution: known to scale poorly).
static void mpi_scatterv (const void *sendData, const UList< int > &sendCounts, const UList< int > &sendOffsets, void *recvData, int recvCount, const UPstream::dataTypes dataTypeId, const int communicator)
 Send variable length data from rank 0 to all ranks. (caution: known to scale poorly).

Detailed Description

Inter-processor communications stream.

Source files

Definition at line 68 of file UPstream.H.

Member Typedef Documentation

◆ rangeType

typedef IntRange<int> rangeType

Int ranges are used for MPI ranks (processes).

Definition at line 75 of file UPstream.H.

Member Enumeration Documentation

◆ commsTypes

enum class commsTypes : char
strong

Communications types.

Enumerator
buffered 

"buffered" : (MPI_Bsend, MPI_Recv)

scheduled 

"scheduled" (MPI standard) : (MPI_Send, MPI_Recv)

nonBlocking 

"nonBlocking" (immediate) : (MPI_Isend, MPI_Irecv)

blocking 

compatibility name for buffered

Definition at line 80 of file UPstream.H.

◆ sendModes

enum class sendModes : char
strong

Different MPI-send modes (ignored for commsTypes::buffered).

Enumerator
normal 

(MPI_Send, MPI_Isend)

sync 

(MPI_Ssend, MPI_Issend)

Definition at line 97 of file UPstream.H.

◆ dataTypes

enum class dataTypes : char
strong

Mapping of some fundamental and aggregate types to MPI data types.

Enumerator
Basic_begin 

(internal use) begin marker [basic/all types]

type_byte 

byte, char, unsigned char, ...

type_int16 
type_int32 
type_int64 
type_uint16 
type_uint32 
type_uint64 
type_float 
type_double 
type_long_double 
Basic_end 

(internal use) end marker [basic types]

User_begin 

(internal use) begin marker [user types]

type_3float 

3*float (eg, floatVector)

type_3double 

3*double (eg, doubleVector)

type_6float 

6*float (eg, floatSymmTensor, complex vector)

type_6double 

6*double (eg, doubleSymmTensor, complex vector)

type_9float 

9*float (eg, floatTensor)

type_9double 

9*double (eg, doubleTensor)

invalid 

invalid type (NULL)

User_end 

(internal use) end marker [user types]

DataTypes_end 

(internal use) end marker [all types]

Definition at line 106 of file UPstream.H.

◆ opCodes

enum class opCodes : char
strong

Mapping of some MPI op codes.

Currently excluding min/max location until they are needed

Enumerator
Basic_begin 

(internal use) begin marker [reduce/window types]

op_min 

min(x,y)

op_max 

max(x,y)

op_sum 

(x + y)

op_prod 

(x * y)

op_bool_and 

Logical and.

op_bool_or 

Logical or.

op_bool_xor 

Logical xor.

op_bit_and 

Bit-wise and for (unsigned) integral types.

op_bit_or 

Bit-wise or for (unsigned) integral types.

op_bit_xor 

Bit-wise xor for (unsigned) integral types.

Basic_end 

(internal use) end marker [reduce types]

Extra_begin 

(internal use) begin marker [window types]

op_replace 

Replace (window only).

op_no_op 

No-op (window only).

invalid 

invalid op (NULL)

Extra_end 

(internal use) end marker [window types]

OpCodes_end 

(internal use) end marker [all types]

Definition at line 148 of file UPstream.H.

◆ topoControls

enum class topoControls : int
strong

Some bit masks corresponding to topology controls.

These selectively enable topology-aware handling

Enumerator
broadcast 

broadcast [MPI]

reduce 

reduce/all-reduce [MPI]

gather 

gather/all-gather [MPI]

combine 

combine/gather (reduction) [manual algorithm]

mapGather 

mapGather (reduction) [manual algorithm]

gatherList 

gatherList [manual algorithm]

Definition at line 187 of file UPstream.H.

Constructor & Destructor Documentation

◆ UPstream()

Member Function Documentation

◆ mpi_broadcast()

bool mpi_broadcast ( void * buf,
std::streamsize count,
const UPstream::dataTypes dataTypeId,
const int communicator,
const int root = 0 )
staticprotected

Broadcast buffer contents to all ranks (default: from rank=0). The sizes must match on all processes.

For non-parallel : do nothing.

Note
The method uses a void pointer and the required data type (as per MPI). This means it should almost never be called directly but always via a compile-time checked caller.
Returns
True on success
Parameters
rootThe broadcast root (usually 0 == master)

Definition at line 25 of file UPstreamBroadcast.C.

◆ mpi_reduce()

void mpi_reduce ( void * values,
int count,
const UPstream::dataTypes dataTypeId,
const UPstream::opCodes opCodeId,
const int communicator,
UPstream::Request * req = nullptr )
staticprotected

In-place reduction of values with result on rank 0.

Includes internal parallel guard and checks on data types, opcode.

Note
The method uses a void pointer and the required data type (as per MPI). This means it should almost never be called directly but always via a compile-time checked caller.
Parameters
[out]reqrequest information (for non-blocking)

Definition at line 56 of file UPstreamReduce.C.

◆ mpi_allreduce()

void mpi_allreduce ( void * values,
int count,
const UPstream::dataTypes dataTypeId,
const UPstream::opCodes opCodeId,
const int communicator,
UPstream::Request * req = nullptr )
staticprotected

In-place reduction of values with same result on all ranks.

Includes internal parallel guard and checks on data types, opcode.

Note
The method uses a void pointer and the required data type (as per MPI). This means it should almost never be called directly but always via a compile-time checked caller.
Parameters
[out]reqrequest information (for non-blocking)

Definition at line 68 of file UPstreamReduce.C.

Referenced by find_first(), and find_last().

Here is the caller graph for this function:

◆ mpi_scan_reduce()

void mpi_scan_reduce ( void * values,
int count,
const UPstream::dataTypes dataTypeId,
const UPstream::opCodes opCodeId,
const int communicator,
const bool exclusive )
staticprotected

In-place scan/exscan reduction of values.

Includes internal parallel guard and checks on data types, opcode.

Note
The method uses a void pointer and the required data type (as per MPI). This means it should almost never be called directly but always via a compile-time checked caller.
Parameters
exclusive — Use exclusive scan

Definition at line 80 of file UPstreamReduce.C.

◆ mpi_send()

bool mpi_send ( const UPstream::commsTypes commsType,
const void * buf,
std::streamsize count,
const UPstream::dataTypes dataTypeId,
const int toProcNo,
const int tag,
const int communicator,
UPstream::Request * req = nullptr,
const UPstream::sendModes sendMode = UPstream::sendModes::normal )
staticprotected

Send buffer contents of specified data type to given processor.

Note
The method uses a void pointer and the required data type (as per MPI). This means it should almost never be called directly but always via a compile-time checked caller.
Returns
True on success (or Fatal)
Parameters
[out] req — request information (for non-blocking)
sendMode — optional send mode (normal | sync)

Definition at line 35 of file UOPstreamWrite.C.

References commsType(), and NotImplemented.

Here is the call graph for this function:

◆ mpi_receive()

std::streamsize mpi_receive ( const UPstream::commsTypes commsType,
void * buf,
std::streamsize count,
const UPstream::dataTypes dataTypeId,
const int fromProcNo,
const int tag,
const int communicator,
UPstream::Request * req = nullptr )
staticprotected

Receive buffer contents of specified data type from given processor.

Note
The method uses a void pointer and the required data type (as per MPI). This means it should almost never be called directly but always via a compile-time checked caller. The commsType will be ignored if UPstream::Request is specified.
Returns
number of elements read. May change in the future
Parameters
[out]reqrequest information (for non-blocking)

Definition at line 26 of file UIPstreamRead.C.

References commsType(), and NotImplemented.

Here is the call graph for this function:

◆ mpi_gather()

void mpi_gather ( const void * sendData,
void * recvData,
int count,
const UPstream::dataTypes dataTypeId,
const int communicator,
UPstream::Request * req = nullptr )
staticprotected

Receive identically-sized (contiguous) data from all ranks, placing the result on rank 0.

Includes internal parallel guard. For non-parallel, does not copy any data. If needed, this must be done by the caller.

Parameters
sendData — All ranks: location of individual value to send
recvData — Master: receive buffer with all values. Other ranks: ignored
count — Number of send/recv data per rank. Globally consistent!
[out] req — request information (for non-blocking)

Definition at line 25 of file UPstreamGatherScatter.C.

◆ mpi_scatter()

void mpi_scatter ( const void * sendData,
void * recvData,
int count,
const UPstream::dataTypes dataTypeId,
const int communicator,
UPstream::Request * req = nullptr )
staticprotected

Send identically-sized (contiguous) data from rank 0 to all other ranks.

Includes internal parallel guard.

Parameters
sendDataMaster: send buffer with all values. Other ranks: ignored
recvDataAll ranks: location to receive individual value
countNumber of send/recv data per rank. Globally consistent!
[out]reqrequest information (for non-blocking)

Definition at line 38 of file UPstreamGatherScatter.C.

◆ mpi_allgather()

void mpi_allgather ( void * allData,
int count,
const UPstream::dataTypes dataTypeId,
const int communicator,
UPstream::Request * req = nullptr )
staticprotected

Gather/scatter identically-sized data.

Send data from proc slot, receive into all slots

Parameters
allDataAll ranks: the base of the data locations
countNumber of send/recv data per rank. Globally consistent!
[out]reqrequest information (for non-blocking)

Definition at line 51 of file UPstreamGatherScatter.C.

◆ mpi_gatherv()

void mpi_gatherv ( const void * sendData,
int sendCount,
void * recvData,
const UList< int > & recvCounts,
const UList< int > & recvOffsets,
const UPstream::dataTypes dataTypeId,
const int communicator )
staticprotected

Receive variable length data from all ranks, placing the result on rank 0. (caution: known to scale poorly).

Parameters
sendCountIgnored on master if recvCount[0] == 0
recvDataIgnored on non-root rank
recvCountsIgnored on non-root rank
recvOffsetsIgnored on non-root rank

Definition at line 65 of file UPstreamGatherScatter.C.

◆ mpi_scatterv()

void mpi_scatterv ( const void * sendData,
const UList< int > & sendCounts,
const UList< int > & sendOffsets,
void * recvData,
int recvCount,
const UPstream::dataTypes dataTypeId,
const int communicator )
staticprotected

Send variable length data from rank 0 to all ranks. (caution: known to scale poorly).

Parameters
sendDataIgnored on non-root rank
sendCountsIgnored on non-root rank
sendOffsetsIgnored on non-root rank

Definition at line 79 of file UPstreamGatherScatter.C.

◆ ClassName()

ClassName ( "UPstream" )

Declare name of the class and its debug switch.

◆ usingTopoControl()

bool usingTopoControl ( UPstream::topoControls ctrl)
inlinestaticnoexcept

Test for selection of given topology-aware routine.

Definition at line 1014 of file UPstream.H.

References topologyControl_.

◆ commGlobal()

constexpr int commGlobal ( )
inlinestaticconstexprnoexcept

Communicator for all ranks, irrespective of any local worlds.

This value never changes during a simulation.

Definition at line 1081 of file UPstream.H.

References Foam::noexcept.

Referenced by multiWorldConnections::createComms(), fileOperation::getManagedComm(), myWorld(), myWorldID(), and syncObjects::sync().

Here is the caller graph for this function:

◆ commSelf()

constexpr int commSelf ( )
inlinestaticconstexprnoexcept

Communicator within the current rank only.

This value never changes during a simulation.

Definition at line 1088 of file UPstream.H.

References Foam::noexcept.

Referenced by Foam::getCommPattern(), fileOperation::getManagedComm(), UIPstream::read(), and UOPstreamBase::UOPstreamBase().

Here is the caller graph for this function:

◆ commConstWorld()

int commConstWorld ( )
inlinestaticnoexcept

Communicator for all ranks (respecting any local worlds).

This value never changes after startup, unlike commWorld() which can be temporarily overridden.

Definition at line 1096 of file UPstream.H.

References Foam::noexcept.

◆ commWorld() [1/2]

◆ commWorld() [2/2]

label commWorld ( const label communicator)
inlinestaticnoexcept

Set world communicator. Negative values are a no-op.

Returns
old world communicator index

Definition at line 1108 of file UPstream.H.

References worldComm.

◆ commWarn()

◆ nComms()

label nComms ( )
inlinestaticnoexcept

Number of currently defined communicators.

Definition at line 1132 of file UPstream.H.

References Foam::noexcept.

◆ printCommTree()

void printCommTree ( int communicator,
bool linear = false )
static

Debugging: print the communication tree.

Definition at line 736 of file UPstream.C.

References Foam::Info, master(), and whichCommunication().

Here is the call graph for this function:

◆ commInterNode()

int commInterNode ( )
inlinestaticnoexcept

Communicator between nodes/hosts (respects any local worlds).

Definition at line 1145 of file UPstream.H.

References Foam::noexcept.

Referenced by commInterHost().

Here is the caller graph for this function:

◆ commLocalNode()

int commLocalNode ( )
inlinestaticnoexcept

Communicator within the node/host (respects any local worlds).

Definition at line 1153 of file UPstream.H.

References Foam::noexcept.

Referenced by commIntraHost().

Here is the caller graph for this function:

◆ hasNodeCommunicators()

bool hasNodeCommunicators ( )
inlinestaticnoexcept

Both inter-node and local-node communicators have been created.

Definition at line 1161 of file UPstream.H.

References Foam::noexcept.

◆ usingNodeComms()

bool usingNodeComms ( const int communicator)
static

True if node topology-aware routines have been enabled, the run is parallel, the starting point is the world communicator, and it is not a degenerate corner case (i.e., all processes on a single node, or every process on a different node).

Definition at line 751 of file UPstream.C.

References nodeCommsControl_, and nodeCommsMin_.

Referenced by printNodeCommsControl(), and UPstream::commsStruct::reset().

Here is the caller graph for this function:

◆ newCommunicator() [1/2]

Foam::label newCommunicator ( const label parent,
const labelRange & subRanks,
const bool withComponents = true )
static

Create new communicator with sub-ranks on the parent communicator.

Parameters
parentThe parent communicator
subRanksThe contiguous sub-ranks of parent to use
withComponentsCall allocateCommunicatorComponents()

Definition at line 271 of file UPstream.C.

References IntRange< IntType >::contains(), IntRange< IntType >::empty(), Foam::endl(), masterNo(), Foam::nl, parRun(), Foam::Perr, IntRange< IntType >::size(), and IntRange< IntType >::start().

Referenced by eagerGAMGProcAgglomeration::agglomerate(), manualGAMGProcAgglomeration::agglomerate(), masterCoarsestGAMGProcAgglomeration::agglomerate(), procFacesGAMGProcAgglomeration::agglomerate(), allocateCommunicator(), allocateCommunicator(), UPstream::communicator::communicator(), UPstream::communicator::communicator(), Foam::getCommPattern(), UPstream::communicator::reset(), and UPstream::communicator::reset().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ newCommunicator() [2/2]

Foam::label newCommunicator ( const label parent,
const labelUList & subRanks,
const bool withComponents = true )
static

Create new communicator with sub-ranks on the parent communicator.

Parameters
parentThe parent communicator
subRanksThe sub-ranks of parent to use (ignore negative values)
withComponentsCall allocateCommunicatorComponents()

Definition at line 321 of file UPstream.C.

References UList< T >::contains(), UList< T >::empty(), Foam::endl(), Foam::flatOutput(), masterNo(), Foam::nl, parRun(), Foam::Perr, and UList< T >::size().

Here is the call graph for this function:

◆ dupCommunicator()

Foam::label dupCommunicator ( const label parent)
static

Duplicate the parent communicator.

Always calls dupCommunicatorComponents() internally

Parameters
parentThe parent communicator

Definition at line 400 of file UPstream.C.

References Foam::endl(), Foam::exit(), Foam::FatalError, FatalErrorInFunction, FOAM_UNLIKELY, parRun(), and Foam::Perr.

Referenced by UPstream::communicator::duplicate().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ splitCommunicator()

Foam::label splitCommunicator ( const label parent,
const int colour,
const bool two_step = true )
static

Allocate a new communicator by splitting the parent communicator on the given colour.

Always calls splitCommunicatorComponents() internally

Parameters
parentThe parent communicator
colourThe colouring to select which ranks to include. Negative values correspond to 'ignore'
two_stepUse MPI_Allgather+MPI_Comm_create_group vs MPI_Comm_split

Definition at line 438 of file UPstream.C.

References Foam::endl(), Foam::exit(), Foam::FatalError, FatalErrorInFunction, FOAM_UNLIKELY, parRun(), and Foam::Perr.

Referenced by UPstream::communicator::split().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ freeCommunicator()

◆ baseProcNo()

int baseProcNo ( label comm,
int procID )
static

Return physical processor number (i.e. processor number in worldComm) given communicator and processor.

Definition at line 657 of file UPstream.C.

References parent(), and procID().

Referenced by procNo().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ procNo() [1/2]

Foam::label procNo ( const label comm,
const int baseProcID )
static

Return processor number in communicator (given physical processor number) (= reverse of baseProcNo).

Definition at line 670 of file UPstream.C.

References parent(), procID(), and procNo().

Referenced by procNo(), and procNo().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ procNo() [2/2]

Foam::label procNo ( const label comm,
const label currentComm,
const int currentProcID )
static

Return processor number in communicator (given processor number and communicator).

Definition at line 686 of file UPstream.C.

References baseProcNo(), and procNo().

Here is the call graph for this function:

◆ addValidParOptions()

void addValidParOptions ( HashTable< string > & validParOptions)
static

Add the valid option this type of communications library adds/requires on the command line.

Definition at line 26 of file UPstream.C.

◆ init()

bool init ( int & argc,
char **& argv,
const bool needsThread )
static

Initialisation function called from main.

Spawns sub-processes and initialises inter-communication

Definition at line 40 of file UPstream.C.

References Foam::endl(), Foam::exit(), Foam::FatalError, and FatalErrorInFunction.

Referenced by UPstream::commsStructList::init(), and ParRunControl::runPar().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ initNull()

bool initNull ( )
static

Special purpose initialisation function.

Performs a basic MPI_Init without any other setup. Only used for applications that need MPI communication when OpenFOAM is running in a non-parallel mode.

Note
Behaves as a no-op if MPI has already been initialized. Fatal if MPI has already been finalized.

Definition at line 30 of file UPstream.C.

References Foam::endl(), and WarningInFunction.

Referenced by zoltanRenumber::renumber().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ barrier()

void barrier ( const int communicator,
UPstream::Request * req = nullptr )
static

Impose a synchronisation barrier (optionally non-blocking).

Definition at line 106 of file UPstream.C.

◆ send_done()

void send_done ( const int toProcNo,
const int communicator,
const int tag = UPstream::msgType()+1970 )
static

Impose a point-to-point synchronisation barrier by sending a zero-byte "done" message to given rank.

A no-op for non-parallel

Parameters
toProcNoThe destination rank
communicatorThe communicator index (eg, UPstream::worldComm)
tagMessage tag (must match on receiving side)

Definition at line 110 of file UPstream.C.

◆ wait_done()

int wait_done ( const int fromProcNo,
const int communicator,
const int tag = UPstream::msgType()+1970 )
static

Impose a point-to-point synchronisation barrier by receiving a zero-byte "done" message from given rank.

A no-op for non-parallel

Returns
the source rank (useful for ANY_SOURCE messages) or -1 for non-parallel
Parameters
fromProcNoThe source rank (negative == ANY_SOURCE)
communicatorThe communicator index (eg, UPstream::worldComm)
tagMessage tag (must match on sending side)

Definition at line 119 of file UPstream.C.

◆ probeMessage()

std::pair< int, int64_t > probeMessage ( const UPstream::commsTypes commsType,
const int fromProcNo,
const int tag = UPstream::msgType(),
const int communicator = worldComm )
static

Probe for an incoming message.

Parameters
commsTypeNon-blocking or not
fromProcNoThe source rank (negative == ANY_SOURCE)
tagThe source message tag
communicatorThe communicator index
Returns
source rank and message size (bytes) and (-1, 0) on failure

Definition at line 131 of file UPstream.C.

References commsType().

Referenced by decomposedBlockData::readBlocks(), and decomposedBlockData::readBlocks().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ printNodeCommsControl()

void printNodeCommsControl ( Ostream & os)
static

Report the node-communication settings.

Definition at line 56 of file UPstream.C.

References nodeCommsControl_, nodeCommsMin_, nProcs(), numNodes(), os(), usingNodeComms(), and worldComm.

Referenced by argList::parse().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ printTopoControl()

void printTopoControl ( Ostream & os)
static

Report the topology routines settings.

Definition at line 100 of file UPstream.C.

References broadcast(), gather(), os(), PrintControl, Foam::reduce(), and topologyControl_.

Referenced by argList::parse().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ nRequests()

Foam::label nRequests ( )
staticnoexcept

Number of outstanding requests (on the internal list of requests).

Definition at line 54 of file UPstreamRequest.C.

References Foam::noexcept.

Referenced by volPointInterpolationAdjoint::addSeparated(), GAMGAgglomeration::agglomerateLduAddressing(), LduMatrix< Type, DType, LUType >::Amul(), lduMatrix::Amul(), motionSmootherAlgo::correctBoundaryConditions(), metisLikeDecomp::decomposeGeneral(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluate(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluate_if(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluateLocal(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluateSelected(), mapDistributeBase::exchangeMasks(), GAMGProcAgglomeration::globalCellCells(), lduPrimitiveMesh::globalCellCells(), calculatedProcessorFvPatchField< Type >::initEvaluate(), processorFaPatchField< Type >::initEvaluate(), processorFvPatchField< Type >::initEvaluate(), calculatedProcessorFvPatchField< Type >::initInterfaceMatrixUpdate(), calculatedProcessorGAMGInterfaceField::initInterfaceMatrixUpdate(), lduCalculatedProcessorField< Type >::initInterfaceMatrixUpdate(), processorFaPatchField< Type >::initInterfaceMatrixUpdate(), processorFaPatchField< Type >::initInterfaceMatrixUpdate(), processorFvPatchField< Type >::initInterfaceMatrixUpdate(), processorFvPatchField< Type >::initInterfaceMatrixUpdate(), processorGAMGInterfaceField::initInterfaceMatrixUpdate(), faceAreaPairGAMGAgglomeration::movePoints(), decomposedBlockData::readBlocks(), decomposedBlockData::readBlocks(), LduMatrix< Type, DType, LUType >::residual(), lduMatrix::residual(), mapDistributeBase::send(), motionSmootherAlgo::setDisplacementPatchFields(), GaussSeidelSmoother::smooth(), nonBlockingGaussSeidelSmoother::smooth(), symGaussSeidelSmoother::smooth(), TGaussSeidelSmoother< Type, DType, LUType >::smooth(), faMatrix< Type >::solve(), LUscalarMatrix::solve(), fvMatrix< Type >::solveSegregated(), syncTools::syncBoundaryFaceList(), syncTools::syncFaceList(), LduMatrix< Type, DType, 
LUType >::Tmul(), lduMatrix::Tmul(), distributedDILUPreconditioner::updateMatrixInterfaces(), OFstreamCollator::write(), and decomposedBlockData::writeBlocks().

Here is the caller graph for this function:

◆ resetRequests()

void resetRequests ( const label n)
static

Truncate outstanding requests to given length, which is expected to be in the range [0 to nRequests()].

A no-op for out-of-range values.

Definition at line 56 of file UPstreamRequest.C.

References n.

◆ addRequest()

void addRequest ( UPstream::Request & req)
static

Transfer the (wrapped) MPI request to the internal global list and invalidate the parameter (ignores null requests).

A no-op for non-parallel

Definition at line 58 of file UPstreamRequest.C.

◆ cancelRequest() [1/2]

void cancelRequest ( const label i)
static

Non-blocking comms: cancel and free outstanding request. Corresponds to MPI_Cancel() + MPI_Request_free().

A no-op if parRun() == false, if there are no pending requests, or if the index is out-of-range (0 to nRequests)

Definition at line 60 of file UPstreamRequest.C.

Referenced by UPstream::Request::cancel().

Here is the caller graph for this function:

◆ cancelRequest() [2/2]

void cancelRequest ( UPstream::Request & req)
static

Non-blocking comms: cancel and free outstanding request. Corresponds to MPI_Cancel() + MPI_Request_free().

A no-op for non-parallel

Definition at line 61 of file UPstreamRequest.C.

◆ cancelRequests()

void cancelRequests ( UList< UPstream::Request > & requests)
static

Non-blocking comms: cancel and free outstanding requests. Corresponds to MPI_Cancel() + MPI_Request_free().

A no-op if parRun() == false or list is empty

Definition at line 62 of file UPstreamRequest.C.

Referenced by distributedDILUPreconditioner::wait().

Here is the caller graph for this function:

◆ removeRequests()

void removeRequests ( label pos,
label len = -1 )
static

Non-blocking comms: cancel/free outstanding requests (from position onwards) and remove from internal list of requests. Corresponds to MPI_Cancel() + MPI_Request_free().

A no-op if parRun() == false, if the position is out-of-range [0 to nRequests()], or the internal list of requests is empty.

Parameters
posstarting position within the internal list of requests
lenlength of slice to remove (negative = until the end)

Definition at line 64 of file UPstreamRequest.C.

References Foam::pos().

Here is the call graph for this function:

◆ freeRequest()

void freeRequest ( UPstream::Request & req)
static

Non-blocking comms: free outstanding request. Corresponds to MPI_Request_free().

A no-op if parRun() == false

Definition at line 66 of file UPstreamRequest.C.

Referenced by UPstream::Request::free().

Here is the caller graph for this function:

◆ freeRequests()

void freeRequests ( UList< UPstream::Request > & requests)
static

Non-blocking comms: free outstanding requests. Corresponds to MPI_Request_free().

A no-op if parRun() == false or list is empty

Definition at line 67 of file UPstreamRequest.C.

◆ waitRequests() [1/3]

void waitRequests ( label pos,
label len = -1 )
static

Wait until all requests (from position onwards) have finished. Corresponds to MPI_Waitall().

A no-op if parRun() == false, if the position is out-of-range [0 to nRequests()], or the internal list of requests is empty.

If checking a trailing portion of the list, it will also trim the list of outstanding requests as a side-effect. This is a feature (not a bug) to conveniently manage the list.

Parameters
posstarting position within the internal list of requests
lenlength of slice to check (negative = until the end)

Definition at line 69 of file UPstreamRequest.C.

References Foam::pos().

Referenced by LduMatrix< Type, DType, LUType >::updateMatrixInterfaces(), and waitRequests().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ waitRequests() [2/3]

void waitRequests ( UList< UPstream::Request > & requests)
static

Wait until all requests have finished. Corresponds to MPI_Waitall().

A no-op if parRun() == false, or the list is empty.

Definition at line 70 of file UPstreamRequest.C.

◆ waitAnyRequest() [1/2]

bool waitAnyRequest ( label pos,
label len = -1 )
static

Wait until any request (from position onwards) has finished. Corresponds to MPI_Waitany().

A no-op and returns false if parRun() == false, if the position is out-of-range [0 to nRequests()], or the internal list of requests is empty.

Returns
true if any pending request completed.
false if all requests have already been handled.
Parameters
posstarting position within the internal list of requests
lenlength of slice to check (negative = until the end)

Definition at line 72 of file UPstreamRequest.C.

References Foam::pos().

Here is the call graph for this function:

◆ waitSomeRequests() [1/2]

bool waitSomeRequests ( label pos,
label len = -1,
DynamicList< int > * indices = nullptr )
static

Wait until some requests (from position onwards) have finished. Corresponds to MPI_Waitsome().

A no-op and returns false if parRun() == false, if the position is out-of-range [0 to nRequests], or the internal list of requests is empty.

Returns
true if some pending requests completed.
false if all requests have already been handled
Parameters
posstarting position within the internal list of requests
lenlength of slice to check (negative = until the end)
[out]indicesthe completed request indices relative to the starting position. This is an optional parameter that can be used to recover the indices or simply to avoid reallocations when calling within a loop.

Definition at line 77 of file UPstreamRequest.C.

References DynamicList< T, SizeMin >::clear(), and Foam::pos().

Referenced by mapDistributeBase::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), LduMatrix< Type, DType, LUType >::updateMatrixInterfaces(), and lduMatrix::updateMatrixInterfaces().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ waitSomeRequests() [2/2]

bool waitSomeRequests ( UList< UPstream::Request > & requests,
DynamicList< int > * indices = nullptr )
static

Wait until some requests have finished. Corresponds to MPI_Waitsome().

A no-op and returns false if parRun() == false, the list is empty, or if all the requests have already been handled.

Parameters
requeststhe requests
[out]indicesthe completed request indices relative to the starting position. This is an optional parameter that can be used to recover the indices or simply to avoid reallocations when calling within a loop.

Definition at line 88 of file UPstreamRequest.C.

References DynamicList< T, SizeMin >::clear().

Here is the call graph for this function:

◆ waitAnyRequest() [2/2]

int waitAnyRequest ( UList< UPstream::Request > & requests)
static

Wait until any request has finished and return its index. Corresponds to MPI_Waitany().

Returns -1 if parRun() == false, or the list is empty, or if all the requests have already been handled

Definition at line 98 of file UPstreamRequest.C.

◆ waitRequest() [1/2]

void waitRequest ( const label i)
static

Wait until request i has finished. Corresponds to MPI_Wait().

A no-op if parRun() == false, if there are no pending requests, or if the index is out-of-range (0 to nRequests)

Related implementation (waitRequests with a request pair), reconstructed from the collapsed listing:

void Foam::UPstream::waitRequests
(
    UPstream::Request& req0,
    UPstream::Request& req1
)
{
    // No-op for non-parallel
    if (!UPstream::parRun()) { return; }

    int count = 0;
    MPI_Request mpiRequests[2];

    mpiRequests[count] = PstreamUtils::Cast::to_mpi(req0);
    if (MPI_REQUEST_NULL != mpiRequests[count]) { ++count; }

    mpiRequests[count] = PstreamUtils::Cast::to_mpi(req1);
    if (MPI_REQUEST_NULL != mpiRequests[count]) { ++count; }

    // Flag in advance as being handled
    req0 = UPstream::Request(MPI_REQUEST_NULL);
    req1 = UPstream::Request(MPI_REQUEST_NULL);

    if (!count) { return; }

    profilingPstream::beginTiming();

    if (count == 1)
    {
        // On success: sets request to MPI_REQUEST_NULL
        if (MPI_Wait(mpiRequests, MPI_STATUS_IGNORE))
        {
            FatalErrorInFunction
                << "MPI_Wait returned with error"
                << Foam::abort(FatalError);
        }
    }
    else  // (count > 1)
    {
        // On success: sets each request to MPI_REQUEST_NULL
        if (MPI_Waitall(count, mpiRequests, MPI_STATUSES_IGNORE))
        {
            FatalErrorInFunction
                << "MPI_Waitall returned with error"
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addWaitTime();
}

Definition at line 103 of file UPstreamRequest.C.

Referenced by calculatedProcessorFvPatchField< Type >::evaluate(), processorFaPatchField< Type >::evaluate(), processorFvPatchField< Type >::evaluate(), calculatedProcessorFvPatchField< Type >::updateInterfaceMatrix(), calculatedProcessorGAMGInterfaceField::updateInterfaceMatrix(), lduCalculatedProcessorField< Type >::updateInterfaceMatrix(), processorFaPatchField< Type >::updateInterfaceMatrix(), processorFaPatchField< Type >::updateInterfaceMatrix(), processorFvPatchField< Type >::updateInterfaceMatrix(), processorFvPatchField< Type >::updateInterfaceMatrix(), processorGAMGInterfaceField::updateInterfaceMatrix(), UPstream::Request::wait(), and decomposedBlockData::writeBlocks().

Here is the caller graph for this function:

◆ waitRequest() [2/2]

void waitRequest ( UPstream::Request & req)
static

Wait until specified request has finished. Corresponds to MPI_Wait().

A no-op if parRun() == false or for a null-request

Definition at line 104 of file UPstreamRequest.C.

◆ activeRequest() [1/2]

bool activeRequest ( const label i)
static

Is request i active (!= MPI_REQUEST_NULL)?

False if there are no pending requests, or if the index is out-of-range (0 to nRequests)

Definition at line 106 of file UPstreamRequest.C.

◆ activeRequest() [2/2]

bool activeRequest ( const UPstream::Request & req)
static

Is request active (!= MPI_REQUEST_NULL)?

Definition at line 107 of file UPstreamRequest.C.

◆ finishedRequest() [1/2]

◆ finishedRequest() [2/2]

bool finishedRequest ( UPstream::Request & req)
static

Non-blocking comms: has request finished? Corresponds to MPI_Test().

A no-op and returns true if parRun() == false or for a null-request

Definition at line 110 of file UPstreamRequest.C.

◆ finishedRequests() [1/2]

bool finishedRequests ( label pos,
label len = -1 )
static

Non-blocking comms: have all requests (from position onwards) finished? Corresponds to MPI_Testall().

A no-op and returns true if parRun() == false, if there are no pending requests, or if the index is out-of-range (0 to nRequests) or the addressed range is empty etc.

Parameters
posstarting position within the internal list of requests
lenlength of slice to check (negative = until the end)

Definition at line 112 of file UPstreamRequest.C.

References Foam::pos().

Referenced by cyclicACMIFvPatchField< Type >::ready(), cyclicACMIGAMGInterfaceField::ready(), cyclicAMIFvPatchField< Type >::ready(), and cyclicAMIGAMGInterfaceField::ready().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ finishedRequests() [2/2]

bool finishedRequests ( UList< UPstream::Request > & requests)
static

Non-blocking comms: have all requests finished? Corresponds to MPI_Testall().

A no-op and returns true if parRun() == false or list is empty

Definition at line 118 of file UPstreamRequest.C.

◆ finishedRequestPair()

bool finishedRequestPair ( label & req0,
label & req1 )
static

Non-blocking comms: have both requests finished? Corresponds to pair of MPI_Test().

A no-op and returns true if parRun() == false, if there are no pending requests, or if the indices are out-of-range (0 to nRequests). Each finished request parameter is set to -1 (ie, done).

Definition at line 124 of file UPstreamRequest.C.

Referenced by calculatedProcessorFvPatchField< Type >::all_ready(), and lduCalculatedProcessorField< Type >::all_ready().

Here is the caller graph for this function:

◆ waitRequestPair()

void waitRequestPair ( label & req0,
label & req1 )
static

Non-blocking comms: wait for both requests to finish. Corresponds to pair of MPI_Wait().

A no-op if parRun() == false, if there are no pending requests, or if the indices are out-of-range (0 to nRequests). Each finished request parameter is set to -1 (ie, done).

Definition at line 132 of file UPstreamRequest.C.

◆ parRun() [1/2]

bool parRun ( const bool on)
inlinestaticnoexcept

Set as parallel run on/off.

Returns
the previous value

Definition at line 1669 of file UPstream.H.

References Foam::noexcept, and parRun().

Referenced by snappyLayerDriver::addLayers(), snappyLayerDriver::addLayersSinglePass(), masterUncollatedFileOperation::addWatch(), regIOobject::addWatch(), unwatchedIOdictionary::addWatch(), decompositionGAMGAgglomeration::agglomerate(), meshRefinement::balance(), AMIInterpolation::calcDistribution(), addPatchCellLayer::calcExtrudeInfo(), processorCyclicPolyPatch::calcGeometry(), processorFaPatch::calcGeometry(), processorPolyPatch::calcGeometry(), GlobalOffset< label >::calculate(), GlobalOffset< label >::calculate(), surfaceNoise::calculate(), call_window_allocate(), call_window_create(), FaceCellWave< Type, TrackingData >::cellToFace(), OppositeFaceCellWave< Type, TrackingData >::cellToFace(), designVariablesUpdate::checkConvergence(), faBoundaryMesh::checkParallelSync(), polyBoundaryMesh::checkParallelSync(), ZoneMesh< ZoneType, MeshType >::checkParallelSync(), AMIInterpolation::checkSymmetricWeights(), regionSplit::ClassName(), fieldValue::combineFields(), limitTurbulenceViscosity::correct(), meshRefinement::countEdgeFaces(), cyclicACMIFvsPatchField< Type >::coupled(), decompositionMethod::decompose(), simpleGeomDecomp::decompose(), metisLikeDecomp::decomposeGeneral(), conformalVoronoiMesh::decomposition(), processorFaPatch::delta(), processorFvPatch::delta(), masterUncollatedFileOperation::dirPath(), distributedTriSurfaceMesh::distribute(), fvMeshDistribute::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), refinementHistory::distribute(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), faMeshBoundaryHalo::distributeSparse(), snappyLayerDriver::doLayers(), snappyRefineDriver::doRefine(), dupCommunicator(), PointEdgeWave< Type, TrackingData >::edgeToPoint(), ensightSurfaceReader::ensightSurfaceReader(), calculatedProcessorFvPatchField< Type >::evaluate(), processorFaPatchField< Type >::evaluate(), processorFvPatchField< Type 
>::evaluate(), mapDistributeBase::exchangeMasks(), Foam::exitNow(), masterUncollatedFileOperation::filePath(), polyMesh::findCell(), distributedTriSurfaceMesh::findLine(), distributedTriSurfaceMesh::findLineAll(), distributedTriSurfaceMesh::findLineAny(), distributedTriSurfaceMesh::findNearest(), distributedTriSurfaceMesh::findNearest(), masterUncollatedFileOperation::findWatch(), freeCommunicator(), PatchTools::gatherAndMerge(), PatchTools::gatherAndMerge(), GenericPatchWriter< indirectPrimitivePatch >::GenericPatchWriter(), GenericPatchWriter< indirectPrimitivePatch >::GenericPatchWriter(), ensightSurfaceReader::geometry(), Foam::getCommPattern(), zoneDistribute::getDatafromOtherProc(), distributedTriSurfaceMesh::getField(), masterUncollatedFileOperation::getFile(), distributedTriSurfaceMesh::getNormal(), distributedTriSurfaceMesh::getRegion(), exprResult::getResult(), masterUncollatedFileOperation::getState(), distributedTriSurfaceMesh::getVolumeType(), GAMGProcAgglomeration::globalCellCells(), faMeshBoundaryHalo::haloSize(), InflationInjection< CloudType >::InflationInjection(), faMesh::init(), calculatedProcessorFvPatchField< Type >::initEvaluate(), processorFaPatchField< Type >::initEvaluate(), processorFvPatchField< Type >::initEvaluate(), processorFaPatch::initGeometry(), processorPolyPatch::initGeometry(), InjectedParticleDistributionInjection< CloudType >::initialise(), InjectedParticleInjection< CloudType >::initialise(), extractEulerianParticles::initialiseBins(), processorPolyPatch::initOrder(), processorCyclicPointPatchField< Type >::initSwapAddSeparated(), processorFaPatch::initUpdateMesh(), processorPolyPatch::initUpdateMesh(), internalMeshWriter::internalMeshWriter(), internalMeshWriter::internalMeshWriter(), internalWriter::internalWriter(), internalWriter::internalWriter(), fileOperation::isIOrank(), isoAdvection::isoAdvection(), FaceCellWave< Type, TrackingData >::iterate(), PointEdgeWave< Type, TrackingData >::iterate(), 
lagrangianWriter::lagrangianWriter(), lagrangianWriter::lagrangianWriter(), lineWriter::lineWriter(), lineWriter::lineWriter(), masterUncollatedFileOperation::localObjectPath(), fileOperation::lookupAndCacheProcessorsPath(), LUscalarMatrix::LUscalarMatrix(), patchMeanVelocityForce::magUbarAve(), processorFaPatch::makeCorrectionVectors(), processorFaPatch::makeDeltaCoeffs(), processorFaPatch::makeLPN(), processorFaPatch::makeNonGlobalPatchPoints(), processorFaPatch::makeWeights(), processorFvPatch::makeWeights(), error::master(), mergedSurf::merge(), surfaceWriter::merge(), surfaceWriter::mergeFieldTemplate(), polyBoundaryMesh::neighbourEdges(), processorTopology::New(), newCommunicator(), newCommunicator(), masterUncollatedFileOperation::NewIFstream(), processorPolyPatch::order(), fieldMinMax::output(), InflationInjection< CloudType >::parcelsToInject(), parRun(), patchMeshWriter::patchMeshWriter(), patchMeshWriter::patchMeshWriter(), patchWriter::patchWriter(), patchWriter::patchWriter(), polyWriter::polyWriter(), polyWriter::polyWriter(), RecycleInteraction< CloudType >::postEvolve(), powerLawLopesdaCostaZone::powerLawLopesdaCostaZone(), meshRefinement::printMeshInfo(), processorTopology::procAdjacency(), singleDirectionUniformBin::processField(), uniformBin::processField(), collatedFileOperation::processorsDir(), triangulatedPatch::randomGlobalPoint(), masterUncollatedFileOperation::read(), uncollatedFileOperation::read(), surfaceNoise::read(), lumpedPointState::readData(), Time::readModifiedObjects(), masterUncollatedFileOperation::readStream(), surfaceNoise::readSurfaceData(), masterUncollatedFileOperation::removeWatch(), parProfiling::report(), AMIWeights::reportPatch(), faMeshBoundaryHalo::reset(), fvMeshSubset::reset(), GlobalOffset< label >::reset(), cyclicAMIPolyPatch::resetAMI(), mapDistributeBase::schedule(), Time::setControls(), foamReport::setStaticBuiltins(), masterUncollatedFileOperation::setUnmodified(), zoneDistribute::setUpCommforZone(), 
globalMeshData::sharedPoints(), shortestPathSet::shortestPathSet(), error::simpleExit(), LUscalarMatrix::solve(), TDACChemistryModel< psiReactionThermo, constGasHThermoPhysics >::solve(), splitCommunicator(), messageStream::stream(), surfaceWriter::surface(), surfaceNoise::surfaceAverage(), surfaceFieldWriter::surfaceFieldWriter(), surfaceFieldWriter::surfaceFieldWriter(), surfaceWriter::surfaceWriter(), surfaceWriter::surfaceWriter(), surfaceWriter::surfaceWriter(), processorCyclicPointPatchField< Type >::swapAddSeparated(), syncTools::swapBoundaryCellList(), syncTools::swapBoundaryCellPositions(), syncTools::swapBoundaryFaceList(), syncTools::swapBoundaryFacePositions(), syncTools::swapFaceList(), masterUncollatedFileOperation::sync(), syncObjects::sync(), syncTools::syncBoundaryFaceList(), syncTools::syncEdgeMap(), syncTools::syncFaceList(), syncTools::syncFaceList(), syncTools::syncFacePositions(), faMesh::syncGeom(), syncTools::syncPointMap(), triSurfaceMesh::triSurfaceMesh(), triSurfaceMesh::triSurfaceMesh(), abaqusWriter::TypeNameNoDebug(), boundaryDataWriter::TypeNameNoDebug(), debugWriter::TypeNameNoDebug(), ensightWriter::TypeNameNoDebug(), foamWriter::TypeNameNoDebug(), nastranWriter::TypeNameNoDebug(), nullWriter::TypeNameNoDebug(), proxyWriter::TypeNameNoDebug(), rawWriter::TypeNameNoDebug(), starcdWriter::TypeNameNoDebug(), vtkWriter::TypeNameNoDebug(), x3dWriter::TypeNameNoDebug(), fileOperation::uniformFile(), mapDistributeBase::unionCombineMasks(), turbulentDFSEMInletFvPatchVectorField::updateCoeffs(), calculatedProcessorFvPatchField< Type >::updateInterfaceMatrix(), faMesh::updateMesh(), processorFaPatch::updateMesh(), processorPolyPatch::updateMesh(), fileOperation::updateStates(), masterUncollatedFileOperation::updateStates(), dynamicCode::waitForFile(), energySpectrum::write(), viewFactorHeatFlux::write(), ParticleZoneInfo< CloudType >::write(), Foam::vtk::writeCellSetFaces(), meshToMeshMethod::writeConnectivity(), 
Foam::ensightOutput::writeFaceConnectivity(), Foam::ensightOutput::writeFaceConnectivity(), Foam::vtk::writeFaceSet(), AMIWeights::writeFileHeader(), fieldMinMax::writeFileHeader(), isoAdvection::writeIsoFaces(), collatedFileOperation::writeObject(), Foam::vtk::writePointSet(), surfaceNoise::writeSurfaceData(), topOVariablesBase::writeSurfaceFiles(), streamLineBase::writeToFile(), and Foam::vtk::writeTopoSet().

Here is the call graph for this function:

◆ parRun() [2/2]

bool & parRun ( )
inline static noexcept

◆ haveThreads()

bool haveThreads ( )
inline static noexcept

Have support for threads.

Definition at line 1686 of file UPstream.H.

References Foam::noexcept.

Referenced by OFstreamCollator::write().

Here is the caller graph for this function:

◆ masterNo()

◆ nProcs()

label nProcs ( const label communicator = worldComm)
inline static

Number of ranks in a parallel run (for the given communicator). It is 1 for a serial run.

Definition at line 1697 of file UPstream.H.

References worldComm.

Referenced by fvMeshPrimitiveLduAddressing::addAddressing(), surfaceZonesInfo::addCellZonesToMesh(), surfaceZonesInfo::addFaceZonesToMesh(), AMIInterpolation::agglomerate(), eagerGAMGProcAgglomeration::agglomerate(), manualGAMGProcAgglomeration::agglomerate(), masterCoarsestGAMGProcAgglomeration::agglomerate(), procFacesGAMGProcAgglomeration::agglomerate(), bitSet::allGather(), meshRefinement::balance(), faMesh::boundaryProcs(), faPatch::boundaryProcs(), faMesh::boundaryProcSizes(), faPatch::boundaryProcSizes(), mapDistributeBase::calcCompactAddressing(), mapDistributeBase::calcCompactAddressing(), AMIInterpolation::calcDistribution(), fieldMinMax::calcMinMaxFieldType(), surfaceNoise::calculate(), meshRefinement::checkCoupledFaceZones(), mappedPatchBase::collectSamples(), processorColour::colour(), fieldValue::combineFields(), sizeDistribution::combineFields(), GAMGAgglomeration::continueAgglomerating(), fvMeshDistribute::countCells(), box::createMap(), Foam::createReconstructMap(), cyclicAMIGAMGInterface::cyclicAMIGAMGInterface(), cyclicAMIGAMGInterface::cyclicAMIGAMGInterface(), meshRefinement::directionalRefineCandidates(), distributedTriSurfaceMesh::distribute(), fvMeshDistribute::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), distributedDILUPreconditioner::distributedDILUPreconditioner(), snappyLayerDriver::doLayers(), box::doRefineBoxes(), mapDistributeBase::exchangeAddressing(), mapDistributeBase::exchangeMasks(), extendedCentredCellToCellStencil::extendedCentredCellToCellStencil(), extendedCentredCellToFaceStencil::extendedCentredCellToFaceStencil(), extendedCentredFaceToCellStencil::extendedCentredFaceToCellStencil(), extendedUpwindCellToFaceStencil::extendedUpwindCellToFaceStencil(), extendedUpwindCellToFaceStencil::extendedUpwindCellToFaceStencil(), find_first(), distributedTriSurfaceMesh::findNearest(), mappedPatchBase::findSamples(), decomposedBlockData::gather(), 
lduPrimitiveMesh::gather(), decomposedBlockData::gatherProcData(), bitSet::gatherValues(), UPstream::commsStructList::get(), fileOperation::getGlobalHostIORanks(), Foam::getSelectedProcs(), distributedTriSurfaceMesh::getVolumeType(), UPstream::commsStructList::init(), InjectedParticleDistributionInjection< CloudType >::initialise(), InjectedParticleInjection< CloudType >::initialise(), viewFactor::initialise(), interNode_offsets(), distributedTriSurfaceMesh::localQueries(), fileOperation::lookupAndCacheProcessorsPath(), LUscalarMatrix::LUscalarMatrix(), mapDistributeBase::mapDistributeBase(), mapDistributeBase::mapDistributeBase(), mapDistributeBase::mapDistributeBase(), inverseDistance::markDonors(), trackingInverseDistance::markDonors(), masterCoarsestGAMGProcAgglomeration::masterCoarsestGAMGProcAgglomeration(), masterUncollatedFileOperation::masterOp(), masterUncollatedFileOperation::masterOp(), masterUncollatedFileOperation::masterUncollatedFileOperation(), masterUncollatedFileOperation::masterUncollatedFileOperation(), decompositionMethod::nDomains(), processorTopology::New(), masterUncollatedFileOperation::NewIFstream(), InflationInjection< CloudType >::parcelsToInject(), argList::parse(), ParticleZoneInfo< CloudType >::ParticleZoneInfo(), pointHistory::pointHistory(), RecycleInteraction< CloudType >::postEvolve(), powerLawLopesdaCostaZone::powerLawLopesdaCostaZone(), mapDistributeBase::printLayout(), meshRefinement::printMeshInfo(), printNodeCommsControl(), fileOperation::printRanks(), processorTopology::procAdjacency(), masterUncollatedFileOperation::read(), decomposedBlockData::readBlocks(), decomposedBlockData::readBlocks(), masterUncollatedFileOperation::readHeader(), masterUncollatedFileOperation::readStream(), mapDistributeBase::receive(), Foam::PstreamDetail::reduce_offsetRange(), Foam::PstreamDetail::reduce_offsetRanges(), Foam::reduceOffsets(), meshRefinement::refineCandidates(), Foam::regionSum(), Foam::regionSum(), 
meshRefinement::removeGapCells(), parProfiling::report(), profilingPstream::report(), faMeshBoundaryHalo::reset(), fvMeshSubset::reset(), mapDistributeBase::schedule(), mapDistributeBase::send(), Time::setControls(), foamReport::setStaticBuiltins(), zoneDistribute::setUpCommforZone(), ParSortableList< Type >::sort(), mappedPatchFieldBase< scalar >::storeField(), fileOperation::subRanks(), inverseDistance::update(), trackingInverseDistance::update(), oversetFvMeshBase::updateAddressing(), turbulentDFSEMInletFvPatchVectorField::updateCoeffs(), globalMeshData::updateMesh(), patchInjectionBase::updateMesh(), FacePostProcessing< CloudType >::write(), OFstreamCollator::write(), ParticleHistogram< CloudType >::write(), ParticlePostProcessing< CloudType >::write(), ParticleZoneInfo< CloudType >::write(), decomposedBlockData::writeBlocks(), externalCoupled::writeGeometry(), isoAdvection::writeIsoFaces(), caseInfo::writeMeta(), and streamLineBase::writeToFile().

◆ myProcNo()

int myProcNo ( const label communicator = worldComm)
inline static

Rank of this process in the communicator (starting from masterNo()). Negative if the process is not a rank in the communicator.

Definition at line 1706 of file UPstream.H.

References worldComm.

Referenced by surfaceZonesInfo::addCellZonesToMesh(), surfaceZonesInfo::addFaceZonesToMesh(), AMIInterpolation::agglomerate(), eagerGAMGProcAgglomeration::agglomerate(), GAMGProcAgglomeration::agglomerate(), manualGAMGProcAgglomeration::agglomerate(), masterCoarsestGAMGProcAgglomeration::agglomerate(), faMesh::boundaryProcs(), faPatch::boundaryProcs(), faMesh::boundaryProcSizes(), faPatch::boundaryProcSizes(), SprayParcel< ParcelType >::calcBreakup(), Foam::calcCellCellsImpl(), mapDistributeBase::calcCompactAddressing(), mapDistributeBase::calcCompactAddressing(), fieldMinMax::calcMinMaxFieldType(), globalIndex::calcOffset(), AMIInterpolation::calculate(), faceAreaWeightAMI2D::calculate(), faceAreaWeightAMI::calculate(), viewFactor::calculate(), GAMGAgglomeration::calculateRegionMaster(), meshRefinement::checkCoupledFaceZones(), mappedPatchBase::collectSamples(), processorColour::colour(), fieldValue::combineFields(), sizeDistribution::combineFields(), wallDistAddressing::correct(), box::createMap(), Foam::createReconstructMap(), cyclicACMIGAMGInterface::cyclicACMIGAMGInterface(), cyclicAMIGAMGInterface::cyclicAMIGAMGInterface(), cyclicAMIGAMGInterface::cyclicAMIGAMGInterface(), noDecomp::decompose(), noDecomp::decompose(), noDecomp::decompose(), noDecomp::decompose(), snappyLayerDriver::determineSidePatches(), distributedTriSurfaceMesh::distribute(), fvMeshDistribute::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), distributedDILUPreconditioner::distributedDILUPreconditioner(), box::doRefineBoxes(), globalIndexAndTransform::encode(), mapDistributeBase::exchangeAddressing(), mapDistributeBase::exchangeMasks(), processorField::execute(), find_first(), find_last(), InjectionModel< CloudType >::findCellAtPosition(), internalFieldProbe::findElements(), patchFieldProbe::findElements(), mappedPatchBase::findLocalSamples(), distributedTriSurfaceMesh::findNearest(), distributedTriSurfaceMesh::findNearest(), 
mappedPatchBase::findSamples(), decomposedBlockData::gatherProcData(), get_edge_list(), get_vertex_list(), zoneDistribute::getDatafromOtherProc(), fileOperation::getGlobalHostIORanks(), meshRefinement::getMasterEdges(), meshRefinement::getMasterPoints(), Foam::getSelectedProcs(), GAMGProcAgglomeration::globalCellCells(), lduPrimitiveMesh::globalCellCells(), inverseDistance::holeExtrapolationStencil(), InjectedParticleDistributionInjection< CloudType >::initialise(), InjectedParticleInjection< CloudType >::initialise(), viewFactor::initialise(), globalIndex::inplaceToGlobal(), cellShapeControlMesh::insert(), cellShapeControlMesh::insertFar(), globalIndex::isLocal(), lduPrimitiveMesh::lduPrimitiveMesh(), globalIndex::localEnd(), localNode_parentProcs(), masterUncollatedFileOperation::localObjectPath(), globalIndex::localSize(), globalIndex::localStart(), mapDistribute::mapDistribute(), mapDistribute::mapDistribute(), mapDistributeBase::mapDistributeBase(), mapDistributeBase::mapDistributeBase(), mapDistributeBase::mapDistributeBase(), inverseDistance::markDonors(), trackingInverseDistance::markDonors(), inverseDistance::markPatchesAsHoles(), trackingInverseDistance::markPatchesAsHoles(), masterUncollatedFileOperation::masterOp(), masterUncollatedFileOperation::masterOp(), masterUncollatedFileOperation::masterUncollatedFileOperation(), masterUncollatedFileOperation::masterUncollatedFileOperation(), PstreamBuffers::maxNonLocalRecvCount(), globalIndex::maxNonLocalSize(), processorColour::myColour(), myWorld(), myWorldID(), processorTopology::New(), masterUncollatedFileOperation::NewIFstream(), regionSplit::nLocalRegions(), InflationInjection< CloudType >::parcelsToInject(), argList::parse(), pointHistory::pointHistory(), ParticlePostProcessing< CloudType >::postFace(), ParticlePostProcessing< CloudType >::postPatch(), powerLawLopesdaCostaZone::powerLawLopesdaCostaZone(), UOPstreamBase::print(), mapDistributeBase::printLayout(), fileOperation::printRanks(), 
processorTopology::procAdjacency(), GAMGAgglomeration::procAgglomerateLduAddressing(), GAMGAgglomeration::procAgglomerateRestrictAddressing(), backgroundMeshDecomposition::procBounds(), processorField::processorField(), triangulatedPatch::randomGlobalPoint(), globalIndex::range(), masterUncollatedFileOperation::read(), masterUncollatedFileOperation::read(), masterUncollatedFileOperation::readHeader(), masterUncollatedFileOperation::readStream(), mapDistributeBase::receive(), Foam::PstreamDetail::reduce_offsetRanges(), indexedVertex< Gt, Vb >::referred(), mapDistributeBase::renumber(), zoltanRenumber::renumber(), faMeshBoundaryHalo::reset(), fvMeshSubset::reset(), cyclicAMIPolyPatch::resetAMI(), mapDistributeBase::schedule(), mapDistributeBase::send(), cellSetOption::setCellSelection(), Time::setControls(), PatchFlowRateInjection< CloudType >::setPositionAndCell(), box::setRefineFlags(), zoneDistribute::setUpCommforZone(), globalIndex::slice(), ParSortableList< Type >::sort(), KinematicSurfaceFilm< CloudType >::splashInteraction(), fileOperation::subRanks(), masterUncollatedFileOperation::sync(), globalIndex::toGlobal(), globalIndex::toGlobal(), globalIndex::toLocal(), mapDistributeBase::unionCombineMasks(), inverseDistance::update(), trackingInverseDistance::update(), oversetFvMeshBase::updateAddressing(), processorField::updateMesh(), globalMeshData::updateMesh(), patchInjectionBase::updateMesh(), propellerInfo::updateSampleDiskCells(), dynamicCode::waitForFile(), globalIndex::whichProcID(), cyclicACMIGAMGInterface::write(), cyclicAMIGAMGInterface::write(), FacePostProcessing< CloudType >::write(), ParticleHistogram< CloudType >::write(), ParticlePostProcessing< CloudType >::write(), box::writeBoxes(), meshToMeshMethod::writeConnectivity(), AMIInterpolation::writeFaceConnectivity(), externalCoupled::writeGeometry(), isoAdvection::writeIsoFaces(), and extendedCellToFaceStencil::writeStencilStats().

◆ master()

bool master ( const label communicator = worldComm)
inline static

True if process corresponds to the master rank in the communicator.

Definition at line 1714 of file UPstream.H.

References masterNo(), and worldComm.

Referenced by abaqusMeshSet::abaqusMeshSet(), abort::abort(), masterUncollatedFileOperation::addWatch(), regIOobject::addWatch(), masterUncollatedFileOperation::addWatches(), Foam::broadcastFile_recursive(), Foam::broadcastFile_single(), AMIInterpolation::calcDistribution(), mappedPatchBase::calcMapping(), pointNoise::calculate(), surfaceNoise::calculate(), viewFactor::calculate(), writeFile::canResetFile(), writeFile::canWriteHeader(), writeFile::canWriteToFile(), argList::check(), ZoneMesh< ZoneType, MeshType >::checkParallelSync(), argList::checkRootCase(), Foam::chMod(), extractEulerianParticles::collectParticle(), processorColour::colour(), sizeDistribution::combineFields(), commSchedule::commSchedule(), Foam::cp(), logFiles::createFiles(), Foam::createReconstructMap(), simpleGeomDecomp::decompose(), metisLikeDecomp::decomposeGeneral(), Foam::defineRunTimeSelectionTable(), masterUncollatedFileOperation::dirPath(), systemCall::dispatch(), distributedTriSurfaceMesh::distribute(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), snappyVoxelMeshDriver::doRefine(), abort::end(), ensightSurfaceReader::ensightSurfaceReader(), abort::execute(), Curle::execute(), graphFunctionObject::execute(), momentum::execute(), Foam::exists(), externalFileCoupler::externalFileCoupler(), externalFileCoupler::externalFileCoupler(), masterUncollatedFileOperation::filePath(), logFiles::files(), logFiles::files(), Foam::fileSize(), distanceSurface::filterKeepLargestRegion(), distanceSurface::filterKeepNearestRegions(), distanceSurface::filterRegionProximity(), internalFieldProbe::findElements(), masterUncollatedFileOperation::findInstance(), masterUncollatedFileOperation::findTimes(), masterUncollatedFileOperation::findWatch(), STDMD::fit(), lduPrimitiveMesh::gather(), PatchTools::gatherAndMerge(), decomposedBlockData::gatherProcData(), coordSet::gatherSort(), 
bitSet::gatherValues(), ensightSurfaceReader::geometry(), masterUncollatedFileOperation::getFile(), codeStream::getFunction(), Foam::getSelectedProcs(), masterUncollatedFileOperation::getState(), Random::globalGaussNormal(), globalIndex::globalIndex(), Random::globalPosition(), Random::globalPosition(), Random::globalPosition(), Random::globalRandomise01(), Random::globalSample01(), Foam::highResLastModified(), surfaceNoise::initialise(), viewFactor::initialise(), interfaceHeight::interfaceHeight(), Foam::isDir(), Foam::isFile(), fileOperation::isIOrank(), JobInfo::JobInfo(), Foam::lastModified(), Foam::ln(), fileOperation::lookupAndCacheProcessorsPath(), LUscalarMatrix::LUscalarMatrix(), NURBS3DVolume::makeFolders(), error::master(), masterUncollatedFileOperation::masterOp(), masterUncollatedFileOperation::masterOp(), messageStream::masterStream(), surfaceWriter::mergeFieldTemplate(), Foam::mkDir(), Foam::mode(), Foam::mv(), Foam::mvBak(), ensightCase::newCloud(), writeFile::newFile(), writeFile::newFileAtTime(), ensightCase::newGeometry(), masterUncollatedFileOperation::NewIFstream(), fvMeshTools::newMesh(), fileOperation::nProcs(), objectiveManager::objectiveManager(), InflationInjection< CloudType >::parcelsToInject(), argList::parse(), ParticleZoneInfo< CloudType >::ParticleZoneInfo(), SprayCloud< CloudType >::penetration(), pointHistory::pointHistory(), porosityModel::porosityModel(), Probes< ProbeType >::prepare(), printCommTree(), meshRefinement::printMeshInfo(), fileOperation::printRanks(), pointNoise::processData(), decomposedBlockData::read(), masterUncollatedFileOperation::read(), masterUncollatedFileOperation::read(), uncollatedFileOperation::read(), externalCoupled::read(), timeActivatedFileUpdate::read(), writeFile::read(), sampledSets::read(), sampledSurfaces::read(), decomposedBlockData::readBlocks(), decomposedBlockData::readBlocks(), baseIOdictionary::readData(), lumpedPointState::readData(), Foam::readDir(), 
masterUncollatedFileOperation::readHeader(), regIOobject::readHeaderOk(), Foam::readLink(), masterUncollatedFileOperation::readObjects(), masterUncollatedFileOperation::readStream(), surfaceNoise::readSurfaceData(), externalCoupled::removeDataMaster(), externalCoupled::removeDataSlave(), RemoveParcels< CloudType >::RemoveParcels(), masterUncollatedFileOperation::removeWatch(), profilingPstream::report(), globalIndex::reset(), logFiles::resetNames(), Foam::rm(), Foam::rmDir(), mapDistributeBase::schedule(), fvMatrix< Type >::setComponentReference(), fvMatrix< scalar >::setComponentReference(), Foam::setRefCell(), faMatrix< Type >::setReference(), ensightCase::setTime(), masterUncollatedFileOperation::setUnmodified(), globalMeshData::sharedPoints(), shortestPathSet::shortestPathSet(), externalFileCoupler::shutdown(), snappyVoxelMeshDriver::snappyVoxelMeshDriver(), LUscalarMatrix::solve(), rigidBodyMotion::solve(), rigidBodyMeshMotion::solve(), rigidBodyMeshMotionSolver::solve(), ParSortableList< Type >::sort(), SQPBase::SQPBase(), messageStream::stream(), surfaceNoise::surfaceAverage(), masterUncollatedFileOperation::sync(), hexRef8Data::sync(), syncTools::syncEdgeMap(), syncTools::syncPointMap(), triSurfaceMesh::triSurfaceMesh(), triSurfaceMesh::triSurfaceMesh(), sixDoFRigidBodyMotion::update(), activePressureForceBaffleVelocityFvPatchVectorField::updateCoeffs(), electrostaticDepositionFvPatchScalarField::updateCoeffs(), lumpedPointDisplacementPointPatchVectorField::updateCoeffs(), fileMonitor::updateStates(), masterUncollatedFileOperation::updateStates(), solution::upgradeSolverDict(), externalFileCoupler::useMaster(), externalFileCoupler::useSlave(), OFstreamCollator::waitAll(), externalFileCoupler::waitForMaster(), externalFileCoupler::waitForSlave(), ensightFaMesh::write(), ensightMesh::write(), FacePostProcessing< CloudType >::write(), caseInfo::write(), energySpectrum::write(), volFieldValue::write(), foamReport::write(), graphFunctionObject::write(), 
radiometerProbes::write(), reactionsSensitivityAnalysis< chemistryType >::write(), regionSizeDistribution::write(), sizeDistribution::write(), timeInfo::write(), viewFactorHeatFlux::write(), wallShearStress::write(), yPlus::write(), effectivenessTable::write(), referenceTemperature::write(), histogramModel::write(), NURBS3DCurve::write(), NURBS3DCurve::write(), NURBS3DSurface::write(), NURBS3DSurface::write(), objective::write(), OFstreamCollator::write(), ParticleCollector< CloudType >::write(), ParticleHistogram< CloudType >::write(), ParticlePostProcessing< CloudType >::write(), ParticleZoneInfo< CloudType >::write(), boundaryDataWriter::write(), debugWriter::write(), foamWriter::write(), nastranWriter::write(), proxyWriter::write(), rawWriter::write(), starcdWriter::write(), vtkWriter::write(), x3dWriter::write(), propellerInfo::writeAxialWake(), decomposedBlockData::writeBlocks(), volumetricBSplinesDesignVariables::writeBounds(), ensightCells::writeBox(), ensightWriter::writeCollated(), updateMethod::writeCorrection(), NURBS3DVolume::writeCps(), decomposedBlockData::writeData(), morphingBoxConstraint::writeDVSensitivities(), Foam::ensightOutput::writeFaceConnectivity(), Foam::ensightOutput::writeFaceConnectivity(), lumpedPointMovement::writeForcesAndMomentsVTP(), externalCoupled::writeGeometry(), objective::writeInstantaneousSeparator(), objective::writeInstantaneousValue(), isoAdvection::writeIsoFaces(), objective::writeMeanValue(), SQPBase::writeMeritFunction(), decomposedBlockData::writeObject(), regIOobject::writeObject(), lumpedPointMovement::writeStateVTP(), surfaceNoise::writeSurfaceData(), topOVariablesBase::writeSurfaceFiles(), abaqusWriter::writeTemplate(), nastranWriter::writeTemplate(), rawWriter::writeTemplate(), vtkWriter::writeTemplate(), boundaryDataWriter::writeTemplate(), debugWriter::writeTemplate(), foamWriter::writeTemplate(), starcdWriter::writeTemplate(), x3dWriter::writeTemplate(), streamLineBase::writeToFile(), GCMMA::writeToFiles(), 
ensightWriter::writeUncollated(), NURBS3DSurface::writeVTK(), lumpedPointState::writeVTP(), propellerInfo::writeWake(), AMIWeights::writeWeightField(), NURBS3DCurve::writeWParses(), NURBS3DCurve::writeWParses(), NURBS3DSurface::writeWParses(), and NURBS3DSurface::writeWParses().

Here is the call graph for this function:

◆ is_rank()

bool is_rank ( const label communicator = worldComm)
inline static

True if process corresponds to any rank (master or sub-rank) in the given communicator.

Definition at line 1723 of file UPstream.H.

References masterNo(), and worldComm.

Referenced by is_parallel(), and whichCommunication().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_subrank()

bool is_subrank ( const label communicator = worldComm)
inline static

True if process corresponds to a sub-rank in the given communicator.

Definition at line 1731 of file UPstream.H.

References masterNo(), and worldComm.

Referenced by Foam::broadcastFile_recursive(), Foam::broadcastFile_single(), decomposedBlockData::readBlocks(), decomposedBlockData::readBlocks(), OFstreamCollator::write(), decomposedBlockData::writeBlocks(), and decomposedBlockData::writeData().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_parallel()

bool is_parallel ( const label communicator = worldComm)
inline static

True if parallel algorithm or exchange is required.

This is when parRun() == true, the process corresponds to a rank in the communicator, and there is more than one rank in the communicator.

Definition at line 1743 of file UPstream.H.

References is_rank(), UPstream::commsStruct::nProcs(), and worldComm.

Referenced by bitSet::allGather(), bitSet::broadcast(), bitSet::broadcast(), globalIndex::calcOffset(), IOobjectList::checkNames(), find_first(), find_last(), bitSet::gatherValues(), masterUncollatedFileOperation::masterOp(), masterUncollatedFileOperation::masterOp(), exprValueFieldTag::reduce(), Foam::reduce(), Foam::reduce(), Foam::reduce(), Foam::PstreamDetail::reduce_offsetRange(), Foam::PstreamDetail::reduce_offsetRanges(), bitSet::reduceAnd(), Foam::reduceOffsets(), bitSet::reduceOr(), Foam::sumReduce(), and globalMeshData::updateMesh().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ numNodes()

int numNodes ( )
inline static noexcept

The number of shared/host nodes in the (const) world communicator.

Definition at line 1754 of file UPstream.H.

References Foam::noexcept.

Referenced by fileOperation::getGlobalHostIORanks(), and printNodeCommsControl().

Here is the caller graph for this function:

◆ parent()

label parent ( int communicator)
inline static

◆ procID()

List< int > & procID ( int communicator)
inline static

The list of ranks within a given communicator.

Definition at line 1767 of file UPstream.H.

Referenced by baseProcNo(), Foam::operator<<(), collatedFileOperation::processorsDir(), procNo(), masterUncollatedFileOperation::read(), mappedPatchFieldBase< Type >::retrieveField(), and mappedPatchFieldBase< Type >::storeField().

Here is the caller graph for this function:

◆ sameProcs() [1/3]

bool sameProcs ( int communicator1,
int communicator2 )
inline static

Test for communicator equality.

True if they have the same index or address the same ranks.

Definition at line 1778 of file UPstream.H.

Referenced by AMIInterpolation::calcDistribution().

Here is the caller graph for this function:

◆ sameProcs() [2/3]

template<typename T1, typename = std::void_t <std::enable_if_t<std::is_integral_v<T1>>>>
bool sameProcs ( int communicator,
const UList< T1 > & procs )
inline static

Test equality of communicator procs with the given list of ranks. Includes a guard for the communicator index.

Definition at line 1793 of file UPstream.H.

◆ sameProcs() [3/3]

template<typename T1, typename T2, typename = std::void_t < std::enable_if_t<std::is_integral_v<T1>>, std::enable_if_t<std::is_integral_v<T2>> >>
bool sameProcs ( const UList< T1 > & procs1,
const UList< T2 > & procs2 )
inline static

Test the equality of two lists of ranks.

Definition at line 1811 of file UPstream.H.

◆ allWorlds()

const wordList & allWorlds ( )
inline static noexcept

All worlds.

Definition at line 1822 of file UPstream.H.

References Foam::noexcept.

Referenced by mappedPatchBase::calcMapping(), mappedPatchBase::masterWorld(), argList::parse(), Foam::printDOT(), and mappedPatchBase::sameWorld().

Here is the caller graph for this function:

◆ worldIDs()

const labelList & worldIDs ( )
inline static noexcept

The indices into allWorlds for all processes.

Definition at line 1830 of file UPstream.H.

References Foam::noexcept.

◆ myWorldID()

label myWorldID ( )
inline static

My worldID.

Definition at line 1838 of file UPstream.H.

References commGlobal(), and myProcNo().

Referenced by mappedPatchBase::calcMapping(), multiWorldConnections::createComms(), and mappedPatchBase::masterWorld().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ myWorld()

const word & myWorld ( )
inline static

My world.

Definition at line 1846 of file UPstream.H.

References commGlobal(), and myProcNo().

Referenced by multiWorldConnections::addConnectionById(), multiWorldConnections::addConnectionByName(), mappedPatchBase::calcMapping(), argList::parse(), mappedPatchBase::sameWorld(), and mappedPatchBase::sampleMesh().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ allProcs()

◆ subProcs()

◆ interNode_offsets()

const Foam::List< int > & interNode_offsets ( )
static

Processor offsets corresponding to the inter-node communicator.

Definition at line 776 of file UPstream.C.

References nProcs(), and List< T >::null().

Referenced by localNode_parentProcs().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ localNode_parentProcs()

const Foam::UPstream::rangeType & localNode_parentProcs ( )
static

The range (start/size) of the commLocalNode ranks in terms of the (const) world communicator processors.

Definition at line 817 of file UPstream.C.

References IntRange< IntType >::empty(), Foam::findLower(), interNode_offsets(), myProcNo(), and IntRange< IntType >::reset().

Here is the call graph for this function:

◆ linearCommunication()

const Foam::UPstream::commsStructList & linearCommunication ( int communicator)
static

Linear communication schedule (special case) for given communicator.

Definition at line 699 of file UPstream.C.

Referenced by whichCommunication().

Here is the caller graph for this function:

◆ treeCommunication()

const Foam::UPstream::commsStructList & treeCommunication ( int communicator)
static

Tree communication schedule (standard case) for given communicator.

Definition at line 718 of file UPstream.C.

Referenced by whichCommunication().

Here is the caller graph for this function:

◆ whichCommunication()

const commsStructList & whichCommunication ( const int communicator,
bool linear = false )
inlinestatic

Communication schedule for all-to-master (proc 0) as linear/tree/none with switching based on UPstream::nProcsSimpleSum, the is_parallel() state and the optional linear parameter.

Parameters
linear — optionally select linear schedule only

Definition at line 1898 of file UPstream.H.

References is_rank(), linearCommunication(), UPstream::commsStruct::nProcs(), nProcsSimpleSum, UPstream::commsStructList::null(), and treeCommunication().

Referenced by printCommTree().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ msgType() [1/2]

int & msgType ( )
inlinestaticnoexcept

Message tag of standard messages.

Definition at line 1926 of file UPstream.H.

References Foam::noexcept.

Referenced by solidAbsorption::a(), Pstream::allGatherList(), mappedPatchBase::calcMapping(), AMIInterpolation::calculate(), faceAreaWeightAMI2D::calculate(), faceAreaWeightAMI::calculate(), advancingFrontAMI::checkPatches(), masterUncollatedFileOperation::chMod(), mappedPatchBase::collectSamples(), processorColour::colour(), Pstream::combineAllGather(), Pstream::combineGather(), Pstream::combineReduce(), Pstream::combineScatter(), extendedCentredCellToCellStencil::compact(), extendedCentredCellToFaceStencil::compact(), extendedCentredFaceToCellStencil::compact(), GAMGAgglomeration::continueAgglomerating(), masterUncollatedFileOperation::cp(), inverseDistance::createStencil(), cyclicACMIGAMGInterface::cyclicACMIGAMGInterface(), cyclicAMIGAMGInterface::cyclicAMIGAMGInterface(), metisLikeDecomp::decomposeGeneral(), masterUncollatedFileOperation::dirPath(), mappedPatchBase::distribute(), solidAbsorption::e(), Pstream::exchange(), Pstream::exchange(), Pstream::exchange(), Pstream::exchange(), Pstream::exchangeSizes(), Pstream::exchangeSizes(), Pstream::exchangeSizes(), masterUncollatedFileOperation::exists(), masterUncollatedFileOperation::filePath(), masterUncollatedFileOperation::fileSize(), distributedTriSurfaceMesh::findNearest(), mappedPatchBase::findSamples(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), Pstream::gather(), globalIndex::gatherInplace(), globalIndex::gatherInplace(), globalIndex::gatherInplace(), globalIndex::gatherInplaceOp(), Pstream::gatherList(), globalIndex::gatherOp(), globalIndex::gatherOp(), globalIndex::gatherOp(), globalIndex::gatherOp(), decomposedBlockData::gatherProcData(), Foam::gAverage(), Foam::gAverage(), globalMeshData::geometricSharedPoints(), globalIndex::get(), distributedTriSurfaceMesh::getVolumeType(), globalMeshData::globalEdgeNumbering(), globalMeshData::globalPointNumbering(), 
globalMeshData::globalTransforms(), Foam::gSumCmptProd(), Foam::gSumProd(), Foam::gWeightedAverage(), Foam::gWeightedSum(), masterUncollatedFileOperation::highResLastModified(), viewFactor::initialise(), cellCellStencil::interpolate(), regionModel::interRegionAMI(), masterUncollatedFileOperation::isDir(), masterUncollatedFileOperation::isFile(), masterUncollatedFileOperation::lastModified(), lduPrimitiveMesh::lduPrimitiveMesh(), Pstream::listCombineAllGather(), Pstream::listCombineGather(), Pstream::listCombineReduce(), Pstream::listCombineScatter(), Pstream::listGather(), globalIndex::listGatherValues(), Pstream::listGatherValues(), Pstream::listReduce(), Pstream::listScatterValues(), masterUncollatedFileOperation::ln(), LUscalarMatrix::LUscalarMatrix(), oversetFvPatchField< Type >::manipulateMatrix(), Pstream::mapCombineAllGather(), Pstream::mapCombineGather(), Pstream::mapCombineReduce(), Pstream::mapCombineScatter(), Pstream::mapGather(), mappedPatchFieldBase< Type >::mappedField(), mappedPatchFieldBase< Type >::mappedInternalField(), mappedPatchFieldBase< Type >::mappedWeightField(), mappedPatchFieldBase< Type >::mappedWeightField(), Pstream::mapReduce(), regionModel::mapRegionPatchField(), regionModel::mapRegionPatchField(), regionModel::mapRegionPatchInternalField(), surfaceWriter::mergeFieldTemplate(), globalMeshData::mergePoints(), globalMeshData::mergePoints(), masterUncollatedFileOperation::mkDir(), masterUncollatedFileOperation::mode(), globalIndex::mpiGather(), globalIndex::mpiGather(), globalIndex::mpiGatherInplace(), globalIndex::mpiGatherInplaceOp(), globalIndex::mpiGatherOp(), globalIndex::mpiGatherOp(), masterUncollatedFileOperation::mv(), masterUncollatedFileOperation::mvBak(), masterUncollatedFileOperation::NewIFstream(), AMIInterpolation::normaliseWeights(), fileOperation::printRanks(), processorTopology::procAdjacency(), GAMGAgglomeration::procAgglomerateRestrictAddressing(), GAMGAgglomeration::prolongField(), 
GAMGAgglomeration::prolongField(), PstreamBuffers::PstreamBuffers(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), decomposedBlockData::readBlocks(), decomposedBlockData::readBlocks(), masterUncollatedFileOperation::readDir(), masterUncollatedFileOperation::readHeader(), masterUncollatedFileOperation::readStream(), surfaceNoise::readSurfaceData(), IPstream::recv(), exprValueFieldTag::reduce(), lduMesh::reduce(), Foam::reduce(), Foam::reduce(), fvMatrix< Type >::relax(), faMeshBoundaryHalo::reset(), cyclicAMIPolyPatch::resetAMI(), GAMGAgglomeration::restrictField(), Foam::returnReduce(), mappedPatchBase::reverseDistribute(), masterUncollatedFileOperation::rm(), masterUncollatedFileOperation::rmDir(), globalIndex::scatter(), globalIndex::scatter(), globalIndex::scatter(), globalIndex::scatter(), Pstream::scatter(), mapDistributeBase::schedule(), OPstream::send(), OPstream::send(), globalMeshData::sharedPoints(), LUscalarMatrix::solve(), Foam::sumReduce(), surfaceNoise::surfaceAverage(), syncTools::syncBoundaryFaceList(), globalMeshData::syncData(), globalMeshData::syncData(), syncTools::syncEdgeMap(), syncTools::syncFaceList(), syncTools::syncPointMap(), processorCyclicPolyPatch::tag(), processorFaPatch::tag(), processorFvPatch::tag(), processorPolyPatch::tag(), masterUncollatedFileOperation::type(), UIPBstream::UIPBstream(), UOPBstream::UOPBstream(), UOPstreamBase::UOPstreamBase(), oversetFvMeshBase::updateAddressing(), alphatFilmWallFunctionFvPatchScalarField::updateCoeffs(), thermalBaffle1DFvPatchScalarField< solidType >::updateCoeffs(), turbulentTemperatureCoupledBaffleMixedFvPatchScalarField::updateCoeffs(), turbulentTemperatureRadCoupledMixedFvPatchScalarField::updateCoeffs(), filmPyrolysisTemperatureCoupledFvPatchScalarField::updateCoeffs(), filmPyrolysisVelocityCoupledFvPatchVectorField::updateCoeffs(), mappedFixedInternalValueFvPatchField< Type >::updateCoeffs(), 
mappedFlowRateFvPatchVectorField::updateCoeffs(), mappedVelocityFluxFixedValueFvPatchField::updateCoeffs(), greyDiffusiveRadiationMixedFvPatchScalarField::updateCoeffs(), MarshakRadiationFixedTemperatureFvPatchScalarField::updateCoeffs(), MarshakRadiationFvPatchScalarField::updateCoeffs(), wideBandDiffusiveRadiationMixedFvPatchScalarField::updateCoeffs(), distributedDILUPreconditioner::updateMatrixInterfaces(), Sampled< Type >::value(), OFstreamCollator::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), decomposedBlockData::writeBlocks(), and surfaceNoise::writeSurfaceData().

◆ msgType() [2/2]

int msgType ( int val)
inlinestaticnoexcept

Set the message tag for standard messages.

Returns
the previous value

Definition at line 1936 of file UPstream.H.

◆ incrMsgType()

int incrMsgType ( int val = 1)
inlinestaticnoexcept

Increment the message tag for standard messages.

Returns
the previous value

Definition at line 1948 of file UPstream.H.

References Foam::noexcept.

Referenced by solidAbsorption::a(), faceAreaWeightAMI2D::calculate(), faceAreaWeightAMI::calculate(), solidAbsorption::e(), globalMeshData::geometricSharedPoints(), globalMeshData::globalEdgeNumbering(), globalMeshData::globalPointNumbering(), globalMeshData::globalTransforms(), regionModel::interRegionAMI(), mappedPatchFieldBase< Type >::mappedField(), mappedPatchFieldBase< Type >::mappedInternalField(), mappedPatchFieldBase< Type >::mappedWeightField(), mappedPatchFieldBase< Type >::mappedWeightField(), regionModel::mapRegionPatchField(), regionModel::mapRegionPatchField(), regionModel::mapRegionPatchInternalField(), globalMeshData::mergePoints(), globalMeshData::mergePoints(), globalMeshData::sharedPoints(), syncTools::syncBoundaryFaceList(), globalMeshData::syncData(), globalMeshData::syncData(), syncTools::syncEdgeMap(), syncTools::syncFaceList(), syncTools::syncPointMap(), alphatFilmWallFunctionFvPatchScalarField::updateCoeffs(), thermalBaffle1DFvPatchScalarField< solidType >::updateCoeffs(), turbulentTemperatureCoupledBaffleMixedFvPatchScalarField::updateCoeffs(), turbulentTemperatureRadCoupledMixedFvPatchScalarField::updateCoeffs(), filmPyrolysisTemperatureCoupledFvPatchScalarField::updateCoeffs(), filmPyrolysisVelocityCoupledFvPatchVectorField::updateCoeffs(), mappedFixedInternalValueFvPatchField< Type >::updateCoeffs(), mappedFlowRateFvPatchVectorField::updateCoeffs(), mappedVelocityFluxFixedValueFvPatchField::updateCoeffs(), greyDiffusiveRadiationMixedFvPatchScalarField::updateCoeffs(), MarshakRadiationFixedTemperatureFvPatchScalarField::updateCoeffs(), MarshakRadiationFvPatchScalarField::updateCoeffs(), wideBandDiffusiveRadiationMixedFvPatchScalarField::updateCoeffs(), distributedDILUPreconditioner::updateMatrixInterfaces(), and Sampled< Type >::value().

Here is the caller graph for this function:

◆ commsType() [1/2]

◆ commsType() [2/2]

commsTypes commsType ( const commsTypes ct)
inlinenoexcept

Set the communications type of the stream.

Returns
the previous value

Definition at line 1968 of file UPstream.H.

◆ shutdown()

void shutdown ( int errNo = 0)
static

Shutdown (finalize) MPI as required.

Uses MPI_Abort instead of MPI_Finalize if errNo is non-zero

Definition at line 57 of file UPstream.C.

Referenced by ParRunControl::~ParRunControl().

Here is the caller graph for this function:

◆ abort()

void abort ( int errNo = 1)
static

Call MPI_Abort with no other checks or cleanup.

Definition at line 68 of file UPstream.C.

Referenced by error::simpleExit().

Here is the caller graph for this function:

◆ exit()

void exit ( int errNo = 1)
static

Shutdown (finalize) MPI as required and exit program with errNo.

Definition at line 61 of file UPstream.C.

Referenced by designVariablesUpdate::checkConvergence(), Foam::exitNow(), argList::parse(), ParRunControl::runPar(), and error::simpleExit().

Here is the caller graph for this function:

◆ allToAll() [1/2]

void allToAll ( const UList< int32_t > & sendData,
UList< int32_t > & recvData,
const int communicator = UPstream::worldComm )
static

Exchange int32_t data with all ranks in communicator.

non-parallel : simple copy of sendData to recvData

Parameters
[in] sendData — The value at [proci] is sent to proci
[out] recvData — The data received from the other ranks

Definition at line 42 of file UPstreamAllToAll.C.

Referenced by fvMeshDistribute::distribute().

Here is the caller graph for this function:

◆ allToAllConsensus() [1/6]

void allToAllConsensus ( const UList< int32_t > & sendData,
UList< int32_t > & recvData,
const int tag,
const int communicator = UPstream::worldComm )
static

Exchange non-zero int32_t data between ranks [NBX].

recvData is always initially assigned zero and no non-zero

values are sent/received from other ranks.

non-parallel : simple copy of sendData to recvData

Note
The message tag should be chosen to be a unique value

since the implementation uses probing with ANY_SOURCE !!

An initial barrier may help to avoid synchronisation problems

caused elsewhere (See "nbx.tuning" opt switch)

Parameters
[in] sendData — The non-zero value at [proci] is sent to proci
[out] recvData — The non-zero value received from each rank
tag — Message tag for the communication

Definition at line 74 of file UPstreamAllToAll.C.

◆ allToAllConsensus() [2/6]

void allToAllConsensus ( const Map< int32_t > & sendData,
Map< int32_t > & recvData,
const int tag,
const int communicator = UPstream::worldComm )
static

Exchange int32_t data between ranks [NBX].

recvData map is always cleared initially so a simple check

of its keys is sufficient to determine connectivity.

non-parallel : copy own rank (if it exists)

See notes about message tags and "nbx.tuning" opt switch

Parameters
[in] sendData — The value at [proci] is sent to proci.
[out] recvData — The values received from given ranks.
tag — Message tag for the communication

Definition at line 74 of file UPstreamAllToAll.C.

◆ allToAllConsensus() [3/6]

Map< int32_t > allToAllConsensus ( const Map< int32_t > & sendData,
const int tag,
const int communicator = UPstream::worldComm )
inlinestatic

Exchange int32_t data between ranks [NBX].

Returns
any received data as a Map
Parameters
[in] sendData — The value at [proci] is sent to proci.
tag — Message tag for the communication

Definition at line 2059 of file UPstream.H.

◆ allToAll() [2/2]

void allToAll ( const UList< int64_t > & sendData,
UList< int64_t > & recvData,
const int communicator = UPstream::worldComm )
static

Exchange int64_t data with all ranks in communicator.

non-parallel : simple copy of sendData to recvData

Parameters
[in] sendData — The value at [proci] is sent to proci
[out] recvData — The data received from the other ranks

Definition at line 43 of file UPstreamAllToAll.C.

◆ allToAllConsensus() [4/6]

void allToAllConsensus ( const UList< int64_t > & sendData,
UList< int64_t > & recvData,
const int tag,
const int communicator = UPstream::worldComm )
static

Exchange non-zero int64_t data between ranks [NBX].

recvData is always initially assigned zero and no non-zero

values are sent/received from other ranks.

non-parallel : simple copy of sendData to recvData

Note
The message tag should be chosen to be a unique value

since the implementation uses probing with ANY_SOURCE !!

An initial barrier may help to avoid synchronisation problems

caused elsewhere (See "nbx.tuning" opt switch)

Parameters
[in] sendData — The non-zero value at [proci] is sent to proci
[out] recvData — The non-zero value received from each rank
tag — Message tag for the communication

Definition at line 75 of file UPstreamAllToAll.C.

◆ allToAllConsensus() [5/6]

void allToAllConsensus ( const Map< int64_t > & sendData,
Map< int64_t > & recvData,
const int tag,
const int communicator = UPstream::worldComm )
static

Exchange int64_t data between ranks [NBX].

recvData map is always cleared initially so a simple check

of its keys is sufficient to determine connectivity.

non-parallel : copy own rank (if it exists)

See notes about message tags and "nbx.tuning" opt switch

Parameters
[in] sendData — The value at [proci] is sent to proci.
[out] recvData — The values received from given ranks.
tag — Message tag for the communication

Definition at line 75 of file UPstreamAllToAll.C.

◆ allToAllConsensus() [6/6]

Map< int64_t > allToAllConsensus ( const Map< int64_t > & sendData,
const int tag,
const int communicator = UPstream::worldComm )
inlinestatic

Exchange int64_t data between ranks [NBX].

Returns
any received data as a Map
Parameters
[in] sendData — The value at [proci] is sent to proci.
tag — Message tag for the communication

Definition at line 2060 of file UPstream.H.

◆ mpiGather()

template<class Type>
void mpiGather ( const Type * sendData,
Type * recvData,
int count,
const int communicator = UPstream::worldComm )
static

Receive identically-sized (contiguous) data from all ranks.

Parameters
sendData — All ranks: location of individual value to send
recvData — Master: receive buffer with all values. Other ranks: ignored
count — Number of send/recv data per rank. Globally consistent!

References worldComm.

Referenced by decomposedBlockData::gather(), bitSet::gatherValues(), and profilingPstream::report().

Here is the caller graph for this function:

◆ mpiScatter()

template<class Type>
void mpiScatter ( const Type * sendData,
Type * recvData,
int count,
const int communicator = UPstream::worldComm )
static

Send identically-sized (contiguous) data to all ranks.

Parameters
sendData — Master: send buffer with all values. Other ranks: ignored
recvData — All ranks: location to receive individual value
count — Number of send/recv data per rank. Globally consistent!

References worldComm.

◆ mpiAllGather()

template<class Type>
void mpiAllGather ( Type * allData,
int count,
const int communicator = UPstream::worldComm )
static

Gather/scatter identically-sized data.

Send data from proc slot, receive into all slots

Parameters
allData — All ranks: the base of the data locations
count — Number of send/recv data per rank. Globally consistent!

References worldComm.

Referenced by fileOperation::getGlobalHostIORanks(), and globalMeshData::updateMesh().

Here is the caller graph for this function:

◆ mpiGatherv()

template<class Type>
void mpiGatherv ( const Type * sendData,
int sendCount,
Type * recvData,
const UList< int > & recvCounts,
const UList< int > & recvOffsets,
const int communicator = UPstream::worldComm )
static

Receive variable length data from all ranks.

Parameters
sendCount — Ignored on master if recvCount[0] == 0
recvData — Ignored on non-root rank
recvCounts — Ignored on non-root rank
recvOffsets — Ignored on non-root rank

References worldComm.

Referenced by gather(), and decomposedBlockData::gatherProcData().

Here is the caller graph for this function:

◆ mpiScatterv()

template<class Type>
void mpiScatterv ( const Type * sendData,
const UList< int > & sendCounts,
const UList< int > & sendOffsets,
Type * recvData,
int recvCount,
const int communicator = UPstream::worldComm )
static

Send variable length data to all ranks.

Parameters
sendData — Ignored on non-root rank
sendCounts — Ignored on non-root rank
sendOffsets — Ignored on non-root rank

References worldComm.

Referenced by GAMGAgglomeration::procAgglomerateLduAddressing(), and scatter().

Here is the caller graph for this function:

◆ allGatherValues()

template<class T>
List< T > allGatherValues ( const T & localValue,
const int communicator = UPstream::worldComm )
static

Allgather individual values into list locations.

The returned list has size nProcs, identical on all ranks.

References Foam::T(), and worldComm.

Referenced by AMIInterpolation::calcDistribution(), and mappedPatchBase::collectSamples().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ listGatherValues()

template<class T>
List< T > listGatherValues ( const T & localValue,
const int communicator = UPstream::worldComm )
static

Gather individual values into list locations.

On master list length == nProcs, otherwise zero length.
For non-parallel : the returned list length is 1 with localValue.

References Foam::T(), and worldComm.

Referenced by distributedTriSurfaceMesh::distribute(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), distributedTriSurfaceMesh::distributedTriSurfaceMesh(), globalIndex::globalIndex(), globalIndex::globalIndex(), triangulatedPatch::randomGlobalPoint(), masterUncollatedFileOperation::readStream(), globalIndex::reset(), globalIndex::reset(), OFstreamCollator::write(), and decomposedBlockData::writeObject().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ listScatterValues()

template<class T>
T listScatterValues ( const UList< T > & allValues,
const int communicator = UPstream::worldComm )
static

Scatter individual values from list locations.

On master input list length == nProcs, ignored on other procs.
For non-parallel : returns the first list element (or default initialized).

References Foam::T(), and worldComm.

Here is the call graph for this function:

◆ broadcast() [1/2]

template<class Type>
bool broadcast ( Type * buffer,
std::streamsize count,
const int communicator,
const int root = UPstream::masterNo() )
inlinestatic

Broadcast buffer contents (contiguous types) to all ranks (default: from rank=0). The sizes must match on all processes.

For non-parallel : do nothing.

Returns
True on success
Parameters
root — The broadcast root

References masterNo().

Referenced by printTopoControl().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ broadcast() [2/2]

template<class Type, unsigned N>
bool broadcast ( FixedList< Type, N > & list,
const int communicator,
const int root = UPstream::masterNo() )
inlinestatic

Broadcast fixed-list content (contiguous types) to all ranks (default: from rank=0). The sizes must match on all processes.

For non-parallel : do nothing.

Returns
True on success
Parameters
root — The broadcast root

References masterNo().

Here is the call graph for this function:

◆ mpiReduce() [1/2]

template<class T>
void mpiReduce ( T values[],
int count,
const UPstream::opCodes opCodeId,
const int communicator )
static

MPI_Reduce (blocking) for known operators.

For non-parallel : do nothing.

References Foam::T().

Here is the call graph for this function:

◆ mpiReduce() [2/2]

template<UPstream::opCodes opCode, class T>
void mpiReduce ( T values[],
int count,
const int communicator )
static

MPI_Reduce (blocking) for known operators.

For non-parallel : do nothing.

References Foam::T().

Here is the call graph for this function:

◆ mpiAllReduce() [1/4]

template<class T>
void mpiAllReduce ( T values[],
int count,
const UPstream::opCodes opCodeId,
const int communicator )
static

MPI_Allreduce (blocking) for known operators.

For non-parallel : do nothing.

References Foam::T().

Referenced by Foam::reduce(), Foam::reduce(), Foam::reduce(), bitSet::reduceAnd(), bitSet::reduceOr(), and Foam::sumReduce().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ mpiAllReduce() [2/4]

template<UPstream::opCodes opCode, class T>
void mpiAllReduce ( T values[],
int count,
const int communicator )
static

MPI_Allreduce (blocking) for known operators.

For non-parallel : do nothing.

References Foam::T().

Here is the call graph for this function:

◆ mpiAllReduce() [3/4]

template<class T>
void mpiAllReduce ( T values[],
int count,
const UPstream::opCodes opCodeId,
const int communicator,
UPstream::Request & req )
static

MPI_Iallreduce (non-blocking) for known operators.

For non-parallel : do nothing.

References Foam::T().

Here is the call graph for this function:

◆ mpiAllReduce() [4/4]

template<UPstream::opCodes opCode, class T>
void mpiAllReduce ( T values[],
int count,
const int communicator,
UPstream::Request & req )
static

MPI_Iallreduce (non-blocking) for known operators.

For non-parallel : do nothing.

References Foam::T().

Here is the call graph for this function:

◆ mpiScan() [1/2]

template<Foam::UPstream::opCodes opCode, class T>
void mpiScan ( T values[],
int count,
const int communicator,
const bool exclusive = false )
static

Inclusive/exclusive scan (in-place).

In exclusive mode, the degenerate value on rank=0 has no meaning and will normally be treated like non-exclusive mode (ie, original values). However, for the opCodes::op_sum type we can provide a sensible value and overwrite it with a zero-initialized value.

Note
For non-parallel : do nothing.
Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

References Foam::T().

Here is the call graph for this function:

◆ mpiScan() [2/2]

template<Foam::UPstream::opCodes opCode, class T>
T mpiScan ( const T & localValue,
const int communicator,
const bool exclusive = false )
static

Inclusive/exclusive scan returning the result. In exclusive mode, the degenerate value on rank=0 has no meaning but is treated like non-exclusive mode (ie, original values).

Note
For non-parallel : return own value
Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

References Foam::T().

Here is the call graph for this function:

◆ mpiScan_min() [1/2]

template<class T>
void mpiScan_min ( T values[],
int count,
const int communicator,
const bool exclusive = false )
inlinestatic

Inclusive/exclusive min scan (in-place).

See UPstream::mpiScan for notes about the behaviour

Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

Definition at line 2405 of file UPstream.H.

◆ mpiExscan_min() [1/2]

template<class T>
void mpiExscan_min ( T values[],
int count,
const int communicator )
inlinestatic

Exclusive min scan (in-place).

See UPstream::mpiScan for notes about the behaviour

Definition at line 2405 of file UPstream.H.

References Foam::min(), and op_min.

Here is the call graph for this function:

◆ mpiScan_min() [2/2]

template<class T>
T mpiScan_min ( const T & value,
const int communicator,
const bool exclusive = false )
inlinestatic

Inclusive/exclusive min scan returning result.

See UPstream::mpiScan for notes about the behaviour

Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

Definition at line 2405 of file UPstream.H.

◆ mpiExscan_min() [2/2]

template<class T>
T mpiExscan_min ( const T & value,
const int communicator )
inlinestatic

Exclusive min scan returning result.

See UPstream::mpiScan for notes about the behaviour

Definition at line 2405 of file UPstream.H.

◆ mpiScan_max() [1/2]

template<class T>
void mpiScan_max ( T values[],
int count,
const int communicator,
const bool exclusive = false )
inlinestatic

Inclusive/exclusive max scan (in-place).

See UPstream::mpiScan for notes about the behaviour

Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

Definition at line 2406 of file UPstream.H.

◆ mpiExscan_max() [1/2]

template<class T>
void mpiExscan_max ( T values[],
int count,
const int communicator )
inlinestatic

Exclusive max scan (in-place).

See UPstream::mpiScan for notes about the behaviour

Definition at line 2406 of file UPstream.H.

References Foam::max(), and op_max.

Here is the call graph for this function:

◆ mpiScan_max() [2/2]

template<class T>
T mpiScan_max ( const T & value,
const int communicator,
const bool exclusive = false )
inlinestatic

Inclusive/exclusive max scan returning result.

See UPstream::mpiScan for notes about the behaviour

Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

Definition at line 2406 of file UPstream.H.

◆ mpiExscan_max() [2/2]

template<class T>
T mpiExscan_max ( const T & value,
const int communicator )
inlinestatic

Exclusive max scan returning result.

See UPstream::mpiScan for notes about the behaviour

Definition at line 2406 of file UPstream.H.

◆ mpiScan_sum() [1/2]

template<class T>
void mpiScan_sum ( T values[],
int count,
const int communicator,
const bool exclusive = false )
inlinestatic

Inclusive/exclusive sum scan (in-place).

See UPstream::mpiScan for notes about the behaviour

Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

Definition at line 2407 of file UPstream.H.

◆ mpiExscan_sum() [1/2]

template<class T>
void mpiExscan_sum ( T values[],
int count,
const int communicator )
inlinestatic

Exclusive sum scan (in-place).

See UPstream::mpiScan for notes about the behaviour

Definition at line 2407 of file UPstream.H.

References op_sum, Foam::sum(), and worldComm.

Referenced by Foam::PstreamDetail::reduce_offsetRange(), Foam::PstreamDetail::reduce_offsetRanges(), and Foam::reduceOffsets().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ mpiScan_sum() [2/2]

template<class T>
T mpiScan_sum ( const T & value,
const int communicator,
const bool exclusive = false )
inlinestatic

Inclusive/exclusive sum scan returning result.

See UPstream::mpiScan for notes about the behaviour

Parameters
exclusive — Use exclusive scan (MPI_Exscan vs MPI_Scan)

Definition at line 2407 of file UPstream.H.

◆ mpiExscan_sum() [2/2]

template<class T>
T mpiExscan_sum ( const T & value,
const int communicator )
inlinestatic

Exclusive sum scan returning result.

See UPstream::mpiScan for notes about the behaviour

Definition at line 2407 of file UPstream.H.

◆ reduceAnd()

void reduceAnd ( bool & value,
const int communicator = worldComm )
static

Logical (and) reduction (MPI_AllReduce).

For non-parallel : do nothing

Definition at line 28 of file UPstreamReduce.C.

Referenced by pressureControl::pressureControl(), displacementPointSmoothingMotionSolver::relax(), displacementSmartPointSmoothingMotionSolver::relax(), Foam::returnReduceAnd(), and mappedPatchBase::upToDate().

Here is the caller graph for this function:

◆ reduceOr()

void reduceOr ( bool & value,
const int communicator = worldComm )
static

◆ find_first()

int find_first ( bool condition,
int communicator )
static

Locate the first rank for which the condition is true, or -1 if no ranks satisfy the condition.

Definition at line 1055 of file UPstream.C.

References is_parallel(), mpi_allreduce(), myProcNo(), nProcs(), op_min, and type_int32.

Here is the call graph for this function:

◆ find_last()

int find_last ( bool condition,
int communicator )
static

Locate the last rank for which the condition is true, or -1 if no ranks satisfy the condition.

Definition at line 1087 of file UPstream.C.

References is_parallel(), mpi_allreduce(), myProcNo(), op_max, and type_int32.

Here is the call graph for this function:

◆ allocateCommunicator() [1/2]

label allocateCommunicator ( const label parent,
const labelRange & subRanks,
const bool withComponents = true )
inlinestatic
Deprecated
(2025-02) prefer newCommunicator

Definition at line 2454 of file UPstream.H.

References allocateCommunicator(), newCommunicator(), and parent().

Referenced by allocateCommunicator(), and allocateCommunicator().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ allocateCommunicator() [2/2]

label allocateCommunicator ( const label parent,
const labelUList & subRanks,
const bool withComponents = true )
inlinestatic
Deprecated
(2025-02) prefer newCommunicator

Definition at line 2468 of file UPstream.H.

References allocateCommunicator(), newCommunicator(), and parent().

Here is the call graph for this function:

◆ commInterHost()

label commInterHost ( )
inlinestaticnoexcept

Communicator between nodes (respects any local worlds).

Definition at line 2482 of file UPstream.H.

References commInterHost(), commInterNode(), and Foam::noexcept.

Referenced by commInterHost().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ commIntraHost()

label commIntraHost ( )
inlinestaticnoexcept

Communicator within the node (respects any local worlds).

Definition at line 2488 of file UPstream.H.

References commIntraHost(), commLocalNode(), and Foam::noexcept.

Referenced by commIntraHost().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ waitRequests() [3/3]

void waitRequests ( )
inlinestatic

Wait for all requests to finish.

Deprecated
(2023-01) Probably not what you want. Should normally be restricted to a particular starting request.

Definition at line 2497 of file UPstream.H.

References waitRequests(), and waitRequests().

Referenced by volPointInterpolationAdjoint::addSeparated(), GAMGAgglomeration::agglomerateLduAddressing(), motionSmootherAlgo::correctBoundaryConditions(), metisLikeDecomp::decomposeGeneral(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), mapDistributeBase::distribute(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluate(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluate_if(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluateLocal(), GeometricBoundaryField< Type, PatchField, GeoMesh >::evaluateSelected(), mapDistributeBase::exchangeMasks(), GAMGProcAgglomeration::globalCellCells(), lduPrimitiveMesh::globalCellCells(), faceAreaPairGAMGAgglomeration::movePoints(), decomposedBlockData::readBlocks(), decomposedBlockData::readBlocks(), mapDistributeBase::receive(), InteractionLists< ParticleType >::receiveReferredData(), motionSmootherAlgo::setDisplacementPatchFields(), LUscalarMatrix::solve(), syncTools::syncBoundaryFaceList(), syncTools::syncFaceList(), distributedDILUPreconditioner::updateMatrixInterfaces(), lduMatrix::updateMatrixInterfaces(), distributedDILUPreconditioner::wait(), waitRequests(), OFstreamCollator::write(), and decomposedBlockData::writeBlocks().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ gather()

template<class Type>
void gather ( const Type * send,
int count,
Type * recv,
const UList< int > & counts,
const UList< int > & offsets,
const int comm = UPstream::worldComm )
inlinestatic
Deprecated
(2025-02) prefer mpiGatherv

Definition at line 2504 of file UPstream.H.

References gather(), mpiGatherv(), UPstream(), and worldComm.

Referenced by gather(), and printTopoControl().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ scatter()

template<class Type>
void scatter ( const Type * send,
const UList< int > & counts,
const UList< int > & offsets,
Type * recv,
int count,
const int comm = UPstream::worldComm )
inlinestatic
Deprecated
(2025-02) prefer mpiScatterv

Definition at line 2522 of file UPstream.H.

References mpiScatterv(), scatter(), UPstream(), and worldComm.

Referenced by scatter().

Here is the call graph for this function:
Here is the caller graph for this function:

Member Data Documentation

◆ commsTypeNames

◆ nodeCommsControl_

int nodeCommsControl_
static

Use of host/node topology-aware routines.

0: disabled; 1: split by hostname [default]; 2: split by shared; >= 4: (debug) split with given number per node

Definition at line 995 of file UPstream.H.

Referenced by printNodeCommsControl(), registerOptSwitch(), and usingNodeComms().

◆ nodeCommsMin_

int nodeCommsMin_
static

Minimum number of nodes before topology-aware routines are enabled.

<= 2: always; >= 3: only when there are more than N nodes

Definition at line 1003 of file UPstream.H.

Referenced by printNodeCommsControl(), registerOptSwitch(), and usingNodeComms().

◆ topologyControl_

int topologyControl_
static

Selection of topology-aware routines as a bitmask combination of the topoControls enumerations.

Definition at line 1009 of file UPstream.H.

Referenced by printTopoControl(), registerOptSwitch(), and usingTopoControl().

◆ floatTransfer

◆ nProcsSimpleSum

int nProcsSimpleSum
static

Number of processors to change from linear to tree communication.

Definition at line 1029 of file UPstream.H.

Referenced by argList::parse(), registerOptSwitch(), and whichCommunication().

◆ nProcsNonblockingExchange

int nProcsNonblockingExchange
static

Number of processors to change to nonBlocking consensual exchange (NBX). Ignored for zero or negative values.

Definition at line 1035 of file UPstream.H.

Referenced by argList::parse(), and registerOptSwitch().

◆ nPollProcInterfaces

int nPollProcInterfaces
static

Number of polling cycles in processor updates.

Definition at line 1040 of file UPstream.H.

Referenced by argList::parse(), registerOptSwitch(), LduMatrix< Type, DType, LUType >::updateMatrixInterfaces(), and lduMatrix::updateMatrixInterfaces().

◆ defaultCommsType

◆ maxCommsSize

int maxCommsSize
static

Optional maximum message size (bytes).

Definition at line 1050 of file UPstream.H.

Referenced by Foam::broadcastFile_single(), argList::parse(), and registerOptSwitch().

◆ tuning_NBX_

int tuning_NBX_
static

Tuning parameters for non-blocking exchange (NBX).

Definition at line 1055 of file UPstream.H.

Referenced by argList::parse(), and registerOptSwitch().

◆ mpiBufferSize

const int mpiBufferSize
static

MPI buffer-size (bytes).

Definition at line 1060 of file UPstream.H.

Referenced by attachOurBuffers().

◆ worldComm

Foam::label worldComm
static

Communicator for all ranks. May differ from commGlobal() if local worlds are in use.

Definition at line 1069 of file UPstream.H.

Referenced by masterUncollatedFileOperation::addWatch(), regIOobject::addWatch(), bitSet::allGather(), Pstream::allGatherList(), allGatherValues(), allProcs(), DimensionedField< Type, areaMesh >::average(), bitSet::broadcast(), Pstream::broadcast(), Pstream::broadcast(), Pstream::broadcastList(), Foam::calcCellCellsImpl(), globalIndex::calcRange(), globalIndex::calcRecvSizes(), GlobalOffset< label >::calculate(), GlobalOffset< label >::calculate(), surfaceNoise::calculate(), faMeshBoundaryHalo::ClassName(), Pstream::combineAllGather(), Pstream::combineGather(), Pstream::combineReduce(), Pstream::combineScatter(), multiWorldConnections::comms(), commWorld(), commWorld(), metisLikeDecomp::decomposeGeneral(), masterUncollatedFileOperation::dirPath(), ensightSurfaceReader::ensightSurfaceReader(), Pstream::exchange(), Pstream::exchange(), Pstream::exchange(), Pstream::exchange(), Pstream::exchangeSizes(), Pstream::exchangeSizes(), Pstream::exchangeSizes(), Pstream::exchangeSizes(), includeEntry::execute(), includeEntry::execute(), includeEtcEntry::execute(), includeEtcEntry::execute(), masterUncollatedFileOperation::filePath(), masterUncollatedFileOperation::findInstance(), masterUncollatedFileOperation::findTimes(), masterUncollatedFileOperation::findWatch(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), globalIndex::gather(), Pstream::gather(), gather(), globalIndex::gatherInplace(), globalIndex::gatherInplaceOp(), Pstream::gatherList(), globalIndex::gatherOp(), globalIndex::gatherOp(), globalIndex::gatherOp(), globalIndex::gatherOp(), bitSet::gatherValues(), ensightSurfaceReader::geometry(), globalIndex::get(), multiWorldConnections::getCommById(), multiWorldConnections::getCommByName(), Foam::getCommPattern(), masterUncollatedFileOperation::getFile(), codeStream::getFunction(), fileOperation::getGlobalHostIORanks(), Foam::getSelectedProcs(), masterUncollatedFileOperation::getState(), masterUncollatedFileOperation::highResLastModified(), 
surfaceNoise::initialise(), viewFactor::initialise(), globalIndex::inplaceToGlobal(), is_parallel(), is_rank(), is_subrank(), globalIndex::isLocal(), masterUncollatedFileOperation::lastModified(), Pstream::listCombineAllGather(), Pstream::listCombineGather(), Pstream::listCombineReduce(), Pstream::listCombineScatter(), Pstream::listGather(), Pstream::listGatherValues(), listGatherValues(), Pstream::listReduce(), Pstream::listScatterValues(), listScatterValues(), globalIndex::localEnd(), masterUncollatedFileOperation::localObjectPath(), globalIndex::localSize(), globalIndex::localStart(), fileOperation::lookupAndCacheProcessorsPath(), Pstream::mapCombineAllGather(), Pstream::mapCombineGather(), Pstream::mapCombineReduce(), Pstream::mapCombineScatter(), Pstream::mapGather(), Pstream::mapReduce(), master(), messageStream::masterStream(), globalIndex::maxNonLocalSize(), surfaceWriter::mergeFieldTemplate(), mpiAllGather(), mpiExscan_sum(), globalIndex::mpiGather(), globalIndex::mpiGather(), mpiGather(), globalIndex::mpiGatherInplace(), globalIndex::mpiGatherInplaceOp(), globalIndex::mpiGatherOp(), globalIndex::mpiGatherOp(), mpiGatherv(), mpiScatter(), mpiScatterv(), myProcNo(), fvMeshTools::newMesh(), nProcs(), lduPrimitiveProcessorInterface::operator=(), argList::parse(), printNodeCommsControl(), fileOperation::printRanks(), globalIndex::range(), masterUncollatedFileOperation::read(), uncollatedFileOperation::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), UIPstream::read(), lumpedPointState::readData(), masterUncollatedFileOperation::readHeader(), masterUncollatedFileOperation::readObjects(), masterUncollatedFileOperation::readStream(), surfaceNoise::readSurfaceData(), IPBstream::recv(), IPstream::recv(), exprValueFieldTag::reduce(), GlobalOffset< label >::reduce(), Foam::reduce(), Foam::reduce(), Foam::reduce(), bitSet::reduceAnd(), Foam::reduceOffset(), bitSet::reduceOr(), 
masterUncollatedFileOperation::removeWatch(), Foam::returnReduce(), Foam::returnReduceAnd(), Foam::returnReduceOr(), globalIndex::scatter(), globalIndex::scatter(), Pstream::scatter(), scatter(), OPBstream::send(), OPstream::send(), OPstream::send(), Time::setMonitoring(), masterUncollatedFileOperation::setUnmodified(), globalIndex::slice(), globalIndex::splitNodeOffsets(), mappedPatchFieldBase< scalar >::storeField(), messageStream::stream(), subProcs(), fileOperation::subRanks(), Foam::sumReduce(), surfaceNoise::surfaceAverage(), fileOperation::sync(), masterUncollatedFileOperation::sync(), globalIndex::toGlobal(), globalIndex::toGlobal(), globalIndex::toLocal(), masterUncollatedFileOperation::updateStates(), DimensionedField< Type, areaMesh >::weightedAverage(), DimensionedField< Type, areaMesh >::weightedAverage(), globalIndex::whichProcID(), UOPstream::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), UOPstream::write(), and surfaceNoise::writeSurfaceData().

◆ warnComm

Foam::label warnComm
static

Debugging: warn for use of any communicator differing from warnComm.

Definition at line 1074 of file UPstream.H.

Referenced by commWarn(), messageStream::masterStream(), Foam::reduce(), and Foam::PstreamGlobals::warnCommunicator().


The documentation for this class was generated from the following files: