Loading...
Searching...
No Matches
UPstreamWrapping.H
Go to the documentation of this file.
1/*---------------------------------------------------------------------------*\
2 ========= |
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 \\ / O peration |
5 \\ / A nd | www.openfoam.com
6 \\/ M anipulation |
7-------------------------------------------------------------------------------
8 Copyright (C) 2012-2016 OpenFOAM Foundation
9 Copyright (C) 2022-2025 OpenCFD Ltd.
10-------------------------------------------------------------------------------
11License
12 This file is part of OpenFOAM.
13
14 OpenFOAM is free software: you can redistribute it and/or modify it
15 under the terms of the GNU General Public License as published by
16 the Free Software Foundation, either version 3 of the License, or
17 (at your option) any later version.
18
19 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
20 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
26
27InNamespace
28 Foam::PstreamDetail
29
30Description
31 Functions to wrap MPI_Bcast, MPI_Allreduce, MPI_Iallreduce etc.
32
33SourceFiles
34 UPstreamWrapping.txx
35
36\*---------------------------------------------------------------------------*/
37
38#ifndef Foam_UPstreamWrapping_H
39#define Foam_UPstreamWrapping_H
40
41#include "openfoam_mpi.H"
42
43// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
44
45namespace Foam
46{
47namespace PstreamDetail
48{
50// MPI_Bcast (default from root=0)
51// No fail/abort handling
52template<class Type>
53bool broadcast
54(
55 Type* values,
56 int count,
57 MPI_Datatype datatype,
58 const int communicator,
59 const int root = 0
60);
61
62// MPI_Reduce, using root=0
63template<class Type>
64void reduce
65(
66 const Type* sendData, // Use nullptr for in-place operation
67 Type* values,
68 int count,
69 MPI_Datatype datatype,
70 MPI_Op optype,
71 const int communicator, // Communicator
72 UPstream::Request* req = nullptr // Non-null for non-blocking
73);
74
75// MPI_Allreduce or MPI_Iallreduce : in-place operation
76template<class Type>
77void allReduce
78(
79 Type* values,
80 int count,
81 MPI_Datatype datatype,
82 MPI_Op optype,
83 const int communicator, // Communicator
84 UPstream::Request* req = nullptr // Non-null for non-blocking
85);
86
87// MPI_Scan or MPI_Exscan
88template<class Type>
89void scanReduce
90(
91 const Type* sendData,
92 Type* recvData,
93 int count,
94 MPI_Datatype datatype,
95 MPI_Op optype,
96 const int communicator, // Communicator
97 const int exclusive // Use Exscan instead of Scan
98);
99
100// MPI_Alltoall or MPI_Ialltoall with one element per rank
101template<class Type>
102void allToAll
103(
104 const UList<Type>& sendData,
105 UList<Type>& recvData,
106 MPI_Datatype datatype,
107 const int communicator, // Communicator
108 UPstream::Request* req = nullptr // Non-null for non-blocking
109);
111
112// MPI_Alltoallv or MPI_Ialltoallv
113template<class Type>
114void allToAllv
115(
116 const Type* sendData,
117 const UList<int>& sendCounts,
118 const UList<int>& sendOffsets,
119
120 Type* recvData,
121 const UList<int>& recvCounts,
122 const UList<int>& recvOffsets,
123
124 MPI_Datatype datatype,
125 const int communicator, // Communicator
126 UPstream::Request* req = nullptr // Non-null for non-blocking
127);
129
130// Non-blocking consensual integer (size) exchange
131template<class Type>
132void allToAllConsensus
133(
134 const UList<Type>& sendData,
135 UList<Type>& recvData,
136 MPI_Datatype datatype,
137 const int tag, // Message tag
138 const int communicator // Communicator
139);
141
142// Non-blocking consensual integer (size) exchange
143template<class Type>
144void allToAllConsensus
145(
146 const Map<Type>& sendData,
147 Map<Type>& recvData,
148 MPI_Datatype datatype,
149 const int tag, // Message tag
150 const int communicator // Communicator
151);
152
154// MPI_Gather or MPI_Igather
155// Uses recvData as send/recv when sendData is nullptr
156template<class Type>
157void gather
158(
159 const Type* sendData, // Local send value
160 Type* recvData, // On master: recv buffer. Ignored elsewhere
161 int count, // Per rank send/recv count. Globally consistent!
162 MPI_Datatype datatype, // The send/recv data type
163 const int communicator, // Communicator
164 UPstream::Request* req = nullptr // Non-null for non-blocking
165);
166
168// MPI_Scatter or MPI_Iscatter
169// Uses recvData as send/recv when sendData is nullptr
170template<class Type>
171void scatter
172(
173 const Type* sendData, // On master: send buffer. Ignored elsewhere
174 Type* recvData, // Local recv value
175 int count, // Per rank send/recv count. Globally consistent!
176 MPI_Datatype datatype, // The send/recv data type
177 const int communicator, // Communicator
178 UPstream::Request* req = nullptr // Non-null for non-blocking
179);
181
182// MPI_Gatherv or MPI_Igatherv
183template<class Type>
184void gatherv
185(
186 const Type* sendData,
187 int sendCount, // Ignored on master if recvCounts[0] == 0
188
189 Type* recvData, // Ignored on non-root rank
190 const UList<int>& recvCounts, // Ignored on non-root rank
191 const UList<int>& recvOffsets, // Ignored on non-root rank
192
193 MPI_Datatype datatype, // The send/recv data type
194 const int communicator, // Communicator
195 UPstream::Request* req = nullptr // Non-null for non-blocking
196);
198
199// MPI_Scatterv or MPI_Iscatterv
200template<class Type>
201void scatterv
202(
203 const Type* sendData, // Ignored on non-root rank
204 const UList<int>& sendCounts, // Ignored on non-root rank
205 const UList<int>& sendOffsets, // Ignored on non-root rank
206
207 Type* recvData,
208 int recvCount,
209
210 MPI_Datatype datatype, // The send/recv data type
211 const int communicator, // Communicator
212 UPstream::Request* req = nullptr // Non-null for non-blocking
213);
215
216// MPI_Allgather or MPI_Iallgather
217template<class Type>
218void allGather
219(
220 Type* allData, // The send/recv data
221 int count, // The send/recv count per element
222
223 MPI_Datatype datatype, // The send/recv data type
224 const int communicator, // Communicator
225 UPstream::Request* req = nullptr // Non-null for non-blocking
226);
227
228
229// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
230
231} // End namespace PstreamDetail
232} // End namespace Foam
233
234// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
235
236#include "UPstreamWrapping.txx"
237
238// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
239
240#endif
241
242// ************************************************************************* //
A HashTable to objects of type <T> with a label key.
Definition Map.H:54
A 1D vector of objects of type <T>, where the size of the vector is known and can be used for subscri...
Definition UList.H:89
An opaque wrapper for MPI_Request with a vendor-independent representation without any <mpi....
Definition UPstream.H:2919
Implementation details for UPstream/Pstream/MPI etc.
Definition UPstream.H:57
void scatterv(const Type *sendData, const UList< int > &sendCounts, const UList< int > &sendOffsets, Type *recvData, int recvCount, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
void gather(const Type *sendData, Type *recvData, int count, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
void allToAllv(const Type *sendData, const UList< int > &sendCounts, const UList< int > &sendOffsets, Type *recvData, const UList< int > &recvCounts, const UList< int > &recvOffsets, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
void scatter(const Type *sendData, Type *recvData, int count, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
void reduce(const Type *sendData, Type *values, int count, MPI_Datatype datatype, MPI_Op optype, const int communicator, UPstream::Request *req=nullptr)
void allToAllConsensus(const UList< Type > &sendData, UList< Type > &recvData, MPI_Datatype datatype, const int tag, const int communicator)
void gatherv(const Type *sendData, int sendCount, Type *recvData, const UList< int > &recvCounts, const UList< int > &recvOffsets, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
void allReduce(Type *values, int count, MPI_Datatype datatype, MPI_Op optype, const int communicator, UPstream::Request *req=nullptr)
void allToAll(const UList< Type > &sendData, UList< Type > &recvData, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
void scanReduce(const Type *sendData, Type *recvData, int count, MPI_Datatype datatype, MPI_Op optype, const int communicator, const int exclusive)
void allGather(Type *allData, int count, MPI_Datatype datatype, const int communicator, UPstream::Request *req=nullptr)
bool broadcast(Type *values, int count, MPI_Datatype datatype, const int communicator, const int root=0)
Namespace for OpenFOAM.
Header for low-level interfaces between MPI and OpenFOAM. The detail interfaces are subject to change...