Loading...
Searching...
No Matches
UPstreamRequest.C
Go to the documentation of this file.
1/*---------------------------------------------------------------------------*\
2 ========= |
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 \\ / O peration |
5 \\ / A nd | www.openfoam.com
6 \\/ M anipulation |
7-------------------------------------------------------------------------------
8 Copyright (C) 2011 OpenFOAM Foundation
9 Copyright (C) 2023-2025 OpenCFD Ltd.
10-------------------------------------------------------------------------------
11License
12 This file is part of OpenFOAM.
13
14 OpenFOAM is free software: you can redistribute it and/or modify it
15 under the terms of the GNU General Public License as published by
16 the Free Software Foundation, either version 3 of the License, or
17 (at your option) any later version.
18
19 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
20 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
26
27\*---------------------------------------------------------------------------*/
28
29#include "UPstreamWrapping.H"
30#include "PstreamGlobals.H"
31#include "profilingPstream.H"
32
33// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
34
36:
37 UPstream::Request(MPI_REQUEST_NULL)
38{}
39
40
41// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
42
44{
45 return (MPI_REQUEST_NULL != PstreamUtils::Cast::to_mpi(*this));
46}
47
48
50{
51 *this = UPstream::Request(MPI_REQUEST_NULL);
52}
53
54
55// * * * * * * * * * * * * * Static Member Functions * * * * * * * * * * * * //
56
57// Foam::UPstream::Request
58// Foam::UPstream::Request::lookup(const label req)
59// {
60// if (req < 0 || req >= PstreamGlobals::outstandingRequests_.size())
61// {
62// WarningInFunction
63// << "Illegal request " << req << nl
64// << "Should be within range [0,"
65// << PstreamGlobals::outstandingRequests_.size()
66// << ')' << endl;
67//
68// return UPstream::Request(MPI_REQUEST_NULL);
69// }
70//
71// return UPstream::Request(PstreamGlobals::outstandingRequests_[req]);
72// }
73
74
75// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
76
78{
80}
81
82
83void Foam::UPstream::resetRequests(const label n)
84{
85 if (n >= 0 && n < PstreamGlobals::outstandingRequests_.size())
86 {
88 }
89}
90
91
92void Foam::UPstream::addRequest(UPstream::Request& req)
93{
94 // No-op for non-parallel
95 if (!UPstream::parRun())
96 {
97 return;
98 }
99
100 {
101 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
102 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
103 {
104 PstreamGlobals::outstandingRequests_.push_back(request);
105 }
106 }
107
108 // Invalidate parameter
109 req = UPstream::Request(MPI_REQUEST_NULL);
110}
111
112
113void Foam::UPstream::cancelRequest(const label i)
114{
115 // No-op for non-parallel, or out-of-range (eg, placeholder indices)
116 if
117 (
119 || (i < 0 || i >= PstreamGlobals::outstandingRequests_.size())
120 )
121 {
122 return;
123 }
124
125 {
126 auto& request = PstreamGlobals::outstandingRequests_[i];
127 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
128 {
129 MPI_Cancel(&request);
130 MPI_Request_free(&request); //<- Sets to MPI_REQUEST_NULL
131 }
132 }
133}
134
135
136void Foam::UPstream::cancelRequest(UPstream::Request& req)
137{
138 // No-op for non-parallel
139 if (!UPstream::parRun())
140 {
141 return;
142 }
143
144 {
145 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
146 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
147 {
148 MPI_Cancel(&request);
149 MPI_Request_free(&request);
150 }
151 req = UPstream::Request(MPI_REQUEST_NULL); // Now inactive
152 }
153}
154
155
156void Foam::UPstream::cancelRequests(UList<UPstream::Request>& requests)
157{
158 // No-op for non-parallel
159 if (!UPstream::parRun())
160 {
161 return;
162 }
163
164 for (auto& req : requests)
165 {
166 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
167 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
168 {
169 MPI_Cancel(&request);
170 MPI_Request_free(&request);
171 }
172 }
173
174 // Everything handled, reset all to MPI_REQUEST_NULL
175 requests = UPstream::Request(MPI_REQUEST_NULL);
176}
177
178
179void Foam::UPstream::removeRequests(label pos, label len)
180{
181 // No-op for non-parallel, no pending requests or out-of-range
182 if
183 (
186 || !len
187 )
188 {
189 return;
190 }
191
193
194 // Apply range-checking on slice with (len < 0) behaving like npos
195 // (ie, the rest of the list)
196 if (len >= 0 && len < count)
197 {
198 // A non-trailing slice
199 count = len;
200 }
201 // Have count >= 1
202
203 const labelRange range(pos, count);
204
205 for (const label i : range)
206 {
207 auto& request = PstreamGlobals::outstandingRequests_[i];
208 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
209 {
210 MPI_Cancel(&request);
211 MPI_Request_free(&request); //<- Sets to MPI_REQUEST_NULL
212 }
213 }
214
215 // Remove from list of outstanding requests and move down
217}
218
219
220void Foam::UPstream::freeRequest(UPstream::Request& req)
221{
222 // No-op for non-parallel
223 if (!UPstream::parRun())
224 {
225 return;
226 }
227
228 {
229 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
230 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
231 {
232 // if (cancel)
233 // {
234 // MPI_Cancel(&request);
235 // }
236 MPI_Request_free(&request);
237 }
238 req = UPstream::Request(MPI_REQUEST_NULL); // Now inactive
239 }
240}
241
242
243void Foam::UPstream::freeRequests(UList<UPstream::Request>& requests)
244{
245 // No-op for non-parallel
246 if (!UPstream::parRun())
247 {
248 return;
249 }
250
251 for (auto& req : requests)
252 {
253 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
254 if (MPI_REQUEST_NULL != request) // Active handle is mandatory
255 {
256 // if (cancel)
257 // {
258 // MPI_Cancel(&request);
259 // }
260 MPI_Request_free(&request);
261 }
262 }
263
264 // Everything handled, reset all to MPI_REQUEST_NULL
265 requests = UPstream::Request(MPI_REQUEST_NULL);
266}
267
268
269void Foam::UPstream::waitRequests(label pos, label len)
270{
271 // No-op for non-parallel, no pending requests or out-of-range
272 if
273 (
276 || !len
277 )
278 {
279 return;
280 }
281
283 bool trim = true; // Can trim the trailing part of the list
284
285 // Apply range-checking on slice with (len < 0) behaving like npos
286 // (ie, the rest of the list)
287 if (len >= 0 && len < count)
288 {
289 // A non-trailing slice
290 count = len;
291 trim = false;
292 }
293 // Have count >= 1
294
295 auto* mpiRequests = (PstreamGlobals::outstandingRequests_.data() + pos);
296
297 if (UPstream::debug)
298 {
299 Perr<< "UPstream::waitRequests : starting wait for "
300 << count << " requests starting at " << pos << endl;
301 }
302
304
305 if (count == 1)
306 {
307 // On success: sets request to MPI_REQUEST_NULL
308 if (MPI_Wait(mpiRequests, MPI_STATUS_IGNORE))
309 {
311 << "MPI_Wait returned with error"
313 }
314 }
315 else if (count > 1)
316 {
317 // On success: sets each request to MPI_REQUEST_NULL
318 if (MPI_Waitall(count, mpiRequests, MPI_STATUSES_IGNORE))
319 {
321 << "MPI_Waitall returned with error"
323 }
324 }
325
327
328 if (trim)
329 {
330 // Trim the length of outstanding requests
332 }
333
334 if (UPstream::debug)
335 {
336 Perr<< "UPstream::waitRequests : finished wait." << endl;
337 }
338}
339
340
341void Foam::UPstream::waitRequests(UList<UPstream::Request>& requests)
342{
343 // No-op for non-parallel or no pending requests
344 if (!UPstream::parRun() || requests.empty())
345 {
346 return;
347 }
348
349 // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
350 // which is always large enough to hold an MPI_Request (int or pointer)
351
352 int count = 0;
353 auto* mpiRequests = reinterpret_cast<MPI_Request*>(requests.data());
354
355 // Transcribe, but pre-filter to eliminate inactive requests
356 for (auto& req : requests)
357 {
358 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
359
360 if (MPI_REQUEST_NULL != request) // Apply some prefiltering
361 {
362 mpiRequests[count] = request;
363 ++count;
364 }
365 }
366
367 if (!count)
368 {
369 // No active request handles
370 return;
371 }
372
374
375 // On success: sets each request to MPI_REQUEST_NULL
376 if (MPI_Waitall(count, mpiRequests, MPI_STATUSES_IGNORE))
377 {
379 << "MPI_Waitall returned with error"
381 }
382
384
385 // Everything handled, reset all to MPI_REQUEST_NULL
386 requests = UPstream::Request(MPI_REQUEST_NULL);
387}
388
389
390bool Foam::UPstream::waitAnyRequest(label pos, label len)
391{
392 // No-op for non-parallel, no pending requests or out-of-range
393 if
394 (
397 || !len
398 )
399 {
400 return false;
401 }
402
404
405 // Apply range-checking on slice with (len < 0) behaving like npos
406 // (ie, the rest of the list)
407 if (len >= 0 && len < count)
408 {
409 // A non-trailing slice
410 count = len;
411 }
412 // Have count >= 1
413
414 auto* mpiRequests = (PstreamGlobals::outstandingRequests_.data() + pos);
415
416 if (UPstream::debug)
417 {
418 Perr<< "UPstream::waitAnyRequest : starting wait for any of "
419 << count << " requests starting at " << pos << endl;
420 }
421
423
424 // On success: sets request to MPI_REQUEST_NULL
425 int index = MPI_UNDEFINED;
426 if (MPI_Waitany(count, mpiRequests, &index, MPI_STATUS_IGNORE))
427 {
429 << "MPI_Waitany returned with error"
431 }
432
434
435 if (index == MPI_UNDEFINED)
436 {
437 // No active request handles
438 return false;
439 }
440
441 return true;
442}
443
444
446(
447 label pos,
448 label len,
449 DynamicList<int>* indices
450)
451{
452 // No-op for non-parallel, no pending requests or out-of-range
453 if
454 (
457 || !len
458 )
459 {
460 if (indices) indices->clear();
461 return false;
462 }
463
465
466 // Apply range-checking on slice with (len < 0) behaving like npos
467 // (ie, the rest of the list)
468 if (len >= 0 && len < count)
469 {
470 // A non-trailing slice
471 count = len;
472 }
473 // Have count >= 1
474
475 auto* mpiRequests = (PstreamGlobals::outstandingRequests_.data() + pos);
476
477 if (UPstream::debug)
478 {
479 Perr<< "UPstream:waitSomeRequest : starting wait for some of "
480 << count << " requests starting at " << pos << endl;
481 }
482
483
484 // Local temporary storage, or return via calling parameter
485 List<int> tmpIndices;
486 if (indices)
487 {
488 indices->resize_nocopy(count);
489 }
490 else
491 {
492 tmpIndices.resize(count);
493 }
494
496
497 // On success: sets non-blocking requests to MPI_REQUEST_NULL
498 int outcount = 0;
499 if
500 (
501 MPI_Waitsome
502 (
503 count,
504 mpiRequests,
505 &outcount,
506 (indices ? indices->data() : tmpIndices.data()),
507 MPI_STATUSES_IGNORE
508 )
509 )
510 {
512 << "MPI_Waitsome returned with error"
514 }
515
517
518 if (outcount == MPI_UNDEFINED || outcount < 1)
519 {
520 // No active request handles
521 if (indices) indices->clear();
522 return false;
523 }
524
525 if (indices)
526 {
527 indices->resize(outcount);
528 }
529
530 return true;
531}
532
533
535(
536 UList<UPstream::Request>& requests,
537 DynamicList<int>* indices
538)
539{
540 // No-op for non-parallel or no pending requests
541 if (!UPstream::parRun() || requests.empty())
542 {
543 if (indices) indices->clear();
544 return false;
545 }
546
547 // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
548 // which is always large enough to hold an MPI_Request (int or pointer)
549
550 const int count = static_cast<int>(requests.size());
551 auto* mpiRequests = reinterpret_cast<MPI_Request*>(requests.data());
552
553 // Transcribe without changing locations
554 for (int i = 0; i < count; ++i)
555 {
556 mpiRequests[i] = PstreamUtils::Cast::to_mpi(requests[i]);
557 }
558
559 // Local temporary storage, or return via calling parameter
560 List<int> tmpIndices;
561 if (indices)
562 {
563 indices->resize_nocopy(count);
564 }
565 else
566 {
567 tmpIndices.resize(count);
568 }
569
570 if (UPstream::debug)
571 {
572 Perr<< "UPstream:waitSomeRequest : starting wait for some of "
573 << requests.size() << " requests" << endl;
574 }
575
577
578 // On success: sets non-blocking requests to MPI_REQUEST_NULL
579 int outcount = 0;
580 if
581 (
582 MPI_Waitsome
583 (
584 count,
585 mpiRequests,
586 &outcount,
587 (indices ? indices->data() : tmpIndices.data()),
588 MPI_STATUSES_IGNORE
589 )
590 )
591 {
593 << "MPI_Waitsome returned with error"
595 }
596
598
599 if (outcount == MPI_UNDEFINED || outcount < 1)
600 {
601 // No active request handles
602 if (indices) indices->clear();
603
604 // Everything handled or inactive, reset all to MPI_REQUEST_NULL
605 requests = UPstream::Request(MPI_REQUEST_NULL);
606 return false;
607 }
608
609 if (indices)
610 {
611 indices->resize(outcount);
612 }
613
614 // Transcribe MPI_Request back into UPstream::Request
615 // - do in reverse order - see note in finishedRequests()
616 {
617 for (label i = requests.size()-1; i >= 0; --i)
618 {
619 requests[i] = UPstream::Request(mpiRequests[i]);
620 }
621 }
622
623 return true;
624}
625
626
627int Foam::UPstream::waitAnyRequest(UList<UPstream::Request>& requests)
628{
629 // No-op for non-parallel or no pending requests
630 if (!UPstream::parRun() || requests.empty())
631 {
632 return -1;
633 }
634
635 // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
636 // which is always large enough to hold an MPI_Request (int or pointer)
637
638 const int count = static_cast<int>(requests.size());
639 auto* mpiRequests = reinterpret_cast<MPI_Request*>(requests.data());
640
641 // Transcribe UPstream::Request into MPI_Request
642 // - do not change locations within the list since these are relevant
643 // for the return index.
644 for (int i = 0; i < count; ++i)
645 {
646 mpiRequests[i] = PstreamUtils::Cast::to_mpi(requests[i]);
647 }
648
650
651 // On success: sets request to MPI_REQUEST_NULL
652 int index = MPI_UNDEFINED;
653 if (MPI_Waitany(count, mpiRequests, &index, MPI_STATUS_IGNORE))
654 {
656 << "MPI_Waitany returned with error"
658 }
659
661
662 if (index == MPI_UNDEFINED)
663 {
664 index = -1; // No outstanding requests
665 }
666
667 // Transcribe MPI_Request back into UPstream::Request
668 // - do in reverse order - see note in finishedRequests()
669 {
670 for (label i = requests.size()-1; i >= 0; --i)
671 {
672 requests[i] = UPstream::Request(mpiRequests[i]);
673 }
674 }
675
676 return index;
677}
678
679
680// FUTURE?
681//
744
745
746void Foam::UPstream::waitRequest(const label i)
747{
748 // No-op for non-parallel, or out-of-range (eg, placeholder indices)
749 if
750 (
752 || (i < 0 || i >= PstreamGlobals::outstandingRequests_.size())
753 )
754 {
755 return;
756 }
757
758 auto& request = PstreamGlobals::outstandingRequests_[i];
759
760 // No-op for null request
761 if (MPI_REQUEST_NULL == request)
762 {
763 return;
764 }
765
766 if (UPstream::debug)
767 {
768 Perr<< "UPstream::waitRequest : starting wait for request:"
769 << i << endl;
770 }
771
773
774 // On success: sets request to MPI_REQUEST_NULL
775 if (MPI_Wait(&request, MPI_STATUS_IGNORE))
776 {
778 << "MPI_Wait returned with error"
780 }
781
783
784 if (UPstream::debug)
785 {
786 Perr<< "UPstream::waitRequest : finished wait for request:"
787 << i << endl;
788 }
789}
790
791
792void Foam::UPstream::waitRequest(UPstream::Request& req)
793{
794 // No-op for non-parallel
795 if (!UPstream::parRun())
796 {
797 return;
798 }
799
800 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
801
802 // No-op for null request
803 if (MPI_REQUEST_NULL == request)
804 {
805 return;
806 }
807
809
810 if (MPI_Wait(&request, MPI_STATUS_IGNORE))
811 {
813 << "MPI_Wait returned with error"
815 }
816
818
819 req = UPstream::Request(MPI_REQUEST_NULL); // Now inactive
820}
821
822
823bool Foam::UPstream::activeRequest(const label i)
824{
825 return
826 (
827 (i >= 0 && i < PstreamGlobals::outstandingRequests_.size())
828 && (MPI_REQUEST_NULL != PstreamGlobals::outstandingRequests_[i])
829 );
830}
831
832
833bool Foam::UPstream::activeRequest(const UPstream::Request& req)
834{
835 // Same as UPstream::Request::active()
836 return (MPI_REQUEST_NULL != PstreamUtils::Cast::to_mpi(req));
837}
838
839
840bool Foam::UPstream::finishedRequest(const label i)
841{
842 // No-op for non-parallel
843 if (!UPstream::parRun())
844 {
845 return true;
846 }
847
848 if (UPstream::debug)
849 {
850 Perr<< "UPstream::finishedRequest : check request:"
851 << i << endl;
852 }
853
854 // NB: call MPI_Test() even with out-of-range or an inactive handle.
855 // This allows MPI to progress behind the scenes if it wishes.
856
857 int flag = 0;
858 if (i >= 0 && i < PstreamGlobals::outstandingRequests_.size())
859 {
860 auto& request = PstreamGlobals::outstandingRequests_[i];
861
862 // On success: sets request to MPI_REQUEST_NULL
863 MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
864 }
865 else
866 {
867 // Pass a dummy request (for progress side-effect)
868 MPI_Request request = MPI_REQUEST_NULL;
869 MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
870 }
871
872 return (flag != 0);
873}
874
875
876bool Foam::UPstream::finishedRequest(UPstream::Request& req)
877{
878 // No-op for non-parallel
879 if (!UPstream::parRun())
880 {
881 return true;
882 }
883
884 MPI_Request request = PstreamUtils::Cast::to_mpi(req);
885
886 // NB: call MPI_Test() even with an inactive handle.
887 // This allows MPI to progress behind the scenes if it wishes.
888
889 int flag = 0;
890 MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
891
892 // Sync values
893 req = UPstream::Request(request);
894
895 return (flag != 0);
896}
897
898
899bool Foam::UPstream::finishedRequests(label pos, label len)
900{
901 // No-op for non-parallel
902 if (!UPstream::parRun())
903 {
904 return true;
905 }
906
907 // Out-of-range (eg, placeholder indices)
908 if
909 (
911 || !len
912 )
913 {
914 pos = 0;
915 len = 0;
916 }
917
919
920 // Apply range-checking on slice with (len < 0) behaving like npos
921 // (ie, the rest of the list)
922 if (len >= 0 && len < count)
923 {
924 // A non-trailing slice
925 count = len;
926 }
927 // Have count >= 1
928
929 if (UPstream::debug)
930 {
931 Perr<< "UPstream::finishedRequests : check " << count
932 << " requests starting at " << pos << endl;
933 }
934
935 auto* mpiRequests = (PstreamGlobals::outstandingRequests_.data() + pos);
936
937 int flag = 1;
938
939 if (count <= 0)
940 {
941 // No requests
942
943 // Pass a dummy request (for progress side-effect)
944 MPI_Request request = MPI_REQUEST_NULL;
945 MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
946 }
947 else if (count == 1)
948 {
949 // Single request
950
951 // On success: sets request to MPI_REQUEST_NULL
952 MPI_Test(mpiRequests, &flag, MPI_STATUS_IGNORE);
953 }
954 else // (count > 1)
955 {
956 // On success: sets each request to MPI_REQUEST_NULL
957 // On failure: no request is modified
958 MPI_Testall(count, mpiRequests, &flag, MPI_STATUSES_IGNORE);
959 }
960
961 return (flag != 0);
962}
963
964
965bool Foam::UPstream::finishedRequests(UList<UPstream::Request>& requests)
966{
967 // No-op for non-parallel or no pending requests
968 if (!UPstream::parRun() || requests.empty())
969 {
970 return true;
971 }
972
973 // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
974 // which is always large enough to hold an MPI_Request (int or pointer)
975
976 const int count = static_cast<int>(requests.size());
977 auto* mpiRequests = reinterpret_cast<MPI_Request*>(requests.data());
978
979 // Transcribe
980 for (int i = 0; i < count; ++i)
981 {
982 mpiRequests[i] = PstreamUtils::Cast::to_mpi(requests[i]);
983 }
984
985
986 // NB: call MPI_Test() even with an inactive handle.
987 // This allows MPI to progress behind the scenes if it wishes.
988
989 int flag = 1;
990 if (count <= 0)
991 {
992 // No requests
993
994 // Pass a dummy request (for progress side-effect)
995 MPI_Request request = MPI_REQUEST_NULL;
996 MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
997 }
998 else if (count == 1)
999 {
1000 // Single request
1001
1002 // On success: sets request to MPI_REQUEST_NULL
1003 MPI_Test(mpiRequests, &flag, MPI_STATUS_IGNORE);
1004 }
1005 else // (count > 1)
1006 {
1007 // On success: sets each request to MPI_REQUEST_NULL
1008 // On failure: no request is modified
1009 MPI_Testall(count, mpiRequests, &flag, MPI_STATUSES_IGNORE);
1010 }
1011
1012
1013 // Transcribe MPI_Request back into UPstream::Request
1014 // - do in reverse order - see note in finishedRequests()
1015 {
1016 for (label i = requests.size()-1; i >= 0; --i)
1017 {
1018 requests[i] = UPstream::Request(mpiRequests[i]);
1019 }
1020 }
1021
1022 return (flag != 0);
1023}
1024
1025
1026bool Foam::UPstream::finishedRequestPair(label& req0, label& req1)
1027{
1028 // No-op for non-parallel
1029 if (!UPstream::parRun())
1030 {
1031 req0 = -1;
1032 req1 = -1;
1033 return true;
1034 }
1035
1036 bool anyActive = false;
1037 MPI_Request mpiRequests[2];
1038
1039 // No-op for out-of-range (eg, placeholder indices)
1040
1041 if (req0 >= 0 && req0 < PstreamGlobals::outstandingRequests_.size())
1042 {
1043 mpiRequests[0] = PstreamGlobals::outstandingRequests_[req0];
1044 }
1045 else
1046 {
1047 mpiRequests[0] = MPI_REQUEST_NULL;
1048 }
1049
1050 if (req1 >= 0 && req1 < PstreamGlobals::outstandingRequests_.size())
1051 {
1052 mpiRequests[1] = PstreamGlobals::outstandingRequests_[req1];
1053 }
1054 else
1055 {
1056 mpiRequests[1] = MPI_REQUEST_NULL;
1057 }
1058
1059 if (MPI_REQUEST_NULL != mpiRequests[0]) // An active handle
1060 {
1061 anyActive = true;
1062 }
1063 else
1064 {
1065 req0 = -1;
1066 }
1067
1068 if (MPI_REQUEST_NULL != mpiRequests[1]) // An active handle
1069 {
1070 anyActive = true;
1071 }
1072 else
1073 {
1074 req1 = -1;
1075 }
1076
1077 if (!anyActive)
1078 {
1079 // No active handles
1080 return true;
1081 }
1082
1084
1085 // On success: sets each request to MPI_REQUEST_NULL
1086 int indices[2];
1087 int outcount = 0;
1088 if
1089 (
1090 MPI_Testsome
1091 (
1092 2,
1093 mpiRequests,
1094 &outcount,
1095 indices,
1096 MPI_STATUSES_IGNORE
1097 )
1098 )
1099 {
1101 << "MPI_Testsome returned with error"
1103 }
1104
1106
1107 if (outcount == MPI_UNDEFINED)
1108 {
1109 // No active request handles.
1110 // Slight pedantic, but copy back requests in case they were altered
1111
1112 if (req0 >= 0)
1113 {
1114 PstreamGlobals::outstandingRequests_[req0] = mpiRequests[0];
1115 }
1116
1117 if (req1 >= 0)
1118 {
1119 PstreamGlobals::outstandingRequests_[req1] = mpiRequests[1];
1120 }
1121
1122 // Flag indices as 'done'
1123 req0 = -1;
1124 req1 = -1;
1125 return true;
1126 }
1127
1128 // Copy back requests to their 'stack' locations
1129 for (int i = 0; i < outcount; ++i)
1130 {
1131 const int idx = indices[i];
1132
1133 if (idx == 0)
1134 {
1135 if (req0 >= 0)
1136 {
1137 PstreamGlobals::outstandingRequests_[req0] = mpiRequests[0];
1138 req0 = -1;
1139 }
1140 }
1141 if (idx == 1)
1142 {
1143 if (req1 >= 0)
1144 {
1145 PstreamGlobals::outstandingRequests_[req1] = mpiRequests[1];
1146 req1 = -1;
1147 }
1148 }
1149 }
1150
1151 return (outcount > 0);
1152}
1153
1154
1155void Foam::UPstream::waitRequestPair(label& req0, label& req1)
1156{
1157 // No-op for non-parallel. Flag indices as 'done'
1158 if (!UPstream::parRun())
1159 {
1160 req0 = -1;
1161 req1 = -1;
1162 return;
1163 }
1164
1165 int count = 0;
1166 MPI_Request mpiRequests[2];
1167
1168 // No-op for out-of-range (eg, placeholder indices)
1169 // Prefilter inactive handles
1170
1171 if (req0 >= 0 && req0 < PstreamGlobals::outstandingRequests_.size())
1172 {
1173 mpiRequests[count] = PstreamGlobals::outstandingRequests_[req0];
1174 PstreamGlobals::outstandingRequests_[req0] = MPI_REQUEST_NULL;
1175
1176 if (MPI_REQUEST_NULL != mpiRequests[count]) // An active handle
1177 {
1178 ++count;
1179 }
1180 }
1181
1182 if (req1 >= 0 && req1 < PstreamGlobals::outstandingRequests_.size())
1183 {
1184 mpiRequests[count] = PstreamGlobals::outstandingRequests_[req1];
1185 PstreamGlobals::outstandingRequests_[req1] = MPI_REQUEST_NULL;
1186
1187 if (MPI_REQUEST_NULL != mpiRequests[count]) // An active handle
1188 {
1189 ++count;
1190 }
1191 }
1192
1193 // Flag in advance as being handled
1194 req0 = -1;
1195 req1 = -1;
1196
1197 if (!count)
1198 {
1199 // No active handles
1200 return;
1201 }
1202
1204
1205 // On success: sets each request to MPI_REQUEST_NULL
1206 if (MPI_Waitall(count, mpiRequests, MPI_STATUSES_IGNORE))
1207 {
1209 << "MPI_Waitall returned with error"
1211 }
1212
1214}
1215
1216
1217// ************************************************************************* //
scalar range
label n
Functions to wrap MPI_Bcast, MPI_Allreduce, MPI_Iallreduce etc.
void reset() noexcept
Reset to default constructed value (MPI_REQUEST_NULL).
bool good() const noexcept
True if not equal to MPI_REQUEST_NULL.
Request() noexcept
Default construct as MPI_REQUEST_NULL.
static void freeRequest(UPstream::Request &req)
Non-blocking comms: free outstanding request. Corresponds to MPI_Request_free().
static bool waitSomeRequests(label pos, label len=-1, DynamicList< int > *indices=nullptr)
Wait until some requests (from position onwards) have finished. Corresponds to MPI_Waitsome().
static bool finishedRequests(label pos, label len=-1)
Non-blocking comms: have all requests (from position onwards) finished? Corresponds to MPI_Testall().
static void addRequest(UPstream::Request &req)
Transfer the (wrapped) MPI request to the internal global list and invalidate the parameter (ignores inactive handles).
static label nRequests() noexcept
Number of outstanding requests (on the internal list of requests).
static void cancelRequest(const label i)
Non-blocking comms: cancel and free outstanding request. Corresponds to MPI_Cancel() + MPI_Request_free().
static void resetRequests(const label n)
Truncate outstanding requests to given length, which is expected to be in the range [0 to nRequests()...
static void freeRequests(UList< UPstream::Request > &requests)
Non-blocking comms: free outstanding requests. Corresponds to MPI_Request_free().
static bool finishedRequestPair(label &req0, label &req1)
Non-blocking comms: have both requests finished? Corresponds to pair of MPI_Test().
static void cancelRequests(UList< UPstream::Request > &requests)
Non-blocking comms: cancel and free outstanding requests. Corresponds to MPI_Cancel() + MPI_Request_free().
static bool waitAnyRequest(label pos, label len=-1)
Wait until any request (from position onwards) has finished. Corresponds to MPI_Waitany().
static void removeRequests(label pos, label len=-1)
Non-blocking comms: cancel/free outstanding requests (from position onwards) and remove from internal...
static void waitRequestPair(label &req0, label &req1)
Non-blocking comms: wait for both requests to finish. Corresponds to pair of MPI_Wait().
static void waitRequests()
Wait for all requests to finish.
Definition UPstream.H:2497
static bool activeRequest(const label i)
Is request i active (!= MPI_REQUEST_NULL)?
static bool finishedRequest(const label i)
Non-blocking comms: has request i finished? Corresponds to MPI_Test().
static void waitRequest(const label i)
Wait until request i has finished. Corresponds to MPI_Wait().
static bool & parRun() noexcept
Test if this a parallel run.
Definition UPstream.H:1681
static void beginTiming()
Update timer prior to measurement.
static void addWaitTime()
Add time increment to wait time.
#define FatalErrorInFunction
Report an error message using Foam::FatalError.
Definition error.H:600
unsigned int count(const UList< bool > &bools, const bool val=true)
Count number of 'true' entries.
Definition BitOps.H:73
DynamicList< MPI_Request > outstandingRequests_
Outstanding non-blocking operations.
string trim(const std::string &s)
Return string trimmed of leading and trailing whitespace.
dimensionedScalar pos(const dimensionedScalar &ds)
prefixOSstream Perr
OSstream wrapped stderr (std::cerr) with parallel prefix.
Ostream & endl(Ostream &os)
Add newline and flush stream.
Definition Ostream.H:519
errorManip< error > abort(error &err)
Definition errorManip.H:139
const direction noexcept
Definition scalarImpl.H:265
error FatalError
Error stream (stdout output on all processes), with additional 'FOAM FATAL ERROR' header text and sta...
static Type to_mpi(UPstream::Communicator arg) noexcept
Cast UPstream::Communicator to MPI_Comm.