UPstreamWindow.C — OpenFOAM source file (text recovered from a generated documentation page).
1/*---------------------------------------------------------------------------*\
2 ========= |
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 \\ / O peration |
5 \\ / A nd | www.openfoam.com
6 \\/ M anipulation |
7-------------------------------------------------------------------------------
8 Copyright (C) 2025 OpenCFD Ltd.
9-------------------------------------------------------------------------------
10License
11 This file is part of OpenFOAM.
12
13 OpenFOAM is free software: you can redistribute it and/or modify it
14 under the terms of the GNU General Public License as published by
15 the Free Software Foundation, either version 3 of the License, or
16 (at your option) any later version.
17
18 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
19 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
25
26\*---------------------------------------------------------------------------*/
27
28#include "UPstreamWindow.H"
29#include "PstreamGlobals.H"
30#include "profilingPstream.H"
31
32// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
33
35:
36 UPstream::Window(MPI_WIN_NULL)
37{}
38
39
40// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
41
43{
44 return MPI_WIN_NULL != PstreamUtils::Cast::to_mpi(*this);
45}
46
47
49{
50 *this = UPstream::Window(MPI_WIN_NULL);
51}
52
53
55{
56 int val = 0;
57
58 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
59 MPI_Group group;
60
61 // Get num of ranks from the group information
62 if
63 (
64 (MPI_WIN_NULL != win)
65 && (MPI_SUCCESS == MPI_Win_get_group(win, &group))
66 )
67 {
68 if (MPI_SUCCESS != MPI_Group_size(group, &val))
69 {
70 val = 0;
71 }
72 MPI_Group_free(&group);
73 }
74
75 return val;
76}
77
78
79// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
80
81//
82// Allocate a local or shared memory window.
83// Uses MPI_Win_allocate() or MPI_Win_allocate_shared(), respectively.
84//
85static std::pair<void*,int64_t>
87(
89 MPI_Comm communicator,
90
91 // [in] number of elements (not bytes)
92 std::streamsize num_elements,
93 // [in] size of each element == sizeof(Type)
94 const int disp_unit,
95 const bool shared
96)
97{
98 using namespace Foam;
99
100 // No-op for non-parallel
101 if (!UPstream::parRun())
102 {
103 *self = UPstream::Window(MPI_WIN_NULL);
104 return {nullptr, 0};
105 }
106
107 // if (FOAM_UNLIKELY(MPI_COMM_NULL == communicator))
108 // {
109 // FatalErrorInFunction
110 // << "Attempt to use NULL communicator"
111 // << Foam::abort(FatalError);
112 // return false;
113 // }
114
115 MPI_Win win = PstreamUtils::Cast::to_mpi(*self);
116
117 // Stringent handling of existing windows
118 if (FOAM_UNLIKELY(MPI_WIN_NULL != win))
119 {
121 << "Window already exists. Use close() first"
123 return {nullptr, 0};
124 }
125
126 int returnCode(MPI_SUCCESS);
127 void *baseptr = nullptr;
128
129 if (shared)
130 {
131 returnCode = MPI_Win_allocate_shared
132 (
133 // From num elements -> num of bytes
134 std::streamsize(num_elements * disp_unit),
135 disp_unit,
136 MPI_INFO_NULL,
137 communicator,
138 &baseptr,
139 &win
140 );
141 }
142 else
143 {
144 returnCode = MPI_Win_allocate
145 (
146 // From num elements -> num of bytes
147 std::streamsize(num_elements * disp_unit),
148 disp_unit,
149 MPI_INFO_NULL,
150 communicator,
151 &baseptr,
152 &win
153 );
154 }
155
156 if (FOAM_UNLIKELY((MPI_SUCCESS != returnCode) || (MPI_WIN_NULL == win)))
157 {
158 if (shared)
159 {
160 FatalError("MPI_Win_allocate_shared()")
162 }
163 else
164 {
165 FatalError("MPI_Win_allocate()")
167 }
168
169 return {nullptr, 0};
170 }
171
172 // Now have a window
173 *self = UPstream::Window(win);
174
175 // The address and the type-specific count
176 return {baseptr, num_elements};
177}
178
179
180// ------------------------------------------------------------------------- //
181
182std::pair<void*,int64_t>
184(
185 std::streamsize num_elements,
186 int disp_unit,
187 UPstream::Communicator communicator,
188 const bool shared
189)
190{
192 (
193 this,
195
196 num_elements,
197 disp_unit,
198 shared
199 );
200}
201
202
203std::pair<void*,int64_t>
205(
206 std::streamsize num_elements,
207 int disp_unit,
208 int communicator, // Index into MPICommunicators_
209 const bool shared
210)
211{
213 (
214 this,
216
217 num_elements,
218 disp_unit,
219 shared
220 );
221}
222
223
224// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
225
226// NOTE: Currently no
227// - MPI_Win_create_dynamic()
228// - MPI_Win_attach()
229// - MPI_Win_detach()
230// since working with their addresses (and broadcasting them)
231// is fairly painful and probably not particularly efficient either
232
233//
234// Create a window to existing memory with MPI_Win_create().
235//
236static bool call_window_create
237(
239 MPI_Comm communicator,
240
241 // [in] base address
242 void *baseptr,
243 // [in] number of elements (not bytes)
244 std::streamsize num_elements,
245 // [in] size of each element == sizeof(Type)
246 const int disp_unit
247)
248{
249 using namespace Foam;
250
251 // No-op for non-parallel
252 if (!UPstream::parRun())
253 {
254 *self = UPstream::Window(MPI_WIN_NULL);
255 return false;
256 }
257
258 // if (FOAM_UNLIKELY(MPI_COMM_NULL == communicator))
259 // {
260 // using namespace Foam;
261 // FatalErrorInFunction
262 // << "Attempt to use NULL communicator"
263 // << Foam::abort(FatalError);
264 // return false;
265 // }
266
267 MPI_Win win = PstreamUtils::Cast::to_mpi(*self);
268
269 // Stringent handling of existing windows
270 if (FOAM_UNLIKELY(MPI_WIN_NULL != win))
271 {
273 << "Window already exists. Use close() first"
275 return false;
276 }
277
278 // Leave nothing to chance
279 if (!baseptr || !num_elements)
280 {
281 baseptr = nullptr;
282 num_elements = 0;
283 }
284
285 int returnCode = MPI_Win_create
286 (
287 baseptr,
288 // From num elements -> num of bytes
289 std::streamsize(num_elements * disp_unit),
290 disp_unit,
291 MPI_INFO_NULL,
292 communicator,
293 &win
294 );
295
296 if (FOAM_UNLIKELY((MPI_SUCCESS != returnCode) || (MPI_WIN_NULL == win)))
297 {
298 FatalError("MPI_Win_create()")
300 return false;
301 }
302
303 // Now have a window
304 *self = UPstream::Window(win);
305
306 return (MPI_SUCCESS == returnCode);
307}
308
309
311(
312 void *baseptr,
313 std::streamsize num_elements,
314 const int disp_unit,
315 UPstream::Communicator communicator
316)
317{
318 return call_window_create
319 (
320 this,
322
323 baseptr,
324 num_elements,
325 disp_unit
326 );
327}
328
329
331(
332 void *baseptr,
333 std::streamsize num_elements,
334 const int disp_unit,
335 int communicator // Index into MPICommunicators_
336)
337{
338 return call_window_create
339 (
340 this,
342
343 baseptr,
344 num_elements,
345 disp_unit
346 );
347}
348
349
350// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
351
353{
354 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
355
356 if (UPstream::parRun() && (MPI_WIN_NULL != win))
357 {
358 MPI_Win_free(&win);
359 *this = UPstream::Window(MPI_WIN_NULL);
360 }
361}
362
363
364// * * * * * * * * * * * * * * * Synchronization * * * * * * * * * * * * * * //
365
367{
368 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
369
370 if (UPstream::parRun() && (MPI_WIN_NULL != win))
371 {
372 if (rank < 0)
373 {
374 if (local) MPI_Win_flush_local_all(win);
375 else /* */ MPI_Win_flush_all(win);
376 }
377 else
378 {
379 if (local) MPI_Win_flush_local(rank, win);
380 else /* */ MPI_Win_flush(rank, win);
381 }
382 }
383}
384
385
387{
388 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
389
390 if (UPstream::parRun() && (MPI_WIN_NULL != win))
391 {
392 MPI_Win_sync(win);
393 }
394}
395
396
// Entry point to MPI_Win_lock() / MPI_Win_lock_all().
// A negative rank locks all ranks, otherwise only the given rank.
// No-op for non-parallel runs or a null window.
void Foam::UPstream::Window::mpi_win_locking(int rank, bool exclusive)
{
    MPI_Win win = PstreamUtils::Cast::to_mpi(*this);

    if (UPstream::parRun() && (MPI_WIN_NULL != win))
    {
        if (rank < 0)
        {
            // NOTE(review): lock-all has no exclusive/shared distinction;
            // here 'exclusive' selects the MPI_MODE_NOCHECK assertion
            // (caller guarantees no conflicting locks) - confirm intent
            MPI_Win_lock_all
            (
                (exclusive ? MPI_MODE_NOCHECK : 0),
                win
            );
        }
        else
        {
            MPI_Win_lock
            (
                (exclusive ? MPI_LOCK_EXCLUSIVE : MPI_LOCK_SHARED),
                rank,
                0, // No assertion
                win
            );
        }
    }
}
423
424
426{
427 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
428
429 if (UPstream::parRun() && (MPI_WIN_NULL != win))
430 {
431 if (rank < 0)
432 {
433 MPI_Win_unlock_all(win);
434 }
435 else
436 {
437 MPI_Win_unlock(rank, win);
438 }
439 }
440}
441
442
443// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
444
446(
447 void* origin, // Type checking done by caller
448 std::streamsize count,
449 const UPstream::dataTypes dataTypeId,
450 int target_rank,
451 int target_disp
452) const
453{
454 if (!UPstream::parRun() || !origin || !count)
455 {
456 // Nothing to do
457 return true;
458 }
459
460 MPI_Datatype datatype = PstreamGlobals::getDataType(dataTypeId);
461 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
462
463 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
464 {
465 FatalError("MPI_Get()")
466 << "Called with MPI_WIN_NULL."
468 return false;
469 }
470
471 int returnCode = MPI_Get
472 (
473 // origin
474 origin, count, datatype,
475 // target
476 target_rank, target_disp, count, datatype,
477 // window
478 win
479 );
480
481 // Error handling
482 if (FOAM_UNLIKELY(returnCode != MPI_SUCCESS))
483 {
484 FatalError("MPI_Get()")
486 return false;
487 }
488
489 return (MPI_SUCCESS == returnCode);
490}
491
492
494(
495 const void* origin, // Type checking done by caller
496 std::streamsize count,
497 const UPstream::dataTypes dataTypeId,
498 int target_rank,
499 int target_disp
500) const
501{
502 if (!UPstream::parRun() || !origin || !count)
503 {
504 // Nothing to do
505 return true;
506 }
507
508 MPI_Datatype datatype = PstreamGlobals::getDataType(dataTypeId);
509 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
510
511 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
512 {
513 FatalError("MPI_Put()")
514 << "Called with MPI_WIN_NULL."
516 return false;
517 }
518
519 int returnCode = MPI_Put
520 (
521 // origin
522 origin, count, datatype,
523 // target
524 target_rank, target_disp, count, datatype,
525 // window
526 win
527 );
528
529 // Error handling
530 if (FOAM_UNLIKELY(returnCode != MPI_SUCCESS))
531 {
532 FatalError("MPI_Put()")
534 return false;
535 }
536
537 return (MPI_SUCCESS == returnCode);
538}
539
540
542(
543 const UPstream::opCodes opCodeId,
544 const void* origin, // Type checking done by caller
545 std::streamsize count,
546 const UPstream::dataTypes dataTypeId,
547 int target_rank,
548 int target_disp
549) const
550{
551 if (UPstream::opCodes::invalid == opCodeId)
552 {
553 // Regular data put - doesn't use/need an op-type!
554 return this->put_data
555 (
556 origin,
557 count,
558 dataTypeId,
559 target_rank,
560 target_disp
561 );
562 }
563
564 if (!UPstream::parRun() || !origin || !count)
565 {
566 // Nothing to do
567 return true;
568 }
569
570 MPI_Datatype datatype = PstreamGlobals::getDataType(dataTypeId);
571 MPI_Op optype = PstreamGlobals::getOpCode(opCodeId);
572 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
573
574 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
575 {
576 FatalError("MPI_Accumulate()")
577 << "Called with MPI_WIN_NULL."
579 return false;
580 }
581 if (FOAM_UNLIKELY(MPI_OP_NULL == optype))
582 {
583 FatalError("MPI_Accumulate()")
584 << "Invalid opcode:" << int(opCodeId)
585 << " type:" << int(dataTypeId) << " count:" << label(count) << nl
587 return false;
588 }
589
590 int returnCode = MPI_Accumulate
591 (
592 // origin
593 origin, count, datatype,
594 // target
595 target_rank, target_disp, count, datatype,
596 // operation
597 optype,
598 // window
599 win
600 );
601
602 // Error handling
603 if (FOAM_UNLIKELY(returnCode != MPI_SUCCESS))
604 {
605 FatalError("MPI_Accumulate()")
607 return false;
608 }
609
610 return (MPI_SUCCESS == returnCode);
611}
612
613
615(
616 const UPstream::opCodes opCodeId,
617 const void* origin, // Type checking done by caller
618 void* result, // Type checking done by caller
619 const UPstream::dataTypes dataTypeId,
620 int target_rank,
621 int target_disp
622) const
623{
624 if (!UPstream::parRun())
625 {
626 // Fails in non-parallel
627 return false;
628 }
629
630 MPI_Datatype datatype = PstreamGlobals::getDataType(dataTypeId);
631 MPI_Op optype = PstreamGlobals::getOpCode(opCodeId);
632 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
633
634 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
635 {
636 FatalError("MPI_Fetch_and_op()")
637 << "Called with MPI_WIN_NULL."
639 return false;
640 }
641 if (FOAM_UNLIKELY(MPI_OP_NULL == optype))
642 {
643 FatalError("MPI_Fetch_and_op()")
644 << "Invalid opcode:" << int(opCodeId)
645 << " type:" << int(dataTypeId) << nl
647 return false;
648 }
649
650 int returnCode = MPI_Fetch_and_op
651 (
652 origin, result, datatype,
653 // target
654 target_rank, target_disp,
655 // operation
656 optype,
657 // window
658 win
659 );
660
661 // Error handling
662 if (FOAM_UNLIKELY(returnCode != MPI_SUCCESS))
663 {
664 FatalError("MPI_Fetch_and_op()")
666 return false;
667 }
668
669 return (MPI_SUCCESS == returnCode);
671
672
673// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
674
// Check for failure of MPI_Win_get_attr
// (aborts via FatalError when the call failed or the attribute is unset)
#undef CheckFail_Win_get_attr
#define CheckFail_Win_get_attr(returnCode, flag, attribute) \
{ \
    if (FOAM_UNLIKELY((MPI_SUCCESS != returnCode) || !flag)) \
    { \
        FatalError("MPI_Win_get_attr()") \
            << "Failed getting attribute " << attribute << endl \
            << Foam::abort(FatalError); \
    } \
}
686
687
688bool Foam::UPstream::Window::is_shared(const bool failNonShared) const
689{
690 if (!UPstream::parRun())
691 {
692 // Nothing to do
693 return false;
694 }
695
696 MPI_Win win = PstreamUtils::Cast::to_mpi(*this);
697
698 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
699 {
700 return false;
701 }
702
703 // Error handling flags
704 int returnCode(MPI_ERR_UNKNOWN);
705 int flag(1);
706 int flavour(0);
707
708 // MPI_WIN_CREATE_FLAVOR : Type (int *)
709 {
710 // const auto key = MPI_WIN_CREATE_FLAVOR;
711 typedef int value_type;
712 void* val(nullptr);
713
714 returnCode = MPI_Win_get_attr(win, MPI_WIN_CREATE_FLAVOR, &val, &flag);
715 CheckFail_Win_get_attr(returnCode, flag, "MPI_WIN_CREATE_FLAVOR");
716
717 flavour = int
718 (
719 *static_cast<value_type*>(val)
720 );
721 }
722
723 if (failNonShared && (MPI_WIN_FLAVOR_SHARED != flavour))
724 {
726 << "Expecting a shared window but had ("
727 << flavour << ") flavour instead" << endl
729 }
730
731 return (MPI_WIN_FLAVOR_SHARED == flavour);
732}
733
734
735std::pair<void*,int64_t>
737(
738 UPstream::Window window,
739 const int expected_disp_unit
740)
741{
742 if (!UPstream::parRun())
743 {
744 // Nothing to do
745 return {nullptr, 0};
746 }
747
748 MPI_Win win = PstreamUtils::Cast::to_mpi(window);
749
750 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
751 {
752 FatalError("MPI_Win_get_attr()")
753 << "Called with MPI_WIN_NULL."
755 return {nullptr, 0};
756 }
757
758
759 // Error handling flags
760 int returnCode(MPI_ERR_UNKNOWN);
761 int flag(1);
762
763 // Debugging
764 // MPI_WIN_CREATE_FLAVOR : Type (int *)
765 // if (FOAM_UNLIKELY(UPstream::debug & 2))
766 // {
767 // // const auto key = MPI_WIN_CREATE_FLAVOR;
768 // typedef int value_type;
769 // void* val(nullptr);
770 //
771 // returnCode =
772 // MPI_Win_get_attr(win, MPI_WIN_CREATE_FLAVOR, &val, &flag);
773 // CheckFail_Win_get_attr(returnCode, flag, "MPI_WIN_CREATE_FLAVOR");
774 //
775 // int flavour = *static_cast<value_type*>(val);
776 // Perr<< "Window created with flavour (" << flavour << ')' << endl;
777 // }
778
779 std::pair<void*,int64_t> result(nullptr, 0);
780
781 // The window size
782 // MPI_WIN_SIZE : Type (MPI_Aint *)
783 {
784 // const auto key = MPI_WIN_SIZE;
785 typedef MPI_Aint value_type;
786 void* val(nullptr);
787
788 returnCode = MPI_Win_get_attr(win, MPI_WIN_SIZE, &val, &flag);
789 CheckFail_Win_get_attr(returnCode, flag, "MPI_WIN_SIZE");
790
791 result.second = *static_cast<value_type*>(val);
792 }
793
794 // Early exit
795 if (result.second == 0)
796 {
797 return {nullptr, 0};
798 }
799
800 // The base address
801 // MPI_WIN_BASE : Type (void *)
802 {
803 // const auto key = MPI_WIN_BASE;
804 void* value(nullptr);
805
806 returnCode = MPI_Win_get_attr(win, MPI_WIN_BASE, &value, &flag);
807 CheckFail_Win_get_attr(returnCode, flag, "MPI_WIN_BASE");
808
809 result.first = value;
810 }
811
812 // Early exit - this probably can never happen
813 // (ie, nullptr but non-zero size)
814 if (result.first == nullptr)
815 {
816 return {nullptr, 0};
817 }
818
819 // Scale count by the expected displacement unit
820 if (expected_disp_unit)
821 {
822 result.second /= expected_disp_unit;
823
824 int disp_unit = 1;
825
826 // The displacement units
827 // MPI_WIN_DISP_UNIT : Type (int *)
828 {
829 // const auto key = MPI_WIN_DISP_UNIT;
830 typedef int value_type;
831 void* val(nullptr);
832
833 returnCode = MPI_Win_get_attr(win, MPI_WIN_DISP_UNIT, &val, &flag);
834 CheckFail_Win_get_attr(returnCode, flag, "MPI_WIN_DISP_UNIT");
835
836 disp_unit = *static_cast<value_type*>(val);
837 }
838
839 // Error if the expected disp_unit is incorrect
840 // - ignore this check if the window is empty
841
842 if (expected_disp_unit != disp_unit)
843 {
845 << "Window [size=" << result.second
846 << "] created with Type size=" << disp_unit
847 << " but expecting Type size=" << expected_disp_unit << endl
849 }
850 }
851
852 return result;
853}
854
855
856std::pair<void*,int64_t>
858(
859 UPstream::Window window,
860 int target_rank,
861 const int expected_disp_unit
862)
863{
864 if (!UPstream::parRun())
865 {
866 // Nothing to do
867 return {nullptr, 0};
868 }
869
870 MPI_Win win = PstreamUtils::Cast::to_mpi(window);
871
872 if (FOAM_UNLIKELY(MPI_WIN_NULL == win))
873 {
874 FatalError("MPI_Win_shared_query()")
875 << "Called with MPI_WIN_NULL."
877 return {nullptr, 0};
878 }
879
880 // Fail if window is not shared
881 const bool shared = window.is_shared(true);
882
883 if (!shared)
884 {
885 return {nullptr, 0};
886 }
887
888 // Initial values and fallback
889 MPI_Aint num_bytes = 0;
890 void *baseptr = nullptr;
891 int disp_unit = 1;
892
893 int returnCode = MPI_Win_shared_query
894 (
895 win,
896 target_rank,
897 &num_bytes,
898 &disp_unit,
899 &baseptr
900 );
901
902 if (FOAM_UNLIKELY(MPI_SUCCESS != returnCode))
903 {
904 FatalError("MPI_Win_shared_query()")
906 return {nullptr, 0};
907 }
908
909 std::pair<void*,int64_t> result(baseptr, num_bytes);
910
911 // Scale count by the expected displacement unit
912 // - probably Fatal not to supply this value
913 //
914 // Note that with share the baseptr will be non-null even if the
915 // local window has zero bytes. This maintains the contiguous
916 // addressing across all ranks
917
918 if (result.second && expected_disp_unit)
919 {
920 result.second /= expected_disp_unit;
921
922 // Error if the expected disp_unit is incorrect
923 // - ignore this check if the window is empty
924
925 if (expected_disp_unit != disp_unit)
926 {
928 << "Window on rank(" << target_rank
929 << ") [size=" << result.second
930 << "] created with Type size=" << disp_unit
931 << " but expecting Type size=" << expected_disp_unit << endl
933 }
934 }
935
936 return result;
937}
938
939
940#undef CheckFail_Win_get_attr
941
942
943// ************************************************************************* //
An opaque wrapper for MPI_Win with a vendor-independent representation and without any <mpi....
bool mpi_fetch_and_op(const UPstream::opCodes opCodeId, const void *origin, void *result, const UPstream::dataTypes dataTypeId, int target_rank, int target_disp=0) const
Retrieve the remote content (a single value) and then combine in new content.
static std::pair< void *, int64_t > mpi_win_query(UPstream::Window window, const int expected_disp_unit)
Retrieve window sizing information as address/count tuple. The expected sizeof(Type) is supplied as a...
Window() noexcept
Default construct as MPI_WIN_NULL.
void reset() noexcept
Reset to default constructed value (MPI_WIN_NULL).
std::pair< void *, int64_t > mpi_win_allocate(std::streamsize num_elements, int disp_unit, UPstream::Communicator communicator, const bool shared=false)
Allocate a local or shared memory window. Uses MPI_Win_allocate() or MPI_Win_allocate_shared(),...
bool get_data(void *origin, std::streamsize count, const UPstream::dataTypes dataTypeId, int target_rank, int target_disp=0) const
Get buffer contents from given rank.
bool put_data(const void *origin, std::streamsize count, const UPstream::dataTypes dataTypeId, int target_rank, int target_disp=0) const
Put buffer contents to given rank.
void mpi_win_unlocking(int rank)
Entry point to MPI_Win_unlock(), MPI_Win_unlock_all().
bool good() const noexcept
True if not equal to MPI_WIN_NULL.
void close()
MPI_Win_free(). Closes the window view and frees any associated memory,.
void mpi_win_locking(int rank, bool exclusive=false)
Entry point to MPI_Win_lock(), MPI_Win_lock_all(), optionally as exclusive lock.
bool mpi_win_create(void *baseptr, std::streamsize num_elements, int disp_unit, UPstream::Communicator communicator)
Create window onto existing memory with MPI_Win_create().
static std::pair< void *, int64_t > mpi_win_query_shared(UPstream::Window window, int target_rank, const int expected_disp_unit)
Retrieve shared window information as address/count tuple. The expected sizeof(Type) is supplied as a...
void sync()
MPI_Win_sync() - ignored if the window is not active.
bool is_shared(const bool failNonShared=false) const
Test if the window is a shared memory window.
void mpi_win_flushing(int rank, bool local=false)
Entry point to MPI_Win_flush(), MPI_Win_flush_all(), MPI_Win_flush_local(), MPI_Win_flush_local_all()...
int size() const
The number of ranks associated with the window group.
Wrapper for internally indexed communicator label. Always invokes UPstream::allocateCommunicatorCompo...
Definition UPstream.H:2546
Inter-processor communications stream.
Definition UPstream.H:69
static bool parRun(const bool on) noexcept
Set as parallel run on/off.
Definition UPstream.H:1669
opCodes
Mapping of some MPI op codes.
Definition UPstream.H:149
dataTypes
Mapping of some fundamental and aggregate types to MPI data types.
Definition UPstream.H:107
static bool & parRun() noexcept
Test if this a parallel run.
Definition UPstream.H:1681
bool local
Definition EEqn.H:20
#define FatalErrorInFunction
Report an error message using Foam::FatalError.
Definition error.H:600
static std::pair< void *, int64_t > call_window_allocate(Foam::UPstream::Window *self, MPI_Comm communicator, std::streamsize num_elements, const int disp_unit, const bool shared)
#define CheckFail_Win_get_attr(returnCode, flag, attribute)
static bool call_window_create(Foam::UPstream::Window *self, MPI_Comm communicator, void *baseptr, std::streamsize num_elements, const int disp_unit)
MPI_Datatype getDataType(UPstream::dataTypes id)
Lookup of dataTypes enumeration as an MPI_Datatype.
MPI_Op getOpCode(UPstream::opCodes id)
Lookup of opCodes enumeration as an MPI_Op.
DynamicList< MPI_Comm > MPICommunicators_
constexpr const char *const group
Group name for atomic constants.
Namespace for OpenFOAM.
Ostream & endl(Ostream &os)
Add newline and flush stream.
Definition Ostream.H:519
errorManip< error > abort(error &err)
Definition errorManip.H:139
const direction noexcept
Definition scalarImpl.H:265
error FatalError
Error stream (stdout output on all processes), with additional 'FOAM FATAL ERROR' header text and sta...
constexpr char nl
The newline '\n' character (0x0a).
Definition Ostream.H:50
#define FOAM_UNLIKELY(cond)
Definition stdFoam.H:64
static Type to_mpi(UPstream::Communicator arg) noexcept
Cast UPstream::Communicator to MPI_Comm.