Loading...
Searching...
No Matches
cyclicAMIGAMGInterface.C
Go to the documentation of this file.
1/*---------------------------------------------------------------------------*\
2 ========= |
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 \\ / O peration |
5 \\ / A nd | www.openfoam.com
6 \\/ M anipulation |
7-------------------------------------------------------------------------------
8 Copyright (C) 2011-2016 OpenFOAM Foundation
9 Copyright (C) 2019-2025 OpenCFD Ltd.
10-------------------------------------------------------------------------------
11License
12 This file is part of OpenFOAM.
13
14 OpenFOAM is free software: you can redistribute it and/or modify it
15 under the terms of the GNU General Public License as published by
16 the Free Software Foundation, either version 3 of the License, or
17 (at your option) any later version.
18
19 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
20 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
26
27\*---------------------------------------------------------------------------*/
28
29#include "AMIInterpolation.H"
32#include "Map.H"
33
34// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
35
36namespace Foam
37{
// NOTE(review): the macro invocation names on the following lines were lost
// in extraction (Doxygen scrape).  The original file registers the class in
// the run-time selection tables here, i.e. a defineTypeNameAndDebug(...) /
// addToRunTimeSelectionTable(...) pair — verify against the repository file.
40 (
44 );
46 (
50 );
51}
52
53
54// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
55
// Agglomeration constructor: builds a coarse-level cyclic AMI interface from
// the fine-level interface and the cell-restrict addressing of both sides.
// Face agglomeration is derived from the cell agglomeration; on the owner
// side the fine-level AMI weights are agglomerated as well, with optional
// (debug & 2) diagnostics printed per face.
//
// NOTE(review): this text is a Doxygen-scraped copy; several tokens were
// lost in extraction — notably the base-class name (presumably
// GAMGInterface) at original line 67, the right-hand side of the
// fineCyclicAMIInterface assignment at line 91, and the
// "new AMIPatchToPatchInterpolation" allocation plus the
// faceRestrictAddressing_ argument at lines 172/175.  Verify against the
// original repository source before compiling.
56Foam::cyclicAMIGAMGInterface::cyclicAMIGAMGInterface
57(
58 const label index,
59 const lduInterfacePtrsList& coarseInterfaces,
60 const lduInterface& fineInterface,
61 const labelField& localRestrictAddressing,
62 const labelField& neighbourRestrictAddressing,
63 const label fineLevelIndex,
64 const label coarseComm
65)
66:
68 (
69 index,
70 coarseInterfaces
71 ),
72 neighbPatchID_
73 (
74 refCast<const cyclicAMILduInterface>(fineInterface).neighbPatchID()
75 ),
76 owner_
77 (
78 refCast<const cyclicAMILduInterface>(fineInterface).owner()
79 ),
80 forwardT_
81 (
82 refCast<const cyclicAMILduInterface>(fineInterface).forwardT()
83 ),
84 reverseT_
85 (
86 refCast<const cyclicAMILduInterface>(fineInterface).reverseT()
87 ),
88 myProcNo_(-1)
89{
// NOTE(review): the initialiser on line 91 (presumably
// refCast<const cyclicAMILduInterface>(fineInterface);) was lost in
// extraction.
90 const auto& fineCyclicAMIInterface =
92
93 // Construct face agglomeration from cell agglomeration
94 {
95 // From coarse face to cell
96 DynamicList<label> dynFaceCells(localRestrictAddressing.size());
97
98 // From face to coarse face
99 DynamicList<label> dynFaceRestrictAddressing
100 (
101 localRestrictAddressing.size()
102 );
103
104 Map<label> masterToCoarseFace(localRestrictAddressing.size());
105
106 for (const label curMaster : localRestrictAddressing)
107 {
108 const auto iter = masterToCoarseFace.cfind(curMaster);
109
110 if (iter.good())
111 {
112 // Already have coarse face
113 dynFaceRestrictAddressing.append(iter.val())
114 }
115 else
116 {
117 // New coarse face
118 const label coarseI = dynFaceCells.size();
119 dynFaceRestrictAddressing.append(coarseI);
120 dynFaceCells.append(curMaster);
121 masterToCoarseFace.insert(curMaster, coarseI);
122 }
123 }
124
125 faceCells_.transfer(dynFaceCells);
126 faceRestrictAddressing_.transfer(dynFaceRestrictAddressing);
127 }
128
129
130 // On the owner side construct the AMI
131
132 if (fineCyclicAMIInterface.owner())
133 {
134 // Construct the neighbour side agglomeration (as the neighbour would
135 // do it): the same loop as above but using neighbourRestrictAddressing
136 // instead of localRestrictAddressing
137
138 labelList nbrFaceRestrictAddressing;
139 {
140 // From face to coarse face
141 DynamicList<label> dynNbrFaceRestrictAddressing
142 (
143 neighbourRestrictAddressing.size()
144 );
145
146 Map<label> masterToCoarseFace(neighbourRestrictAddressing.size());
147
148 for (const label curMaster : neighbourRestrictAddressing)
149 {
150 const auto iter = masterToCoarseFace.cfind(curMaster);
151
152 if (iter.good())
153 {
154 // Already have coarse face
155 dynNbrFaceRestrictAddressing.append(iter.val());
156 }
157 else
158 {
159 // New coarse face
160 const label coarseI = masterToCoarseFace.size();
161 dynNbrFaceRestrictAddressing.append(coarseI);
162 masterToCoarseFace.insert(curMaster, coarseI);
163 }
164 }
165
166 nbrFaceRestrictAddressing.transfer(dynNbrFaceRestrictAddressing);
167 }
168
169
// NOTE(review): lines 172 and 175 (the allocation of the agglomerated
// AMIPatchToPatchInterpolation and the faceRestrictAddressing_ argument)
// were lost in extraction.
170 amiPtr_.reset
171 (
173 (
174 fineCyclicAMIInterface.AMI(),
176 nbrFaceRestrictAddressing
177 )
178 );
179
180
181 const auto& AMI = amiPtr_();
182
183 if (debug & 2 && AMI.comm() != -1)
184 {
// Temporarily point communicator warnings at the AMI communicator
// while printing diagnostics; restored at line 301.
185 const auto oldWarnComm = UPstream::commWarn(AMI.comm());
186
187 const label myRank = UPstream::myProcNo(AMI.comm());
188 Pout<< "At level:" << fineLevelIndex
189 << " agglomerating from ownsize:"
190 << fineInterface.faceCells().size()
191 << " nbrSize:" << neighbourRestrictAddressing.size()
192 << " down to ownsize:" << AMI.srcAddress().size()
193 << " nbrsize:" << AMI.tgtAddress().size()
194 << " Patch:" << index << " comm:" << AMI.comm()
195 << " nProcs:" << UPstream::nProcs(AMI.comm())
196 << " myRank:" << myRank << " agglomerated AMI:"
197 << endl;
198
199 const label nbrSize = AMI.tgtAddress().size();
200 // From nbr to owner side
201 {
202 Pout<< "From nbr:" << nbrSize << " to owner:" << this->size()
203 << endl;
204
205 const auto& addresses = AMI.srcAddress();
206 const auto& weights = AMI.srcWeights();
207
208 labelList globalIDs(identity(nbrSize));
209 if (AMI.distributed() && AMI.comm() != -1)
210 {
211 const auto& map = AMI.tgtMap();
212 forAll(map.subMap(), proci)
213 {
214 Pout<< " TGTMap: sending to rank:" << proci
215 << " elements:" << flatOutput(map.subMap()[proci])
216 << endl;
217 }
218 forAll(map.constructMap(), proci)
219 {
220 Pout<< " TGTMap: receiving from rank:" << proci
221 << " elements:"
222 << flatOutput(map.constructMap()[proci])
223 << endl;
224 }
225 // Fetch remote global IDs
226 const globalIndex globalFaces(nbrSize, AMI.comm());
227 Pout<< " localNbrSize:" << nbrSize
228 << " globalSize:" << globalFaces.totalSize() << endl;
229
230 //const label myOffset = globalFaces.offsets()[myRank];
231 for (label& id : globalIDs)
232 {
233 id = globalFaces.toGlobal(myRank, id);
234 }
235 map.distribute(globalIDs);
236 }
237
238 // Renumber my slots so they are now global face numbers
239 forAll(addresses, facei)
240 {
241 Pout<< " source face:" << facei
242 << " have weights:"
243 << flatOutput(weights[facei])
244 << " from slots:" << flatOutput(addresses[facei])
245 << " from global tgt faces:"
246 << UIndirectList<label>(globalIDs, addresses[facei])
247 << endl;
248 }
249 }
250 // From owner to nbr side
251 {
252 Pout<< "From owner:" << this->size() << " to nbr:" << nbrSize
253 << endl;
254
255 const auto& addresses = AMI.tgtAddress();
256 const auto& weights = AMI.tgtWeights();
257
258 labelList globalIDs(identity(this->size()));
259 if (AMI.distributed() && AMI.comm() != -1)
260 {
261 const auto& map = AMI.srcMap();
262 forAll(map.subMap(), proci)
263 {
264 Pout<< " SRCMap: sending to rank:" << proci
265 << " elements:" << flatOutput(map.subMap()[proci])
266 << endl;
267 }
268 forAll(map.constructMap(), proci)
269 {
270 Pout<< " SRCMap: receiving from rank:" << proci
271 << " elements:"
272 << flatOutput(map.constructMap()[proci])
273 << endl;
274 }
275 // Fetch remote global IDs
276 const globalIndex globalFaces(this->size(), AMI.comm());
277 Pout<< " localSize:" << this->size()
278 << " globalSize:" << globalFaces.totalSize() << endl;
279
280 for (label& id : globalIDs)
281 {
282 id = globalFaces.toGlobal(myRank, id);
283 }
284 map.distribute(globalIDs);
285 }
286
287 // Renumber my slots so they are now global face numbers
288 forAll(addresses, facei)
289 {
290 Pout<< " target face:" << facei
291 << " have weights:"
292 << flatOutput(weights[facei])
293 << " from slots:" << flatOutput(addresses[facei])
294 << " from global src faces:"
295 << UIndirectList<label>(globalIDs, addresses[facei])
296 << endl;
297 }
298 }
299 Pout<< "DONE agglomerating at level:" << fineLevelIndex << endl;
// Restore the previous communicator-warning setting (paired with line 185)
301 UPstream::commWarn(oldWarnComm);
302 }
303 }
304}
305
306
// Istream constructor: rebuilds the interface from a stream in the exact
// order produced by write() — base GAMGInterface data, then neighbour patch
// id, ownership flag, transformation tensors, an AMI-present flag and,
// when present, the AMI itself plus the originating rank.
307Foam::cyclicAMIGAMGInterface::cyclicAMIGAMGInterface
308(
309 const label index,
310 const lduInterfacePtrsList& coarseInterfaces,
311 Istream& is
312)
313:
314 GAMGInterface(index, coarseInterfaces, is),
315 neighbPatchID_(readLabel(is)),
316 owner_(readBool(is)),
317 forwardT_(is),
318 reverseT_(is),
319 myProcNo_(-1)
320{
321 const bool hasAMI(readBool(is));
322
323 if (hasAMI)
324 {
325 amiPtr_.reset(new AMIPatchToPatchInterpolation(is));
326
327 // Store originating ranks locally - used when processor agglomerating
328 // onto a processor that wasn't in the communicator originally (since
329 // it had no faces)
330 const label comm = AMI().comm();
331
332 if (comm != -1)
// NOTE(review): the opening brace of this if-statement (original line 333)
// was lost in extraction; the closing brace at line 335 pairs with it.
334 is >> myProcNo_;
335 }
336 }
337}
338
339
// Processor-agglomeration constructor: combines several existing coarse
// cyclicAMIGAMGInterfaces (from the ranks being agglomerated together) into
// a single interface on the new, smaller coarse communicator.  Only the
// owner side does the work; it gathers global AMI settings, merges the
// per-interface src/tgt addressing, weights and (optional) areas/centroids,
// rebuilds the distribution maps, and resets amiPtr_ with the result.
//
// NOTE(review): this text is a Doxygen-scraped copy; statement heads were
// lost in extraction at several original lines — 355 (base-class name,
// presumably GAMGInterface), 368 (refCast template argument, presumably
// const cyclicAMILduInterface), 491/502/513 (FatalErrorInFunction heads),
// 526 (presumably Pstream::combineReduce) and 536 (its tag argument),
// 540 (presumably DebugPout), 591/610/655/704/786/893/954 (the
// "const auto& intf = refCast<const cyclicAMIGAMGInterface>" heads),
// 727/808 (presumably mapDistributeBase::renumberMap heads), and 1009
// (presumably "new AMIPatchToPatchInterpolation").  Verify against the
// original repository source before compiling.
340Foam::cyclicAMIGAMGInterface::cyclicAMIGAMGInterface
341(
342 const label index,
343 const lduInterfacePtrsList& coarseInterfaces,
344 const lduInterface& fineInterface,
345 const labelList& interfaceMap,
346 const labelUList& faceCells,
347 const labelUList& faceRestrictAddresssing,
348 const labelUList& faceOffsets,
349 const lduInterfacePtrsList& allInterfaces,
350 const label coarseComm,
351 const label myProcNo,
352 const labelList& procAgglomMap
353)
354:
356 (
357 index,
358 coarseInterfaces,
359 faceCells,
360 faceRestrictAddresssing
361 ),
362 neighbPatchID_
363 (
364 interfaceMap.find
365 (
366 refCast
367 <
369 >(fineInterface).neighbPatchID()
370 )
371 ),
372 owner_
373 (
374 refCast<const cyclicAMILduInterface>(fineInterface).owner()
375 ),
376 forwardT_
377 (
378 refCast<const cyclicAMILduInterface>(fineInterface).forwardT()
379 ),
380 reverseT_
381 (
382 refCast<const cyclicAMILduInterface>(fineInterface).reverseT()
383 ),
384 myProcNo_(-1)
385{
// Only the owner side assembles the combined AMI
386 if (!owner_)
387 {
388 return;
389 }
390
391
392 // Get stats, sizes from the input interfaces. For the global settings
393 // the problem is that the
394 // local processor might not have any valid interfaces so here just
395 // collect and do a global reduction afterwards.
396
397 // Structure to pack all. First element is used to decide who has the
398 // valid AMI.
399 typedef
400 Tuple2
401 <
402 label,
403 Tuple2
404 <
405 Tuple2
406 <
407 FixedList<bool, 4>,
408 scalar
409 >,
410 label
411 >
412 > AMIType;
413
414 AMIType globalInfo;
415 FixedList<bool, 4>& bools = globalInfo.second().first().first();
416
417 // Define aliases to make our life easier
418 label& firstValidAMI = globalInfo.first();
419 bool& requireMatch = bools[0];
420 bool& reverseTarget = bools[1];
421 bool& srcHasFlip = bools[2];
422 bool& tgtHasFlip = bools[3];
423 scalar& lowWeightCorrection = globalInfo.second().first().second();
424 label& singlePatchProc = globalInfo.second().second();
425
426 // Initialise all global variables
427 firstValidAMI = labelMax;
428 requireMatch = false;
429 reverseTarget = false;
430 srcHasFlip = false;
431 tgtHasFlip = false;
432 lowWeightCorrection = -1;
433 singlePatchProc = -1;
434
435 // Initialise all local variables
436 bool hasSrcMagSf = false;
437 bool hasSrcCentroids = false;
438 bool hasTgtMagSf = false;
439 label nSrc = 0;
440 label nTgt = 0;
441
442 forAll(allInterfaces, inti)
443 {
444 if (allInterfaces.set(inti))
445 {
446 const auto& intf =
447 refCast<const cyclicAMIGAMGInterface>(allInterfaces[inti]);
448
449 if (!intf.amiPtr_)
450 {
451 continue;
452 }
453
454 if (firstValidAMI == labelMax)
455 {
456 firstValidAMI = inti;
457 }
458
459 const auto& AMI = intf.AMI();
460
461 // Note:
462 // - AMI.singlePatchProc or AMI.distributed : global properties.
463 // Should be already parallel synced.
464 // - AMI.comm() : -1 on processors that are not involved
465 // - AMI.srcMap(),tgtMap() : not valid if AMI.comm() is -1
466
467 if (!AMI.distributed())
468 {
469 singlePatchProc = AMI.singlePatchProc();
470 }
471 else if (AMI.comm() != -1)
472 {
473 singlePatchProc = -1;
474 srcHasFlip =
475 srcHasFlip || AMI.srcMap().constructHasFlip();
476 tgtHasFlip =
477 tgtHasFlip || AMI.tgtMap().constructHasFlip();
478 }
479 requireMatch = AMI.requireMatch();
480 reverseTarget = AMI.reverseTarget();
481 lowWeightCorrection = AMI.lowWeightCorrection();
482
483 nSrc += AMI.srcAddress().size();
484 nTgt += AMI.tgtAddress().size();
485
// Sanity checks: optional per-face area/centroid data, when present,
// must be consistent with the addressing sizes
486 if (AMI.srcMagSf().size())
487 {
488 hasSrcMagSf = true;
489 if (AMI.srcMagSf().size() != AMI.srcAddress().size())
490 {
492 << "srcMagSf size:" << AMI.srcMagSf().size()
493 << "srcAddress size:" << AMI.srcAddress().size()
494 << exit(FatalError);
495 }
496 }
497 if (AMI.srcCentroids().size())
498 {
499 hasSrcCentroids = true;
500 if (AMI.srcCentroids().size() != AMI.srcAddress().size())
501 {
503 << "srcCentroids size:" << AMI.srcCentroids().size()
504 << "srcAddress size:" << AMI.srcAddress().size()
505 << exit(FatalError);
506 }
507 }
508 if (AMI.tgtMagSf().size())
509 {
510 hasTgtMagSf = true;
511 if (AMI.tgtMagSf().size() != AMI.tgtAddress().size())
512 {
514 << "tgtMagSf size:" << AMI.tgtMagSf().size()
515 << "tgtAddress size:" << AMI.tgtAddress().size()
516 << exit(FatalError);
517 }
518 }
519 }
520 }
521
522
523 // Reduce global information in case one of the coarse ranks does not
524 // have an input AMI to get data from. Could use minFirstEqOp from Tuple2
525 // instead ...
527 (
528 globalInfo,
// Keep the info coming from the lowest valid interface index
529 [](AMIType& x, const AMIType& y)
530 {
531 if (y.first() < x.first())
532 {
533 x = y;
534 }
535 },
537 coarseComm
538 );
539
541 << "Input amis :"
542 << " singlePatchProc:" << singlePatchProc
543 << " srcHasFlip:" << srcHasFlip
544 << " tgtHasFlip:" << tgtHasFlip
545 << " requireMatch:" << requireMatch
546 << " reverseTarget:" << reverseTarget
547 << " lowWeightCorrection:" << lowWeightCorrection
548 << " hasSrcMagSf:" << hasSrcMagSf
549 << " hasSrcCentroids:" << hasSrcCentroids
550 << " hasTgtMagSf:" << hasTgtMagSf
551 << " nSrc:" << nSrc
552 << " nTgt:" << nTgt
553 << endl;
554
555
556 labelListList srcAddress;
557 scalarListList srcWeights;
558 scalarList srcMagSf;
559 // Needed?
560 pointListList srcCentroids;
561
562 labelListList tgtAddress;
563 scalarListList tgtWeights;
564 scalarList tgtMagSf;
565
566
567 // Map to send src side data to tgt side
568 autoPtr<mapDistribute> srcToTgtMap;
569
570 // Map to send tgt side data to src side
571 autoPtr<mapDistribute> tgtToSrcMap;
572
// Distributed case: rebuild the combined maps and renumber slots
573 if (singlePatchProc == -1)
574 {
575 // Find ranks that agglomerate together
576 const label myAgglom = UPstream::myProcNo(coarseComm);
577
578 // Per input map either -1 or the index in the maps that is local
579 // data.
580 labelList localRanks(allInterfaces.size(), -1);
581 // From rank in coarse communicator back to rank in original (fine)
582 // communicator.
583 labelListList newToOldRanks;
584 {
585 // Pass 1: count number of valid maps
586 label nOldRanks = 0;
587 forAll(allInterfaces, inti)
588 {
589 if (allInterfaces.set(inti))
590 {
592 (
593 allInterfaces[inti]
594 );
595
596 if (!intf.amiPtr_ || intf.AMI().comm() == -1)
597 {
598 continue;
599 }
600 nOldRanks++;
601 }
602 }
603
604 // Pass 2: collect
605 DynamicList<label> oldRanks(nOldRanks);
606 forAll(allInterfaces, inti)
607 {
608 if (allInterfaces.set(inti))
609 {
611 (
612 allInterfaces[inti]
613 );
614
615 if (!intf.amiPtr_ || intf.AMI().comm() == -1)
616 {
617 continue;
618 }
619
620 label fineRank = -1;
621 if (intf.myProcNo() == -1)
622 {
623 // The interface was already local so got never
624 // sent across so myProcNo_ is never set ...
625 fineRank = UPstream::myProcNo(intf.AMI().comm());
626 }
627 else
628 {
629 fineRank = intf.myProcNo();
630 }
631
632 oldRanks.append(fineRank);
633 localRanks[inti] = fineRank;
634 }
635 }
636
637 // Pull individual parts together - this is the only communication
638 // needed.
639 newToOldRanks = Pstream::listGatherValues
640 (
641 labelList(std::move(oldRanks)),
642 coarseComm
643 );
644 Pstream::broadcast(newToOldRanks, coarseComm);
645 }
646
647
648 // Create combined maps
649 UPtrList<const mapDistribute> srcMaps(allInterfaces.size());
650 UPtrList<const mapDistribute> tgtMaps(allInterfaces.size());
651 forAll(allInterfaces, inti)
652 {
653 if (allInterfaces.set(inti))
654 {
656 (
657 allInterfaces[inti]
658 );
659
660 if (!intf.amiPtr_)
661 {
662 // Should not be in allInterfaces?
663 continue;
664 }
665
666 const auto& AMI = intf.AMI();
667
668 if (AMI.comm() != -1)
669 {
670 srcMaps.set(inti, &AMI.srcMap());
671 tgtMaps.set(inti, &AMI.tgtMap());
672 }
673 }
674 }
675
676
677 // Offsets for slots into results of srcToTgtMap
678 labelList srcStartOfLocal;
679 List<Map<label>> srcCompactMaps;
680
681 srcToTgtMap.reset
682 (
683 new mapDistribute
684 (
685 srcMaps,
686 localRanks, // per src map which rank represents local data
687 coarseComm,
688 newToOldRanks, // destination rank to source ranks
689 srcStartOfLocal,
690 srcCompactMaps
691 )
692 );
693
694
695 // Assemble tgtAddress
696 tgtAddress.setSize(nTgt);
697 if (tgtAddress.size())
698 {
699 label alli = 0;
700 forAll(allInterfaces, inti)
701 {
702 if (allInterfaces.set(inti))
703 {
705 (
706 allInterfaces[inti]
707 );
708
709 if (!intf.amiPtr_)
710 {
711 continue;
712 }
713
714 const auto& AMI = intf.AMI();
715 const auto& tgtSlots = AMI.tgtAddress();
716 const label localSize =
717 srcStartOfLocal[inti+1]
718 - srcStartOfLocal[inti];
719
720 forAll(tgtSlots, tgti)
721 {
722 // Append old slots: copy old values and adapt
723 auto& newSlots = tgtAddress[alli++];
724 newSlots = tgtSlots[tgti];
725
726 // Renumber to new indices
728 (
729 newSlots,
730 localSize,
731 srcStartOfLocal[inti],
732 srcCompactMaps[inti],
733 srcHasFlip //hasFlip
734 );
735
// Defensive check: every renumbered slot must address the
// combined construct space
736 for (const label slot : newSlots)
737 {
738 if
739 (
740 slot < 0
741 || slot >= srcToTgtMap().constructSize()
742 )
743 {
744 FatalErrorInFunction << " newSlots:"
745 << newSlots << exit(FatalError);
746 }
747 }
748 }
749 }
750 }
751
752 if (nTgt != alli)
753 {
754 FatalErrorInFunction << "nTgt:" << nTgt
755 << " alli:" << alli << exit(FatalError);
756 }
757 }
758
759 // Offsets for slots into results of tgtToSrcMap
760 labelList tgtStartOfLocal;
761 List<Map<label>> tgtCompactMaps;
762
763 tgtToSrcMap.reset
764 (
765 new mapDistribute
766 (
767 tgtMaps,
768 localRanks,
769 coarseComm,
770 newToOldRanks,
771 tgtStartOfLocal,
772 tgtCompactMaps
773 )
774 );
775
776
777 // Assemble srcAddress
778 srcAddress.setSize(nSrc);
779 if (srcAddress.size())
780 {
781 label alli = 0;
782 forAll(allInterfaces, inti)
783 {
784 if (allInterfaces.set(inti))
785 {
787 (
788 allInterfaces[inti]
789 );
790
791 if (!intf.amiPtr_)
792 {
793 continue;
794 }
795
796 const auto& AMI = intf.AMI();
797 const auto& srcSlots = AMI.srcAddress();
798 const label localSize =
799 tgtStartOfLocal[inti+1]
800 - tgtStartOfLocal[inti];
801
802 forAll(srcSlots, srci)
803 {
804 // Append old slots: copy old values and adapt
805 auto& newSlots = srcAddress[alli++];
806 newSlots = srcSlots[srci];
807 // Renumber to new indices
809 (
810 newSlots,
811 localSize,
812 tgtStartOfLocal[inti],
813 tgtCompactMaps[inti],
814 tgtHasFlip
815 );
816
817 for (const label slot : newSlots)
818 {
819 if
820 (
821 slot < 0
822 || slot >= tgtToSrcMap().constructSize()
823 )
824 {
825 FatalErrorInFunction << " newSlots:"
826 << newSlots << exit(FatalError);
827 }
828 }
829 }
830 }
831 }
832
833 if (nSrc != alli)
834 {
835 FatalErrorInFunction << "nSrc:" << nSrc
836 << " alli:" << alli << exit(FatalError);
837 }
838 }
839
840
841 // Clean up: if no remote elements sent/received mark as
842 // non-distributed. We could do this at the start but this
843 // needs to take all the internal transport into account. Easier
844 // (but less efficient) to do afterwards now all is compacted.
845 {
846 const auto& map = srcToTgtMap().subMap();
847
848 bool usesRemote = false;
849 forAll(map, proci)
850 {
851 if (proci != myAgglom)
852 {
853 const auto& ss = srcToTgtMap().subMap()[proci];
854 const auto& sc = srcToTgtMap().constructMap()[proci];
855 const auto& ts = tgtToSrcMap().subMap()[proci];
856 const auto& tc = tgtToSrcMap().constructMap()[proci];
857
858 if (ss.size() || sc.size() || ts.size() || tc.size())
859 {
860 usesRemote = true;
861 break;
862 }
863 }
864 }
865
866 // We can't have a single rank become fully-local since we
867 // expect singlePatchProc to be synchronised. So make sure all
868 // have become local
869
870 if (!returnReduceOr(usesRemote, coarseComm))
871 {
872 DebugPout<< "** making fully local on new rank "
873 << myAgglom << " in comm:" << coarseComm << endl;
874 singlePatchProc = myAgglom;
875 srcToTgtMap.clear();
876 tgtToSrcMap.clear();
877 }
878 }
879 }
880 else
881 {
882 // src/tgt address are straight indices
883
884 srcAddress.setSize(nSrc);
885 tgtAddress.setSize(nTgt);
886
887 nSrc = 0;
888 nTgt = 0;
889 forAll(allInterfaces, inti)
890 {
891 if (allInterfaces.set(inti))
892 {
894 (
895 allInterfaces[inti]
896 );
897
898 if (!intf.amiPtr_)
899 {
900 continue;
901 }
902
903 const auto& AMI = intf.AMI();
904
905 const auto& srcA = AMI.srcAddress();
906 if (srcAddress.size())
907 {
908 label srci = nSrc;
909 forAll(srcA, i)
910 {
911 srcAddress[srci++] = srcA[i]+nTgt;
912 }
913 }
914
915 const auto& tgtA = AMI.tgtAddress();
916 if (tgtAddress.size())
917 {
918 label tgti = nTgt;
919 forAll(tgtA, i)
920 {
921 tgtAddress[tgti++] = tgtA[i]+nSrc;
922 }
923 }
924
925 nSrc += srcA.size();
926 nTgt += tgtA.size();
927 }
928 }
929 }
930
// Allocate the combined weight/area/centroid storage
931 srcWeights.setSize(nSrc);
932 if (hasSrcMagSf)
933 {
934 srcMagSf.setSize(nSrc);
935 }
936 if (hasSrcCentroids)
937 {
938 srcCentroids.setSize(nSrc);
939 }
940 tgtWeights.setSize(nTgt);
941 if (hasTgtMagSf)
942 {
943 tgtMagSf.setSize(nTgt);
944 }
945
946
947 // Append individual data
948 nSrc = 0;
949 nTgt = 0;
950 forAll(allInterfaces, inti)
951 {
952 if (allInterfaces.set(inti))
953 {
955 (
956 allInterfaces[inti]
957 );
958
959 if (!intf.amiPtr_)
960 {
961 continue;
962 }
963
964 const auto& AMI = intf.AMI();
965
966 const auto& srcA = AMI.srcAddress();
967 {
968 // weights
969 SubList<scalarList>(srcWeights, srcA.size(), nSrc) =
970 AMI.srcWeights();
971
972 // magSf
973 if (hasSrcMagSf)
974 {
975 SubList<scalar>(srcMagSf, srcA.size(), nSrc) =
976 AMI.srcMagSf();
977 }
978
979 // centroids
980 if (hasSrcCentroids)
981 {
982 SubList<pointList>(srcCentroids, srcA.size(), nSrc) =
983 AMI.srcCentroids();
984 }
985 }
986
987 const auto& tgtA = AMI.tgtAddress();
988 {
989 // weights
990 SubList<scalarList>(tgtWeights, tgtA.size(), nTgt) =
991 AMI.tgtWeights();
992
993 if (hasTgtMagSf)
994 {
995 SubList<scalar>(tgtMagSf, tgtA.size(), nTgt) =
996 AMI.tgtMagSf();
997 }
998 }
999
1000 nSrc += srcA.size();
1001 nTgt += tgtA.size();
1002 }
1003 }
1004
1005
1006 // Construct with same arguments as original
1007 amiPtr_.reset
1008 (
1010 (
1011 requireMatch,
1012 reverseTarget,
1013 lowWeightCorrection
1014 )
1015 );
// NOTE(review): the trailing comma on the next line is suspicious —
// presumably a semicolon in the original source; verify.
1016 amiPtr_().comm(coarseComm),
1017 amiPtr_().reset
1018 (
1019 std::move(srcToTgtMap),
1020 std::move(tgtToSrcMap),
1021 std::move(srcAddress),
1022 std::move(srcWeights),
1023 std::move(tgtAddress),
1024 std::move(tgtWeights),
1025 singlePatchProc
1026 );
1027 amiPtr_().srcMagSf() = std::move(srcMagSf);
1028 amiPtr_().srcCentroids() = std::move(srcCentroids);
1029 amiPtr_().tgtMagSf() = std::move(tgtMagSf);
1030
1031
// Optional diagnostics, same pattern as the agglomeration constructor
1032 if (debug & 2)
1033 {
1034 const auto& AMI = amiPtr_();
1035
1036 const auto oldWarnComm = UPstream::commWarn(AMI.comm());
1037
1038 const label myRank = UPstream::myProcNo(AMI.comm());
1039 Pout<< "PROCAGGLOMERATED :"
1040 << " Patch:" << index << " comm:" << AMI.comm()
1041 << " nProcs:" << UPstream::nProcs(AMI.comm())
1042 << " myRank:" << myRank << " agglomerated AMI:"
1043 << endl;
1044
1045 const label nbrSize = AMI.tgtAddress().size();
1046 // From nbr to owner side
1047 {
1048 Pout<< "From nbr:" << nbrSize << " to owner:" << this->size()
1049 << endl;
1050
1051 const auto& addresses = AMI.srcAddress();
1052 const auto& weights = AMI.srcWeights();
1053
1054 labelList globalIDs(identity(nbrSize));
1055 if (AMI.distributed() && AMI.comm() != -1)
1056 {
1057 const auto& map = AMI.tgtMap();
1058 forAll(map.subMap(), proci)
1059 {
1060 Pout<< " TGTMap: sending to rank:" << proci
1061 << " elements:" << flatOutput(map.subMap()[proci])
1062 << endl;
1063 }
1064 forAll(map.constructMap(), proci)
1065 {
1066 Pout<< " TGTMap: receiving from rank:" << proci
1067 << " elements:"
1068 << flatOutput(map.constructMap()[proci])
1069 << endl;
1070 }
1071
1072 // Fetch remote global IDs
1073 const globalIndex globalFaces(nbrSize, AMI.comm());
1074 Pout<< " localNbrSize:" << nbrSize
1075 << " globalSize:" << globalFaces.totalSize() << endl;
1076 for (label& id : globalIDs)
1077 {
1078 id = globalFaces.toGlobal(myRank, id);
1079 }
1080 map.distribute(globalIDs);
1081 }
1082
1083 // Renumber my slots so they are now global face numbers
1084 forAll(addresses, facei)
1085 {
1086 Pout<< " source face:" << facei
1087 << " have weights:"
1088 << flatOutput(weights[facei])
1089 << " from slots:" << flatOutput(addresses[facei])
1090 << " from global tgt faces:"
1091 << UIndirectList<label>(globalIDs, addresses[facei])
1092 << endl;
1093 }
// NOTE(review): restoring the warn-communicator here, inside the first
// diagnostics sub-block, looks early — the second sub-block below still
// uses AMI.comm(); compare with the agglomeration constructor where the
// restore happens at the very end.  Verify against upstream.
1094 UPstream::commWarn(oldWarnComm);
1095 }
1096 // From owner to nbr side
1097 {
1098 Pout<< "From owner:" << this->size() << " to nbr:" << nbrSize
1099 << endl;
1100
1101 const auto& addresses = AMI.tgtAddress();
1102 const auto& weights = AMI.tgtWeights();
1103
1104 labelList globalIDs(identity(this->size()));
1105 if (AMI.distributed() && AMI.comm() != -1)
1106 {
1107 const auto& map = AMI.srcMap();
1108 forAll(map.subMap(), proci)
1109 {
1110 Pout<< " SRCMap: sending to rank:" << proci
1111 << " elements:" << flatOutput(map.subMap()[proci])
1112 << endl;
1113 }
1114 forAll(map.constructMap(), proci)
1115 {
1116 Pout<< " SRCMap: receiving from rank:" << proci
1117 << " elements:"
1118 << flatOutput(map.constructMap()[proci])
1119 << endl;
1120 }
1121
1122 // Fetch remote global IDs
1123 const globalIndex globalFaces(this->size(), AMI.comm());
1124 Pout<< " localSize:" << this->size()
1125 << " globalSize:" << globalFaces.totalSize() << endl;
1126 for (label& id : globalIDs)
1127 {
1128 id = globalFaces.toGlobal(myRank, id);
1129 }
1130 map.distribute(globalIDs);
1131 }
1132
1133 // Renumber my slots so they are now global face numbers
1134 forAll(addresses, facei)
1135 {
1136 Pout<< " target face:" << facei
1137 << " have weights:"
1138 << flatOutput(weights[facei])
1139 << " from slots:" << flatOutput(addresses[facei])
1140 << " from global src faces:"
1141 << UIndirectList<label>(globalIDs, addresses[facei])
1142 << endl;
1143 }
1144 }
1145 Pout<< "DONE PROCAGGLOMERATED" << endl;
1147 }
1148}
1149
1150
1151// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
1152
1154(
1155 const Pstream::commsTypes commsType,
1156 const labelUList& iF
1157) const
1158{
1159 const cyclicAMIGAMGInterface& nbr =
1160 dynamic_cast<const cyclicAMIGAMGInterface&>(neighbPatch());
1161 const labelUList& nbrFaceCells = nbr.faceCells();
1162
1163 auto tpnf = tmp<labelField>::New(nbrFaceCells.size());
1164 labelField& pnf = tpnf.ref();
1165
1166 forAll(pnf, facei)
1167 {
1168 pnf[facei] = iF[nbrFaceCells[facei]];
1169 }
1170
1171 return tpnf;
1172}
1173
1174
1175void Foam::cyclicAMIGAMGInterface::write(Ostream& os) const
1176{
1178
1179 const bool hasAMI = bool(amiPtr_);
1180
1181 os << token::SPACE << neighbPatchID_
1182 << token::SPACE << owner_
1183 << token::SPACE << forwardT_
1184 << token::SPACE << reverseT_
1185 << token::SPACE << hasAMI;
1186
1187 if (hasAMI)
1188 {
1189 os << token::SPACE;
1190 AMI().writeData(os);
1191
1192 // Write processors in communicator
1193 const label comm = AMI().comm();
1194
1195 if (comm != -1)
1196 {
1197 os << token::SPACE
1198 << UPstream::myProcNo(comm);
1199 }
1200 }
1201}
1202
1203
1204// ************************************************************************* //
scalar y
Macros for easy insertion into run-time selection tables.
#define addToRunTimeSelectionTable(baseType, thisType, argNames)
Add to construction table with typeName as the key.
label comm() const noexcept
Communicator (local or otherwise) for parallel operations.
A 1D vector of objects of type <T> that resizes itself as necessary to accept the new objects.
Definition DynamicList.H:68
void append(const T &val)
Copy append an element to the end of this list.
A 1D vector of objects of type <T> with a fixed length <N>.
Definition FixedList.H:73
Abstract base class for GAMG agglomerated interfaces.
virtual label size() const
Return size.
labelList faceRestrictAddressing_
Face restrict addressing.
virtual label index() const
virtual const lduInterfacePtrsList & coarseInterfaces() const
virtual const labelUList & faceCells() const
Return faceCell addressing.
labelList faceCells_
Face-cell addressing.
GAMGInterface(const GAMGInterface &)=delete
No copy construct.
virtual void write(Ostream &) const =0
Write to stream.
const_iterator cfind(const Key &key) const
Find and return an const_iterator set at the hashed entry.
Definition HashTableI.H:113
bool insert(const Key &key, const T &obj)
Copy insert a new entry, not overwriting existing entries.
Definition HashTableI.H:152
label size() const noexcept
The number of elements in table.
Definition HashTable.H:358
An Istream is an abstract base class for all input systems (streams, files, token lists etc)....
Definition Istream.H:60
A 1D array of objects of type <T>, where the size of the vector is known and used for subscript bound...
Definition List.H:72
void transfer(List< T > &list)
Transfer the contents of the argument List into this list and annul the argument list.
Definition List.C:347
void setSize(label n)
Alias for resize().
Definition List.H:536
A HashTable to objects of type <T> with a label key.
Definition Map.H:54
An Ostream is an abstract base class for all output systems (streams, files, token lists,...
Definition Ostream.H:59
static void combineReduce(T &value, CombineOp cop, const int tag=UPstream::msgType(), const int communicator=UPstream::worldComm)
Reduce inplace (cf. MPI Allreduce) applying cop to inplace combine value from different processors.
static List< T > listGatherValues(const T &localValue, const int communicator=UPstream::worldComm, const int tag=UPstream::msgType())
Gather individual values into list locations.
A non-owning sub-view of a List (allocated or unallocated storage).
Definition SubList.H:61
A 2-tuple for storing two objects of dissimilar types. The container is similar in purpose to std::pa...
Definition Tuple2.H:51
A List with indirect addressing. Like IndirectList but does not store addressing.
T & first()
Access first element of the list, position [0].
Definition UList.H:957
void size(const label n)
Older name for setAddressableSize.
Definition UList.H:118
static int myProcNo(const label communicator=worldComm)
Rank of this process in the communicator (starting from masterNo()). Negative if the process is not a...
Definition UPstream.H:1706
commsTypes
Communications types.
Definition UPstream.H:81
static int & msgType() noexcept
Message tag of standard messages.
Definition UPstream.H:1926
static label nProcs(const label communicator=worldComm)
Number of ranks in parallel run (for given communicator). It is 1 for serial run.
Definition UPstream.H:1697
static label commWarn(const label communicator) noexcept
Alter communicator debugging setting. Warns for use of any communicator differing from specified....
Definition UPstream.H:1122
@ broadcast
broadcast [MPI]
Definition UPstream.H:189
A list of pointers to objects of type <T>, without allocation/deallocation management of the pointers...
Definition UPtrList.H:101
const T * set(const label i) const
Return const pointer to element (can be nullptr), or nullptr for out-of-range access (ie,...
Definition UPtrList.H:366
label size() const noexcept
The number of entries in the list.
Definition UPtrListI.H:106
Pointer management similar to std::unique_ptr, with some additional methods and type checking.
Definition autoPtr.H:65
void clear() noexcept
Same as reset(nullptr).
Definition autoPtr.H:255
void reset(T *p=nullptr) noexcept
Delete managed object and set to new given pointer.
Definition autoPtrI.H:37
GAMG agglomerated cyclic AMI interface.
virtual void write(Ostream &) const
Write to stream.
virtual const tensorField & reverseT() const
Return neighbour-cell transformation tensor.
virtual label neighbPatchID() const
Cyclic interface functions.
virtual const AMIPatchToPatchInterpolation & AMI() const
virtual label myProcNo() const
-1 or old local rank
virtual const cyclicAMIGAMGInterface & neighbPatch() const
Return processor number.
virtual tmp< labelField > internalFieldTransfer(const Pstream::commsTypes commsType, const labelUList &iF) const
Transfer and return internal field adjacent to the interface.
virtual const tensorField & forwardT() const
Return face transformation tensor.
An abstract base class for cyclic AMI coupled interfaces.
cyclicAMILduInterface() noexcept=default
Default construct.
Smooth ATC in cells next to a set of patches supplied by type.
Definition faceCells.H:55
Calculates a non-overlapping list of offsets based on an input size (eg, number of cells) from different processors.
Definition globalIndex.H:77
label totalSize() const noexcept
The total addressed size, which corresponds to the end offset and also the sum of all localSizes.
label toGlobal(const label proci, const label i) const
From local to global on proci.
An abstract base class for implicitly-coupled interfaces e.g. processor and cyclic patches.
virtual const labelUList & faceCells() const =0
Return faceCell addressing.
static label renumberMap(labelListList &mapElements, const labelUList &oldToNew, const bool hasFlip)
Helper for renumbering the (compacted) map elements using the supplied old-to-new mapping.
Class containing processor-to-processor mapping information.
A class for managing temporary objects.
Definition tmp.H:75
static tmp< T > New(Args &&... args)
Construct tmp with forwarding arguments.
Definition tmp.H:215
@ SPACE
Space [isspace].
Definition token.H:144
#define defineTypeNameAndDebug(Type, DebugSwitch)
Define the typeName and debug information.
Definition className.H:142
#define FatalErrorInFunction
Report an error message using Foam::FatalError.
Definition error.H:600
OBJstream os(runTime.globalPath()/outputName)
#define DebugPout
Report an information message using Foam::Pout.
List< bool > bools(const labelHashSet &locations)
Transform the on locations to a boolList, with true for each non-negative location and false for all other locations.
Definition HashOps.C:72
label find(const ListType &input, const UnaryPredicate &pred, const label start=0)
Same as ListOps::find_if.
Definition ListOps.H:855
Namespace for handling debugging switches.
Definition debug.C:45
Namespace for OpenFOAM.
List< scalarList > scalarListList
List of scalarList.
Definition scalarList.H:35
Type & refCast(U &obj)
A dynamic_cast (for references) to Type reference.
Definition typeInfo.H:172
bool returnReduceOr(const bool value, const int communicator=UPstream::worldComm)
Perform logical (or) MPI Allreduce on a copy. Uses UPstream::reduceOr.
List< labelList > labelListList
List of labelList.
Definition labelList.H:38
List< label > labelList
A List of labels.
Definition List.H:62
label readLabel(const char *buf)
Parse entire buffer as a label, skipping leading/trailing whitespace.
Definition label.H:63
UPtrList< const lduInterface > lduInterfacePtrsList
Store lists of lduInterface as a UPtrList.
constexpr label labelMax
Definition label.H:55
Ostream & endl(Ostream &os)
Add newline and flush stream.
Definition Ostream.H:519
FlatOutput::OutputAdaptor< Container, Delimiters > flatOutput(const Container &obj, Delimiters delim)
Global flatOutput() function with specified output delimiters.
Definition FlatOutput.H:217
labelList identity(const label len, label start=0)
Return an identity map of the given length with (map[i] == i), works like std::iota() but returning a list of label values.
AMIInterpolation AMIPatchToPatchInterpolation
Patch-to-patch interpolation == Foam::AMIInterpolation.
Field< label > labelField
Specialisation of Field<T> for label.
Definition labelField.H:48
error FatalError
Error stream (stdout output on all processes), with additional 'FOAM FATAL ERROR' header text and stack trace.
prefixOSstream Pout
OSstream wrapped stdout (std::cout) with parallel prefix.
errorManipArg< error, int > exit(error &err, const int errNo=1)
Definition errorManip.H:125
UList< label > labelUList
A UList of labels.
Definition UList.H:75
List< scalar > scalarList
List of scalar.
Definition scalarList.H:32
List< pointList > pointListList
List of pointList.
Definition pointList.H:35
bool readBool(Istream &is)
Read bool from stream using Foam::Switch(Istream&).
Definition bool.C:72
#define forAll(list, i)
Loop across all elements in list.
Definition stdFoam.H:299