FEAT 3
Finite Element Analysis Toolbox
Loading...
Searching...
No Matches
graph.hpp
1// FEAT3: Finite Element Analysis Toolbox, Version 3
2// Copyright (C) 2010 by Stefan Turek & the FEAT group
3// FEAT3 is released under the GNU General Public License version 3,
4// see the file 'copyright.txt' in the top level directory for details.
5
6#pragma once
7
// includes, FEAT
#include <kernel/adjacency/adjactor.hpp>
#include <kernel/adjacency/base.hpp>
#include <kernel/util/assertion.hpp>
#include <kernel/util/omp_util.hpp>

// includes, system
#include <cstdint>
#include <vector>
17
18namespace FEAT
19{
20 namespace Adjacency
21 {
22
23 // forward declaration
24 class Permutation;
25
33 class Graph
34 {
35 public:
37 using IndexVector = std::vector<Index>;
38
45 typedef IndexVector::const_iterator ImageIterator;
46
48 static constexpr std::uint64_t magic = 0x5052474A44413346ull; // "F3ADJGRP"
49
50 protected:
53
60
67
68 public:
74 Graph();
75
93 explicit Graph(
94 Index num_nodes_domain,
95 Index num_nodes_image,
96 Index num_indices_image);
97
118 explicit Graph(
119 Index num_nodes_domain,
120 Index num_nodes_image,
121 Index num_indices_image,
122 const Index* domain_ptr,
123 const Index* image_idx);
124
139 explicit Graph(
140 Index num_nodes_image,
141 const IndexVector& domain_ptr,
142 const IndexVector& image_idx);
143
158 explicit Graph(
159 Index num_nodes_image,
160 IndexVector&& domain_ptr,
161 IndexVector&& image_idx);
162
174 template<typename Adjactor_>
175 explicit Graph(RenderType render_type, const Adjactor_& adjactor) :
177 _domain_ptr(),
178 _image_idx()
179 {
180 switch(render_type)
181 {
184 _render_as_is(adjactor);
185 if(render_type == RenderType::as_is_sorted)
186 this->sort_indices();
187 break;
188
191 _render_injectify(adjactor);
192 if(render_type == RenderType::injectify_sorted)
193 this->sort_indices();
194 break;
195
198 _render_transpose(adjactor);
199 // transpose is automatically sorted
200 break;
201
204 _render_injectify_transpose(adjactor);
205 // transpose is automatically sorted
206 break;
207
208 default:
209 XABORTM("Invalid render_type parameter!");
210 }
211 }
212
227 template<typename Adjactor1_, typename Adjactor2_>
228 explicit Graph(RenderType render_type, const Adjactor1_& adjactor1, const Adjactor2_& adjactor2) :
230 _domain_ptr(),
231 _image_idx()
232 {
233 switch(render_type)
234 {
237 _render_as_is(adjactor1, adjactor2);
238 if(render_type == RenderType::as_is_sorted)
239 this->sort_indices();
240 break;
241
244 _render_injectify(adjactor1, adjactor2);
245 if(render_type == RenderType::injectify_sorted)
246 this->sort_indices();
247 break;
248
251 _render_transpose(adjactor1, adjactor2);
252 // transpose is automatically sorted
253 break;
254
257 _render_injectify_transpose(adjactor1, adjactor2);
258 // transpose is automatically sorted
259 break;
260
261 default:
262 XABORTM("Invalid render_type parameter!");
263 }
264 }
265
267 Graph(Graph&& other);
268
270 Graph& operator=(Graph&& other);
271
287 explicit Graph(const Graph& other, const Permutation& domain_perm, const Permutation& image_perm);
288
298 explicit Graph(const std::vector<char>& buffer);
299
301 virtual ~Graph();
302
304 void clear();
305
311 Graph clone() const
312 {
313 if(!_domain_ptr.empty())
315 else
316 return Graph();
317 }
318
333 Index degree(Index domain_node) const
334 {
335 ASSERTM(domain_node < get_num_nodes_domain(), "Domain node index out of range");
336 return _domain_ptr[domain_node+1] - _domain_ptr[domain_node];
337 }
338
353 Index degree() const;
354
360 {
361 return _domain_ptr.data();
362 }
363
365 const Index* get_domain_ptr() const
366 {
367 return _domain_ptr.data();
368 }
369
375 {
376 return _image_idx.data();
377 }
378
380 const Index* get_image_idx() const
381 {
382 return _image_idx.data();
383 }
384
391 {
392 return Index(_image_idx.size());
393 }
394
398 void sort_indices();
399
406 void permute_indices(const Adjacency::Permutation& inv_perm);
407
411 std::vector<char> serialize() const;
412
413 /* *************************************************** */
414 /* R E N D E R F U N C T I O N T E M P L A T E S */
415 /* *************************************************** */
416 private:
418
420 template<typename Adjactor_>
421 void _render_as_is(const Adjactor_& adj)
422 {
423 typedef typename Adjactor_::ImageIterator AImIt;
424
425 // get counts
426 //_num_nodes_domain = adj.get_num_nodes_domain();
427 _num_nodes_image = adj.get_num_nodes_image();
428 Index num_indices_image = 0;
429
430 // allocate pointer vector
431 _domain_ptr.resize(adj.get_num_nodes_domain() + 1);
432 _domain_ptr[0] = 0;
433
434 // count number of adjacencies and build pointer vector
435 FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
436 for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
437 {
438 Index num_indices_here = Index(0);
439 AImIt cur(adj.image_begin(i));
440 AImIt end(adj.image_end(i));
441 for(; cur != end; ++cur)
442 {
443 ++num_indices_here;
444 }
445 _domain_ptr[i+1] = num_indices_here;
446 }
447
448 // perform inclusive scan to obtain domain pointer
449 feat_omp_in_scan(adj.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
450 num_indices_image = _domain_ptr[adj.get_num_nodes_domain()];
451
452 // allocate and build index vector
453 _image_idx.resize(num_indices_image);
454 FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
455 for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
456 {
457 Index k = _domain_ptr[i];
458 AImIt cur(adj.image_begin(i));
459 AImIt end(adj.image_end(i));
460 for(; cur != end; ++cur, ++k)
461 {
462 _image_idx[k] = *cur;
463 }
464 }
465 }
466
468 template<typename Adjactor_>
469 void _render_injectify(const Adjactor_& adj)
470 {
471 // get counts
472 _num_nodes_image = adj.get_num_nodes_image();
473 Index num_indices_image = 0;
474
475 // allocate pointer vector
476 _domain_ptr.resize(adj.get_num_nodes_domain() + 1);
477 _domain_ptr[0] = 0;
478
479 FEAT_PRAGMA_OMP(parallel)
480 {
481 // allocate auxiliary mask vector
482 std::vector<char> vidx_mask(adj.get_num_nodes_image(), 0);
483 char* idx_mask = vidx_mask.data();
484
485 // count number of adjacencies and build pointer vector
486 FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
487 for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
488 {
489 Index num_indices_here = Index(0);
490 for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
491 {
492 if(idx_mask[*it] == 0)
493 {
494 ++num_indices_here;
495 idx_mask[*it] = 1;
496 }
497 }
498 _domain_ptr[i+1] = num_indices_here;
499 for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
500 idx_mask[*it] = 0;
501 }
502 }
503
504 // perform inclusive scan to obtain domain pointer
505 feat_omp_in_scan(adj.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
506 num_indices_image = _domain_ptr[adj.get_num_nodes_domain()];
507
508 // allocate and build index vector
509 _image_idx.resize(num_indices_image);
510
511 FEAT_PRAGMA_OMP(parallel)
512 {
513 std::vector<char> vidx_mask(adj.get_num_nodes_image(), 0);
514 char* idx_mask = vidx_mask.data();
515 FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
516 for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
517 {
518 Index k = _domain_ptr[i];
519 for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
520 {
521 if(idx_mask[*it] == 0)
522 {
523 _image_idx[k] = *it;
524 ++k;
525 idx_mask[*it] = 1;
526 }
527 }
528 for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
529 idx_mask[*it] = 0;
530 }
531 }
532 }
533
535 template<typename Adjactor_>
536 void _render_transpose(const Adjactor_& adj)
537 {
538 typedef typename Adjactor_::ImageIterator AImIt;
539
540 // get counts
541 _num_nodes_image = adj.get_num_nodes_domain();
542 Index num_indices_image = 0;
543
544 // allocate and format pointer vector
545 _domain_ptr.resize(adj.get_num_nodes_image() + 1, Index(0));
546
547 // count number of adjacencies
548 for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
549 {
550 AImIt cur(adj.image_begin(j));
551 AImIt end(adj.image_end(j));
552 for(; cur != end; ++cur)
553 {
554 ++_domain_ptr[(*cur) + 1];
555 }
556 }
557
558 // perform inclusive scan to obtain domain pointer
559 feat_omp_in_scan(adj.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
560 num_indices_image = _domain_ptr[adj.get_num_nodes_image()];
561
562 // allocate and build index vector
563 _image_idx.resize(num_indices_image);
564 std::vector<Index*> vimg_ptr(adj.get_num_nodes_image(), nullptr);
565 Index** image_ptr = vimg_ptr.data();
566 Index* image_idx = _image_idx.data();
567 for(Index i(0); i < adj.get_num_nodes_image(); ++i)
568 {
569 image_ptr[i] = &image_idx[_domain_ptr[i]];
570 }
571
572 for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
573 {
574 AImIt cur(adj.image_begin(j));
575 AImIt end(adj.image_end(j));
576 for(; cur != end; ++cur)
577 {
578 Index*& idx = image_ptr[*cur];
579 *idx = j;
580 ++idx;
581 }
582 }
583 }
584
586 template<typename Adjactor_>
587 void _render_injectify_transpose(const Adjactor_& adj)
588 {
589 // get counts
590 _num_nodes_image = adj.get_num_nodes_domain();
591 Index num_indices_image = 0;
592
593 // allocate pointer vector
594 _domain_ptr.resize(adj.get_num_nodes_image() + 1, Index(0));
595 // allocate auxiliary mask vector
596 std::vector<char> vidx_mask(adj.get_num_nodes_image(), 0);
597 char* idx_mask = vidx_mask.data();
598
599 // loop over all image nodes
600 for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
601 {
602 for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
603 {
604 if(idx_mask[*it] == 0)
605 {
606 ++num_indices_image;
607 ++_domain_ptr[(*it)+1];
608 idx_mask[*it] = 1;
609 }
610 }
611 for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
612 idx_mask[*it] = 0;
613 }
614
615 _image_idx.resize(num_indices_image);
616 std::vector<Index*> vimg_ptr(adj.get_num_nodes_image(), nullptr);
617 Index** image_ptr = vimg_ptr.data();
618 Index* image_idx = _image_idx.data();
619
620 // perform inclusive scan to obtain domain pointer
621 feat_omp_in_scan(adj.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
622 for(Index i(0); i < adj.get_num_nodes_image(); ++i)
623 {
624 image_ptr[i] = &image_idx[_domain_ptr[i]];
625 }
626
627 // build image index vector
628 for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
629 {
630 for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
631 {
632 if(idx_mask[*it] == 0)
633 {
634 Index*& idx = image_ptr[*it];
635 *idx = j;
636 ++idx;
637 idx_mask[*it] = 1;
638 }
639 }
640 for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
641 idx_mask[*it] = 0;
642 }
643 }
644
646 template<
647 typename Adjactor1_,
648 typename Adjactor2_>
649 void _render_as_is(
650 const Adjactor1_& adj1,
651 const Adjactor2_& adj2)
652 {
653 // validate adjactor dimensions
654 XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");
655
656 typedef typename Adjactor1_::ImageIterator AImIt1;
657 typedef typename Adjactor2_::ImageIterator AImIt2;
658
659 // get counts
660 _num_nodes_image = adj2.get_num_nodes_image();
661 Index num_indices_image = 0;
662
663 // allocate pointer vector
664 _domain_ptr.resize(adj1.get_num_nodes_domain() + 1);
665 _domain_ptr[0] = 0;
666 // count number of adjacencies and build pointer vector
667 FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
668 for(Index i = 0; i < adj1.get_num_nodes_domain(); ++i)
669 {
670 Index num_indices_here = Index(0);
671 AImIt1 cur1(adj1.image_begin(i));
672 AImIt1 end1(adj1.image_end(i));
673 for(; cur1 != end1; ++cur1)
674 {
675 AImIt2 cur2(adj2.image_begin(*cur1));
676 AImIt2 end2(adj2.image_end(*cur1));
677 for(; cur2 != end2; ++cur2)
678 {
679 ++num_indices_here;
680 }
681 }
682 _domain_ptr[i+1] = num_indices_here;
683 }
684
685 // perform inclusive scan to obtain domain pointer
686 feat_omp_in_scan(adj1.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
687 num_indices_image = _domain_ptr[adj1.get_num_nodes_domain()];
688
689 // allocate and build index vector
690 _image_idx.resize(num_indices_image);
691
692 FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
693 for(Index i = 0; i < adj1.get_num_nodes_domain(); ++i)
694 {
695 Index k = _domain_ptr[i];
696 AImIt1 cur1(adj1.image_begin(i));
697 AImIt1 end1(adj1.image_end(i));
698 for(; cur1 != end1; ++cur1)
699 {
700 AImIt2 cur2(adj2.image_begin(*cur1));
701 AImIt2 end2(adj2.image_end(*cur1));
702 for(; cur2 != end2; ++cur2, ++k)
703 {
704 _image_idx[k] = *cur2;
705 }
706 }
707 }
708 }
709
711 template<
712 typename Adjactor1_,
713 typename Adjactor2_>
714 void _render_injectify(
715 const Adjactor1_& adj1,
716 const Adjactor2_& adj2)
717 {
718 // validate adjactor dimensions
719 XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");
720
721 // get counts
722 _num_nodes_image = adj2.get_num_nodes_image();
723 Index num_indices_image = 0;
724
725 // allocate pointer vector
726 _domain_ptr.resize(adj1.get_num_nodes_domain() + 1);
727 _domain_ptr[0] = Index(0);
728 FEAT_PRAGMA_OMP(parallel)
729 {
730 // allocate auxiliary mask vector
731 std::vector<char> vidx_mask(adj2.get_num_nodes_image(), 0);
732 char* idx_mask = vidx_mask.data();
733
734 // count number of adjacencies and build pointer vector
735 FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
736 for(Index i=0; i < adj1.get_num_nodes_domain(); ++i)
737 {
738 Index num_indices_here = Index(0);
739 for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
740 {
741 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
742 {
743 if(idx_mask[*jt] == 0)
744 {
745 ++num_indices_here;
746 idx_mask[*jt] = 1;
747 }
748 }
749 }
750 _domain_ptr[i+1] = num_indices_here;
751 // reset mask
752 for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
753 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
754 idx_mask[*jt] = 0;
755 }
756 }
757
758 // perform inclusive scan to obtain domain pointer
759 feat_omp_in_scan(adj1.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
760 num_indices_image = _domain_ptr[adj1.get_num_nodes_domain()];
761 _image_idx.resize(num_indices_image);
762
763 FEAT_PRAGMA_OMP(parallel)
764 {
765 std::vector<char> vidx_mask(adj2.get_num_nodes_image(), 0);
766 char* idx_mask = vidx_mask.data();
767 FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
768 for(Index i = 0; i < adj1.get_num_nodes_domain(); ++i)
769 {
770 Index k = _domain_ptr[i];
771 for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
772 {
773 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
774 {
775 if(idx_mask[*jt] == 0)
776 {
777 _image_idx[k] = *jt;
778 ++k;
779 idx_mask[*jt] = 1;
780 }
781 }
782 }
783 // reset mask
784 for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
785 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
786 idx_mask[*jt] = 0;
787 }
788 }
789 }
790
792 template<
793 typename Adjactor1_,
794 typename Adjactor2_>
795 void _render_transpose(
796 const Adjactor1_& adj1,
797 const Adjactor2_& adj2)
798 {
799 // validate adjactor dimensions
800 XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");
801
802 typedef typename Adjactor1_::ImageIterator AImIt1;
803 typedef typename Adjactor2_::ImageIterator AImIt2;
804
805 // get counts
806 _num_nodes_image = adj1.get_num_nodes_domain();
807 Index num_indices_image = 0;
808
809 // allocate and format pointer vector
810 _domain_ptr.resize(adj2.get_num_nodes_image() + 1, Index(0));
811
812 // count number of adjacencies
813 for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
814 {
815 AImIt1 cur1(adj1.image_begin(j));
816 AImIt1 end1(adj1.image_end(j));
817 for(; cur1 != end1; ++cur1)
818 {
819 AImIt2 cur2(adj2.image_begin(*cur1));
820 AImIt2 end2(adj2.image_end(*cur1));
821 for(; cur2 != end2; ++cur2)
822 {
823 ++_domain_ptr[(*cur2) + 1];
824 }
825 }
826 }
827
828 // perform inclusive scan to obtain domain pointer
829 feat_omp_in_scan(adj2.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
830 num_indices_image = _domain_ptr[adj2.get_num_nodes_image()];
831
832 // allocate and build index vector
833 _image_idx.resize(num_indices_image);
834 std::vector<Index*> vimg_ptr(adj2.get_num_nodes_image(), nullptr);
835 Index** image_ptr = vimg_ptr.data();
836 Index* image_idx = _image_idx.data();
837
838 for(Index i(0); i < adj2.get_num_nodes_image(); ++i)
839 {
840 image_ptr[i] = &image_idx[_domain_ptr[i]];
841 }
842
843 for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
844 {
845 AImIt1 cur1(adj1.image_begin(j));
846 AImIt1 end1(adj1.image_end(j));
847 for(; cur1 != end1; ++cur1)
848 {
849 AImIt2 cur2(adj2.image_begin(*cur1));
850 AImIt2 end2(adj2.image_end(*cur1));
851 for(; cur2 != end2; ++cur2)
852 {
853 Index*& idx = image_ptr[*cur2];
854 *idx = j;
855 ++idx;
856 }
857 }
858 }
859 }
860
862 template<
863 typename Adjactor1_,
864 typename Adjactor2_>
865 void _render_injectify_transpose(
866 const Adjactor1_& adj1,
867 const Adjactor2_& adj2)
868 {
869 // validate adjactor dimensions
870 XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");
871
872 // get counts
873 _num_nodes_image = adj1.get_num_nodes_domain();
874 Index num_indices_image = 0;
875
876 // allocate pointer vector
877 _domain_ptr.resize(adj2.get_num_nodes_image() + 1, Index(0));
878
879 // allocate auxiliary mask vector
880 std::vector<char> vidx_mask(adj2.get_num_nodes_image(), 0);
881 char* idx_mask = vidx_mask.data();
882
883 // loop over all image nodes
884 for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
885 {
886 for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
887 {
888 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
889 {
890 if(idx_mask[*jt] == 0)
891 {
892 ++_domain_ptr[(*jt)+1];
893 idx_mask[*jt] = 1;
894 }
895 }
896 }
897 // reset mask
898 for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
899 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
900 idx_mask[*jt] = 0;
901 }
902
903 // perform inclusive scan to obtain domain pointer
904 feat_omp_in_scan(adj2.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
905 num_indices_image = _domain_ptr[adj2.get_num_nodes_image()];
906
907 _image_idx.resize(num_indices_image);
908 std::vector<Index*> vimg_ptr(adj2.get_num_nodes_image(), nullptr);
909 Index** image_ptr = vimg_ptr.data();
910 Index* image_idx = _image_idx.data();
911
912 // build pointer vector
913 for(Index i(0); i < adj2.get_num_nodes_image(); ++i)
914 {
915 image_ptr[i] = &image_idx[_domain_ptr[i]];
916 }
917
918 // build image index vector
919 for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
920 {
921 for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
922 {
923 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
924 {
925 if(idx_mask[*jt] == 0)
926 {
927 Index*& idx = image_ptr[*jt];
928 *idx = j;
929 ++idx;
930 idx_mask[*jt] = 1;
931 }
932 }
933 }
934 // reset mask
935 for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
936 for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
937 idx_mask[*jt] = 0;
938 }
939 }
940
942 /* ******************************************************************* */
943 /* A D J A C T O R I N T E R F A C E I M P L E M E N T A T I O N */
944 /* ******************************************************************* */
945 public:
946
947 inline Index get_num_nodes_domain() const
948 {
949 return (_domain_ptr.empty() ? Index(0) : Index(_domain_ptr.size() - 1));
950 }
951
952 inline Index get_num_nodes_image() const
953 {
954 return _num_nodes_image;
955 }
956
958 inline ImageIterator image_begin(Index domain_node) const
959 {
960 ASSERTM(domain_node +1 < _domain_ptr.size(), "Domain node index out of range");
961
962 return _image_idx.begin() + IndexVector::difference_type(_domain_ptr[domain_node]);
963 }
964
966 inline ImageIterator image_end(Index domain_node) const
967 {
968 ASSERTM(domain_node +1< _domain_ptr.size(), "Domain node index out of range");
969
970 return _image_idx.begin() + IndexVector::difference_type(_domain_ptr[domain_node + Index(1)]);
971 }
972 }; // class Graph
973 } // namespace Adjacency
974} // namespace FEAT
#define XABORTM(msg)
Abortion macro definition with custom message.
Definition: assertion.hpp:192
#define ASSERTM(expr, msg)
Debug-Assertion macro definition with custom message.
Definition: assertion.hpp:230
#define XASSERTM(expr, msg)
Assertion macro definition with custom message.
Definition: assertion.hpp:263
Adjacency Graph implementation.
Definition: graph.hpp:34
Graph & operator=(Graph &&other)
move-assign operator
Definition: graph.cpp:90
static constexpr std::uint64_t magic
magic number for Graph serialization
Definition: graph.hpp:48
IndexVector _image_idx
Image node index Vector.
Definition: graph.hpp:66
IndexVector _domain_ptr
Domain pointer Vector.
Definition: graph.hpp:59
Graph clone() const
Clones this graph.
Definition: graph.hpp:311
Graph()
Default constructor.
Definition: graph.cpp:15
ImageIterator image_begin(Index domain_node) const
Returns an iterator for the first adjacent image node.
Definition: graph.hpp:958
std::vector< Index > IndexVector
index vector type
Definition: graph.hpp:37
Graph(RenderType render_type, const Adjactor1_ &adjactor1, const Adjactor2_ &adjactor2)
Composite-Render constructor.
Definition: graph.hpp:228
Graph(RenderType render_type, const Adjactor_ &adjactor)
Render constructor.
Definition: graph.hpp:175
const Index * get_domain_ptr() const
Returns the domain pointer array.
Definition: graph.hpp:365
void sort_indices()
Sorts the image indices to non-descending order.
Definition: graph.cpp:206
Index degree() const
Returns the degree of the graph.
Definition: graph.cpp:195
Index * get_domain_ptr()
Returns the domain pointer array.
Definition: graph.hpp:359
ImageIterator image_end(Index domain_node) const
Returns an iterator for the first position past the last adjacent image node.
Definition: graph.hpp:966
Index * get_image_idx()
Returns the image node index array.
Definition: graph.hpp:374
void permute_indices(const Adjacency::Permutation &inv_perm)
Permutes the image indices.
Definition: graph.cpp:221
Index degree(Index domain_node) const
Returns the degree of a domain node.
Definition: graph.hpp:333
IndexVector::const_iterator ImageIterator
ImageIterator for Graph class.
Definition: graph.hpp:45
void clear()
Clears the graph.
Definition: graph.cpp:188
virtual ~Graph()
virtual destructor
Definition: graph.cpp:184
Index _num_nodes_image
total number of image nodes
Definition: graph.hpp:52
Index get_num_indices() const
Returns the total number indices.
Definition: graph.hpp:390
const Index * get_image_idx() const
Returns the image node index array.
Definition: graph.hpp:380
std::vector< char > serialize() const
Serializes the graph into a buffer.
Definition: graph.cpp:234
RenderType
Render type enumeration.
Definition: base.hpp:26
@ injectify_sorted
Render-Injectified mode, sort image indices.
@ transpose
Render-Transpose mode.
@ injectify_transpose
Render-Injectified-Transpose mode.
@ injectify_transpose_sorted
Render-Injectified-Transpose mode, sort image indices.
@ injectify
Render-Injectified mode.
@ transpose_sorted
Render-Transpose mode, sort image indices.
@ as_is
Render-As-Is mode.
@ as_is_sorted
Render-As-Is mode, sort image indices.
FEAT namespace.
Definition: adjactor.hpp:12
void feat_omp_in_scan(std::size_t n, const T_ x[], T_ y[])
Computes an OpenMP-parallel inclusive scan a.k.a. a prefix sum of an array, i.e.
Definition: omp_util.hpp:35
std::uint64_t Index
Index data type.