FEAT 3
Finite Element Analysis Toolbox
Loading...
Searching...
No Matches
graph.hpp
1// FEAT3: Finite Element Analysis Toolbox, Version 3
2// Copyright (C) 2010 by Stefan Turek & the FEAT group
3// FEAT3 is released under the GNU General Public License version 3,
4// see the file 'copyright.txt' in the top level directory for details.
5
6#pragma once
7
8// includes, FEAT
9#include <kernel/adjacency/base.hpp>
10#include <kernel/adjacency/adjactor.hpp>
13#include <kernel/util/omp_util.hpp>
14
15// includes, system
16#include <vector>
17
18namespace FEAT
19{
20 namespace Adjacency
21 {
22
23 // forward declaration
24 class Permutation;
25
33 class Graph
34 {
35 public:
37 using IndexVector = std::vector<Index>;
38
45 typedef IndexVector::const_iterator ImageIterator;
46
48 static constexpr std::uint64_t magic = 0x5052474A44413346ull; // "F3ADJGRP"
49
50 protected:
53
60
67
68 public:
74 Graph();
75
93 explicit Graph(
94 Index num_nodes_domain,
95 Index num_nodes_image,
96 Index num_indices_image);
97
118 explicit Graph(
119 Index num_nodes_domain,
120 Index num_nodes_image,
121 Index num_indices_image,
122 const Index* domain_ptr,
123 const Index* image_idx);
124
139 explicit Graph(
140 Index num_nodes_image,
141 const IndexVector& domain_ptr,
142 const IndexVector& image_idx);
143
158 explicit Graph(
159 Index num_nodes_image,
160 IndexVector&& domain_ptr,
161 IndexVector&& image_idx);
162
      /**
       * \brief Render constructor
       *
       * Renders the adjacency structure of \p adjactor into this graph
       * according to the chosen render mode.
       *
       * \param[in] render_type
       * The render mode; one of the RenderType values (as_is / injectify /
       * transpose / injectify_transpose, each with an optional '_sorted' variant).
       *
       * \param[in] adjactor
       * The adjactor to be rendered; must implement the Adjactor interface
       * (get_num_nodes_domain/image, image_begin/image_end).
       *
       * NOTE(review): the 'case RenderType::...:' labels of the switch below
       * appear to have been lost in this rendering of the file -- each
       * statement group corresponds to one pair of render modes (as inferred
       * from the inner 'render_type ==' checks); confirm against the
       * repository source.
       */
      template<typename Adjactor_>
      explicit Graph(RenderType render_type, const Adjactor_& adjactor) :
        _domain_ptr(),
        _image_idx()
      {
        switch(render_type)
        {
          // (missing labels: case RenderType::as_is / as_is_sorted)
          _render_as_is(adjactor);
          if(render_type == RenderType::as_is_sorted)
            this->sort_indices();
          break;

          // (missing labels: case RenderType::injectify / injectify_sorted)
          _render_injectify(adjactor);
          if(render_type == RenderType::injectify_sorted)
            this->sort_indices();
          break;

          // (missing labels: case RenderType::transpose / transpose_sorted)
          _render_transpose(adjactor);
          // transpose is automatically sorted
          break;

          // (missing labels: case RenderType::injectify_transpose / injectify_transpose_sorted)
          _render_injectify_transpose(adjactor);
          // transpose is automatically sorted
          break;

          default:
            XABORTM("Invalid render_type parameter!");
        }
      }
212
      /**
       * \brief Composite-Render constructor
       *
       * Renders the composition adjactor1 o adjactor2 into this graph
       * according to the chosen render mode, i.e. a domain node i of
       * \p adjactor1 is adjacent to every image node reachable through
       * \p adjactor2 from one of i's image nodes.
       *
       * \param[in] render_type  The render mode, see RenderType.
       * \param[in] adjactor1    The first (outer) adjactor of the composition.
       * \param[in] adjactor2    The second (inner) adjactor of the composition.
       *
       * NOTE(review): the 'case RenderType::...:' labels of the switch below
       * appear to have been lost in this rendering of the file -- each
       * statement group corresponds to one pair of render modes; confirm
       * against the repository source.
       */
      template<typename Adjactor1_, typename Adjactor2_>
      explicit Graph(RenderType render_type, const Adjactor1_& adjactor1, const Adjactor2_& adjactor2) :
        _domain_ptr(),
        _image_idx()
      {
        switch(render_type)
        {
          // (missing labels: case RenderType::as_is / as_is_sorted)
          _render_as_is(adjactor1, adjactor2);
          if(render_type == RenderType::as_is_sorted)
            this->sort_indices();
          break;

          // (missing labels: case RenderType::injectify / injectify_sorted)
          _render_injectify(adjactor1, adjactor2);
          if(render_type == RenderType::injectify_sorted)
            this->sort_indices();
          break;

          // (missing labels: case RenderType::transpose / transpose_sorted)
          _render_transpose(adjactor1, adjactor2);
          // transpose is automatically sorted
          break;

          // (missing labels: case RenderType::injectify_transpose / injectify_transpose_sorted)
          _render_injectify_transpose(adjactor1, adjactor2);
          // transpose is automatically sorted
          break;

          default:
            XABORTM("Invalid render_type parameter!");
        }
      }
265
267 Graph(Graph&& other);
268
270 Graph& operator=(Graph&& other);
271
287 explicit Graph(const Graph& other, const Permutation& domain_perm, const Permutation& image_perm);
288
298 explicit Graph(const std::vector<char>& buffer);
299
301 virtual ~Graph();
302
304 void clear();
305
      /**
       * \brief Clones this graph.
       *
       * \returns A deep copy of this graph; an empty graph if this graph is empty.
       *
       * NOTE(review): the body of the non-empty branch (the 'return'
       * statement, original line 314) is missing from this rendering of the
       * file -- presumably it constructs a new Graph from _num_nodes_image,
       * _domain_ptr and _image_idx; confirm against the repository source.
       */
      Graph clone() const
      {
        if(!_domain_ptr.empty())
        else
          return Graph();
      }
318
333 Index degree(Index domain_node) const
334 {
335 ASSERTM(domain_node < get_num_nodes_domain(), "Domain node index out of range");
336 return _domain_ptr[domain_node+1] - _domain_ptr[domain_node];
337 }
338
353 Index degree() const;
354
360 {
361 return _domain_ptr.data();
362 }
363
      /// \returns A const pointer to the domain pointer array (CSR row offsets);
      /// may be null for an empty graph, since it is backed by std::vector::data().
      const Index* get_domain_ptr() const
      {
        return _domain_ptr.data();
      }
369
375 {
376 return _image_idx.data();
377 }
378
      /// \returns A const pointer to the image node index array (CSR column indices);
      /// may be null for an empty graph, since it is backed by std::vector::data().
      const Index* get_image_idx() const
      {
        return _image_idx.data();
      }
384
391 {
392 return Index(_image_idx.size());
393 }
394
398 void sort_indices();
399
406 void permute_indices(const Adjacency::Permutation& inv_perm);
407
409 std::size_t bytes() const
410 {
411 return (_image_idx.size() + _domain_ptr.size()) * sizeof(Index);
412 }
413
417 std::vector<char> serialize() const;
418
419 /* *************************************************** */
420 /* R E N D E R F U N C T I O N T E M P L A T E S */
421 /* *************************************************** */
422 private:
424
      /// \brief Renders an adjactor into this graph "as-is", keeping duplicate indices.
      ///
      /// Builds the CSR-style layout in two passes: first count the adjacencies of
      /// each domain node into _domain_ptr[i+1], then turn the counts into row
      /// offsets via an inclusive scan and copy the image indices row by row.
      template<typename Adjactor_>
      void _render_as_is(const Adjactor_& adj)
      {
        typedef typename Adjactor_::ImageIterator AImIt;

        // get counts
        //_num_nodes_domain = adj.get_num_nodes_domain();
        _num_nodes_image = adj.get_num_nodes_image();
        Index num_indices_image = 0;

        // allocate pointer vector
        _domain_ptr.resize(adj.get_num_nodes_domain() + 1);
        _domain_ptr[0] = 0;

        // count number of adjacencies and build pointer vector
        // (race-free: iteration i writes only _domain_ptr[i+1])
        FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
        for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
        {
          Index num_indices_here = Index(0);
          AImIt cur(adj.image_begin(i));
          AImIt end(adj.image_end(i));
          for(; cur != end; ++cur)
          {
            ++num_indices_here;
          }
          _domain_ptr[i+1] = num_indices_here;
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj.get_num_nodes_domain()];

        // allocate and build index vector
        // (race-free: iteration i writes only the slice [_domain_ptr[i], _domain_ptr[i+1]))
        FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
        for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
        {
          Index k = _domain_ptr[i];
          AImIt cur(adj.image_begin(i));
          AImIt end(adj.image_end(i));
          for(; cur != end; ++cur, ++k)
          {
            _image_idx[k] = *cur;
          }
        }
      }
472
      /// \brief Renders an adjactor into this graph, removing duplicate image
      /// indices within each domain node's adjacency list ("injectify").
      ///
      /// Uses the same two-pass count/scan/fill scheme as _render_as_is, but
      /// each OpenMP thread keeps a private mask vector (one char per image
      /// node) to detect duplicates; the mask is reset after every row so it
      /// can be reused for the next row processed by the same thread.
      template<typename Adjactor_>
      void _render_injectify(const Adjactor_& adj)
      {
        // get counts
        _num_nodes_image = adj.get_num_nodes_image();
        Index num_indices_image = 0;

        // allocate pointer vector
        _domain_ptr.resize(adj.get_num_nodes_domain() + 1);
        _domain_ptr[0] = 0;

        FEAT_PRAGMA_OMP(parallel)
        {
          // allocate auxiliary mask vector (thread-local: declared inside the parallel region)
          std::vector<char> vidx_mask(adj.get_num_nodes_image(), 0);
          char* idx_mask = vidx_mask.data();

          // count number of adjacencies and build pointer vector
          FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
          for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
          {
            Index num_indices_here = Index(0);
            for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
            {
              if(idx_mask[*it] == 0)
              {
                ++num_indices_here;
                idx_mask[*it] = 1;
              }
            }
            _domain_ptr[i+1] = num_indices_here;
            // reset the mask for the next row
            for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
              idx_mask[*it] = 0;
          }
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj.get_num_nodes_domain()];

        // allocate and build index vector
        _image_idx.resize(num_indices_image);

        FEAT_PRAGMA_OMP(parallel)
        {
          // second thread-local mask for the fill pass
          std::vector<char> vidx_mask(adj.get_num_nodes_image(), 0);
          char* idx_mask = vidx_mask.data();
          FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
          for(Index i = 0; i < adj.get_num_nodes_domain(); ++i)
          {
            Index k = _domain_ptr[i];
            for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
            {
              if(idx_mask[*it] == 0)
              {
                _image_idx[k] = *it;
                ++k;
                idx_mask[*it] = 1;
              }
            }
            // reset the mask for the next row
            for(auto it = adj.image_begin(i); it != adj.image_end(i); ++it)
              idx_mask[*it] = 0;
          }
        }
      }
539
      /// \brief Renders the transpose of an adjactor into this graph.
      ///
      /// Counts, for every image node of \p adj, how many domain nodes refer
      /// to it, scans the counts into row offsets, and then fills each row via
      /// a per-image-node write cursor. Because the outer loop visits domain
      /// nodes in ascending order, each row of the result comes out sorted.
      template<typename Adjactor_>
      void _render_transpose(const Adjactor_& adj)
      {
        typedef typename Adjactor_::ImageIterator AImIt;

        // get counts (domain and image roles are swapped in the transpose)
        _num_nodes_image = adj.get_num_nodes_domain();
        Index num_indices_image = 0;

        // allocate and format pointer vector
        _domain_ptr.resize(adj.get_num_nodes_image() + 1, Index(0));

        // count number of adjacencies
        for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
        {
          AImIt cur(adj.image_begin(j));
          AImIt end(adj.image_end(j));
          for(; cur != end; ++cur)
          {
            ++_domain_ptr[(*cur) + 1];
          }
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj.get_num_nodes_image()];

        // allocate and build index vector
        _image_idx.resize(num_indices_image);
        // set up one write cursor per image node, starting at its row offset
        std::vector<Index*> vimg_ptr(adj.get_num_nodes_image(), nullptr);
        Index** image_ptr = vimg_ptr.data();
        Index* image_idx = _image_idx.data();
        for(Index i(0); i < adj.get_num_nodes_image(); ++i)
        {
          image_ptr[i] = &image_idx[_domain_ptr[i]];
        }

        // scatter each domain node index into the rows of its image nodes
        for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
        {
          AImIt cur(adj.image_begin(j));
          AImIt end(adj.image_end(j));
          for(; cur != end; ++cur)
          {
            Index*& idx = image_ptr[*cur];
            *idx = j;
            ++idx;
          }
        }
      }
590
      /// \brief Renders the injectified transpose of an adjactor into this graph.
      ///
      /// Like _render_transpose, but a mask vector (one char per image node of
      /// \p adj) suppresses duplicate entries within each domain node's
      /// adjacency list; the mask is reset after every domain node. The result
      /// rows are sorted for the same reason as in _render_transpose.
      template<typename Adjactor_>
      void _render_injectify_transpose(const Adjactor_& adj)
      {
        // get counts (domain and image roles are swapped in the transpose)
        _num_nodes_image = adj.get_num_nodes_domain();
        Index num_indices_image = 0;

        // allocate pointer vector
        _domain_ptr.resize(adj.get_num_nodes_image() + 1, Index(0));
        // allocate auxiliary mask vector
        std::vector<char> vidx_mask(adj.get_num_nodes_image(), 0);
        char* idx_mask = vidx_mask.data();

        // loop over all image nodes
        for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
        {
          for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
          {
            if(idx_mask[*it] == 0)
            {
              ++num_indices_image;
              ++_domain_ptr[(*it)+1];
              idx_mask[*it] = 1;
            }
          }
          // reset the mask for the next domain node
          for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
            idx_mask[*it] = 0;
        }

        _image_idx.resize(num_indices_image);
        // one write cursor per image node of adj
        std::vector<Index*> vimg_ptr(adj.get_num_nodes_image(), nullptr);
        Index** image_ptr = vimg_ptr.data();
        Index* image_idx = _image_idx.data();

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
        for(Index i(0); i < adj.get_num_nodes_image(); ++i)
        {
          image_ptr[i] = &image_idx[_domain_ptr[i]];
        }

        // build image index vector (mask entries are all 0 again at this point)
        for(Index j(0); j < adj.get_num_nodes_domain(); ++j)
        {
          for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
          {
            if(idx_mask[*it] == 0)
            {
              Index*& idx = image_ptr[*it];
              *idx = j;
              ++idx;
              idx_mask[*it] = 1;
            }
          }
          // reset the mask for the next domain node
          for(auto it = adj.image_begin(j); it != adj.image_end(j); ++it)
            idx_mask[*it] = 0;
        }
      }
650
      /// \brief Renders the composition adj1 o adj2 into this graph "as-is",
      /// keeping duplicate indices.
      ///
      /// For every domain node i of \p adj1 and every image node m of i, all
      /// image nodes of m under \p adj2 are appended to row i. Same two-pass
      /// count/scan/fill scheme as the single-adjactor overload.
      template<
        typename Adjactor1_,
        typename Adjactor2_>
      void _render_as_is(
        const Adjactor1_& adj1,
        const Adjactor2_& adj2)
      {
        // validate adjactor dimensions
        XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");

        typedef typename Adjactor1_::ImageIterator AImIt1;
        typedef typename Adjactor2_::ImageIterator AImIt2;

        // get counts
        _num_nodes_image = adj2.get_num_nodes_image();
        Index num_indices_image = 0;

        // allocate pointer vector
        _domain_ptr.resize(adj1.get_num_nodes_domain() + 1);
        _domain_ptr[0] = 0;
        // count number of adjacencies and build pointer vector
        // (race-free: iteration i writes only _domain_ptr[i+1])
        FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
        for(Index i = 0; i < adj1.get_num_nodes_domain(); ++i)
        {
          Index num_indices_here = Index(0);
          AImIt1 cur1(adj1.image_begin(i));
          AImIt1 end1(adj1.image_end(i));
          for(; cur1 != end1; ++cur1)
          {
            AImIt2 cur2(adj2.image_begin(*cur1));
            AImIt2 end2(adj2.image_end(*cur1));
            for(; cur2 != end2; ++cur2)
            {
              ++num_indices_here;
            }
          }
          _domain_ptr[i+1] = num_indices_here;
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj1.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj1.get_num_nodes_domain()];

        // allocate and build index vector
        _image_idx.resize(num_indices_image);

        FEAT_PRAGMA_OMP(parallel for schedule(dynamic, 1000))
        for(Index i = 0; i < adj1.get_num_nodes_domain(); ++i)
        {
          Index k = _domain_ptr[i];
          AImIt1 cur1(adj1.image_begin(i));
          AImIt1 end1(adj1.image_end(i));
          for(; cur1 != end1; ++cur1)
          {
            AImIt2 cur2(adj2.image_begin(*cur1));
            AImIt2 end2(adj2.image_end(*cur1));
            for(; cur2 != end2; ++cur2, ++k)
            {
              _image_idx[k] = *cur2;
            }
          }
        }
      }
715
      /// \brief Renders the composition adj1 o adj2 into this graph, removing
      /// duplicate image indices within each row ("injectify").
      ///
      /// Same scheme as the single-adjactor overload: per-thread mask vectors
      /// (one char per image node of \p adj2) detect duplicates, and the mask
      /// is reset after every row.
      template<
        typename Adjactor1_,
        typename Adjactor2_>
      void _render_injectify(
        const Adjactor1_& adj1,
        const Adjactor2_& adj2)
      {
        // validate adjactor dimensions
        XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");

        // get counts
        _num_nodes_image = adj2.get_num_nodes_image();
        Index num_indices_image = 0;

        // allocate pointer vector
        _domain_ptr.resize(adj1.get_num_nodes_domain() + 1);
        _domain_ptr[0] = Index(0);
        FEAT_PRAGMA_OMP(parallel)
        {
          // allocate auxiliary mask vector (thread-local: declared inside the parallel region)
          std::vector<char> vidx_mask(adj2.get_num_nodes_image(), 0);
          char* idx_mask = vidx_mask.data();

          // count number of adjacencies and build pointer vector
          FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
          for(Index i=0; i < adj1.get_num_nodes_domain(); ++i)
          {
            Index num_indices_here = Index(0);
            for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
            {
              for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
              {
                if(idx_mask[*jt] == 0)
                {
                  ++num_indices_here;
                  idx_mask[*jt] = 1;
                }
              }
            }
            _domain_ptr[i+1] = num_indices_here;
            // reset mask
            for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
              for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
                idx_mask[*jt] = 0;
          }
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj1.get_num_nodes_domain()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj1.get_num_nodes_domain()];
        _image_idx.resize(num_indices_image);

        FEAT_PRAGMA_OMP(parallel)
        {
          // second thread-local mask for the fill pass
          std::vector<char> vidx_mask(adj2.get_num_nodes_image(), 0);
          char* idx_mask = vidx_mask.data();
          FEAT_PRAGMA_OMP(for schedule(dynamic, 1000))
          for(Index i = 0; i < adj1.get_num_nodes_domain(); ++i)
          {
            Index k = _domain_ptr[i];
            for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
            {
              for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
              {
                if(idx_mask[*jt] == 0)
                {
                  _image_idx[k] = *jt;
                  ++k;
                  idx_mask[*jt] = 1;
                }
              }
            }
            // reset mask
            for(auto it = adj1.image_begin(i); it != adj1.image_end(i); ++it)
              for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
                idx_mask[*jt] = 0;
          }
        }
      }
796
      /// \brief Renders the transpose of the composition adj1 o adj2 into this graph.
      ///
      /// Counts how often each image node of \p adj2 is reached through the
      /// composition, scans the counts into row offsets, and scatters the
      /// domain node indices via per-image-node write cursors. Rows come out
      /// sorted because the outer loop visits domain nodes in ascending order.
      template<
        typename Adjactor1_,
        typename Adjactor2_>
      void _render_transpose(
        const Adjactor1_& adj1,
        const Adjactor2_& adj2)
      {
        // validate adjactor dimensions
        XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");

        typedef typename Adjactor1_::ImageIterator AImIt1;
        typedef typename Adjactor2_::ImageIterator AImIt2;

        // get counts (domain and image roles are swapped in the transpose)
        _num_nodes_image = adj1.get_num_nodes_domain();
        Index num_indices_image = 0;

        // allocate and format pointer vector
        _domain_ptr.resize(adj2.get_num_nodes_image() + 1, Index(0));

        // count number of adjacencies
        for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
        {
          AImIt1 cur1(adj1.image_begin(j));
          AImIt1 end1(adj1.image_end(j));
          for(; cur1 != end1; ++cur1)
          {
            AImIt2 cur2(adj2.image_begin(*cur1));
            AImIt2 end2(adj2.image_end(*cur1));
            for(; cur2 != end2; ++cur2)
            {
              ++_domain_ptr[(*cur2) + 1];
            }
          }
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj2.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj2.get_num_nodes_image()];

        // allocate and build index vector
        _image_idx.resize(num_indices_image);
        // one write cursor per image node, starting at its row offset
        std::vector<Index*> vimg_ptr(adj2.get_num_nodes_image(), nullptr);
        Index** image_ptr = vimg_ptr.data();
        Index* image_idx = _image_idx.data();

        for(Index i(0); i < adj2.get_num_nodes_image(); ++i)
        {
          image_ptr[i] = &image_idx[_domain_ptr[i]];
        }

        // scatter each domain node index into the rows of its reachable image nodes
        for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
        {
          AImIt1 cur1(adj1.image_begin(j));
          AImIt1 end1(adj1.image_end(j));
          for(; cur1 != end1; ++cur1)
          {
            AImIt2 cur2(adj2.image_begin(*cur1));
            AImIt2 end2(adj2.image_end(*cur1));
            for(; cur2 != end2; ++cur2)
            {
              Index*& idx = image_ptr[*cur2];
              *idx = j;
              ++idx;
            }
          }
        }
      }
866
      /// \brief Renders the injectified transpose of the composition
      /// adj1 o adj2 into this graph.
      ///
      /// Like the two-adjactor _render_transpose, but a mask vector (one char
      /// per image node of \p adj2) suppresses duplicates within each domain
      /// node's composite adjacency list; the mask is reset after every
      /// domain node.
      template<
        typename Adjactor1_,
        typename Adjactor2_>
      void _render_injectify_transpose(
        const Adjactor1_& adj1,
        const Adjactor2_& adj2)
      {
        // validate adjactor dimensions
        XASSERTM(adj1.get_num_nodes_image() == adj2.get_num_nodes_domain(), "Adjactor dimension mismatch!");

        // get counts (domain and image roles are swapped in the transpose)
        _num_nodes_image = adj1.get_num_nodes_domain();
        Index num_indices_image = 0;

        // allocate pointer vector
        _domain_ptr.resize(adj2.get_num_nodes_image() + 1, Index(0));

        // allocate auxiliary mask vector
        std::vector<char> vidx_mask(adj2.get_num_nodes_image(), 0);
        char* idx_mask = vidx_mask.data();

        // loop over all image nodes
        for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
        {
          for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
          {
            for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
            {
              if(idx_mask[*jt] == 0)
              {
                ++_domain_ptr[(*jt)+1];
                idx_mask[*jt] = 1;
              }
            }
          }
          // reset mask
          for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
            for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
              idx_mask[*jt] = 0;
        }

        // perform inclusive scan to obtain domain pointer
        feat_omp_in_scan(adj2.get_num_nodes_image()+1u, _domain_ptr.data(), _domain_ptr.data());
        num_indices_image = _domain_ptr[adj2.get_num_nodes_image()];

        _image_idx.resize(num_indices_image);
        // one write cursor per image node of adj2
        std::vector<Index*> vimg_ptr(adj2.get_num_nodes_image(), nullptr);
        Index** image_ptr = vimg_ptr.data();
        Index* image_idx = _image_idx.data();

        // build pointer vector
        for(Index i(0); i < adj2.get_num_nodes_image(); ++i)
        {
          image_ptr[i] = &image_idx[_domain_ptr[i]];
        }

        // build image index vector (mask entries are all 0 again at this point)
        for(Index j(0); j < adj1.get_num_nodes_domain(); ++j)
        {
          for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
          {
            for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
            {
              if(idx_mask[*jt] == 0)
              {
                Index*& idx = image_ptr[*jt];
                *idx = j;
                ++idx;
                idx_mask[*jt] = 1;
              }
            }
          }
          // reset mask
          for(auto it = adj1.image_begin(j); it != adj1.image_end(j); ++it)
            for(auto jt = adj2.image_begin(*it); jt != adj2.image_end(*it); ++jt)
              idx_mask[*jt] = 0;
        }
      }
946
948 /* ******************************************************************* */
949 /* A D J A C T O R I N T E R F A C E I M P L E M E N T A T I O N */
950 /* ******************************************************************* */
951 public:
952
953 inline Index get_num_nodes_domain() const
954 {
955 return (_domain_ptr.empty() ? Index(0) : Index(_domain_ptr.size() - 1));
956 }
957
      /// \returns The total number of image nodes, i.e. the number of columns of the graph.
      inline Index get_num_nodes_image() const
      {
        return _num_nodes_image;
      }
962
964 inline ImageIterator image_begin(Index domain_node) const
965 {
966 ASSERTM(domain_node +1 < _domain_ptr.size(), "Domain node index out of range");
967
968 return _image_idx.begin() + IndexVector::difference_type(_domain_ptr[domain_node]);
969 }
970
972 inline ImageIterator image_end(Index domain_node) const
973 {
974 ASSERTM(domain_node +1< _domain_ptr.size(), "Domain node index out of range");
975
976 return _image_idx.begin() + IndexVector::difference_type(_domain_ptr[domain_node + Index(1)]);
977 }
978 }; // class Graph
979 } // namespace Adjacency
980} // namespace FEAT
#define XABORTM(msg)
Abortion macro definition with custom message.
Definition: assertion.hpp:192
#define ASSERTM(expr, msg)
Debug-Assertion macro definition with custom message.
Definition: assertion.hpp:230
#define XASSERTM(expr, msg)
Assertion macro definition with custom message.
Definition: assertion.hpp:263
Adjacency Graph implementation.
Definition: graph.hpp:34
Graph & operator=(Graph &&other)
move-assign operator
Definition: graph.cpp:90
static constexpr std::uint64_t magic
magic number for Graph serialization
Definition: graph.hpp:48
IndexVector _image_idx
Image node index Vector.
Definition: graph.hpp:66
IndexVector _domain_ptr
Domain pointer Vector.
Definition: graph.hpp:59
Graph clone() const
Clones this graph.
Definition: graph.hpp:311
Graph()
Default constructor.
Definition: graph.cpp:15
ImageIterator image_begin(Index domain_node) const
Returns an iterator for the first adjacent image node.
Definition: graph.hpp:964
std::vector< Index > IndexVector
index vector type
Definition: graph.hpp:37
Graph(RenderType render_type, const Adjactor1_ &adjactor1, const Adjactor2_ &adjactor2)
Composite-Render constructor.
Definition: graph.hpp:228
Graph(RenderType render_type, const Adjactor_ &adjactor)
Render constructor.
Definition: graph.hpp:175
const Index * get_domain_ptr() const
Returns the domain pointer array.
Definition: graph.hpp:365
void sort_indices()
Sorts the image indices to non-descending order.
Definition: graph.cpp:206
Index degree() const
Returns the degree of the graph.
Definition: graph.cpp:195
Index * get_domain_ptr()
Returns the domain pointer array.
Definition: graph.hpp:359
ImageIterator image_end(Index domain_node) const
Returns an iterator for the first position past the last adjacent image node.
Definition: graph.hpp:972
Index * get_image_idx()
Returns the image node index array.
Definition: graph.hpp:374
void permute_indices(const Adjacency::Permutation &inv_perm)
Permutes the image indices.
Definition: graph.cpp:221
Index degree(Index domain_node) const
Returns the degree of a domain node.
Definition: graph.hpp:333
IndexVector::const_iterator ImageIterator
ImageIterator for Graph class.
Definition: graph.hpp:45
void clear()
Clears the graph.
Definition: graph.cpp:188
virtual ~Graph()
virtual destructor
Definition: graph.cpp:184
std::size_t bytes() const
Definition: graph.hpp:409
Index _num_nodes_image
total number of image nodes
Definition: graph.hpp:52
Index get_num_indices() const
Returns the total number indices.
Definition: graph.hpp:390
const Index * get_image_idx() const
Returns the image node index array.
Definition: graph.hpp:380
std::vector< char > serialize() const
Serializes the graph into a buffer.
Definition: graph.cpp:234
RenderType
Render type enumeration.
Definition: base.hpp:26
@ injectify_sorted
Render-Injectified mode, sort image indices.
@ transpose
Render-Transpose mode.
@ injectify_transpose
Render-Injectified-Transpose mode.
@ injectify_transpose_sorted
Render-Injectified-Transpose mode, sort image indices.
@ injectify
Render-Injectified mode.
@ transpose_sorted
Render-Transpose mode, sort image indices.
@ as_is
Render-As-Is mode.
@ as_is_sorted
Render-As-Is mode, sort image indices.
FEAT namespace.
Definition: adjactor.hpp:12
void feat_omp_in_scan(std::size_t n, const T_ x[], T_ y[])
Computes an OpenMP-parallel inclusive scan a.k.a. a prefix sum of an array, i.e.
Definition: omp_util.hpp:63
std::uint64_t Index
Index data type.