#include <kernel/lafem/dense_vector.hpp>
#include <kernel/lafem/dense_vector_blocked.hpp>
#include <kernel/lafem/vector_mirror.hpp>
#include <kernel/global/gate.hpp>
#include <kernel/global/transfer.hpp>
#include <kernel/assembly/symbolic_assembler.hpp>
#include <kernel/assembly/grid_transfer.hpp>
#include <control/domain/voxel_domain_control.hpp>

#include <vector>
27 template<
typename Vector_,
typename Mirror_>
28 static Vector_ deslag_vector(
const Vector_& vector,
const Mirror_& mirror)
30 const Index num_idx = mirror.num_indices();
31 Vector_ vector_x(num_idx);
32 const auto* v = vector.elements();
33 auto* x = vector_x.elements();
34 const auto* idx_f = mirror.indices();
35 for(
Index i(0); i < num_idx; ++i)
41 template<
typename Matrix_,
typename Mirror_>
42 static Matrix_ deslag_matrix_rows(
const Matrix_& matrix,
const Mirror_& row_mirror)
44 typedef typename Matrix_::IndexType IndexType;
45 const Index num_idx = row_mirror.num_indices();
46 const auto* idx_f = row_mirror.indices();
48 const IndexType* row_ptr_s = matrix.row_ptr();
49 const IndexType* col_idx_s = matrix.col_ind();
51 for(
Index i(0); i < num_idx; ++i)
52 num_nze += row_ptr_s[idx_f[i] + 1u] - row_ptr_s[idx_f[i]];
54 Matrix_ matrix_x(num_idx, matrix.columns(), num_nze);
55 IndexType* row_ptr_x = matrix_x.row_ptr();
56 IndexType* col_idx_x = matrix_x.col_ind();
57 auto* val_x = matrix_x.val();
58 const auto* val_s = matrix.val();
60 for(
Index i(0); i < num_idx; ++i)
62 Index row_s = idx_f[i];
63 IndexType k(row_ptr_x[i]);
64 for(
auto j(row_ptr_s[row_s]); j < row_ptr_s[row_s+1u]; ++j, ++k)
66 col_idx_x[k] = col_idx_s[j];
75 template<
typename Matrix_,
typename Mirror_>
76 static Matrix_ deslag_matrix_cols(
const Matrix_& matrix,
const Mirror_& col_mirror)
78 typedef typename Matrix_::IndexType IndexType;
79 const Index num_idx = col_mirror.num_indices();
80 const auto* idx_f = col_mirror.indices();
82 std::vector<Index> col_map(matrix.columns(), ~
Index(0));
83 for(
Index i(0); i < num_idx; ++i)
84 col_map[idx_f[i]] = i;
86 const Index num_rows = matrix.rows();
87 const IndexType* row_ptr_s = matrix.row_ptr();
88 const IndexType* col_idx_s = matrix.col_ind();
89 Index used_elems = matrix.used_elements();
91 for(
Index i(0); i < used_elems; ++i)
92 num_nze += (col_map[col_idx_s[i]] != ~
Index(0));
94 Matrix_ matrix_x(num_rows, num_idx, num_nze);
95 IndexType* row_ptr_x = matrix_x.row_ptr();
96 IndexType* col_idx_x = matrix_x.col_ind();
97 auto* val_x = matrix_x.val();
98 const auto* val_s = matrix.val();
100 for(
Index i(0); i < num_rows; ++i)
102 IndexType k(row_ptr_x[i]);
103 for(
auto j(row_ptr_s[i]); j < row_ptr_s[i+1u]; ++j)
105 if(col_map[col_idx_s[j]] != ~
Index(0))
107 col_idx_x[k] = IndexType(col_map[col_idx_s[j]]);
// Assembles the grid-transfer operators (prolongation, restriction and, where
// enabled, truncation) for a scalar space on a voxel domain level pair,
// taking a possible "slag" (overlap) level on the fine side into account.
//
// NOTE(review): this chunk appears to be an incomplete extraction -- braces and
// several statements (e.g. the symbolic/numeric assembly calls and the slag
// mirror assembly) are missing, and the leading digits on many lines look like
// fused original line numbers. Verify against the original file.
156 template<
typename DomainLevel_,
template<
typename>
class SlagDomainLevelWrapper_,
typename SpaceLambda_,
typename Transfer_,
typename Muxer_,
typename Gate_>
157 void asm_transfer_voxel_scalar(
158 const Control::Domain::VirtualLevel<SlagDomainLevelWrapper_<DomainLevel_>>& virt_lvl_f,
159 const Control::Domain::VirtualLevel<SlagDomainLevelWrapper_<DomainLevel_>>& virt_lvl_c,
160 const String& cubature,
bool trunc,
bool shrink,
161 SpaceLambda_&& space_lambda, Transfer_& transfer,
const Muxer_& muxer,
const Gate_& gate_f,
const Gate_& gate_c)
// shorthand typedefs taken from the transfer/gate types
163 typedef typename Transfer_::DataType DataType;
164 typedef typename Transfer_::IndexType IndexType;
165 typedef typename Gate_::MirrorType MirrorType;
166 typedef LAFEM::DenseVector<DataType, IndexType> ScalarVectorType;
// does the fine virtual level carry a separate slag (overlap) level?
169 const bool have_slag(virt_lvl_f->slag_level);
// assemble on the slag level if present, otherwise on the level itself
172 const DomainLevel_& level_f = have_slag ? *virt_lvl_f->slag_level :
static_cast<const DomainLevel_&
>(*virt_lvl_f);
// on the coarse side, a child process assembles against the child view
173 const DomainLevel_& level_c = virt_lvl_c.is_child() ? virt_lvl_c.level_c() : *virt_lvl_c;
// obtain the FE spaces from the caller-supplied lambda
176 const auto& space_f = *space_lambda(level_f);
177 const auto& space_c = *space_lambda(level_c);
// the local transfer matrices to be assembled
180 auto& loc_prol = transfer.get_mat_prol();
181 auto& loc_rest = transfer.get_mat_rest();
182 auto& loc_trunc = transfer.get_mat_trunc();
// (presumably) assemble the matrix structure only if not yet done -- confirm
185 if (loc_prol.empty())
// weight vectors for row-wise scaling of prolongation/truncation
189 auto loc_vec_weight_p = loc_prol.create_vector_l();
190 auto loc_vec_weight_t = loc_prol.create_vector_r();
// format, i.e. zero out, the weight vectors before accumulation
194 loc_vec_weight_p.format();
195 loc_vec_weight_t.format();
// truncation operator has the transposed layout of the prolongation
199 loc_trunc.transpose(loc_prol);
// fetch the slag patch part (stored under index -1) of the fine level
212 const auto* slag_part = level_f.find_patch_part(-1);
213 XASSERTM(slag_part !=
nullptr,
"slag patch part not found!");
214 MirrorType slag_mirror;
// strip the slag rows/columns from the weight vector and transfer matrices
217 loc_vec_weight_p = VoxelAux::deslag_vector(loc_vec_weight_p, slag_mirror);
218 loc_prol = VoxelAux::deslag_matrix_rows(loc_prol, slag_mirror);
221 loc_trunc = VoxelAux::deslag_matrix_cols(loc_trunc, slag_mirror);
// sum up the prolongation weights over all neighbor patches
226 gate_f.sync_0(loc_vec_weight_p);
// invert component-wise to obtain the averaging factors
229 loc_vec_weight_p.component_invert(loc_vec_weight_p);
// scale prolongation rows by the averaged weights
232 loc_prol.scale_rows(loc_prol, loc_vec_weight_p);
// drop near-zero entries relative to the largest magnitude entry
236 loc_prol.shrink(DataType(1E-3) * loc_prol.max_abs_element());
// restriction is the transpose of the (scaled) prolongation
239 loc_rest = loc_prol.transpose();
// --- truncation weight synchronization over the coarse level ---
// case 1: coarse level is not a child: sync directly over the coarse gate
251 if(!virt_lvl_c.is_child())
257 gate_c.sync_0(loc_vec_weight_t);
// case 2: this process is a parent of the coarse partitioning:
// join the child weights, sync on the parent layer, then split back
259 else if(virt_lvl_c.is_parent())
270 ScalarVectorType loc_tmp(gate_c.get_freqs().size());
273 muxer.join(loc_vec_weight_t, loc_tmp);
276 gate_c.sync_0(loc_tmp);
279 muxer.split(loc_vec_weight_t, loc_tmp);
// case 3: pure child process: send weights to parent and receive result
287 muxer.join_send(loc_vec_weight_t);
291 muxer.split_recv(loc_vec_weight_t);
// invert the synchronized truncation weights ...
295 loc_vec_weight_t.component_invert(loc_vec_weight_t);
// ... and scale the truncation operator rows with them
298 loc_trunc.scale_rows(loc_trunc, loc_vec_weight_t);
// finally, drop near-zero truncation entries
302 loc_trunc.shrink(DataType(1E-3) * loc_trunc.max_abs_element());
// Blocked-space counterpart of asm_transfer_voxel_scalar: assembles the
// grid-transfer operators for a blocked (vector-valued) space on a voxel
// domain level pair. The transfer matrices are "wrapped" blocked matrices;
// the assembly works on their unwrapped scalar representation, and scalar
// gates/muxers are converted from the blocked ones for weight synchronization.
//
// NOTE(review): this chunk appears to be an incomplete extraction -- braces and
// several statements (assembly calls, slag mirror assembly, scalar gate
// conversion for the fine side) are missing, and the leading digits on many
// lines look like fused original line numbers. Verify against the original.
343 template<
typename DomainLevel_,
template<
typename>
class SlagDomainLevelWrapper_,
typename SpaceLambda_,
typename Transfer_,
typename Muxer_,
typename Gate_>
344 static void asm_transfer_voxel_blocked(
345 const Control::Domain::VirtualLevel<SlagDomainLevelWrapper_<DomainLevel_>>& virt_lvl_f,
346 const Control::Domain::VirtualLevel<SlagDomainLevelWrapper_<DomainLevel_>>& virt_lvl_c,
347 const String& cubature,
bool trunc,
bool shrink,
348 SpaceLambda_&& space_lambda, Transfer_& transfer,
const Muxer_& muxer,
const Gate_& gate_f,
const Gate_& gate_c)
// shorthand typedefs taken from the transfer/gate types
350 typedef typename Transfer_::DataType DataType;
351 typedef typename Transfer_::IndexType IndexType;
352 typedef typename Gate_::MirrorType MirrorType;
353 typedef LAFEM::DenseVector<DataType, IndexType> ScalarVectorType;
// does the fine virtual level carry a separate slag (overlap) level?
356 const bool have_slag(virt_lvl_f->slag_level);
// assemble on the slag level if present, otherwise on the level itself
359 const DomainLevel_& level_f = have_slag ? *virt_lvl_f->slag_level :
static_cast<const DomainLevel_&
>(*virt_lvl_f);
// on the coarse side, a child process assembles against the child view
360 const DomainLevel_& level_c = virt_lvl_c.is_child() ? virt_lvl_c.level_c() : *virt_lvl_c;
// obtain the FE spaces from the caller-supplied lambda
363 const auto& space_f = *space_lambda(level_f);
364 const auto& space_c = *space_lambda(level_c);
// the wrapped (blocked) transfer matrices stored in the transfer object
367 auto& loc_prol_wrapped = transfer.get_mat_prol();
368 auto& loc_rest_wrapped = transfer.get_mat_rest();
369 auto& loc_trunc_wrapped = transfer.get_mat_trunc();
// work on the unwrapped scalar views of the blocked matrices
372 auto& loc_prol = loc_prol_wrapped.unwrap();
373 auto& loc_rest = loc_rest_wrapped.unwrap();
374 auto& loc_trunc = loc_trunc_wrapped.unwrap();
// (presumably) assemble the matrix structure only if not yet done -- confirm
377 if (loc_prol.empty())
// weight vectors for row-wise scaling of prolongation/truncation
381 auto loc_vec_weight_p = loc_prol.create_vector_l();
382 auto loc_vec_weight_t = loc_prol.create_vector_r();
// format, i.e. zero out, the weight vectors before accumulation
386 loc_vec_weight_p.format();
387 loc_vec_weight_t.format();
// truncation operator has the transposed layout of the prolongation
391 loc_trunc.transpose(loc_prol);
// fetch the slag patch part (stored under index -1) of the fine level
404 const auto* slag_part = level_f.find_patch_part(-1);
405 XASSERTM(slag_part !=
nullptr,
"slag patch part not found!");
406 MirrorType slag_mirror;
// strip the slag rows/columns from the weight vector and transfer matrices
409 loc_vec_weight_p = VoxelAux::deslag_vector(loc_vec_weight_p, slag_mirror);
410 loc_prol = VoxelAux::deslag_matrix_rows(loc_prol, slag_mirror);
413 loc_trunc = VoxelAux::deslag_matrix_cols(loc_trunc, slag_mirror);
// a scalar gate is needed, since the weights are scalar vectors
418 Global::Gate<ScalarVectorType, MirrorType> scalar_gate;
// sum up the prolongation weights over all neighbor patches
422 scalar_gate.sync_0(loc_vec_weight_p);
// invert component-wise to obtain the averaging factors
425 loc_vec_weight_p.component_invert(loc_vec_weight_p);
// scale prolongation rows by the averaged weights
428 loc_prol.scale_rows(loc_prol, loc_vec_weight_p);
// drop near-zero entries relative to the largest magnitude entry
432 loc_prol.shrink(DataType(1E-3) * loc_prol.max_abs_element());
// restriction is the transpose of the (scaled) prolongation
435 loc_rest = loc_prol.transpose();
// --- truncation weight synchronization over the coarse level ---
// case 1: coarse level is not a child: sync over a converted scalar gate
447 if(!virt_lvl_c.is_child())
455 Global::Gate<ScalarVectorType, MirrorType> scalar_gate_c;
456 scalar_gate_c.convert(gate_c, ScalarVectorType(space_c.get_num_dofs()));
459 scalar_gate_c.sync_0(loc_vec_weight_t);
// case 2: this process is a parent of the coarse partitioning:
// convert scalar gate + muxer, join the child weights, sync, split back
461 else if(virt_lvl_c.is_parent())
474 const Index num_dofs_c = gate_c.get_freqs().size();
477 Global::Gate<ScalarVectorType, MirrorType> scalar_gate_c;
478 scalar_gate_c.convert(gate_c, ScalarVectorType(num_dofs_c));
481 Global::Muxer<ScalarVectorType, MirrorType> scalar_muxer_f;
482 scalar_muxer_f.convert(muxer, ScalarVectorType(space_f.get_num_dofs()));
485 ScalarVectorType loc_tmp(num_dofs_c);
488 scalar_muxer_f.join(loc_vec_weight_t, loc_tmp);
491 scalar_gate_c.sync_0(loc_tmp);
494 scalar_muxer_f.split(loc_vec_weight_t, loc_tmp);
// case 3: pure child process: send weights to parent and receive result
503 Global::Muxer<ScalarVectorType, MirrorType> scalar_muxer_f;
504 scalar_muxer_f.convert(muxer, ScalarVectorType(space_f.get_num_dofs()));
506 scalar_muxer_f.join_send(loc_vec_weight_t);
510 scalar_muxer_f.split_recv(loc_vec_weight_t);
// invert the synchronized truncation weights ...
514 loc_vec_weight_t.component_invert(loc_vec_weight_t);
// ... and scale the truncation operator rows with them
517 loc_trunc.scale_rows(loc_trunc, loc_vec_weight_t);
// finally, drop near-zero truncation entries
521 loc_trunc.shrink(DataType(1E-3) * loc_trunc.max_abs_element());
#define XASSERT(expr)
Assertion macro definition; verifies that expr holds.
#define XASSERTM(expr, msg)
Assertion macro definition; verifies that expr holds, reporting msg on failure.
static void assemble_truncation(Matrix_ &matrix, Vector_ &vector, const FineSpace_ &fine_space, const CoarseSpace_ &coarse_space, const String &cubature_name)
Assembles a truncation matrix and its corresponding weight vector.
static void assemble_prolongation(Matrix_ &matrix, Vector_ &vector, const FineSpace_ &fine_space, const CoarseSpace_ &coarse_space, const String &cubature_name)
Assembles a prolongation matrix and its corresponding weight vector.
static void assemble_mirror(LAFEM::VectorMirror< DataType_, IndexType_ > &vec_mirror, const Space_ &space, const MeshPart_ &mesh_part)
Assembles a VectorMirror from a space and a mesh-part.
static void assemble_matrix_2lvl(MatrixType_ &matrix, const FineSpace_ &fine_space, const CoarseSpace_ &coarse_space)
Assembles a 2-level matrix structure from a fine-coarse-space pair.
std::uint64_t Index
Index data type.