9#include <kernel/cubature/dynamic_factory.hpp>
10#include <kernel/cubature/rule.hpp>
11#include <kernel/trafo/standard/mapping.hpp>
12#include <kernel/trafo/standard/inverse_mapping.hpp>
13#include <kernel/util/tiny_algebra.hpp>
20 template<
typename DT_,
int dim_>
21 using BoundingBoxType = Tiny::Matrix<DT_, 2, dim_>;
25 template<
typename DT_,
int dim_,
typename VertexSet_,
typename IndexTuple_>
26 static inline BoundingBoxType<DT_, dim_> get_boundingbox(
const VertexSet_& vtx_set,
const IndexTuple_& vidx, DT_ tol)
29 BoundingBoxType<DT_, dim_> bbox;
32 bbox[0] = bbox[1] = vtx_set[vidx[0]];
33 for(
int i(1); i < vidx.num_indices; ++i)
36 const auto& vtx = vtx_set[vidx[i]];
37 for(
int j(0); j < dim_; ++j)
44 for(
int j(0); j < dim_; ++j)
47 DataType bb_extra = tol * (bbox[1][j] - bbox[0][j]);
48 bbox[0][j] -= bb_extra;
49 bbox[1][j] += bb_extra;
55 template<
typename DT_,
int dim_>
56 static inline bool check_point(
const Tiny::Vector<DT_, dim_>& pt,
const BoundingBoxType<DT_, dim_>& bb)
59 for(
int i = 0; i < dim_; ++i)
61 outside |= (pt[i] < bb[0][i]) || (pt[i] > bb[1][i]);
95 template<
typename Trafo_,
typename SurfaceShape_>
99 typedef Trafo_ TrafoType;
100 typedef typename TrafoType::MeshType MeshType;
101 typedef SurfaceShape_ SurfaceShapeType;
102 static constexpr int domain_dim = Trafo_::ShapeType::dimension;
103 static constexpr int sub_dim = SurfaceShapeType::dimension;
104 typedef typename TrafoType::CoordType DataType;
105 typedef Index IndexType;
119 const TrafoType& _trafo;
120 std::vector<Tiny::Vector<DataType, domain_dim>> _shape_vertices;
123 std::vector<Geometry::IndexTuple<verts_per_face>> _shape_index_tmp;
125 std::vector<IndexType> _cell_helper;
126 std::vector<IndexType> _cell_helper_offsets;
127 std::vector<IndexType> _active_surfaces;
128 std::vector<int> _cell_mask_vec;
138 bool no_intersect =
false;
139 for(
int d = 0; d < domain_dim; ++d)
141 no_intersect |= (bb_a[0][d] > bb_b[1][d]) || (bb_b[0][d] > bb_a[1][d]);
144 return !no_intersect;
149 void set_normal_view(
bool inside_view)
154 bool get_normal_view()
const
162 _shape_indices.set_indices(std::move(_shape_index_tmp));
164 const MeshType& mesh = _trafo.get_mesh();
165 const auto& vtx_set = mesh.get_vertex_set();
167 BoundingBoxType<DataType, domain_dim> mesh_bb;
168 for(
int k = 0; k < domain_dim; ++k)
170 mesh_bb[0][k] = Math::Limits<DataType>::max();
171 mesh_bb[1][k] = Math::Limits<DataType>::lowest();
174 for(
const auto* verts = vtx_set.begin(); verts != vtx_set.end(); ++verts)
176 for(
int k = 0; k < domain_dim; ++k)
182 _active_surfaces.clear();
185 const auto& loc_ind_tuple = _shape_indices[k];
187 auto shape_bb = Intern::get_boundingbox<DataType, domain_dim>(_shape_vertices, loc_ind_tuple, DataType(1E-2));
190 _active_surfaces.push_back(k);
194 _cell_helper.clear();
196 for(
auto& c : _cell_helper_offsets)
203 const auto& ele_to_vert = mesh.template get_index_set<domain_dim, 0>();
204 const Index num_elements = mesh.get_num_elements();
205 constexpr Index batch_size = 1 << 19;
206 std::vector<std::vector<IndexType>> gather_helper(std::min(batch_size,
Index(_active_surfaces.size())));
207 FEAT_PRAGMA_OMP(parallel
for)
208 for(std::size_t i = 0; i < gather_helper.size(); ++i)
211 gather_helper[i].reserve(128);
214 for(
Index num_batch = 0; num_batch*batch_size < _active_surfaces.size(); ++num_batch)
216 FEAT_PRAGMA_OMP(parallel
for)
217 for(
Index t = 0; t < gather_helper.size(); ++t)
219 gather_helper.at(t).clear();
222 const Index offset = num_batch*batch_size;
224 FEAT_PRAGMA_OMP(parallel
for)
225 for(
Index t = 0; t < num_elements; ++t)
228 if(!_cell_mask_vec.empty() && _cell_mask_vec.at(t) == 3)
234 auto cell_bb = Intern::get_boundingbox<DataType, domain_dim>(vtx_set, ele_to_vert[t], DataType(1E-4));
237 for(
Index k = 0; k+offset < std::min(_active_surfaces.size(), offset+batch_size); ++k)
239 const auto& loc_ind_tuple = _shape_indices[_active_surfaces.at(k+offset)];
241 auto shape_bb = Intern::get_boundingbox<DataType, domain_dim>(_shape_vertices, loc_ind_tuple, DataType(1E-4));
245 FEAT_PRAGMA_OMP(critical)
246 gather_helper.at(k).push_back(t);
251 for(std::size_t k = 0; k < gather_helper.size(); ++k)
253 const auto& v_info = gather_helper[k];
254 _cell_helper_offsets.at(_active_surfaces.at(k+offset)+1) = v_info.size();
255 for(
const auto& val : v_info)
256 _cell_helper.push_back(val);
261 std::partial_sum(_cell_helper_offsets.begin(), _cell_helper_offsets.end(), _cell_helper_offsets.begin());
263 XASSERTM(_cell_helper_offsets.back() == _cell_helper.size(),
"Offsets do not match size of cell helper array");
277 _cubature_rule(Cubature::ctor_factory, cubature_factory_),
285 template<
typename Vtxs_>
288 _shape_index_tmp.clear();
289 _shape_vertices.resize(vertices.size());
290 FEAT_PRAGMA_OMP(parallel
for)
291 for(std::size_t k = 0; k < std::size_t(vertices.size()); ++k)
293 _shape_vertices[k] = vertices[k];
300 template<
typename Vtxs_>
303 for(std::size_t i = 0; i < std::size_t(verts_per_face); ++i)
305 _shape_vertices.push_back(face_vertices[i]);
308 const IndexType num_indx = _shape_index_tmp.size();
309 _shape_index_tmp.emplace_back();
310 for(IndexType i = 0; i < IndexType(verts_per_face); ++i)
312 _shape_index_tmp.back()[int(i)] = num_indx*verts_per_face + IndexType(i);
319 template<
typename VertIdx_>
322 _shape_index_tmp.emplace_back();
323 for(
int i = 0; i < verts_per_face; ++i)
325 _shape_index_tmp.back()[i] = IndexType(vert_idx[std::size_t(i)]);
330 template<
typename Job_>
331 void assemble(Job_& job)
351 template<
typename MaskVec_>
354 _cell_mask_vec.resize(cell_mask_vec.size());
355 FEAT_PRAGMA_OMP(parallel
for)
356 for(
Index k = 0; k < cell_mask_vec.size(); ++k)
358 _cell_mask_vec[k] = int(cell_mask_vec[k]);
362 template<
typename Job_>
363 void assemble_omp(Job_& job)
365 typedef typename Job_::Task TaskType;
369 FEAT_PRAGMA_OMP(parallel)
372 std::unique_ptr<TaskType> task(
new TaskType(job));
375 for(IndexType k = 0; k < _active_surfaces.size(); ++k)
379 for(
int i = 0; i < verts_per_face; ++i)
381 surface_verts[i] = _shape_vertices[_shape_indices[_active_surfaces[k]][i]];
387 EvalHelper::set_coefficients(coefficients, _shape_vertices, _shape_indices, _active_surfaces[k]);
394 DataType
jac_det = DataType(0);
396 DataType face_volume = EvalHelper::volume(coefficients);
399 std::vector<CoordVecType> points(std::size_t(_cubature_rule.get_num_points()));
401 std::vector<DataType> weights(std::size_t(_cubature_rule.get_num_points()));
403 NormalVecType
normal(DataType(0));
406 for(
int pt = 0; pt < _cubature_rule.get_num_points(); ++pt)
408 RefCoordVecType domain_point = _cubature_rule.get_point(pt);
409 DataType weight = _cubature_rule.get_weight(pt);
411 EvalHelper::map_point(
img_point, domain_point, coefficients);
412 EvalHelper::calc_jac_mat(jac, domain_point, coefficients);
416 if constexpr(sub_dim+1==domain_dim)
426 DataType point_weight =
jac_det * weight;
428 weights[std::size_t(pt)] = point_weight;
432 task->prepare(_cell_helper, _cell_helper_offsets, points, weights,
normal, face_volume, _active_surfaces[k]);
438 FEAT_PRAGMA_OMP(critical)
443 FEAT_PRAGMA_OMP(critical)
#define XASSERTM(expr, msg)
Assertion macro definition with custom message.
Assembler class for the discrete integral of an FE function over a d-1 manifold.
void add_face_vertices(const Vtxs_ &face_vertices)
Simply pushes new vertices to the end of the current vertex array.
SurfaceIntegrator(const Trafo_ &trafo, const Cubature::DynamicFactory &cubature_factory_)
Initializes the integrator with a cell wise list of vertices.
bool _inside_view
Normal defined from the inside or outside?
void set_mask_vector(const MaskVec_ &cell_mask_vec)
Sets the cell mask vector, which sorts out any cells completely outside the fluid domain.
void add_face(const VertIdx_ &vert_idx)
bool boundingbox_intersect_test(const BoundingBoxType< DataType, domain_dim > &bb_a, const BoundingBoxType< DataType, domain_dim > &bb_b)
Tests whether two bounding boxes intersect in any way.
void set_vertices(const std::vector< Vtxs_ > &vertices)
Cubature Rule class template.
Conformal Index-Set class template.
Index get_num_entities() const
Returns the number of entities.
Tiny Matrix class template.
CUDA_HOST_DEVICE void format(DataType alpha=DataType(0))
Formats the matrix.
Tiny Tensor3 class template.
Tiny Vector class template.
void minimax(T_ x, T_ &a, T_ &b)
Updates the minimum and maximum.
Vector< T_, m_ > orthogonal(const Matrix< T_, m_, m_-1, sm_, sn_ > &tau)
Computes the positively oriented orthogonal vector to the columns of a m_ x (m_-1) Matrix.
std::uint64_t Index
Index data type.
@ img_point
specifies whether the trafo should supply image point coordinates
@ jac_det
specifies whether the trafo should supply jacobian determinants
Face traits tag struct template.
Evaluator helper class for the standard trafo.