FEAT 3
Finite Element Analysis Toolbox
Loading...
Searching...
No Matches
surface_integrator.hpp
1// FEAT3: Finite Element Analysis Toolbox, Version 3
2// Copyright (C) 2010 by Stefan Turek & the FEAT group
3// FEAT3 is released under the GNU General Public License version 3,
4// see the file 'copyright.txt' in the top level directory for details.
5
6#pragma once
7
#include <kernel/cubature/dynamic_factory.hpp>
#include <kernel/cubature/rule.hpp>
#include <kernel/trafo/standard/mapping.hpp>
#include <kernel/trafo/standard/inverse_mapping.hpp>
#include <kernel/util/tiny_algebra.hpp>

#include <algorithm>
#include <memory>
#include <numeric>
#include <vector>
16
17namespace FEAT::Assembly
18{
19
  /// Axis-aligned bounding box type: row 0 holds the minimum corner, row 1 the maximum corner.
  template<typename DT_, int dim_>
  using BoundingBoxType = Tiny::Matrix<DT_, 2, dim_>;
22
23 namespace Intern
24 {
25 template<typename DT_, int dim_, typename VertexSet_, typename IndexTuple_>
26 static inline BoundingBoxType<DT_, dim_> get_boundingbox(const VertexSet_& vtx_set, const IndexTuple_& vidx, DT_ tol)
27 {
28 typedef DT_ DataType;
29 BoundingBoxType<DT_, dim_> bbox;
30
31 // build cell bounding box
32 bbox[0] = bbox[1] = vtx_set[vidx[0]];
33 for(int i(1); i < vidx.num_indices; ++i)
34 {
35 // get vertex and update bounding box
36 const auto& vtx = vtx_set[vidx[i]];
37 for(int j(0); j < dim_; ++j)
38 {
39 Math::minimax(DataType(vtx[j]), bbox[0][j], bbox[1][j]);
40 }
41 }
42
43 // finally, incorporate tolerances
44 for(int j(0); j < dim_; ++j)
45 {
46 // compute bbox dimension
47 DataType bb_extra = tol * (bbox[1][j] - bbox[0][j]);
48 bbox[0][j] -= bb_extra;
49 bbox[1][j] += bb_extra;
50 }
51
52 return bbox;
53 }
54
55 template<typename DT_, int dim_>
56 static inline bool check_point(const Tiny::Vector<DT_, dim_>& pt, const BoundingBoxType<DT_, dim_>& bb)
57 {
58 bool outside = false;
59 for(int i = 0; i < dim_; ++i)
60 {
61 outside |= (pt[i] < bb[0][i]) || (pt[i] > bb[1][i]);
62 }
63 return !outside;
64 }
65 }
66
95 template<typename Trafo_, typename SurfaceShape_>
97 {
98 public:
99 typedef Trafo_ TrafoType;
100 typedef typename TrafoType::MeshType MeshType;
101 typedef SurfaceShape_ SurfaceShapeType;
102 static constexpr int domain_dim = Trafo_::ShapeType::dimension;
103 static constexpr int sub_dim = SurfaceShapeType::dimension;
104 typedef typename TrafoType::CoordType DataType;
105 typedef Index IndexType;
114
115 static constexpr int verts_per_face = Shape::FaceTraits<SurfaceShapeType, 0>::count;
116
117
118 private:
119 const TrafoType& _trafo;
120 std::vector<Tiny::Vector<DataType, domain_dim>> _shape_vertices;
122
123 std::vector<Geometry::IndexTuple<verts_per_face>> _shape_index_tmp;
124
125 std::vector<IndexType> _cell_helper;
126 std::vector<IndexType> _cell_helper_offsets;
127 std::vector<IndexType> _active_surfaces;
128 std::vector<int> _cell_mask_vec;
129
130 CubatureRuleType _cubature_rule;
131
134
137 {
138 bool no_intersect = false;
139 for(int d = 0; d < domain_dim; ++d)
140 {
141 no_intersect |= (bb_a[0][d] > bb_b[1][d]) || (bb_b[0][d] > bb_a[1][d]);
142 }
143
144 return !no_intersect;
145 }
146
147 public:
148
    /**
     * \brief Sets whether computed face normals are flipped to point inward.
     *
     * \param[in] inside_view  If \c true, normals computed during assembly are negated.
     */
    void set_normal_view(bool inside_view)
    {
      _inside_view = inside_view;
    }
153
    /// \returns \c true, if normals are flipped to the inside view, see set_normal_view().
    bool get_normal_view() const
    {
      return _inside_view;
    }
158
159 void compile()
160 {
161 // move our helper vector
162 _shape_indices.set_indices(std::move(_shape_index_tmp));
163 // get mesh
164 const MeshType& mesh = _trafo.get_mesh();
165 const auto& vtx_set = mesh.get_vertex_set();
166 // first of all, get bounding box of this mesh
167 BoundingBoxType<DataType, domain_dim> mesh_bb;
168 for(int k = 0; k < domain_dim; ++k)
169 {
170 mesh_bb[0][k] = Math::Limits<DataType>::max();
171 mesh_bb[1][k] = Math::Limits<DataType>::lowest();
172 }
173 {
174 for(const auto* verts = vtx_set.begin(); verts != vtx_set.end(); ++verts)
175 {
176 for(int k = 0; k < domain_dim; ++k)
177 Math::minimax((*verts)[k], mesh_bb[0][k], mesh_bb[1][k]);
178 }
179 }
180
181 // preprocess which surface elements should be checked at all
182 _active_surfaces.clear();
183 for(IndexType k = 0; k < _shape_indices.get_num_entities(); ++k)
184 {
185 const auto& loc_ind_tuple = _shape_indices[k];
186 // domain_dim boundingbox of our shape
187 auto shape_bb = Intern::get_boundingbox<DataType, domain_dim>(_shape_vertices, loc_ind_tuple, DataType(1E-2));
188 // are we inside of our local domain boundingbox ?
189 if(boundingbox_intersect_test(shape_bb, mesh_bb))
190 _active_surfaces.push_back(k);
191 }
192
193 // now process which mesh cells are candidates for each surface cell
194 _cell_helper.clear();
195 _cell_helper_offsets.resize(_shape_indices.get_num_entities()+1);
196 for(auto& c : _cell_helper_offsets)
197 {
198 c = IndexType(0);
199 }
200
201
202 // get mapping element to vertices
203 const auto& ele_to_vert = mesh.template get_index_set<domain_dim, 0>();
204 const Index num_elements = mesh.get_num_elements();
205 constexpr Index batch_size = 1 << 19;
206 std::vector<std::vector<IndexType>> gather_helper(std::min(batch_size, Index(_active_surfaces.size())));
207 FEAT_PRAGMA_OMP(parallel for)
208 for(std::size_t i = 0; i < gather_helper.size(); ++i)
209 {
210 // reserve 128 elements per entry, this should be enough without triggering a reallocation besides extrem cases
211 gather_helper[i].reserve(128);
212 }
213 // now iterate over our surface elements in a batched manner
214 for(Index num_batch = 0; num_batch*batch_size < _active_surfaces.size(); ++num_batch)
215 {
216 FEAT_PRAGMA_OMP(parallel for)
217 for(Index t = 0; t < gather_helper.size(); ++t)
218 {
219 gather_helper.at(t).clear();
220 }
221
222 const Index offset = num_batch*batch_size;
223 // now iterate through our mesh and test each cell for its boundingbox... TODO: we could speed this up by using macro elements
224 FEAT_PRAGMA_OMP(parallel for)
225 for(Index t = 0; t < num_elements; ++t)
226 {
227 // check if our current element is outside of our fluid domain, an thus should be ignored
228 if(!_cell_mask_vec.empty() && _cell_mask_vec.at(t) == 3)
229 {
230 continue;
231 }
232
233 // create boundingbox
234 auto cell_bb = Intern::get_boundingbox<DataType, domain_dim>(vtx_set, ele_to_vert[t], DataType(1E-4));
235
236 // now test for each shape if the current cell interesects
237 for(Index k = 0; k+offset < std::min(_active_surfaces.size(), offset+batch_size); ++k)
238 {
239 const auto& loc_ind_tuple = _shape_indices[_active_surfaces.at(k+offset)];
240 // domain_dim boundingbox of our shape
241 auto shape_bb = Intern::get_boundingbox<DataType, domain_dim>(_shape_vertices, loc_ind_tuple, DataType(1E-4));
242 // are we inside of our local domain boundingbox ?
243 if(boundingbox_intersect_test(shape_bb, cell_bb))
244 {
245 FEAT_PRAGMA_OMP(critical)
246 gather_helper.at(k).push_back(t);
247 }
248 }
249 }
250 // now gather our information into our offset array
251 for(std::size_t k = 0; k < gather_helper.size(); ++k)
252 {
253 const auto& v_info = gather_helper[k];
254 _cell_helper_offsets.at(_active_surfaces.at(k+offset)+1) = v_info.size();
255 for(const auto& val : v_info)
256 _cell_helper.push_back(val);
257 }
258 // now gather the values
259 }
260 // now perform partial sum to get actual offsets
261 std::partial_sum(_cell_helper_offsets.begin(), _cell_helper_offsets.end(), _cell_helper_offsets.begin());
262
263 XASSERTM(_cell_helper_offsets.back() == _cell_helper.size(), "Offsets do not match size of cell helper array");
264
265 }
266
267
268 public:
    /**
     * \brief Constructor.
     *
     * \param[in] trafo             The trafo whose mesh the surface is integrated over.
     * \param[in] cubature_factory_ The cubature factory used to create the surface cubature rule.
     */
    explicit SurfaceIntegrator(const Trafo_& trafo, const Cubature::DynamicFactory& cubature_factory_) :
      _trafo(trafo),
      _cubature_rule(Cubature::ctor_factory, cubature_factory_),
      _inside_view(false)
    {
    }
281
285 template<typename Vtxs_>
286 void set_vertices(const std::vector<Vtxs_>& vertices)
287 {
288 _shape_index_tmp.clear();
289 _shape_vertices.resize(vertices.size());
290 FEAT_PRAGMA_OMP(parallel for)
291 for(std::size_t k = 0; k < std::size_t(vertices.size()); ++k)
292 {
293 _shape_vertices[k] = vertices[k];
294 }
295 }
296
300 template<typename Vtxs_>
301 void add_face_vertices(const Vtxs_& face_vertices)
302 {
303 for(std::size_t i = 0; i < std::size_t(verts_per_face); ++i)
304 {
305 _shape_vertices.push_back(face_vertices[i]);
306 }
307
308 const IndexType num_indx = _shape_index_tmp.size();
309 _shape_index_tmp.emplace_back();
310 for(IndexType i = 0; i < IndexType(verts_per_face); ++i)
311 {
312 _shape_index_tmp.back()[int(i)] = num_indx*verts_per_face + IndexType(i);
313 }
314 }
315
319 template<typename VertIdx_>
320 void add_face(const VertIdx_& vert_idx)
321 {
322 _shape_index_tmp.emplace_back();
323 for(int i = 0; i < verts_per_face; ++i)
324 {
325 _shape_index_tmp.back()[i] = IndexType(vert_idx[std::size_t(i)]);
326 }
327 }
328
329
330 template<typename Job_>
331 void assemble(Job_& job)
332 {
333 if(this->_shape_indices.get_num_entities() == 0u)
334 {
335 return;
336 }
337
338 assemble_omp(job);
339 }
340
351 template<typename MaskVec_>
352 void set_mask_vector(const MaskVec_& cell_mask_vec)
353 {
354 _cell_mask_vec.resize(cell_mask_vec.size());
355 FEAT_PRAGMA_OMP(parallel for)
356 for(Index k = 0; k < cell_mask_vec.size(); ++k)
357 {
358 _cell_mask_vec[k] = int(cell_mask_vec[k]);
359 }
360 }
361
    /**
     * \brief Assembles the given job over all active surface elements, OpenMP-parallel over the faces.
     *
     * Each thread creates its own Job_::Task; for every active surface element the face
     * transformation coefficients are set up, the cubature points are mapped to image
     * coordinates with jacobian-weighted cubature weights, the face normal is computed for
     * co-dimension 1 faces, and the task's prepare/assemble/scatter cycle is run. Each
     * thread finally combines its results under a critical section.
     *
     * \param[in,out] job  The assembly job whose tasks are executed.
     */
    template<typename Job_>
    void assemble_omp(Job_& job)
    {
      typedef typename Job_::Task TaskType;
      // typedef for our evalhelper

      FEAT_PRAGMA_OMP(parallel)
      {
        // create a task for this thread
        std::unique_ptr<TaskType> task(new TaskType(job));

        FEAT_PRAGMA_OMP(for)
        for(IndexType k = 0; k < _active_surfaces.size(); ++k)
        {
          // construct actual surface
          // NOTE(review): surface_verts appears to be declared on a line elided from this listing -- confirm
          for(int i = 0; i < verts_per_face; ++i)
          {
            surface_verts[i] = _shape_vertices[_shape_indices[_active_surfaces[k]][i]];
          }
          // we now have to construct the transformation from the surface to the reference element, here we use our trafo capabilities
          // we assume we have a id 0 orientation
          // NOTE(review): coefficients is presumably declared on a line elided from this listing -- confirm
          coefficients.format();
          EvalHelper::set_coefficients(coefficients, _shape_vertices, _shape_indices, _active_surfaces[k]);

          // our jacobian of the transformation
          JacobianType jac;
          jac.format();
          // since we have standard trafo, the jacobian is independent of our actual domain point
          // our determinant
          DataType jac_det = DataType(0);

          // face volume, passed to the task's prepare() below
          DataType face_volume = EvalHelper::volume(coefficients);

          // our cubature points
          std::vector<CoordVecType> points(std::size_t(_cubature_rule.get_num_points()));
          // our weights
          std::vector<DataType> weights(std::size_t(_cubature_rule.get_num_points()));
          // averaged normal (we assume we have planar normals, in theory this should also be a vector)
          NormalVecType normal(DataType(0));

          // now iterate through our cubature points
          for(int pt = 0; pt < _cubature_rule.get_num_points(); ++pt)
          {
            RefCoordVecType domain_point = _cubature_rule.get_point(pt);
            DataType weight = _cubature_rule.get_weight(pt);
            CoordVecType img_point;
            EvalHelper::map_point(img_point, domain_point, coefficients);
            EvalHelper::calc_jac_mat(jac, domain_point, coefficients);
            jac_det = jac.vol();
            // calculate normal <- normal should point outside
            // generally normal only makes sense for a dim-1 manifold
            if constexpr(sub_dim+1==domain_dim)
            {
              normal = Tiny::orthogonal(jac).normalize();
              // flip to the inside view if requested, see set_normal_view()
              if(_inside_view)
                normal = normal.negate();
            }
            else
            {
              normal.format();
            }
            // cubature weight scaled by the jacobian determinant
            DataType point_weight = jac_det * weight;
            points[std::size_t(pt)] = img_point;
            weights[std::size_t(pt)] = point_weight;
          }

          // prepare our job, we simply provide all the candidate cells, its the jobs job to handle the information
          task->prepare(_cell_helper, _cell_helper_offsets, points, weights, normal, face_volume, _active_surfaces[k]);

          // now, assemble our task
          task->assemble();

          // scatter the task's results; serialized since tasks may write to shared data
          FEAT_PRAGMA_OMP(critical)
          task->scatter();

        }
        // and combine the per-thread results, again serialized
        FEAT_PRAGMA_OMP(critical)
        task->combine();
      }

    }
448
449 }; // class SurfaceIntegrator
450}
#define XASSERTM(expr, msg)
Assertion macro definition with custom message.
Definition: assertion.hpp:263
FEAT Kernel base header.
Assembly class for computing discrete integrals of an FE function over a d-1 manifold.
void add_face_vertices(const Vtxs_ &face_vertices)
Simply pushes new vertices to the end of the current vertex array.
SurfaceIntegrator(const Trafo_ &trafo, const Cubature::DynamicFactory &cubature_factory_)
Initializes the integrator with a cell wise list of vertices.
bool _inside_view
Normal defined from the inside or outside?
void set_mask_vector(const MaskVec_ &cell_mask_vec)
Sets the cell mask vector, which sorts out any cells completely outside the fluid domain.
void add_face(const VertIdx_ &vert_idx)
bool boundingbox_intersect_test(const BoundingBoxType< DataType, domain_dim > &bb_a, const BoundingBoxType< DataType, domain_dim > &bb_b)
tests whether two boundingboxes intersect in any way
void set_vertices(const std::vector< Vtxs_ > &vertices)
Cubature Rule class template.
Definition: rule.hpp:38
Conformal Index-Set class template.
Definition: index_set.hpp:82
Index get_num_entities() const
Returns the number of entities.
Definition: index_set.hpp:185
Tiny Matrix class template.
CUDA_HOST_DEVICE void format(DataType alpha=DataType(0))
Formats the matrix.
Tiny Tensor3 class template.
Tiny Vector class template.
Assembly namespace.
void minimax(T_ x, T_ &a, T_ &b)
Updates the minimum and maximum.
Definition: math.hpp:195
Vector< T_, m_ > orthogonal(const Matrix< T_, m_, m_-1, sm_, sn_ > &tau)
Computes the positively oriented orthogonal vector to the columns of a m_ x (m_-1) Matrix.
std::uint64_t Index
Index data type.
@ img_point
specifies whether the trafo should supply image point coordinates
@ jac_det
specifies whether the trafo should supply jacobian determinants
Face traits tag struct template.
Definition: shape.hpp:106
Evaluator helper class for the standard trafo.
Definition: details.hpp:48