kelly_type_adapt.cpp
// This file is part of Hermes2D.
//
// Hermes2D is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 2 of the License, or
// (at your option) any later version.
//
// Hermes2D is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Hermes2D. If not, see <http://www.gnu.org/licenses/>.

/*
#include "kelly_type_adapt.h"

namespace Hermes
{
  namespace Hermes2D
  {
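    // Pseudo-marker identifying inner (DG) edges: error estimator forms whose area is set
    // to this value are evaluated on element interfaces rather than on the domain boundary.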
    static const int H2D_DG_INNER_EDGE_INT = -1234567;
    static const std::string H2D_DG_INNER_EDGE = "-1234567";

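    // Constructor of the standard Kelly estimator form for solution component i: the form is
    // registered for inner (DG) edges via H2D_DG_INNER_EDGE and stores the scaling constant
    // const_by_laplacian.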
    template<typename Scalar>
    BasicKellyAdapt<Scalar>::ErrorEstimatorFormKelly::ErrorEstimatorFormKelly(int i, double const_by_laplacian) : KellyTypeAdapt<Scalar>::ErrorEstimatorForm(i, H2D_DG_INNER_EDGE), const_by_laplacian(const_by_laplacian)
    {}

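    // Turns an estimator form into an interface form by assigning it the inner-edge pseudo-marker.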
    template<typename Scalar>
    void KellyTypeAdapt<Scalar>::ErrorEstimatorForm::setAsInterface()
    {
      this->set_area(H2D_DG_INNER_EDGE);
    }

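    // Constructor for systems of equations: one space (and norm) per solution component.
    // If no interface scaling functions are supplied, each component defaults to scaling
    // by the element diameter (ScaleByElementDiameter).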
    template<typename Scalar>
    KellyTypeAdapt<Scalar>::KellyTypeAdapt(std::vector< SpaceSharedPtr<Scalar> >& spaces_,
                                           bool ignore_visited_segments_,
                                           std::vector<const InterfaceEstimatorScalingFunction*> interface_scaling_fns_,
                                           std::vector<NormType > norms_)
      : Adapt<Scalar>(spaces_, norms_)
    {
      error_estimators_surf.reserve(this->num);
      error_estimators_vol.reserve(this->num);

      if(interface_scaling_fns_.size() == 0)
      {
        interface_scaling_fns_.reserve(this->num);
        for (int i = 0; i < this->num; i++)
          interface_scaling_fns_.push_back(new ScaleByElementDiameter);
      }
      use_aposteriori_interface_scaling = true;
      interface_scaling_fns = interface_scaling_fns_;
      interface_scaling_const = boundary_scaling_const = volumetric_scaling_const = 1.0;
      ignore_visited_segments = ignore_visited_segments_;

      element_markers_conversion = spaces_[0]->get_mesh()->element_markers_conversion;
      boundary_markers_conversion = spaces_[0]->get_mesh()->boundary_markers_conversion;
    }

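    // Constructor for a single equation; a custom interface scaling function may be supplied,
    // otherwise scaling by the element diameter is used.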
    template<typename Scalar>
    KellyTypeAdapt<Scalar>::KellyTypeAdapt(SpaceSharedPtr<Scalar> space_,
                                           bool ignore_visited_segments_,
                                           const InterfaceEstimatorScalingFunction* interface_scaling_fn_,
                                           NormType norm_)
      : Adapt<Scalar>(space_, norm_)
    {
      if(interface_scaling_fn_ == nullptr)
        interface_scaling_fns.push_back(new ScaleByElementDiameter);
      else
        interface_scaling_fns.push_back(interface_scaling_fn_);

      use_aposteriori_interface_scaling = true;

      interface_scaling_const = boundary_scaling_const = volumetric_scaling_const = 1.0;
      ignore_visited_segments = ignore_visited_segments_;

      element_markers_conversion = space_->get_mesh()->element_markers_conversion;
      boundary_markers_conversion = space_->get_mesh()->boundary_markers_conversion;
    }

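    // Kelly-type adaptivity performs h-refinement only: the same HOnlySelector is used for
    // every component and the rest is delegated to Adapt<Scalar>::adapt().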
    template<typename Scalar>
    bool KellyTypeAdapt<Scalar>::adapt(double thr, int strat, int regularize, double to_be_processed)
    {
      std::vector<RefinementSelectors::Selector<Scalar> *> refinement_selectors;
      RefinementSelectors::HOnlySelector<Scalar> selector;
      for (int i = 0; i < this->num; i++)
        refinement_selectors.push_back(&selector);

      return Adapt<Scalar>::adapt(refinement_selectors, thr, strat, regularize, to_be_processed);
    }

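    // Registers a volumetric error estimator form; the component index is checked before the
    // form is stored.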
    template<typename Scalar>
    void KellyTypeAdapt<Scalar>::add_error_estimator_vol(typename KellyTypeAdapt<Scalar>::ErrorEstimatorForm* form)
    {
      if(form->i < 0 || form->i >= this->num)
        throw Exceptions::ValueException("component number", form->i, 0, this->num);

      form->adapt = this;
      this->error_estimators_vol.push_back(form);
    }

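    // Registers a surface error estimator form (used for both boundary and interface edges).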
    template<typename Scalar>
    void KellyTypeAdapt<Scalar>::add_error_estimator_surf(typename KellyTypeAdapt<Scalar>::ErrorEstimatorForm* form)
    {
      if(form->i < 0 || form->i >= this->num)
        throw Exceptions::ValueException("component number", form->i, 0, this->num);

      form->adapt = this;
      this->error_estimators_surf.push_back(form);
    }

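    // Evaluates the error estimate of every active element by traversing all meshes at once and
    // accumulating the volumetric, boundary and interface contributions of the registered
    // estimator forms. Depending on error_flags, the element and total errors are returned either
    // as absolute values or relative to the solution norm.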
    template<typename Scalar>
    double KellyTypeAdapt<Scalar>::calc_err_internal(std::vector<MeshFunctionSharedPtr<Scalar> > slns,
                                                     std::vector<double>* component_errors,
                                                     unsigned int error_flags)
    {
      if(slns.size() != this->num)
        throw Hermes::Exceptions::LengthException(0, slns.size(), this->num);

      this->tick();

      for (int i = 0; i < this->num; i++)
      {
        Solution<Scalar>* solution = dynamic_cast<Solution<Scalar>*>(slns[i].get());
        if(solution == nullptr)
          throw Exceptions::Exception("Passed solution is in fact not a Solution instance in KellyTypeAdapt::calc_err_*().");

        this->sln[i] = solution;
        this->sln[i]->set_quad_2d(&g_quad_2d_std);
      }

      this->have_coarse_solutions = true;

      MeshSharedPtr* meshes = malloc_with_check(this->num, this);
      Transformable** fns = malloc_with_check(this->num, this);

      this->num_act_elems = 0;
      for (int i = 0; i < this->num; i++)
      {
        meshes[i] = (this->sln[i]->get_mesh());
        fns[i] = (this->sln[i]);

        this->num_act_elems += meshes[i]->get_num_active_elements();
        int max = meshes[i]->get_max_element_id();

        if(this->errors[i] != nullptr) free_with_check(this->errors[i]);
        this->errors[i] = malloc_with_check(max, this);
        memset(this->errors[i], 0, sizeof(double) * max);
      }

      double total_norm = 0.0;

      bool calc_norm = false;
      if((error_flags & this->HERMES_ELEMENT_ERROR_MASK) == HERMES_ELEMENT_ERROR_REL ||
         (error_flags & this->HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_REL) calc_norm = true;

      double *norms = nullptr;
      if(calc_norm)
      {
        norms = malloc_with_check(this->num, this);
        memset(norms, 0, this->num * sizeof(double));
      }

      double *errors_components = malloc_with_check(this->num, this);
      memset(errors_components, 0, this->num * sizeof(double));
      this->errors_squared_sum = 0.0;
      double total_error = 0.0;

      bool bnd[H2D_MAX_NUMBER_EDGES];
      SurfPos surf_pos[H2D_MAX_NUMBER_EDGES];
      Traverse::State *ee;
      Traverse trav(this->num);

      // Reset the e->visited status of each element of each mesh (most likely it will be set to true from
      // the latest assembling procedure).
      if(ignore_visited_segments)
      {
        for (int i = 0; i < this->num; i++)
        {
          Element* e;
          for_all_active_elements(e, meshes[i])
            e->visited = false;
        }
      }

      // Begin the multimesh traversal.
      trav.begin(this->num, meshes, fns);
      while ((ee = trav.get_next_state()) != nullptr)
      {
        bnd[0] = ee->bnd[0];
        bnd[1] = ee->bnd[1];
        bnd[2] = ee->bnd[2];
        bnd[3] = ee->bnd[3];

        surf_pos[0].marker = ee->rep->en[0]->marker;
        surf_pos[1].marker = ee->rep->en[1]->marker;
        surf_pos[2].marker = ee->rep->en[2]->marker;
        surf_pos[3].marker = ee->rep->en[3]->marker;

        surf_pos[0].surf_num = 0;
        surf_pos[1].surf_num = 1;
        surf_pos[2].surf_num = 2;
        surf_pos[3].surf_num = 3;

        // Go through all solution components.
        for (int i = 0; i < this->num; i++)
        {
          if(ee->e[i] == nullptr)
            continue;

          // Set maximum integration order for use in integrals, see limit_order()
          update_limit_table(ee->e[i]->get_mode());

          RefMap *rm = this->sln[i]->get_refmap();

          double err = 0.0;

          // Go through all volumetric error estimators.
          for (unsigned int iest = 0; iest < error_estimators_vol.size(); iest++)
          {
            // Skip current error estimator if it is assigned to a different component or geometric area
            // different from that of the current active element.

            if(error_estimators_vol[iest]->i != i)
              continue;

            if(error_estimators_vol[iest]->area != HERMES_ANY)
              if(!element_markers_conversion.get_internal_marker(error_estimators_vol[iest]->area).valid || element_markers_conversion.get_internal_marker(error_estimators_vol[iest]->area).marker != ee->e[i]->marker)
                continue;

            err += eval_volumetric_estimator(error_estimators_vol[iest], rm);
          }

          // Go through all surface error estimators (includes both interface and boundary est's).
          for (unsigned int iest = 0; iest < error_estimators_surf.size(); iest++)
          {
            if(error_estimators_surf[iest]->i != i)
              continue;

            for (unsigned char isurf = 0; isurf < ee->e[i]->get_nvert(); isurf++)
            {
              if(bnd[isurf]) // Boundary
              {
                if(error_estimators_surf[iest]->area != HERMES_ANY)
                {
                  if(!boundary_markers_conversion.get_internal_marker(error_estimators_surf[iest]->area).valid)
                    continue;
                  int imarker = boundary_markers_conversion.get_internal_marker(error_estimators_surf[iest]->area).marker;

                  if(imarker == H2D_DG_INNER_EDGE_INT)
                    continue;
                  if(imarker != surf_pos[isurf].marker)
                    continue;
                }

                err += eval_boundary_estimator(error_estimators_surf[iest], rm, &surf_pos[isurf]);
              }
              else // Interface
              {
                if(error_estimators_surf[iest]->area != H2D_DG_INNER_EDGE)
                  continue;

                // BEGIN COPY FROM DISCRETE_PROBLEM.CPP

                // 5 is for bits per page in the array.
                LightArray<NeighborSearch<Scalar>*> neighbor_searches(5);
                unsigned int num_neighbors = 0;
                NeighborNode* root;
                int ns_index;

                // Determine the minimum mesh seq in this stage.
                unsigned int min_dg_mesh_seq = 0;
                for(unsigned int j = 0; j < this->spaces.size(); j++)
                  if(this->spaces[j]->get_mesh()->get_seq() < min_dg_mesh_seq || j == 0)
                    min_dg_mesh_seq = this->spaces[j]->get_mesh()->get_seq();

                // = 0 for single mesh
                ns_index = meshes[i]->get_seq() - min_dg_mesh_seq;

                // Initialize the NeighborSearches.
                this->dp.init_neighbors(neighbor_searches, ee, min_dg_mesh_seq);

                // Create a multimesh tree;
                root = new NeighborNode(nullptr, 0);
                this->dp.build_multimesh_tree(root, neighbor_searches);

                // Update all NeighborSearches according to the multimesh tree.
                // After this, all NeighborSearches in neighbor_searches should have the same count
                // of neighbors and proper set of transformations
                // for the central and the neighbor element(s) alike.
                // Also check that every NeighborSearch has the same number of neighbor elements.
                for(unsigned int j = 0; j < neighbor_searches.get_size(); j++)
                {
                  if(neighbor_searches.present(j))
                  {
                    NeighborSearch<Scalar>* ns = neighbor_searches.get(j);
                    this->dp.update_neighbor_search(ns, root);
                    if(num_neighbors == 0)
                      num_neighbors = ns->n_neighbors;
                    if(ns->n_neighbors != num_neighbors)
                      throw Hermes::Exceptions::Exception("Num_neighbors of different NeighborSearches not matching in KellyTypeAdapt<Scalar>::calc_err_internal.");
                  }
                }

                // Go through all segments of the currently processed interface (segmentation is caused
                // by hanging nodes on the other side of the interface).
                for (unsigned int neighbor = 0; neighbor < num_neighbors; neighbor++)
                {
                  if(ignore_visited_segments)
                  {
                    bool processed = true;
                    for(unsigned int j = 0; j < neighbor_searches.get_size(); j++)
                      if(neighbor_searches.present(j))
                        if(!neighbor_searches.get(j)->neighbors.at(neighbor)->visited)
                        {
                          processed = false;
                          break;
                        }

                    if(processed) continue;
                  }

                  // We do not use cache_e and cache_jwt here.

                  // Set the active segment in all NeighborSearches
                  for(unsigned int j = 0; j < neighbor_searches.get_size(); j++)
                  {
                    if(neighbor_searches.present(j))
                    {
                      neighbor_searches.get(j)->active_segment = neighbor;
                      neighbor_searches.get(j)->neighb_el = neighbor_searches.get(j)->neighbors[neighbor];
                      neighbor_searches.get(j)->neighbor_edge = neighbor_searches.get(j)->neighbor_edges[neighbor];
                    }
                  }

                  // Push all the necessary transformations to all functions of this stage.
                  // The important thing is that the transformations to the current subelement are already there.
                  // Also store the current neighbor element and neighbor edge in neighb_el, neighbor_edge.
                  for(unsigned int fns_i = 0; fns_i < this->num; fns_i++)
                  {
                    NeighborSearch<Scalar> *ns = neighbor_searches.get(meshes[fns_i]->get_seq() - min_dg_mesh_seq);
                    if(ns->central_transformations[neighbor])
                      ns->central_transformations[neighbor]->apply_on(fns[fns_i]);
                  }

                  // END COPY FROM DISCRETE_PROBLEM.CPP
                  rm->force_transform(this->sln[i]->get_transform(), this->sln[i]->get_ctm());

                  // The estimate is multiplied by 0.5 in order to distribute the error equally onto
                  // the two neighboring elements.
                  double central_err = 0.5 * eval_interface_estimator(error_estimators_surf[iest],
                                                                      rm, &surf_pos[isurf], neighbor_searches,
                                                                      ns_index);
                  double neighb_err = central_err;

                  // Scale the error estimate by the scaling function dependent on the element diameter
                  // (use the central element's diameter).
                  if(use_aposteriori_interface_scaling && interface_scaling_fns[i])
                    if(!element_markers_conversion.get_user_marker(ee->e[i]->marker).valid)
                      throw Hermes::Exceptions::Exception("Marker not valid.");
                    else
                      central_err *= interface_scaling_fns[i]->value(ee->e[i]->get_diameter(), element_markers_conversion.get_user_marker(ee->e[i]->marker).marker);

                  // In the case this edge will be ignored when calculating the error for the element on
                  // the other side, add the now computed error to that element as well.
                  if(ignore_visited_segments)
                  {
                    Element *neighb = neighbor_searches.get(ns_index)->neighb_el;

                    // Scale the error estimate by the scaling function dependent on the element diameter
                    // (use the diameter of the element on the other side).
                    if(use_aposteriori_interface_scaling && interface_scaling_fns[i])
                      if(!element_markers_conversion.get_user_marker(neighb->marker).valid)
                        throw Hermes::Exceptions::Exception("Marker not valid.");
                      else
                        neighb_err *= interface_scaling_fns[i]->value(neighb->get_diameter(), element_markers_conversion.get_user_marker(neighb->marker).marker);

                    errors_components[i] += central_err + neighb_err;
                    total_error += central_err + neighb_err;
                    this->errors[i][ee->e[i]->id] += central_err;
                    this->errors[i][neighb->id] += neighb_err;
                  }
                  else
                    err += central_err;

                  // BEGIN COPY FROM DISCRETE_PROBLEM.CPP

                  // Clear the transformations from the RefMaps and all functions.
                  for(unsigned int fns_i = 0; fns_i < this->num; fns_i++)
                    fns[fns_i]->set_transform(neighbor_searches.get(meshes[fns_i]->get_seq() - min_dg_mesh_seq)->original_central_el_transform);

                  rm->set_transform(neighbor_searches.get(ns_index)->original_central_el_transform);

                  // END COPY FROM DISCRETE_PROBLEM.CPP
                }

                // BEGIN COPY FROM DISCRETE_PROBLEM.CPP

                // Delete the multimesh tree;
                delete root;

                // Delete the neighbor_searches array.
                for(unsigned int j = 0; j < neighbor_searches.get_size(); j++)
                  if(neighbor_searches.present(j))
                    delete neighbor_searches.get(j);

                // END COPY FROM DISCRETE_PROBLEM.CPP
              }
            }
          }

          if(calc_norm)
          {
            double nrm = eval_solution_norm(this->norm_form[i][i], rm, this->sln[i]);
            norms[i] += nrm;
            total_norm += nrm;
          }

          errors_components[i] += err;
          total_error += err;
          this->errors[i][ee->e[i]->id] += err;

          ee->e[i]->visited = true;
        }
      }
      trav.finish();

      // Store the calculation for each solution component separately.
      if(component_errors != nullptr)
      {
        component_errors->clear();
        for (int i = 0; i < this->num; i++)
        {
          if((error_flags & this->HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_ABS)
            component_errors->push_back(sqrt(errors_components[i]));
          else if((error_flags & this->HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_REL)
            component_errors->push_back(sqrt(errors_components[i]/norms[i]));
          else
          {
            throw Hermes::Exceptions::Exception("Unknown total error type (0x%x).", error_flags & this->HERMES_TOTAL_ERROR_MASK);
            return -1.0;
          }
        }
      }

      this->tick();
      this->error_time = this->accumulated();

      // Make the error relative if needed.
      if((error_flags & this->HERMES_ELEMENT_ERROR_MASK) == HERMES_ELEMENT_ERROR_REL)
      {
        for (int i = 0; i < this->num; i++)
        {
          Element* e;
          for_all_active_elements(e, meshes[i])
            this->errors[i][e->id] /= norms[i];
        }
      }

      this->errors_squared_sum = total_error;

      // Element error mask is used here, because this variable is used in the adapt()
      // function, where the processed error (sum of errors of processed element errors)
      // is matched to this variable.
      if((error_flags & this->HERMES_ELEMENT_ERROR_MASK) == HERMES_ELEMENT_ERROR_REL)
        this->errors_squared_sum /= total_norm;

      // Prepare an ordered list of elements according to an error.
      this->fill_regular_queue(meshes);
      this->have_errors = true;

      if(calc_norm)
        free_with_check(norms);
      free_with_check(errors_components);

      // Return error value.
      if((error_flags & this->HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_ABS)
        return sqrt(total_error);
      else if((error_flags & this->HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_REL)
        return sqrt(total_error / total_norm);
      else
      {
        throw Hermes::Exceptions::Exception("Unknown total error type (0x%x).", error_flags & this->HERMES_TOTAL_ERROR_MASK);
        return -1.0;
      }
    }

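    // Computes the norm of one solution component using the corresponding norm form; it is used
    // to convert absolute error estimates into relative ones.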
    template<typename Scalar>
    double KellyTypeAdapt<Scalar>::eval_solution_norm(typename Adapt<Scalar>::MatrixFormVolError* form,
                                                      RefMap *rm, MeshFunctionSharedPtr<Scalar> sln)
    {
      // Determine the integration order.
      int inc = (sln->get_num_components() == 2) ? 1 : 0;
      Func<Hermes::Ord>* ou = init_fn_ord(sln->get_fn_order() + inc);

      double fake_wt = 1.0;
      Geom<Hermes::Ord>* fake_e = init_geom_ord();
      Hermes::Ord o = form->ord(1, &fake_wt, nullptr, ou, ou, fake_e, nullptr);
      int order = rm->get_inv_ref_order();
      order += o.get_order();

      limit_order(order, rm->get_active_element()->get_mode());

      ou->free_ord(); delete ou;
      delete fake_e;

      // Evaluate the form.
      Quad2D* quad = sln->get_quad_2d();
      double3* pt = quad->get_points(order, sln->get_active_element()->get_mode());
      unsigned char np = quad->get_num_points(order, sln->get_active_element()->get_mode());

      // Initialize geometry and jacobian*weights.
      Geom<double>* e = init_geom_vol(rm, order);
      double* jac = rm->get_jacobian(order);
      double* jwt = malloc_with_check(np, this);
      for(int i = 0; i < np; i++)
        jwt[i] = pt[i][2] * jac[i];

      // Function values.
      Func<Scalar>* u = init_fn(sln.get(), order);
      Scalar res = form->value(np, jwt, nullptr, u, u, e, nullptr);

      e->free(); delete e;
      free_with_check(jwt);
      u->free_fn(); delete u;

      return std::abs(res);
    }

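    // Evaluates a single volumetric estimator form on the current active element: the quadrature
    // order is determined first, then the form is integrated over the element, scaled by
    // volumetric_scaling_const, and the absolute value of the result is returned.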
    template<typename Scalar>
    double KellyTypeAdapt<Scalar>::eval_volumetric_estimator(typename KellyTypeAdapt<Scalar>::ErrorEstimatorForm* err_est_form,
                                                             RefMap *rm)
    {
      // Determine the integration order.
      int inc = (this->sln[err_est_form->i]->get_num_components() == 2) ? 1 : 0;

      Func<Hermes::Ord>** oi = malloc_with_check(this->num, this);
      for (int i = 0; i < this->num; i++)
        oi[i] = init_fn_ord(this->sln[i]->get_fn_order() + inc);

      // Polynomial order of additional external functions.
      Func<Hermes::Ord>** fake_ext_fn = malloc_with_check(err_est_form->ext.size(), this);
      for (int i = 0; i < err_est_form->ext.size(); i++)
        fake_ext_fn[i] = init_fn_ord(err_est_form->ext[i]->get_fn_order());

      double fake_wt = 1.0;
      Geom<Hermes::Ord>* fake_e = init_geom_ord();

      DiscontinuousFunc<Hermes::Ord> oi_i(oi[err_est_form->i], false, false);

      Hermes::Ord o = err_est_form->ord(1, &fake_wt, oi, &oi_i, fake_e, fake_ext_fn);
      int order = rm->get_inv_ref_order();
      order += o.get_order();

      limit_order(order, rm->get_active_element()->get_mode());

      // Clean up.
      for (int i = 0; i < this->num; i++)
      {
        if(oi[i] != nullptr)
        {
          oi[i]->free_ord();
          delete oi[i];
        }
      }
      free_with_check(oi);
      delete fake_e;
      for(int i = 0; i < err_est_form->ext.size(); i++)
        fake_ext_fn[i]->free_ord();
      free_with_check(fake_ext_fn);

      // eval the form
      Quad2D* quad = this->sln[err_est_form->i]->get_quad_2d();
      double3* pt = quad->get_points(order, rm->get_active_element()->get_mode());
      unsigned char np = quad->get_num_points(order, rm->get_active_element()->get_mode());

      // Initialize geometry and jacobian*weights
      Geom<double>* e = init_geom_vol(rm, order);
      double* jac = rm->get_jacobian(order);
      double* jwt = malloc_with_check(np, this);
      for(int i = 0; i < np; i++)
        jwt[i] = pt[i][2] * jac[i];

      // Function values.
      Func<Scalar>** ui = malloc_with_check(this->num, this);

      for (int i = 0; i < this->num; i++)
        ui[i] = init_fn(this->sln[i], order);

      Func<Scalar>** ext_fn = malloc_with_check(err_est_form->ext.size(), this);
      for (unsigned i = 0; i < err_est_form->ext.size(); i++)
      {
        if(err_est_form->ext[i] != nullptr)
          ext_fn[i] = init_fn(err_est_form->ext[i].get(), order);
        else
          ext_fn[i] = nullptr;
      }

      DiscontinuousFunc<Scalar> ui_i(ui[err_est_form->i], false, false);

      Scalar res = volumetric_scaling_const * err_est_form->value(np, jwt, ui, &ui_i, e, ext_fn);

      for (int i = 0; i < this->num; i++)
      {
        if(ui[i] != nullptr)
        {
          ui[i]->free_fn();
          delete ui[i];
        }
      }
      free_with_check(ui);

      for(int i = 0; i < err_est_form->ext.size(); i++)
        if(ext_fn[i] != nullptr)
          ext_fn[i]->free_fn();
      free_with_check(ext_fn);

      e->free();
      delete e;

      free_with_check(jwt);

      return std::abs(res);
    }

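    // Evaluates a single surface estimator form on a boundary edge of the current active element;
    // the result is scaled by boundary_scaling_const and by 0.5 to compensate for the edge
    // parameterization (see the comment before the return statement).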
    template<typename Scalar>
    double KellyTypeAdapt<Scalar>::eval_boundary_estimator(typename KellyTypeAdapt<Scalar>::ErrorEstimatorForm* err_est_form,
                                                           RefMap *rm, SurfPos* surf_pos)
    {
      // Determine the integration order.
      int inc = (this->sln[err_est_form->i]->get_num_components() == 2) ? 1 : 0;
      Func<Hermes::Ord>** oi = malloc_with_check(this->num, this);
      for (int i = 0; i < this->num; i++)
        oi[i] = init_fn_ord(this->sln[i]->get_edge_fn_order(surf_pos->surf_num) + inc);

      // Polynomial order of additional external functions.
      Func<Hermes::Ord>** fake_ext_fn = malloc_with_check(err_est_form->ext.size(), this);
      for (int i = 0; i < err_est_form->ext.size(); i++)
        fake_ext_fn[i] = init_fn_ord(err_est_form->ext[i]->get_fn_order());

      double fake_wt = 1.0;
      Geom<Hermes::Ord>* fake_e = init_geom_ord();
      DiscontinuousFunc<Hermes::Ord> oi_i(oi[err_est_form->i], false, false);
      Hermes::Ord o = err_est_form->ord(1, &fake_wt, oi, &oi_i, fake_e, fake_ext_fn);
      int order = rm->get_inv_ref_order();
      order += o.get_order();

      limit_order(order, rm->get_active_element()->get_mode());

      // Clean up.
      for (int i = 0; i < this->num; i++)
        if(oi[i] != nullptr)
        {
          oi[i]->free_ord();
          delete oi[i];
        }

      free_with_check(oi);
      delete fake_e;
      for(int i = 0; i < err_est_form->ext.size(); i++)
        fake_ext_fn[i]->free_ord();
      free_with_check(fake_ext_fn);

      // Evaluate the form.
      Quad2D* quad = this->sln[err_est_form->i]->get_quad_2d();
      int eo = quad->get_edge_points(surf_pos->surf_num, order, rm->get_active_element()->get_mode());
      double3* pt = quad->get_points(eo, rm->get_active_element()->get_mode());
      unsigned char np = quad->get_num_points(eo, rm->get_active_element()->get_mode());

      // Initialize geometry and jacobian*weights.
      double3* tan;
      Geom<double>* e = init_geom_surf(rm, surf_pos->surf_num, surf_pos->marker, eo, tan);
      double* jwt = malloc_with_check(np, this);
      for(int i = 0; i < np; i++)
        jwt[i] = pt[i][2] * tan[i][2];

      // Function values
      Func<Scalar>** ui = malloc_with_check(this->num, this);
      for (int i = 0; i < this->num; i++)
        ui[i] = init_fn(this->sln[i], eo);

      Func<Scalar>** ext_fn = malloc_with_check(err_est_form->ext.size(), this);
      for (unsigned i = 0; i < err_est_form->ext.size(); i++)
      {
        if(err_est_form->ext[i] != nullptr)
          ext_fn[i] = init_fn(err_est_form->ext[i].get(), order);
        else
          ext_fn[i] = nullptr;
      }

      DiscontinuousFunc<Scalar> ui_i(ui[err_est_form->i], false, false);

      Scalar res = boundary_scaling_const *
        err_est_form->value(np, jwt, ui, &ui_i, e, ext_fn);

      for (int i = 0; i < this->num; i++)
        if(ui[i] != nullptr)
        {
          ui[i]->free_fn();
          delete ui[i];
        }

      free_with_check(ui);
      for(int i = 0; i < err_est_form->ext.size(); i++)
        if(ext_fn[i] != nullptr)
          ext_fn[i]->free_fn();
      free_with_check(ext_fn);

      e->free();
      delete e;

      free_with_check(jwt);

      // Edges are parameterized from 0 to 1 while integration weights
      // are defined in (-1, 1). Thus multiplying with 0.5 to correct
      // the weights.
      return std::abs(0.5*res);
    }

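    // Evaluates a single estimator form on one segment of an inner (DG) edge, using the supplied
    // NeighborSearch data to build the discontinuous function values from both sides; the result
    // is scaled by interface_scaling_const and by 0.5 for the edge parameterization.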
    template<typename Scalar>
    double KellyTypeAdapt<Scalar>::eval_interface_estimator(typename KellyTypeAdapt<Scalar>::ErrorEstimatorForm* err_est_form,
                                                            RefMap *rm, SurfPos* surf_pos,
                                                            LightArray<NeighborSearch<Scalar>*>& neighbor_searches,
                                                            int neighbor_index)
    {
      NeighborSearch<Scalar>* nbs = neighbor_searches.get(neighbor_index);
      std::vector<MeshFunctionSharedPtr<Scalar> > slns;
      for (int i = 0; i < this->num; i++)
        slns.push_back(this->sln[i]);

      // Determine integration order.
      Func<Hermes::Ord>** fake_ext_fns = malloc_with_check(err_est_form->ext.size(), this);
      for (unsigned int j = 0; j < err_est_form->ext.size(); j++)
      {
        int inc = (err_est_form->ext[j]->get_num_components() == 2) ? 1 : 0;
        int central_order = err_est_form->ext[j]->get_edge_fn_order(neighbor_searches.get(err_est_form->ext[j]->get_mesh()->get_seq())->active_edge) + inc;
        int neighbor_order = err_est_form->ext[j]->get_edge_fn_order(neighbor_searches.get(err_est_form->ext[j]->get_mesh()->get_seq())->neighbor_edge.local_num_of_edge) + inc;
        fake_ext_fns[j] = new DiscontinuousFunc<Hermes::Ord>(init_fn_ord(central_order), init_fn_ord(neighbor_order));
      }

      // Polynomial order of geometric attributes (eg. for multiplication of a solution with coordinates, normals, etc.).
      Geom<Hermes::Ord>* fake_e = new InterfaceGeom<Hermes::Ord>(init_geom_ord(), nbs->neighb_el->marker, nbs->neighb_el->id, Hermes::Ord(nbs->neighb_el->get_diameter()));
      double fake_wt = 1.0;
      DiscontinuousFunc<Hermes::Ord> fake_ext_fns_i(fake_ext_fns[err_est_form->i], false, false);

      Hermes::Ord o = err_est_form->ord(1, &fake_wt, fake_ext_fns, &fake_ext_fns_i, fake_e, nullptr);

      int order = rm->get_inv_ref_order();
      order += o.get_order();

      limit_order(order, rm->get_active_element()->get_mode());

      // Clean up.
      for (int i = 0; i < err_est_form->ext.size(); i++)
      {
        fake_ext_fns[i]->free_ord();
        delete fake_ext_fns[i];
      }
      free_with_check(fake_ext_fns);

      fake_e->free_ord();
      delete fake_e;

      //delete fake_ext;

      Quad2D* quad = this->sln[err_est_form->i]->get_quad_2d();
      int eo = quad->get_edge_points(surf_pos->surf_num, order, rm->get_active_element()->get_mode());
      unsigned char np = quad->get_num_points(eo, rm->get_active_element()->get_mode());
      double3* pt = quad->get_points(eo, rm->get_active_element()->get_mode());

      // Initialize geometry and jacobian*weights (do not use the NeighborSearch caching mechanism).
      double3* tan;
      Geom<double>* e = new InterfaceGeom<double>(init_geom_surf(rm, surf_pos->surf_num, surf_pos->marker, eo, tan),
                                                  nbs->neighb_el->marker,
                                                  nbs->neighb_el->id,
                                                  nbs->neighb_el->get_diameter());

      double* jwt = malloc_with_check(np, this);
      for(int i = 0; i < np; i++)
        jwt[i] = pt[i][2] * tan[i][2];

      // Function values.
      DiscontinuousFunc<Scalar>** ui = this->dp.init_ext_fns(slns, neighbor_searches, order, 0);

      Scalar res = interface_scaling_const *
        err_est_form->value(np, jwt, nullptr, ui[err_est_form->i], e, nullptr);

      if(ui != nullptr)
      {
        for(unsigned int i = 0; i < slns.size(); i++)
          ui[i]->free_fn();
        free_with_check(ui);
      }

      e->free();
      delete e;

      free_with_check(jwt);

      // Edges are parameterized from 0 to 1 while integration weights
      // are defined in (-1, 1). Thus multiplying with 0.5 to correct
      // the weights.
      return std::abs(0.5*res);
    }

    // #endif
    template HERMES_API class KellyTypeAdapt<double>;
    template HERMES_API class KellyTypeAdapt<std::complex<double> >;
    template HERMES_API class BasicKellyAdapt<double>;
    template HERMES_API class BasicKellyAdapt<std::complex<double> >;
  }
}
*/