Hermes2D 2.0
adapt.cpp
1 // This file is part of Hermes2D.
2 //
3 // Hermes2D is free software: you can redistribute it and/or modify
4 // it under the terms of the GNU General Public License as published by
5 // the Free Software Foundation, either version 2 of the License, or
6 // (at your option) any later version.
7 //
8 // Hermes2D is distributed in the hope that it will be useful,
9 // but WITHOUT ANY WARRANTY; without even the implied warranty of
10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 // GNU General Public License for more details.
12 //
13 // You should have received a copy of the GNU General Public License
14 // along with Hermes2D. If not, see <http://www.gnu.org/licenses/>.
15 
16 #include "umfpack.h"
17 #include "adapt.h"
18 #include "hermes2d.h"
19 #include "global.h"
20 #include "limit_order.h"
21 #include "solution.h"
22 #include "discrete_problem.h"
23 #include "refmap.h"
24 #include "quad_all.h"
25 #include "traverse.h"
26 #include "refinement_selectors/optimum_selector.h"
27 #include "matrix.h"
28 
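  // Illustrative use of the Adapt class defined in this file (a sketch only; the space,
  // solutions and selector below are user-supplied placeholders, and the default arguments
  // declared in adapt.h are assumed):
  //
  //   Adapt<double> adaptivity(&space, HERMES_H1_NORM);
  //   double err_est_rel = adaptivity.calc_err_est(&sln, &ref_sln) * 100.0;
  //   bool done = adaptivity.adapt(&selector, THRESHOLD, STRATEGY, MESH_REGULARITY);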
29 namespace Hermes
30 {
31  namespace Hermes2D
32  {
33  template<typename Scalar>
34  Adapt<Scalar>::Adapt(Hermes::vector<Space<Scalar>*> spaces,
35  Hermes::vector<ProjNormType> proj_norms) :
36  spaces(spaces),
37  num_act_elems(-1),
38  have_errors(false),
39  have_coarse_solutions(false),
40  have_reference_solutions(false)
41  {
42  // sanity check
43  if(proj_norms.size() > 0 && spaces.size() != proj_norms.size())
44  throw Exceptions::LengthException(1, 2, spaces.size(), proj_norms.size());
45 
46  this->num = spaces.size();
47 
48  // sanity checks
49  if((this->num <= 0) || (this->num > H2D_MAX_COMPONENTS)) throw Exceptions::ValueException("components", this->num, 0, H2D_MAX_COMPONENTS);
50 
51  // reset values
52  memset(errors, 0, sizeof(errors));
53  memset(sln, 0, sizeof(sln));
54  memset(rsln, 0, sizeof(rsln));
55  own_forms = new bool*[H2D_MAX_COMPONENTS];
56  for(int i = 0; i < H2D_MAX_COMPONENTS; i++)
57  {
58  own_forms[i] = new bool[H2D_MAX_COMPONENTS];
59  memset(own_forms[i], 0, H2D_MAX_COMPONENTS * sizeof(bool));
60  }
61 
62  // if norms were not set by the user, set them to defaults
63  // according to spaces
64  if(proj_norms.size() == 0)
65  {
66  for (int i = 0; i < this->num; i++)
67  {
68  switch (spaces[i]->get_type())
69  {
70  case HERMES_H1_SPACE: proj_norms.push_back(HERMES_H1_NORM); break;
71  case HERMES_HCURL_SPACE: proj_norms.push_back(HERMES_HCURL_NORM); break;
72  case HERMES_HDIV_SPACE: proj_norms.push_back(HERMES_HDIV_NORM); break;
73  case HERMES_L2_SPACE: proj_norms.push_back(HERMES_L2_NORM); break;
74  default: throw Hermes::Exceptions::Exception("Unknown space type in Adapt<Scalar>::Adapt().");
75  }
76  }
77  }
78 
79  // assign norm weak forms according to norms selection
80  for (int i = 0; i < this->num; i++)
81  for (int j = 0; j < this->num; j++)
82  {
83  error_form[i][j] = NULL;
84  norm_form[i][j] = NULL;
85  }
86 
87  for (int i = 0; i < this->num; i++)
88  {
89  error_form[i][i] = new MatrixFormVolError(i, i, proj_norms[i]);
90  norm_form[i][i] = error_form[i][i];
91  own_forms[i][i] = true;
92  }
93  }
94 
 95  template<typename Scalar>
 96  Adapt<Scalar>::Adapt(Space<Scalar>* space, ProjNormType proj_norm) :
 97  spaces(Hermes::vector<Space<Scalar>*>()),
98  num_act_elems(-1),
99  have_errors(false),
100  have_coarse_solutions(false),
101  have_reference_solutions(false)
102  {
103  if(space == NULL) throw Exceptions::NullException(1);
104  spaces.push_back(space);
105 
106  this->num = 1;
107 
108  // reset values
109  memset(errors, 0, sizeof(errors));
110  memset(sln, 0, sizeof(sln));
111  memset(rsln, 0, sizeof(rsln));
112  own_forms = new bool*[H2D_MAX_COMPONENTS];
113  for(int i = 0; i < H2D_MAX_COMPONENTS; i++)
114  {
115  own_forms[i] = new bool[H2D_MAX_COMPONENTS];
116  memset(own_forms[i], 0, H2D_MAX_COMPONENTS * sizeof(bool));
117  }
118 
119  // if norms were not set by the user, set them to defaults
120  // according to spaces
121  if(proj_norm == HERMES_UNSET_NORM)
122  {
123  switch (space->get_type())
124  {
125  case HERMES_H1_SPACE: proj_norm = HERMES_H1_NORM; break;
126  case HERMES_HCURL_SPACE: proj_norm = HERMES_HCURL_NORM; break;
127  case HERMES_HDIV_SPACE: proj_norm = HERMES_HDIV_NORM; break;
128  case HERMES_L2_SPACE: proj_norm = HERMES_L2_NORM; break;
129  default: throw Hermes::Exceptions::Exception("Unknown space type in Adapt<Scalar>::Adapt().");
130  }
131  }
132 
133  // assign norm weak forms according to norms selection
134  error_form[0][0] = new MatrixFormVolError(0, 0, proj_norm);
135  norm_form[0][0] = error_form[0][0];
136  own_forms[0][0] = true;
137  }
138 
 139  template<typename Scalar>
 140  Adapt<Scalar>::~Adapt()
 141  {
142  for (int i = 0; i < this->num; i++)
143  delete [] errors[i];
144 
145  for (int i = 0; i < this->num; i++)
146  for (int j = 0; j < this->num; j++)
147  if(error_form[i][j] != NULL && own_forms[i][j])
148  {
149  delete error_form[i][j];
150  own_forms[i][j] = false;
151  }
152 
153  for(int i = 0; i < H2D_MAX_COMPONENTS; i++)
154  delete [] own_forms[i];
155  }
156 
157  template<typename Scalar>
158  bool Adapt<Scalar>::adapt(Hermes::vector<RefinementSelectors::Selector<Scalar> *> refinement_selectors, double thr, int strat,
159  int regularize, double to_be_processed)
160  {
161  this->tick();
 162  // Important: sets the current caughtException to NULL.
163  this->caughtException = NULL;
164 
165  if(!have_errors)
166  throw Exceptions::Exception("element errors have to be calculated first, call Adapt<Scalar>::calc_err_est().");
167 
168  if(refinement_selectors.empty())
169  throw Exceptions::NullException(1);
170  if(spaces.size() != refinement_selectors.size())
171  throw Exceptions::LengthException(1, refinement_selectors.size(), spaces.size());
172 
173  //get meshes
174  int max_id = -1;
175  Mesh* meshes[H2D_MAX_COMPONENTS];
176  for (int j = 0; j < this->num; j++)
177  {
178  meshes[j] = this->spaces[j]->get_mesh();
179  if(rsln[j] != NULL)
180  {
181  rsln[j]->set_quad_2d(&g_quad_2d_std);
182  rsln[j]->enable_transform(false);
183  }
184  if(meshes[j]->get_max_element_id() > max_id)
185  max_id = meshes[j]->get_max_element_id();
186  }
187 
188  //reset element refinement info
189  int** idx = new int*[max_id];
190  for(int i = 0; i < max_id; i++)
191  idx[i] = new int[num];
192 
193  Element* e;
194  for(int j = 0; j < max_id; j++)
195  for(int l = 0; l < this->num; l++)
196  idx[j][l] = -1; // element not refined
197 
198  double err0_squared = 1000.0;
199  double processed_error_squared = 0.0;
200 
201  std::vector<ElementToRefine> elem_inx_to_proc; //list of indices of elements that are going to be processed
202  elem_inx_to_proc.reserve(num_act_elems);
203 
204  //adaptivity loop
 205  double error_squared_threshold = -1; //an error threshold that breaks the adaptivity loop in the case of strategy 1
 206  int num_ignored_elem = 0; //the number of ignored elements
 207  int num_not_changed = 0; //the number of elements that were not changed
 208  int num_priority_elem = 0; //the number of elements processed through the priority queue
209 
 210  // Structures filled with the elements actually selected by the refinement strategies.
211  Hermes::vector<int> ids;
212  Hermes::vector<int> components;
213  Hermes::vector<int> current_orders;
 214  bool first_regular_element = true; // true until the first regular element has been processed
215  bool error_level_reached = false;
216 
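  // Summary of the stopping strategies applied inside this loop (as implemented below):
  //   strat == 0: stop once the processed squared error exceeds sqrt(thr) * errors_squared_sum,
  //               but keep taking elements whose error is within ~0.1% of the last one processed
  //               (to keep the mesh symmetric);
  //   strat == 1: stop when an element's squared error drops below thr * (largest element error);
  //   strat == 2: stop when an element's squared error drops below thr;
  //   strat == 3: as strat == 1, or once more than 1.5 * to_be_processed error has been processed.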
217  for(int inx_regular_element = 0; inx_regular_element < num_act_elems || !priority_queue.empty();)
218  {
219  int id, comp;
220 
 221  // Process the queue(s) to determine which elements to actually refine.
222  if(priority_queue.empty())
223  {
224  id = regular_queue[inx_regular_element].id;
225  comp = regular_queue[inx_regular_element].comp;
226  inx_regular_element++;
227 
228  // Get info linked with the element
229  double err_squared = errors[comp][id];
230 
231  if(first_regular_element)
232  {
233  error_squared_threshold = thr * err_squared;
234  first_regular_element = false;
235  }
236 
 237  // first refinement strategy:
 238  // refine elements until the prescribed amount of error is processed;
 239  // if more elements have a similar error, refine them all to keep the mesh symmetric
240  if((strat == 0) && (processed_error_squared > sqrt(thr) * errors_squared_sum)
241  && fabs((err_squared - err0_squared)/err0_squared) > 1e-3)
242  error_level_reached = true;
243 
 244  // second refinement strategy:
 245  // refine all elements whose error is larger than a given fraction of the maximal error
246  if((strat == 1) && (err_squared < error_squared_threshold))
247  error_level_reached = true;
248 
249  if((strat == 2) && (err_squared < thr))
250  error_level_reached = true;
251 
252  if((strat == 3) && ((err_squared < error_squared_threshold) || (processed_error_squared > 1.5 * to_be_processed)))
253  error_level_reached = true;
254 
255  // Insert the element only if it complies with the strategy.
256  if(!error_level_reached)
257  {
258  err0_squared = err_squared;
259  processed_error_squared += err_squared;
260  ids.push_back(id);
261  components.push_back(comp);
262  current_orders.push_back(this->spaces[comp]->get_element_order(id));
263  spaces[comp]->edata[id].changed_in_last_adaptation = true;
264  }
265  else
266  if(priority_queue.empty())
267  break;
268  }
269  // Priority - refine no matter what.
270  else
271  {
272  id = priority_queue.front().id;
273  comp = priority_queue.front().comp;
274  priority_queue.pop();
275 
 276  // Insert into the appropriate arrays.
277  ids.push_back(id);
278  components.push_back(comp);
279  current_orders.push_back(this->spaces[comp]->get_element_order(id));
280  spaces[comp]->edata[id].changed_in_last_adaptation = true;
281  }
282  }
283 
284  if(ids.empty())
285  {
286  this->warn("None of the elements selected for refinement was refined. Adaptivity step successful, returning 'true'.");
287  return true;
288  }
289 
290  // RefinementSelectors cloning.
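  // One copy of each selector is kept per OpenMP thread: slot 0 reuses the selectors passed
  // in by the caller, while the remaining slots hold clones that share the cached shape values
  // of ProjBasedSelector so the caches are not rebuilt in every thread.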
291  RefinementSelectors::Selector<Scalar>*** global_refinement_selectors = new RefinementSelectors::Selector<Scalar>**[Hermes::Hermes2D::Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
292 
293  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
294  {
295  global_refinement_selectors[i] = new RefinementSelectors::Selector<Scalar>*[refinement_selectors.size()];
296  for (unsigned int j = 0; j < refinement_selectors.size(); j++)
297  {
298  if(i == 0)
299  global_refinement_selectors[i][j] = refinement_selectors[j];
300  else
301  {
302  global_refinement_selectors[i][j] = refinement_selectors[j]->clone();
303  if(dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(global_refinement_selectors[i][j]) != NULL)
304  {
305  dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(global_refinement_selectors[i][j])->cached_shape_vals_valid = dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(refinement_selectors[j])->cached_shape_vals_valid;
306  dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(global_refinement_selectors[i][j])->cached_shape_ortho_vals = dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(refinement_selectors[j])->cached_shape_ortho_vals;
307  dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(global_refinement_selectors[i][j])->cached_shape_vals = dynamic_cast<RefinementSelectors::ProjBasedSelector<Scalar>*>(refinement_selectors[j])->cached_shape_vals;
308  }
309  if(dynamic_cast<RefinementSelectors::OptimumSelector<Scalar>*>(global_refinement_selectors[i][j]) != NULL)
310  dynamic_cast<RefinementSelectors::OptimumSelector<Scalar>*>(global_refinement_selectors[i][j])->num_shapes = dynamic_cast<RefinementSelectors::OptimumSelector<Scalar>*>(refinement_selectors[j])->num_shapes;
311  }
312  }
313  }
314 
315  // Solution cloning.
316  Solution<Scalar>*** rslns = new Solution<Scalar>**[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
317 
318  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
319  {
320  rslns[i] = new Solution<Scalar>*[this->num];
321  for (int j = 0; j < this->num; j++)
322  {
323  if(rsln[j] != NULL)
324  rslns[i][j] = dynamic_cast<Solution<Scalar>*>(rsln[j]->clone());
325  }
326  }
327 
328  this->tick();
329  this->info("Adaptivity: data preparation duration: %f s.", this->last());
330 
331  // For statistics.
332  Hermes::vector<int> numberOfCandidates;
333 
334 
335  // The loop
336  RefinementSelectors::Selector<Scalar>** current_refinement_selectors;
337  Solution<Scalar>** current_rslns;
338  int id_to_refine;
339 #define CHUNKSIZE 1
340  int num_threads_used = Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads);
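  // Candidate selection runs in parallel: each element listed in 'ids'/'components' is handed
  // to the selector of its component on the current thread, and the chosen refinement is
  // appended to 'elem_inx_to_proc' under a critical section ('idx' remembers its position).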
341 #pragma omp parallel shared(ids, components, elem_inx_to_proc, meshes, current_orders) private(current_refinement_selectors, current_rslns, id_to_refine) num_threads(num_threads_used)
342  {
343 #pragma omp for schedule(dynamic, CHUNKSIZE)
344  for(id_to_refine = 0; id_to_refine < ids.size(); id_to_refine++)
345  {
346  try
347  {
348  current_refinement_selectors = global_refinement_selectors[omp_get_thread_num()];
349  current_rslns = rslns[omp_get_thread_num()];
350 
351  // Get refinement suggestion
352  ElementToRefine elem_ref(ids[id_to_refine], components[id_to_refine]);
353 
354  // rsln[comp] may be unset if refinement_selectors[comp] == HOnlySelector or POnlySelector
355  bool refined = current_refinement_selectors[components[id_to_refine]]->select_refinement(meshes[components[id_to_refine]]->get_element(ids[id_to_refine]), current_orders[id_to_refine], current_rslns[components[id_to_refine]], elem_ref);
356 
357 #pragma omp critical (number_of_candidates)
358  {
359  if(dynamic_cast<Hermes::Hermes2D::RefinementSelectors::OptimumSelector<Scalar>*>(current_refinement_selectors[components[id_to_refine]]) != NULL)
360  numberOfCandidates.push_back(dynamic_cast<Hermes::Hermes2D::RefinementSelectors::OptimumSelector<Scalar>*>(current_refinement_selectors[components[id_to_refine]])->get_candidates().size());
361  }
362 
363  //add to a list of elements that are going to be refined
364  #pragma omp critical (elem_inx_to_proc)
365  {
366  idx[ids[id_to_refine]][components[id_to_refine]] = (int)elem_inx_to_proc.size();
367  elem_inx_to_proc.push_back(elem_ref);
368  }
369  }
370  catch(Hermes::Exceptions::Exception& exception)
371  {
372  if(this->caughtException == NULL)
373  this->caughtException = exception.clone();
374  }
375  catch(std::exception& exception)
376  {
377  if(this->caughtException == NULL)
378  this->caughtException = new Hermes::Exceptions::Exception(exception.what());
379  }
380  }
381  }
382 
383  if(this->caughtException == NULL)
384  {
385  int averageNumberOfCandidates = 0;
386  for(int i = 0; i < numberOfCandidates.size(); i++)
387  averageNumberOfCandidates += numberOfCandidates[i];
388  averageNumberOfCandidates = averageNumberOfCandidates / numberOfCandidates.size();
389 
390  this->info("Adaptivity: total number of refined Elements: %i.", ids.size());
391  this->info("Adaptivity: average number of candidates per refined Element: %i.", averageNumberOfCandidates);
392  }
393 
394  this->tick();
395  this->info("Adaptivity: refinement selection duration: %f s.", this->last());
396 
397  if(this->caughtException == NULL)
398  fix_shared_mesh_refinements(meshes, elem_inx_to_proc, idx, global_refinement_selectors);
399 
400  for(unsigned int i = 0; i < Hermes::Hermes2D::Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
401  {
402  if(i > 0)
403  for (unsigned int j = 0; j < refinement_selectors.size(); j++)
404  delete global_refinement_selectors[i][j];
405  delete [] global_refinement_selectors[i];
406  }
407  delete [] global_refinement_selectors;
408 
409  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
410  {
411  if(rslns[i] != NULL)
412  {
413  for (unsigned int j = 0; j < this->num; j++)
414  if(rsln[j] != NULL)
415  delete rslns[i][j];
416  delete [] rslns[i];
417  }
418  }
419  delete [] rslns;
420 
421  for(int i = 0; i < max_id; i++)
422  delete [] idx[i];
423  delete [] idx;
424 
425  if(this->caughtException != NULL)
426  throw *(this->caughtException);
427 
428  //apply refinements
429  apply_refinements(elem_inx_to_proc);
430 
431  // in singlemesh case, impose same orders across meshes
432  homogenize_shared_mesh_orders(meshes);
433 
434  // mesh regularization
435  if(regularize >= 0)
436  {
437  if(regularize == 0)
438  {
439  regularize = 1;
440  this->warn("Total mesh regularization is not supported in adaptivity. 1-irregular mesh is used instead.");
441  }
442  for (int i = 0; i < this->num; i++)
443  {
444  int* parents;
445  parents = meshes[i]->regularize(regularize);
446  this->spaces[i]->distribute_orders(meshes[i], parents);
447  ::free(parents);
448  }
449  }
450 
451  for (int j = 0; j < this->num; j++)
452  if(rsln[j] != NULL)
453  rsln[j]->enable_transform(true);
454 
455  //store for the user to retrieve
456  last_refinements.swap(elem_inx_to_proc);
457 
458  have_errors = false;
459  if(strat == 2)
460  have_errors = true; // space without changes
461 
462  // since space changed, assign dofs:
463  for(unsigned int i = 0; i < this->spaces.size(); i++)
464  this->spaces[i]->assign_dofs();
465 
466  for (int i = 0; i < this->num; i++)
467  {
468  for_all_active_elements(e, this->spaces[i]->get_mesh())
469  this->spaces[i]->edata[e->id].changed_in_last_adaptation = false;
470  for(id_to_refine = 0; id_to_refine < ids.size(); id_to_refine++)
471  this->spaces[i]->edata[ids[id_to_refine]].changed_in_last_adaptation = false;
472  }
473 
474  return false;
475  }
476 
 477  template<typename Scalar>
 478  Adapt<Scalar>::MatrixFormVolError::MatrixFormVolError(int i, int j) : MatrixFormVol<Scalar>(i, j)
 479  {
480  }
481 
 482  template<typename Scalar>
 483  MatrixFormVol<Scalar>* Adapt<Scalar>::MatrixFormVolError::clone()
 484  {
485  return new MatrixFormVolError(*this);
486  }
487 
 488  template<typename Scalar>
 489  Adapt<Scalar>::MatrixFormVolError::MatrixFormVolError(int i, int j, ProjNormType projNormType) : MatrixFormVol<Scalar>(i, j), projNormType(projNormType)
 490  {
491  }
492 
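  // The *_error_form routines below evaluate the inner product of u and v in the corresponding
  // norm on one element; eval_error() passes the difference (coarse minus reference solution)
  // in both slots, so the returned value is the squared norm of the error on that element.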
493  template<typename Scalar>
 494  template<typename TestFunctionDomain, typename SolFunctionDomain>
 495  SolFunctionDomain Adapt<Scalar>::MatrixFormVolError::l2_error_form(int n, double *wt, Func<SolFunctionDomain> *u_ext[], Func<SolFunctionDomain> *u,
 496  Func<SolFunctionDomain> *v, Geom<TestFunctionDomain> *e, Func<SolFunctionDomain> **ext)
 497  {
498  SolFunctionDomain result = SolFunctionDomain(0);
499  for (int i = 0; i < n; i++)
500  result += wt[i] * (u->val[i] * conj(v->val[i]));
501  return result;
502  }
503 
504  template<typename Scalar>
 505  template<typename TestFunctionDomain, typename SolFunctionDomain>
 506  SolFunctionDomain Adapt<Scalar>::MatrixFormVolError::h1_error_form(int n, double *wt, Func<SolFunctionDomain> *u_ext[], Func<SolFunctionDomain> *u,
 507  Func<SolFunctionDomain> *v, Geom<TestFunctionDomain> *e, Func<SolFunctionDomain> **ext)
 508  {
509  SolFunctionDomain result = SolFunctionDomain(0);
510  for (int i = 0; i < n; i++)
511  result += wt[i] * (u->val[i] * conj(v->val[i]) + u->dx[i] * conj(v->dx[i])
512  + u->dy[i] * conj(v->dy[i]));
513  return result;
514  }
515 
516  template<typename Scalar>
517  template<typename TestFunctionDomain, typename SolFunctionDomain>
518  SolFunctionDomain Adapt<Scalar>::MatrixFormVolError::h1_error_semi_form(int n, double *wt, Func<SolFunctionDomain> *u_ext[], Func<SolFunctionDomain> *u,
519  Func<SolFunctionDomain> *v, Geom<TestFunctionDomain> *e, Func<SolFunctionDomain> **ext)
520  {
521  SolFunctionDomain result = SolFunctionDomain(0);
522  for (int i = 0; i < n; i++)
523  result += wt[i] * (u->dx[i] * conj(v->dx[i]) + u->dy[i] * conj(v->dy[i]));
524  return result;
525  }
526 
527  template<typename Scalar>
528  template<typename TestFunctionDomain, typename SolFunctionDomain>
529  SolFunctionDomain Adapt<Scalar>::MatrixFormVolError::hdiv_error_form(int n, double *wt, Func<SolFunctionDomain> *u_ext[], Func<SolFunctionDomain> *u,
530  Func<SolFunctionDomain> *v, Geom<TestFunctionDomain> *e, Func<SolFunctionDomain> **ext)
531  {
532  throw Hermes::Exceptions::Exception("hdiv error form not implemented yet in hdiv.h.");
533 
534  // this is Hcurl code:
535  SolFunctionDomain result = SolFunctionDomain(0);
536  for (int i = 0; i < n; i++)
537  result += wt[i] * (u->curl[i] * conj(v->curl[i]) +
538  u->val0[i] * conj(v->val0[i]) + u->val1[i] * conj(v->val1[i]));
539  return result;
540  }
541 
542  template<typename Scalar>
543  template<typename TestFunctionDomain, typename SolFunctionDomain>
544  SolFunctionDomain Adapt<Scalar>::MatrixFormVolError::hcurl_error_form(int n, double *wt, Func<SolFunctionDomain> *u_ext[], Func<SolFunctionDomain> *u,
545  Func<SolFunctionDomain> *v, Geom<TestFunctionDomain> *e, Func<SolFunctionDomain> **ext)
546  {
547  SolFunctionDomain result = SolFunctionDomain(0);
548  for (int i = 0; i < n; i++)
549  result += wt[i] * (u->curl[i] * conj(v->curl[i]) +
550  u->val0[i] * conj(v->val0[i]) + u->val1[i] * conj(v->val1[i]));
551  return result;
552  }
553 
554  template<typename Scalar>
 555  Scalar Adapt<Scalar>::MatrixFormVolError::value(int n, double *wt, Func<Scalar> *u_ext[],
 556  Func<Scalar> *u, Func<Scalar> *v, Geom<double> *e,
 557  Func<Scalar> **ext) const
558  {
559  switch (projNormType)
560  {
561  case HERMES_L2_NORM:
562  return l2_error_form<double, Scalar>(n, wt, u_ext, u, v, e, ext);
563  case HERMES_H1_NORM:
564  return h1_error_form<double, Scalar>(n, wt, u_ext, u, v, e, ext);
565  case HERMES_H1_SEMINORM:
566  return h1_error_semi_form<double, Scalar>(n, wt, u_ext, u, v, e, ext);
567  case HERMES_HCURL_NORM:
568  return hcurl_error_form<double, Scalar>(n, wt, u_ext, u, v, e, ext);
569  case HERMES_HDIV_NORM:
570  return hdiv_error_form<double, Scalar>(n, wt, u_ext, u, v, e, ext);
571  default:
572  throw Hermes::Exceptions::Exception("Unknown projection type");
573  return 0.0;
574  }
575  }
576 
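  // The Ord variant of the form is evaluated on symbolic orders only; eval_error() and
  // eval_error_norm() use its result to pick a sufficient quadrature order for the integral.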
577  template<typename Scalar>
 578  Hermes::Ord Adapt<Scalar>::MatrixFormVolError::ord(int n, double *wt, Func<Hermes::Ord> *u_ext[],
 579  Func<Hermes::Ord> *u, Func<Hermes::Ord> *v, Geom<Hermes::Ord> *e,
 580  Func<Ord> **ext) const
581  {
582  switch (projNormType)
583  {
584  case HERMES_L2_NORM:
585  return l2_error_form<Hermes::Ord, Hermes::Ord>(n, wt, u_ext, u, v, e, ext);
586  case HERMES_H1_NORM:
587  return h1_error_form<Hermes::Ord, Hermes::Ord>(n, wt, u_ext, u, v, e, ext);
588  case HERMES_H1_SEMINORM:
589  return h1_error_semi_form<Hermes::Ord, Hermes::Ord>(n, wt, u_ext, u, v, e, ext);
590  case HERMES_HCURL_NORM:
591  return hcurl_error_form<Hermes::Ord, Hermes::Ord>(n, wt, u_ext, u, v, e, ext);
592  case HERMES_HDIV_NORM:
593  return hdiv_error_form<Hermes::Ord, Hermes::Ord>(n, wt, u_ext, u, v, e, ext);
594  default:
595  throw Hermes::Exceptions::Exception("Unknown projection type");
596  return Hermes::Ord();
597  }
598  }
599 
 600  template<typename Scalar>
 601  double Adapt<Scalar>::calc_err_est(Solution<Scalar>* sln, Solution<Scalar>* rsln, bool solutions_for_adapt,
 602  unsigned int error_flags)
603  {
604  this->tick();
605  if(num != 1)
606  throw Exceptions::LengthException(1, 1, num);
607  double result = calc_err_internal(sln, rsln, NULL, solutions_for_adapt, error_flags);
608  this->tick();
609  this->info("Adaptivity: error estimate calculation duration: %f s.", this->last());
610  return result;
611  }
612 
613  template<typename Scalar>
614  double Adapt<Scalar>::calc_err_est(Hermes::vector<Solution<Scalar>*> slns, Hermes::vector<Solution<Scalar>*> rslns,
615  Hermes::vector<double>* component_errors, bool solutions_for_adapt,
616  unsigned int error_flags)
617  {
618  this->tick();
619  if(slns.size() != num)
620  throw Exceptions::LengthException(1, slns.size(), num);
621  if(rslns.size() != num)
622  throw Exceptions::LengthException(2, rslns.size(), num);
623  double result = calc_err_internal(slns, rslns, component_errors, solutions_for_adapt, error_flags);
624  this->tick();
625  this->info("Adaptivity: error estimate calculation duration: %f s.", this->last());
626  return result;
627  }
628 
 629  template<typename Scalar>
 630  double Adapt<Scalar>::calc_err_exact(Solution<Scalar>* sln, Solution<Scalar>* rsln, bool solutions_for_adapt,
 631  unsigned int error_flags)
632  {
633  this->tick();
634  if(num != 1)
635  throw Exceptions::LengthException(1, 1, num);
636  OGProjection<Scalar> ogProjection;
637  typename Mesh::ReferenceMeshCreator ref_mesh_creator(this->spaces[0]->get_mesh());
638  Mesh* ref_mesh = ref_mesh_creator.create_ref_mesh();
639  typename Space<Scalar>::ReferenceSpaceCreator ref_space_creator(this->spaces[0], ref_mesh, 0);
640  Space<Scalar>* ref_space = ref_space_creator.create_ref_space();
641  Solution<Scalar> ref_sln_local;
642  ogProjection.project_global(ref_space, rsln, &ref_sln_local);
643  double result = calc_err_internal(sln, &ref_sln_local, NULL, solutions_for_adapt, error_flags);
644  delete ref_space;
645  delete ref_mesh;
646  this->tick();
647  this->info("Adaptivity: exact error calculation duration: %f s.", this->last());
648  return result;
649  }
650 
651  template<typename Scalar>
652  double Adapt<Scalar>::calc_err_exact(Hermes::vector<Solution<Scalar>*> slns, Hermes::vector<Solution<Scalar>*> rslns,
653  Hermes::vector<double>* component_errors, bool solutions_for_adapt,
654  unsigned int error_flags)
655  {
656  this->tick();
657  if(slns.size() != num)
658  throw Exceptions::LengthException(1, slns.size(), num);
659  if(rslns.size() != num)
660  throw Exceptions::LengthException(2, rslns.size(), num);
661  Mesh** ref_meshes = new Mesh*[num];
662  Space<Scalar>** ref_spaces = new Space<Scalar>*[num];
663  Hermes::vector<Solution<Scalar>*> ref_slns_local;
664  for(unsigned int i = 0; i < num; i++)
665  {
666  OGProjection<Scalar> ogProjection;
667  typename Mesh::ReferenceMeshCreator ref_mesh_creator(this->spaces[i]->get_mesh());
668  ref_meshes[i] = ref_mesh_creator.create_ref_mesh();
669  typename Space<Scalar>::ReferenceSpaceCreator ref_space_creator(this->spaces[i], ref_mesh_creator.create_ref_mesh(), 0);
670  ref_spaces[i] = ref_space_creator.create_ref_space();
671  ref_slns_local.push_back(new Solution<Scalar>);
672  ogProjection.project_global(ref_space_creator.create_ref_space(), rslns[i], ref_slns_local.back());
673  }
674  double result = calc_err_internal(slns, ref_slns_local, component_errors, solutions_for_adapt, error_flags);
675  for(unsigned int i = 0; i < num; i++)
676  {
677  delete ref_spaces[i];
678  delete ref_meshes[i];
679  }
680  delete [] ref_meshes;
681  delete [] ref_spaces;
682  this->tick();
683  this->info("Adaptivity: exact error calculation duration: %f s.", this->last());
684  return result;
685  }
686 
687  template<typename Scalar>
688  bool Adapt<Scalar>::adapt(RefinementSelectors::Selector<Scalar>* refinement_selector, double thr, int strat,
689  int regularize, double to_be_processed)
690  {
691  if(refinement_selector==NULL)
692  throw Exceptions::NullException(1);
693  Hermes::vector<RefinementSelectors::Selector<Scalar> *> refinement_selectors;
694  refinement_selectors.push_back(refinement_selector);
695  return adapt(refinement_selectors, thr, strat, regularize, to_be_processed);
696  }
697 
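  // In the multi-mesh case several components may share one mesh. If they request different
  // refinements of the same element, the routine below promotes them to a common refinement
  // (an anisotropic split beats p-refinement, an isotropic split beats everything) and
  // regenerates the suggested orders for every component on that mesh.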
698  template<typename Scalar>
699  void Adapt<Scalar>::fix_shared_mesh_refinements(Mesh** meshes, std::vector<ElementToRefine>& elems_to_refine,
700  int** idx, RefinementSelectors::Selector<Scalar> *** refinement_selectors)
701  {
702  int num_elem_to_proc = elems_to_refine.size();
703 
704  RefinementSelectors::Selector<Scalar>** current_refinement_selectors;
705 
706  for(int inx = 0; inx < num_elem_to_proc; inx++)
707  {
708  current_refinement_selectors = refinement_selectors[omp_get_thread_num()];
709  ElementToRefine& elem_ref = elems_to_refine[inx];
710  int current_quad_order = this->spaces[elem_ref.comp]->get_element_order(elem_ref.id);
711  Element* current_elem = meshes[elem_ref.comp]->get_element(elem_ref.id);
712 
 713  // select a refinement to be used by all components that share the mesh about to be refined
714  int selected_refinement = elem_ref.split;
715  for (int j = 0; j < this->num; j++)
716  {
 717  if(selected_refinement == H2D_REFINEMENT_H) break; // isotropic refinement is the maximum that can be requested
718  if(j != elem_ref.comp && meshes[j] == meshes[elem_ref.comp]) { // if a mesh is shared
719  int ii = idx[elem_ref.id][j];
 720  if(ii >= 0) { // and the same element is about to be refined by another component
721  const ElementToRefine& elem_ref_ii = elems_to_refine[ii];
722  if(elem_ref_ii.split != selected_refinement && elem_ref_ii.split != H2D_REFINEMENT_P) { //select more complicated refinement
723  if((elem_ref_ii.split == H2D_REFINEMENT_ANISO_H || elem_ref_ii.split == H2D_REFINEMENT_ANISO_V) && selected_refinement == H2D_REFINEMENT_P)
724  selected_refinement = elem_ref_ii.split;
725  else
726  selected_refinement = H2D_REFINEMENT_H;
727  }
728  }
729  }
730  }
731 
732  //fix other refinements according to the selected refinement
733  if(selected_refinement != H2D_REFINEMENT_P)
734  {
735  //get suggested orders for the selected refinement
736  const int* suggested_orders = NULL;
737  if(selected_refinement == H2D_REFINEMENT_H)
738  suggested_orders = elem_ref.q;
739 
740  //update orders
741  for (int j = 0; j < this->num; j++)
742  {
743  if(j != elem_ref.comp && meshes[j] == meshes[elem_ref.comp]) { // if components share the mesh
744  // change currently processed refinement
745  if(elem_ref.split != selected_refinement)
746  {
747  elem_ref.split = selected_refinement;
748  current_refinement_selectors[j]->generate_shared_mesh_orders(current_elem, current_quad_order, elem_ref.split, elem_ref.p, suggested_orders);
749  }
750 
751  // change other refinements
752  int ii = idx[elem_ref.id][j];
753  if(ii >= 0)
754  {
755  ElementToRefine& elem_ref_ii = elems_to_refine[ii];
756  if(elem_ref_ii.split != selected_refinement)
757  {
758  elem_ref_ii.split = selected_refinement;
759  current_refinement_selectors[j]->generate_shared_mesh_orders(current_elem, current_quad_order, elem_ref_ii.split, elem_ref_ii.p, suggested_orders);
760  }
761  }
762  else
763  { // element (of the other comp.) not refined at all: assign refinement
764  ElementToRefine elem_ref_new(elem_ref.id, j);
765  elem_ref_new.split = selected_refinement;
766  current_refinement_selectors[j]->generate_shared_mesh_orders(current_elem, current_quad_order, elem_ref_new.split, elem_ref_new.p, suggested_orders);
767  elems_to_refine.push_back(elem_ref_new);
768  }
769  }
770  }
771  }
772  }
773  }
774 
775  template<typename Scalar>
776  double Adapt<Scalar>::get_element_error_squared(int component, int id) const
777  {
778  if(!have_errors)
779  throw Exceptions::Exception("element errors have to be calculated first, call Adapt<Scalar>::calc_err_est().");
780  return errors[component][id];
781  };
782 
783  template<typename Scalar>
784  const Hermes::vector<typename Adapt<Scalar>::ElementReference>& Adapt<Scalar>::get_regular_queue() const
785  {
786  return regular_queue;
787  };
788 
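  // On shared meshes the element orders of all components are raised to the componentwise
  // maximum (separately in the horizontal and vertical direction), so that all components
  // sharing a mesh end up with the same element orders.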
 789  template<typename Scalar>
 790  void Adapt<Scalar>::homogenize_shared_mesh_orders(Mesh** meshes)
 791  {
792  Element* e;
793  for (int i = 0; i < this->num; i++)
794  {
795  for_all_active_elements(e, meshes[i])
796  {
797  int current_quad_order = this->spaces[i]->get_element_order(e->id);
798  int current_order_h = H2D_GET_H_ORDER(current_quad_order), current_order_v = H2D_GET_V_ORDER(current_quad_order);
799 
800  for (int j = 0; j < this->num; j++)
801  if((j != i) && (meshes[j] == meshes[i])) // components share the mesh
802  {
803  int quad_order = this->spaces[j]->get_element_order(e->id);
804  current_order_h = std::max(current_order_h, H2D_GET_H_ORDER(quad_order));
805  current_order_v = std::max(current_order_v, H2D_GET_V_ORDER(quad_order));
806  }
807 
808  this->spaces[i]->set_element_order_internal(e->id, H2D_MAKE_QUAD_ORDER(current_order_h, current_order_v));
809  }
810  }
811  }
812 
813  template<typename Scalar>
814  const std::vector<ElementToRefine>& Adapt<Scalar>::get_last_refinements() const
815  {
816  return last_refinements;
817  }
818 
819  template<typename Scalar>
820  void Adapt<Scalar>::apply_refinements(std::vector<ElementToRefine>& elems_to_refine)
821  {
822  for (std::vector<ElementToRefine>::const_iterator elem_ref = elems_to_refine.begin();
823  elem_ref != elems_to_refine.end(); elem_ref++)
824  apply_refinement(*elem_ref);
825  }
826 
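  // A single refinement record is applied here: H2D_REFINEMENT_P only changes the element
  // order, H2D_REFINEMENT_H splits the element into four sons, and the anisotropic splits
  // produce two sons (sons 0-1 or 2-3, depending on the split direction), each son receiving
  // its order from elem_ref.p.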
 827  template<typename Scalar>
 828  void Adapt<Scalar>::apply_refinement(const ElementToRefine& elem_ref)
 829  {
830  Space<Scalar>* space = this->spaces[elem_ref.comp];
831  Mesh* mesh = space->get_mesh();
832 
833  Element* e;
834  e = mesh->get_element(elem_ref.id);
835 
836  if(elem_ref.split == H2D_REFINEMENT_P)
837  {
838  space->set_element_order_internal(elem_ref.id, elem_ref.p[0]);
839  space->edata[elem_ref.id].changed_in_last_adaptation = true;
840  }
841  else if(elem_ref.split == H2D_REFINEMENT_H)
842  {
843  if(e->active)
844  mesh->refine_element_id(elem_ref.id);
845  for (int j = 0; j < 4; j++)
846  {
847  space->set_element_order_internal(e->sons[j]->id, elem_ref.p[j]);
848  space->edata[e->sons[j]->id].changed_in_last_adaptation = true;
849  }
850  }
851  else
852  {
853  if(e->active)
854  mesh->refine_element_id(elem_ref.id, elem_ref.split);
855  for (int j = 0; j < 2; j++)
856  {
857  space->set_element_order_internal(e->sons[ (elem_ref.split == 1) ? j : j + 2 ]->id, elem_ref.p[j]);
858  space->edata[e->sons[ (elem_ref.split == 1) ? j : j + 2 ]->id].changed_in_last_adaptation = true;
859  }
860  }
861  }
862 
863  template<typename Scalar>
864  void Adapt<Scalar>::set_error_form(int i, int j, typename Adapt<Scalar>::MatrixFormVolError* form)
865  {
866  if(form->i < 0 || form->i >= this->num)
867  throw Exceptions::ValueException("component number", form->i, 0, this->num);
868  if(form->j < 0 || form->j >= this->num)
869  throw Exceptions::ValueException("component number", form->j, 0, this->num);
870 
871  // FIXME: Memory leak - always for i == j (see the constructor), may happen for i != j
872  // if user does not delete previously set error forms by himself.
873  if(own_forms[i][j] && error_form[i][j] != NULL)
874  delete error_form[i][j];
875  error_form[i][j] = form;
876  norm_form[i][j] = error_form[i][j];
877  own_forms[i][j] = false;
878  }
879 
880  template<typename Scalar>
881  void Adapt<Scalar>::set_error_form(typename Adapt<Scalar>::MatrixFormVolError* form)
882  {
883  set_error_form(0, 0, form);
884  }
885 
886  template<typename Scalar>
887  void Adapt<Scalar>::set_norm_form(int i, int j, typename Adapt<Scalar>::MatrixFormVolError* form)
888  {
889  if(form->i < 0 || form->i >= this->num)
890  throw Exceptions::ValueException("component number", form->i, 0, this->num);
891  if(form->j < 0 || form->j >= this->num)
892  throw Exceptions::ValueException("component number", form->j, 0, this->num);
893 
894  norm_form[i][j] = form;
895  }
896 
897  template<typename Scalar>
898  void Adapt<Scalar>::set_norm_form(typename Adapt<Scalar>::MatrixFormVolError* form)
899  {
900  set_norm_form(0, 0, form);
901  }
902 
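  // eval_error() integrates the error form over one element: the quadrature order is derived
  // from the Ord-valued form (limited by limit_order), the coarse solution values are replaced
  // by (coarse - reference) differences, and the absolute value of the integral is returned.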
 903  template<typename Scalar>
 904  double Adapt<Scalar>::eval_error(typename Adapt<Scalar>::MatrixFormVolError* form, MeshFunction<Scalar>*sln1, MeshFunction<Scalar>*sln2,
 905  MeshFunction<Scalar>*rsln1,
 906  MeshFunction<Scalar>*rsln2)
907  {
908  RefMap *rv1 = sln1->get_refmap();
909  RefMap *rv2 = sln2->get_refmap();
910  RefMap *rrv1 = rsln1->get_refmap();
911  RefMap *rrv2 = rsln2->get_refmap();
912 
913  // determine the integration order
914  int inc = (rsln1->get_num_components() == 2) ? 1 : 0;
915  Func<Hermes::Ord>* ou = init_fn_ord(rsln1->get_fn_order() + inc);
916  Func<Hermes::Ord>* ov = init_fn_ord(rsln2->get_fn_order() + inc);
917 
918  double fake_wt = 1.0;
919  Geom<Hermes::Ord>* fake_e = init_geom_ord();
920  Hermes::Ord o = form->ord(1, &fake_wt, NULL, ou, ov, fake_e, NULL);
921  int order = rrv1->get_inv_ref_order();
922  order += o.get_order();
923  if(static_cast<Solution<Scalar>*>(rsln1) || static_cast<Solution<Scalar>*>(rsln2))
924  {
925  if(static_cast<Solution<Scalar>*>(rsln1)->get_type() == HERMES_EXACT)
926  { limit_order_nowarn(order, rv1->get_active_element()->get_mode()); }
927  else
928  limit_order(order, rv1->get_active_element()->get_mode());
929  }
930  else
931  limit_order(order, rv1->get_active_element()->get_mode());
932 
933  ou->free_ord(); delete ou;
934  ov->free_ord(); delete ov;
935  delete fake_e;
936 
937  // eval the form
938  Quad2D* quad = sln1->get_quad_2d();
939  double3* pt = quad->get_points(order, sln1->get_active_element()->get_mode());
940  int np = quad->get_num_points(order, sln1->get_active_element()->get_mode());
941 
942  // init geometry and jacobian*weights
943  Geom<double>* e = init_geom_vol(rrv1, order);
944  double* jac = rrv1->get_jacobian(order);
945  double* jwt = new double[np];
946  for(int i = 0; i < np; i++)
947  jwt[i] = pt[i][2] * jac[i];
948 
949  // function values and values of external functions
950  Func<Scalar>* err1 = init_fn(sln1, order);
951  Func<Scalar>* err2 = init_fn(sln2, order);
952  Func<Scalar>* v1 = init_fn(rsln1, order);
953  Func<Scalar>* v2 = init_fn(rsln2, order);
954 
955  err1->subtract(v1);
956  err2->subtract(v2);
957 
958  Scalar res = form->value(np, jwt, NULL, err1, err2, e, NULL);
959 
960  e->free(); delete e;
961  delete [] jwt;
962  err1->free_fn(); delete err1;
963  err2->free_fn(); delete err2;
964  v1->free_fn(); delete v1;
965  v2->free_fn(); delete v2;
966 
967  return std::abs(res);
968  }
969 
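  // eval_error_norm() is the same quadrature loop as eval_error(), but it integrates the form
  // with the reference solutions themselves, yielding the squared norm used to relativize errors.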
 970  template<typename Scalar>
 971  double Adapt<Scalar>::eval_error_norm(typename Adapt<Scalar>::MatrixFormVolError* form, MeshFunction<Scalar>*rsln1,
 972  MeshFunction<Scalar>*rsln2)
 973  {
974  RefMap *rrv1 = rsln1->get_refmap();
975  RefMap *rrv2 = rsln2->get_refmap();
976 
977  // determine the integration order
978  int inc = (rsln1->get_num_components() == 2) ? 1 : 0;
979  Func<Hermes::Ord>* ou = init_fn_ord(rsln1->get_fn_order() + inc);
980  Func<Hermes::Ord>* ov = init_fn_ord(rsln2->get_fn_order() + inc);
981 
982  double fake_wt = 1.0;
983  Geom<Hermes::Ord>* fake_e = init_geom_ord();
984  Hermes::Ord o = form->ord(1, &fake_wt, NULL, ou, ov, fake_e, NULL);
985  int order = rrv1->get_inv_ref_order();
986  order += o.get_order();
987  if(static_cast<Solution<Scalar>*>(rsln1) || static_cast<Solution<Scalar>*>(rsln2))
988  {
989  if(static_cast<Solution<Scalar>*>(rsln1)->get_type() == HERMES_EXACT)
990  { limit_order_nowarn(order, rrv1->get_active_element()->get_mode()); }
991  else
992  limit_order(order, rrv1->get_active_element()->get_mode());
993  }
994  else
995  limit_order(order, rrv1->get_active_element()->get_mode());
996 
997  ou->free_ord(); delete ou;
998  ov->free_ord(); delete ov;
999  delete fake_e;
1000 
1001  // eval the form
1002  Quad2D* quad = rsln1->get_quad_2d();
1003  double3* pt = quad->get_points(order, rrv1->get_active_element()->get_mode());
1004  int np = quad->get_num_points(order, rrv1->get_active_element()->get_mode());
1005 
1006  // init geometry and jacobian*weights
1007  Geom<double>* e = init_geom_vol(rrv1, order);
1008  double* jac = rrv1->get_jacobian(order);
1009  double* jwt = new double[np];
1010  for(int i = 0; i < np; i++)
1011  jwt[i] = pt[i][2] * jac[i];
1012 
1013  // function values
1014  Func<Scalar>* v1 = init_fn(rsln1, order);
1015  Func<Scalar>* v2 = init_fn(rsln2, order);
1016 
1017  Scalar res = form->value(np, jwt, NULL, v1, v2, e, NULL);
1018 
1019  e->free(); delete e;
1020  delete [] jwt;
1021  v1->free_fn(); delete v1;
1022  v2->free_fn(); delete v2;
1023 
1024  return std::abs(res);
1025  }
1026 
1027  template<typename Scalar>
1028  double Adapt<Scalar>::calc_err_internal(Hermes::vector<Solution<Scalar>*> slns, Hermes::vector<Solution<Scalar>*> rslns,
1029  Hermes::vector<double>* component_errors, bool solutions_for_adapt, unsigned int error_flags)
1030  {
1031  int i, j;
1032 
1033  bool compatible_meshes = true;
1034  for (int space_i = 0; space_i < this->num; space_i++)
1035  {
1036  Element* e;
1037  for_all_active_elements(e, slns[space_i]->get_mesh())
1038  {
1039  Element* e_ref = rslns[space_i]->get_mesh()->get_element(e->id);
1040  if(e_ref == NULL)
1041  {
1042  compatible_meshes = false;
1043  break;
1044  }
1045  if(!e_ref->active)
1046  {
1047  if(e_ref->sons[0] == NULL || e_ref->sons[2] == NULL)
1048  {
1049  compatible_meshes = false;
1050  break;
1051  }
1052  if(!e_ref->sons[0]->active || !e_ref->sons[2]->active)
1053  {
1054  compatible_meshes = false;
1055  break;
1056  }
1057  }
1058  }
1059  }
1060 
1061  if(!compatible_meshes)
1062  throw Exceptions::Exception("Reference space not created by an isotropic (p-, h-, or hp-) refinement from the coarse space.");
1063 
1064  if(slns.size() != this->num)
1065  throw Exceptions::LengthException(0, slns.size(), this->num);
1066 
1067  Solution<Scalar>* rslns_original[H2D_MAX_COMPONENTS];
1068  Solution<Scalar>* slns_original[H2D_MAX_COMPONENTS];
1069 
1070  for (i = 0; i < this->num; i++)
1071  {
1072  slns_original[i] = this->sln[i];
1073  this->sln[i] = slns[i];
1074  sln[i]->set_quad_2d(&g_quad_2d_std);
1075  }
1076  for (i = 0; i < this->num; i++)
1077  {
1078  rslns_original[i] = this->rsln[i];
1079  this->rsln[i] = rslns[i];
1080  rsln[i]->set_quad_2d(&g_quad_2d_std);
1081  }
1082 
1083  have_coarse_solutions = true;
1084  have_reference_solutions = true;
1085 
1086  // Prepare multi-mesh traversal and error arrays.
1087  const Mesh **meshes = new const Mesh *[2 * num];
1088  Transformable **tr = new Transformable *[2 * num];
1089  Traverse trav(true);
1090  num_act_elems = 0;
1091  for (i = 0; i < num; i++)
1092  {
1093  meshes[i] = sln[i]->get_mesh();
1094  meshes[i + num] = rsln[i]->get_mesh();
1095  tr[i] = sln[i];
1096  tr[i + num] = rsln[i];
1097 
1098  num_act_elems += sln[i]->get_mesh()->get_num_active_elements();
1099 
1100  int max = meshes[i]->get_max_element_id();
1101  if(solutions_for_adapt)
1102  {
1103  if(errors[i] != NULL) delete [] errors[i];
1104  errors[i] = new double[max];
1105  memset(errors[i], 0, sizeof(double) * max);
1106  }
1107  }
1108 
1109  double total_norm = 0.0;
1110  double *norms = new double[num];
1111  memset(norms, 0, num * sizeof(double));
1112  double *errors_components = new double[num];
1113  memset(errors_components, 0, num * sizeof(double));
1114  if(solutions_for_adapt) this->errors_squared_sum = 0.0;
1115  double total_error = 0.0;
1116 
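  // The Traverse object below walks the union of all coarse and reference meshes at once,
  // so each state 'ee' pairs a coarse element with the corresponding (possibly refined)
  // reference elements of every component.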
1117  // Calculate error.
1118  Traverse::State * ee;
1119  trav.begin(2 * num, meshes, tr);
1120  while ((ee = trav.get_next_state()) != NULL)
1121  {
1122  for (i = 0; i < num; i++)
1123  {
1124  for (j = 0; j < num; j++)
1125  {
1126  if(error_form[i][j] != NULL)
1127  {
1128  double err, nrm;
1129  err = eval_error(error_form[i][j], sln[i], sln[j], rsln[i], rsln[j]);
1130  nrm = eval_error_norm(norm_form[i][j], rsln[i], rsln[j]);
1131 
1132  norms[i] += nrm;
1133  total_norm += nrm;
1134  total_error += err;
1135  errors_components[i] += err;
1136  if(solutions_for_adapt)
1137  this->errors[i][ee->e[i]->id] += err;
1138  }
1139  }
1140  }
1141  }
1142  trav.finish();
1143 
1144  // Store the calculation for each solution component separately.
1145  if(component_errors != NULL)
1146  {
1147  component_errors->clear();
1148  for (int i = 0; i < num; i++)
1149  {
1150  if((error_flags & HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_ABS)
1151  component_errors->push_back(sqrt(errors_components[i]));
1152  else if((error_flags & HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_REL)
1153  component_errors->push_back(sqrt(errors_components[i]/norms[i]));
1154  else
1155  {
1156  throw Hermes::Exceptions::Exception("Unknown total error type (0x%x).", error_flags & HERMES_TOTAL_ERROR_MASK);
1157  return -1.0;
1158  }
1159  }
1160  }
1161 
1162  // Make the error relative if needed.
1163  if(solutions_for_adapt)
 1164  {
 1165  if((error_flags & HERMES_ELEMENT_ERROR_MASK) == HERMES_ELEMENT_ERROR_REL)
 1166  {
1167  for (int i = 0; i < this->num; i++)
1168  {
1169  Element* e;
1170  for_all_active_elements(e, meshes[i])
1171  {
1172  errors[i][e->id] /= norms[i];
1173  }
1174  }
1175  }
1176 
1177  this->errors_squared_sum = total_error;
1178 
 1179  // The element error mask is used here because this variable is used in the adapt()
 1180  // function, where the processed error (the sum of the errors of the processed elements)
 1181  // is compared against this variable.
1182  if((error_flags & HERMES_ELEMENT_ERROR_MASK) == HERMES_ELEMENT_ERROR_REL)
1183  errors_squared_sum = errors_squared_sum / total_norm;
1184  }
1185 
 1186  // Prepare a list of elements ordered by their error.
1187  if(solutions_for_adapt)
1188  {
1189  fill_regular_queue(meshes);
1190  have_errors = true;
1191  }
1192  else
1193  {
1194  for (i = 0; i < this->num; i++)
1195  {
1196  this->sln[i] = slns_original[i];
1197  this->rsln[i] = rslns_original[i];
1198  }
1199  }
1200 
1201  delete [] meshes;
1202  delete [] tr;
1203  delete [] norms;
1204  delete [] errors_components;
1205 
1206  // Return error value.
1207  if((error_flags & HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_ABS)
1208  return sqrt(total_error);
1209  else if((error_flags & HERMES_TOTAL_ERROR_MASK) == HERMES_TOTAL_ERROR_REL)
1210  return sqrt(total_error / total_norm);
1211  else
1212  {
1213  throw Hermes::Exceptions::Exception("Unknown total error type (0x%x).", error_flags & HERMES_TOTAL_ERROR_MASK);
1214  return -1.0;
1215  }
1216  }
1217 
 1218  template<typename Scalar>
 1219  double Adapt<Scalar>::calc_err_internal(Solution<Scalar>* sln, Solution<Scalar>* rsln,
 1220  Hermes::vector<double>* component_errors, bool solutions_for_adapt,
1221  unsigned int error_flags)
1222  {
1223  Hermes::vector<Solution<Scalar>*> slns;
1224  slns.push_back(sln);
1225  Hermes::vector<Solution<Scalar>*> rslns;
1226  rslns.push_back(rsln);
1227  return calc_err_internal(slns, rslns, component_errors, solutions_for_adapt, error_flags);
1228  }
1229 
 1230  template<typename Scalar>
 1231  Adapt<Scalar>::CompareElements::CompareElements(double** errors) : errors(errors)
 1232  {
1233  }
1234 
1235  template<typename Scalar>
1236  bool Adapt<Scalar>::CompareElements::operator()(const ElementReference& e1, const ElementReference& e2) const
1237  {
1238  return errors[e1.comp][e1.id] > errors[e2.comp][e2.id];
1239  }
1240 
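  // fill_regular_queue() lists every active element of every component and sorts the list by
  // decreasing element error, which is the order in which adapt() later consumes the elements.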
 1241  template<typename Scalar>
 1242  void Adapt<Scalar>::fill_regular_queue(const Mesh** meshes)
 1243  {
 1244  // prepare space for the queue (it is assumed to only grow, since elements can only be split)
1245  regular_queue.clear();
1246  if(num_act_elems < (int)regular_queue.capacity())
1247  {
1248  Hermes::vector<ElementReference> empty_refs;
1249  regular_queue.swap(empty_refs); //deallocate
1250  regular_queue.reserve(num_act_elems); //allocate
1251  }
1252 
1253  //prepare initial fill
1254  Element* e;
1255  typename Hermes::vector<ElementReference>::iterator elem_info = regular_queue.begin();
1256  for (int i = 0; i < this->num; i++)
1257  {
1258  for_all_active_elements(e, meshes[i])
1259  {
1260  regular_queue.push_back(ElementReference(e->id, i));
1261  }
1262  }
1263  //sort
1264  std::sort(regular_queue.begin(), regular_queue.end(), CompareElements(errors));
1265  }
1266 
1267  template HERMES_API class Adapt<double>;
1268  template HERMES_API class Adapt<std::complex<double> >;
1269  }
1270 }