Hermes2D 2.0
proj_based_selector.cpp
#include "proj_based_selector.h"
#include <algorithm>
#include "global.h"
#include "solution.h"
#include "discrete_problem.h"
#include "quad_all.h"
#include "element_to_refine.h"
#include "order_permutator.h"

namespace Hermes
{
  namespace Hermes2D
  {
    namespace RefinementSelectors
    {
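      // ProjBasedSelector implements the projection-based scoring of hp-refinement
      // candidates: for every candidate generated by OptimumSelector, the reference
      // solution is projected onto the candidate's sub-elements and the projection
      // error becomes the candidate's error measure. The norm-specific pieces
      // (build_projection_matrix, evaluate_rhs_subdomain, evaluate_error_squared_subdomain)
      // are supplied by derived selectors such as the H1 and L2 variants.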
      template<typename Scalar>
      ProjBasedSelector<Scalar>::ProjBasedSelector(CandList cand_list, double conv_exp, int max_order, Shapeset* shapeset,
        const typename OptimumSelector<Scalar>::Range& vertex_order, const typename OptimumSelector<Scalar>::Range& edge_bubble_order) :
        OptimumSelector<Scalar>(cand_list, conv_exp, max_order, shapeset, vertex_order, edge_bubble_order),
        warn_uniform_orders(false),
        error_weight_h(H2DRS_DEFAULT_ERR_WEIGHT_H),
        error_weight_p(H2DRS_DEFAULT_ERR_WEIGHT_P),
        error_weight_aniso(H2DRS_DEFAULT_ERR_WEIGHT_ANISO)
      {
        cached_shape_vals_valid = new bool[2];
        cached_shape_ortho_vals = new TrfShape[2];
        cached_shape_vals = new TrfShape[2];

        //clean svals initialization state
        std::fill(cached_shape_vals_valid, cached_shape_vals_valid + H2D_NUM_MODES, false);

        //clear matrix cache
        for(int m = 0; m < H2D_NUM_MODES; m++)
          for(int i = 0; i < H2DRS_MAX_ORDER + 1; i++)
            for(int k = 0; k < H2DRS_MAX_ORDER + 1; k++)
              proj_matrix_cache[m][i][k] = NULL;

        //allocate caches
        int max_inx = this->max_shape_inx[0];
        for(int i = 1; i < H2D_NUM_MODES; i++)
          max_inx = std::max(max_inx, this->max_shape_inx[i]);
        nonortho_rhs_cache.resize(max_inx + 1);
        ortho_rhs_cache.resize(max_inx + 1);
      }

      template<typename Scalar>
      ProjBasedSelector<Scalar>::~ProjBasedSelector()
      {
        //delete matrix cache
        for(int m = 0; m < H2D_NUM_MODES; m++)
        {
          for(int i = 0; i < H2DRS_MAX_ORDER + 1; i++)
            for(int k = 0; k < H2DRS_MAX_ORDER + 1; k++)
            {
              if(proj_matrix_cache[m][i][k] != NULL)
                delete [] proj_matrix_cache[m][i][k];
            }
        }

        if(!this->isAClone)
        {
          delete [] cached_shape_vals_valid;
          delete [] cached_shape_ortho_vals;
          delete [] cached_shape_vals;
        }
      }

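      // Error weights scale the projection error of a candidate according to its
      // refinement type (h-refinement, p-refinement, anisotropic refinement), so a
      // particular kind of split can be favoured or penalized when candidates are
      // compared; the defaults are the H2DRS_DEFAULT_ERR_WEIGHT_* constants used in
      // the constructor above.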
      template<typename Scalar>
      void ProjBasedSelector<Scalar>::set_error_weights(double weight_h, double weight_p, double weight_aniso)
      {
        error_weight_h = weight_h;
        error_weight_p = weight_p;
        error_weight_aniso = weight_aniso;
      }

      template<typename Scalar>
      double ProjBasedSelector<Scalar>::get_error_weight_h() const
      {
        return error_weight_h;
      }

      template<typename Scalar>
      double ProjBasedSelector<Scalar>::get_error_weight_p() const
      {
        return error_weight_p;
      }

      template<typename Scalar>
      double ProjBasedSelector<Scalar>::get_error_weight_aniso() const
      {
        return error_weight_aniso;
      }

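      // TrfShapeExp stores precalculated shape-function values for one sub-element
      // transformation: a dense matrix with one row per shape-function expansion and
      // one column per Gauss integration point.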
      template<typename Scalar>
      ProjBasedSelector<Scalar>::TrfShapeExp::TrfShapeExp() : num_gip(0), num_expansion(0), values(NULL) {}

      template<typename Scalar>
      ProjBasedSelector<Scalar>::TrfShapeExp::~TrfShapeExp()
      {
        delete [] values;
      }

      template<typename Scalar>
      void ProjBasedSelector<Scalar>::TrfShapeExp::allocate(int num_expansion, int num_gip)
      {
        delete [] values;
        values = new_matrix<double>(num_expansion, num_gip);
        this->num_expansion = num_expansion;
        this->num_gip = num_gip;
      }

      template<typename Scalar>
      double* ProjBasedSelector<Scalar>::TrfShapeExp::operator[](int inx_expansion)
      {
        return values[inx_expansion];
      }

      template<typename Scalar>
      bool ProjBasedSelector<Scalar>::TrfShapeExp::empty() const
      {
        return values == NULL;
      }

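      // evaluate_cands_error assigns a weighted projection error to every candidate in
      // this->candidates and returns the mean and standard deviation of log10(error),
      // computed over the candidates that do not perform worse than the unrefined
      // candidate; these statistics feed the candidate scoring in OptimumSelector.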
      template<typename Scalar>
      void ProjBasedSelector<Scalar>::evaluate_cands_error(Element* e, Solution<Scalar>* rsln, double* avg_error, double* dev_error)
      {
        bool tri = e->is_triangle();

        // find range of orders
        typename OptimumSelector<Scalar>::CandsInfo info_h, info_p, info_aniso;
        this->update_cands_info(info_h, info_p, info_aniso);

        // calculate squared projection errors of elements of candidates
        CandElemProjError herr[4], anisoerr[4], perr;
        calc_projection_errors(e, info_h, info_p, info_aniso, rsln, herr, perr, anisoerr);

        //evaluate errors
        double sum_err = 0.0;
        double sum_sqr_err = 0.0;
        int num_processed = 0;
        typename OptimumSelector<Scalar>::Cand& unrefined_c = this->candidates[0];
        for (unsigned i = 0; i < this->candidates.size(); i++)
        {
          typename OptimumSelector<Scalar>::Cand& c = this->candidates[i];
          double error_squared = 0.0;
          if(tri) { //triangle
            switch(c.split)
            {
            case H2D_REFINEMENT_H:
              error_squared = 0.0;
              for (int j = 0; j < H2D_MAX_ELEMENT_SONS; j++)
              {
                int order = H2D_GET_H_ORDER(c.p[j]);
                error_squared += herr[j][order][order];
              }
              error_squared *= 0.25; //element of a candidate occupies 1/4 of the reference domain defined over a candidate
              break;

            case H2D_REFINEMENT_P:
              {
                int order = H2D_GET_H_ORDER(c.p[0]);
                error_squared = perr[order][order];
              }
              break;

            default:
              throw Hermes::Exceptions::Exception("Unknown split type \"%d\" at candidate %d", c.split, i);
            }
          }
          else { //quad
            switch(c.split)
            {
            case H2D_REFINEMENT_H:
              error_squared = 0.0;
              for (int j = 0; j < H2D_MAX_ELEMENT_SONS; j++)
              {
                int order_h = H2D_GET_H_ORDER(c.p[j]), order_v = H2D_GET_V_ORDER(c.p[j]);
                error_squared += herr[j][order_h][order_v];
              }
              error_squared *= 0.25; //element of a candidate occupies 1/4 of the reference domain defined over a candidate
              break;

            case H2D_REFINEMENT_ANISO_H:
            case H2D_REFINEMENT_ANISO_V:
              {
                error_squared = 0.0;
                for (int j = 0; j < 2; j++)
                  error_squared += anisoerr[(c.split == H2D_REFINEMENT_ANISO_H) ? j : j + 2][H2D_GET_H_ORDER(c.p[j])][H2D_GET_V_ORDER(c.p[j])];
                error_squared *= 0.5; //element of a candidate occupies 1/2 of the reference domain defined over a candidate
              }
              break;

            case H2D_REFINEMENT_P:
              {
                int order_h = H2D_GET_H_ORDER(c.p[0]), order_v = H2D_GET_V_ORDER(c.p[0]);
                error_squared = perr[order_h][order_v];
              }
              break;

            default:
              throw Hermes::Exceptions::Exception("Unknown split type \"%d\" at candidate %d", c.split, i);
            }
          }

          //calculate error from squared error
          c.error = sqrt(error_squared);

          //apply weights
          switch(c.split)
          {
          case H2D_REFINEMENT_H: c.error *= error_weight_h; break;
          case H2D_REFINEMENT_ANISO_H:
          case H2D_REFINEMENT_ANISO_V: c.error *= error_weight_aniso; break;
          case H2D_REFINEMENT_P: c.error *= error_weight_p; break;
          default: throw Hermes::Exceptions::Exception("Unknown split type \"%d\" at candidate %d", c.split, i);
          }

          //calculate statistics of log10(error); only candidates that do not increase
          //the error with respect to the unrefined candidate are taken into account
          if(i == 0 || c.error <= unrefined_c.error)
          {
            sum_err += log10(c.error);
            sum_sqr_err += Hermes::sqr(log10(c.error));
            num_processed++;
          }
        }

        *avg_error = sum_err / num_processed; // mean
        *dev_error = sqrt(sum_sqr_err/num_processed - Hermes::sqr(*avg_error)); // deviation is square root of variance
      }

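      // calc_projection_errors computes, for one element, the squared projection errors
      // of all considered orders for the H-, ANISO- and P-refinement candidates. The
      // reference solution is evaluated on the four sons of the element (or on the
      // element itself via sub-element transformations when the reference element is
      // still active), shape-function values are precalculated and cached per mode,
      // and calc_error_cand_element is then invoked for each candidate sub-domain.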
      template<typename Scalar>
      void ProjBasedSelector<Scalar>::calc_projection_errors(Element* e, const typename OptimumSelector<Scalar>::CandsInfo& info_h, const typename OptimumSelector<Scalar>::CandsInfo& info_p, const typename OptimumSelector<Scalar>::CandsInfo& info_aniso, Solution<Scalar>* rsln, CandElemProjError herr[4], CandElemProjError perr, CandElemProjError anisoerr[4])
      {
        ElementMode2D mode = e->get_mode();

        // select quadrature, obtain integration points and weights
        Quad2D* quad = &g_quad_2d_std;
        rsln->set_quad_2d(quad);
        double3* gip_points = quad->get_points(H2DRS_INTR_GIP_ORDER, mode);
        int num_gip_points = quad->get_num_points(H2DRS_INTR_GIP_ORDER, mode);

        // everything is done on the reference domain
        rsln->enable_transform(false);

        // obtain reference solution values on all four refined sons
        Scalar** rval[H2D_MAX_ELEMENT_SONS];
        Element* base_element = rsln->get_mesh()->get_element(e->id);
        if(base_element->active)
        {
          this->info("Have you calculated element errors twice with solutions_for_adaptivity == true?");
        }

        // value on base element.
        if(base_element->active)
        {
          for (int son = 0; son < H2D_MAX_ELEMENT_SONS; son++)
          {
            //set element
            rsln->set_active_element(base_element);
            rsln->push_transform(son);

            //obtain precalculated values
            rval[son] = precalc_ref_solution(son, rsln, e, H2DRS_INTR_GIP_ORDER);
          }
        }
        else
        {
          for (int son = 0; son < H2D_MAX_ELEMENT_SONS; son++)
          {
            //set element
            Element* e = base_element->sons[son];
            assert(e != NULL);
            rsln->set_active_element(e);

            //obtain precalculated values
            rval[son] = precalc_ref_solution(son, rsln, e, H2DRS_INTR_GIP_ORDER);
          }
        }

        //retrieve transformations
        Trf* trfs = NULL;
        int num_noni_trfs = 0;
        if(mode == HERMES_MODE_TRIANGLE)
        {
          trfs = tri_trf;
          num_noni_trfs = H2D_TRF_TRI_NUM;
        }
        else
        {
          trfs = quad_trf;
          num_noni_trfs = H2D_TRF_QUAD_NUM;
        }

        // precalculate values of shape functions
        TrfShape empty_shape_vals;

        if(!cached_shape_vals_valid[mode])
        {
          #pragma omp critical (cached_shape_vals_valid)
          if(!cached_shape_vals_valid[mode])
          {
            precalc_ortho_shapes(gip_points, num_gip_points, trfs, num_noni_trfs, this->shape_indices[mode], this->max_shape_inx[mode], cached_shape_ortho_vals[mode], mode);
            precalc_shapes(gip_points, num_gip_points, trfs, num_noni_trfs, this->shape_indices[mode], this->max_shape_inx[mode], cached_shape_vals[mode], mode);
            cached_shape_vals_valid[mode] = true;
          }
        }

        //issue a warning if ortho values are defined and the selected cand_list might benefit from them, but cannot because elements do not have uniform orders
        if(!warn_uniform_orders && mode == HERMES_MODE_QUAD && !cached_shape_ortho_vals[mode][H2D_TRF_IDENTITY].empty())
        {
          warn_uniform_orders = true;
          if(this->cand_list == H2D_H_ISO || this->cand_list == H2D_H_ANISO || this->cand_list == H2D_P_ISO || this->cand_list == H2D_HP_ISO || this->cand_list == H2D_HP_ANISO_H)
          {
            this->warn_if(!info_h.uniform_orders || !info_aniso.uniform_orders || !info_p.uniform_orders, "Possible inefficiency: %s might be more efficient if the input mesh contains only elements with uniform orders.", get_cand_list_str(this->cand_list));
          }
        }

        TrfShape& svals = cached_shape_vals[mode];
        TrfShape& ortho_svals = cached_shape_ortho_vals[mode];

        //H-candidates
        if(!info_h.is_empty())
        {
          if(base_element->active)
          {
            Trf* sub_trfs[4] = { &trfs[0], &trfs[1], &trfs[2], &trfs[3] };
            Hermes::vector<TrfShapeExp>* p_trf_svals[4] = { &svals[0], &svals[1], &svals[2], &svals[3] };
            Hermes::vector<TrfShapeExp>* p_trf_ortho_svals[4] = { &ortho_svals[0], &ortho_svals[1], &ortho_svals[2], &ortho_svals[3] };
            for(int son = 0; son < H2D_MAX_ELEMENT_SONS; son++)
            {
              Scalar **sub_rval[1] = { rval[son] };
              calc_error_cand_element(mode, gip_points, num_gip_points
                , 1, &base_element, &sub_trfs[son], sub_rval
                , &p_trf_svals[son], &p_trf_ortho_svals[son]
                , info_h, herr[son]);
            }
          }
          else
          {
            Trf* p_trf_identity[1] = { &trfs[H2D_TRF_IDENTITY] };
            Hermes::vector<TrfShapeExp>* p_trf_svals[1] = { &svals[H2D_TRF_IDENTITY] };
            Hermes::vector<TrfShapeExp>* p_trf_ortho_svals[1] = { &ortho_svals[H2D_TRF_IDENTITY] };
            for(int son = 0; son < H2D_MAX_ELEMENT_SONS; son++)
            {
              Scalar **sub_rval[1] = { rval[son] };
              calc_error_cand_element(mode, gip_points, num_gip_points
                , 1, &base_element->sons[son], p_trf_identity, sub_rval
                , p_trf_svals, p_trf_ortho_svals
                , info_h, herr[son]);
            }
          }
        }

        //ANISO-candidates
        if(!info_aniso.is_empty())
        {
          if(base_element->active)
          {
            const int tr[4][2] = { {0, 1}, {3, 2}, {0, 3}, {1, 2} };
            for(int version = 0; version < 4; version++)
            {
              Trf* sub_trfs[2] = { &trfs[tr[version][0]], &trfs[tr[version][1]] };
              Element* sub_domains[2] = { base_element, base_element };
              Scalar **sub_rval[2] = { rval[tr[version][0]], rval[tr[version][1]] };
              Hermes::vector<TrfShapeExp>* sub_svals[2] = { &svals[tr[version][0]], &svals[tr[version][1]] };
              Hermes::vector<TrfShapeExp>* sub_ortho_svals[2] = { &ortho_svals[tr[version][0]], &ortho_svals[tr[version][1]] };
              calc_error_cand_element(mode, gip_points, num_gip_points
                , 2, sub_domains, sub_trfs, sub_rval
                , sub_svals, sub_ortho_svals
                , info_aniso, anisoerr[version]);
            }
          }
          else
          {
            const int sons[4][2] = { {0, 1}, {3, 2}, {0, 3}, {1, 2} }; //indices of sons for sub-areas
            const int tr[4][2] = { {6, 7}, {6, 7}, {4, 5}, {4, 5} }; //indices of ref. domain transformations for sub-areas
            for(int version = 0; version < 4; version++)
            { // 2 elements for vertical split, 2 elements for horizontal split
              Trf* sub_trfs[2] = { &trfs[tr[version][0]], &trfs[tr[version][1]] };
              Element* sub_domains[2] = { base_element->sons[sons[version][0]], base_element->sons[sons[version][1]] };
              Scalar **sub_rval[2] = { rval[sons[version][0]], rval[sons[version][1]] };
              Hermes::vector<TrfShapeExp>* sub_svals[2] = { &svals[tr[version][0]], &svals[tr[version][1]] };
              Hermes::vector<TrfShapeExp>* sub_ortho_svals[2] = { &ortho_svals[tr[version][0]], &ortho_svals[tr[version][1]] };
              calc_error_cand_element(mode, gip_points, num_gip_points
                , 2, sub_domains, sub_trfs, sub_rval
                , sub_svals, sub_ortho_svals
                , info_aniso, anisoerr[version]);
            }
          }
        }

        //P-candidates
        if(!info_p.is_empty())
        {
          if(base_element->active)
          {
            Trf* sub_trfs[4] = { &trfs[0], &trfs[1], &trfs[2], &trfs[3] };
            Scalar **sub_rval[4] = { rval[0], rval[1], rval[2], rval[3] };
            Hermes::vector<TrfShapeExp>* sub_svals[4] = { &svals[0], &svals[1], &svals[2], &svals[3] };
            Hermes::vector<TrfShapeExp>* sub_ortho_svals[4] = { &ortho_svals[0], &ortho_svals[1], &ortho_svals[2], &ortho_svals[3] };
            Element* sub_domains[4] = { base_element, base_element, base_element, base_element };

            calc_error_cand_element(mode, gip_points, num_gip_points
              , 4, sub_domains, sub_trfs, sub_rval
              , sub_svals, sub_ortho_svals
              , info_p, perr);
          }
          else
          {
            Trf* sub_trfs[4] = { &trfs[0], &trfs[1], &trfs[2], &trfs[3] };
            Scalar **sub_rval[4] = { rval[0], rval[1], rval[2], rval[3] };
            Hermes::vector<TrfShapeExp>* sub_svals[4] = { &svals[0], &svals[1], &svals[2], &svals[3] };
            Hermes::vector<TrfShapeExp>* sub_ortho_svals[4] = { &ortho_svals[0], &ortho_svals[1], &ortho_svals[2], &ortho_svals[3] };

            calc_error_cand_element(mode, gip_points, num_gip_points
              , 4, base_element->sons, sub_trfs, sub_rval
              , sub_svals, sub_ortho_svals
              , info_p, perr);
          }
        }
      }

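      // calc_error_cand_element projects the reference solution onto the polynomial
      // space of a single candidate element (possibly composed of several sub-domains)
      // for every order combination allowed by 'info'. For each order it gathers the
      // active shape functions, assembles the right-hand side from the reference
      // solution, and either solves the projection system via LU decomposition
      // (ludcmp/lubksb) or, when orthonormalized shape values are available and the
      // order is isotropic, uses the right-hand side directly as the expansion
      // coefficients. The squared projection error per (order_h, order_v) is stored
      // in 'errors_squared'; the norm-specific pieces (build_projection_matrix,
      // evaluate_rhs_subdomain, evaluate_error_squared_subdomain) are provided by the
      // derived selector.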
      template<typename Scalar>
      void ProjBasedSelector<Scalar>::calc_error_cand_element(const ElementMode2D mode
        , double3* gip_points, int num_gip_points
        , const int num_sub, Element** sub_domains, Trf** sub_trfs, Scalar*** sub_rvals
        , Hermes::vector<TrfShapeExp>** sub_nonortho_svals, Hermes::vector<TrfShapeExp>** sub_ortho_svals
        , const typename OptimumSelector<Scalar>::CandsInfo& info
        , CandElemProjError errors_squared
        )
      {
        //allocate space
        int max_num_shapes = this->next_order_shape[mode][this->current_max_order];
        Scalar* right_side = new Scalar[max_num_shapes];
        int* shape_inxs = new int[max_num_shapes];
        int* indx = new int[max_num_shapes]; //solver data
        double* d = new double[max_num_shapes]; //solver data
        double** proj_matrix = new_matrix<double>(max_num_shapes, max_num_shapes);
        ProjMatrixCache& proj_matrices = proj_matrix_cache[mode];
        Hermes::vector<typename OptimumSelector<Scalar>::ShapeInx>& full_shape_indices = this->shape_indices[mode];

        //check whether ortho-svals are available
        bool ortho_svals_available = true;
        for(int i = 0; i < num_sub && ortho_svals_available; i++)
          ortho_svals_available &= !sub_ortho_svals[i]->empty();

        //cleanup of the cache
        for(int i = 0; i <= this->max_shape_inx[mode]; i++)
        {
          nonortho_rhs_cache[i] = ValueCacheItem<Scalar>();
          ortho_rhs_cache[i] = ValueCacheItem<Scalar>();
        }

        //calculate for all orders
        double sub_area_corr_coef = 1.0 / num_sub;
        OrderPermutator order_perm(info.min_quad_order, info.max_quad_order, mode == HERMES_MODE_TRIANGLE || info.uniform_orders);
        do
        {
          int quad_order = order_perm.get_quad_order();
          int order_h = H2D_GET_H_ORDER(quad_order), order_v = H2D_GET_V_ORDER(quad_order);

          //build a list of shape indices from the full list
          int num_shapes = 0;
          unsigned int inx_shape = 0;
          while (inx_shape < full_shape_indices.size())
          {
            typename OptimumSelector<Scalar>::ShapeInx& shape = full_shape_indices[inx_shape];
            if(order_h >= shape.order_h && order_v >= shape.order_v)
            {
              if(num_shapes >= max_num_shapes)
                throw Exceptions::Exception("more shapes than predicted, possible inconsistency");
              shape_inxs[num_shapes] = shape.inx;
              num_shapes++;
            }
            inx_shape++;
          }

          //continue only if there are shapes to process
          if(num_shapes == 0)
            continue;

          bool use_ortho = ortho_svals_available && order_perm.get_order_h() == order_perm.get_order_v();

          //select a cache
          Hermes::vector< ValueCacheItem<Scalar> >& rhs_cache = use_ortho ? ortho_rhs_cache : nonortho_rhs_cache;
          Hermes::vector<TrfShapeExp>** sub_svals = use_ortho ? sub_ortho_svals : sub_nonortho_svals;

          //calculate projection matrix iff no ortho is used
          if(!use_ortho)
          {
            if(proj_matrices[order_h][order_v] == NULL)
              proj_matrices[order_h][order_v] = build_projection_matrix(gip_points, num_gip_points, shape_inxs, num_shapes, mode);
            copy_matrix(proj_matrix, proj_matrices[order_h][order_v], num_shapes, num_shapes); //copy projection matrix because original matrix will be modified
          }

          //build right side (fill cache values that are missing)
          for(int inx_sub = 0; inx_sub < num_sub; inx_sub++)
          {
            Element* this_sub_domain = sub_domains[inx_sub];
            ElemSubTrf this_sub_trf = { sub_trfs[inx_sub], 1 / sub_trfs[inx_sub]->m[0], 1 / sub_trfs[inx_sub]->m[1] };
            ElemGIP this_sub_gip = { gip_points, num_gip_points, sub_rvals[inx_sub] };
            Hermes::vector<TrfShapeExp>& this_sub_svals = *(sub_svals[inx_sub]);

            for(int k = 0; k < num_shapes; k++)
            {
              int shape_inx = shape_inxs[k];
              ValueCacheItem<Scalar>& shape_rhs_cache = rhs_cache[shape_inx];
              if(!shape_rhs_cache.is_valid())
              {
                TrfShapeExp empty_sub_vals;
                ElemSubShapeFunc this_sub_shape = { shape_inx, this_sub_svals.empty() ? empty_sub_vals : this_sub_svals[shape_inx] };
                shape_rhs_cache.set(shape_rhs_cache.get() + evaluate_rhs_subdomain(this_sub_domain, this_sub_gip, this_sub_trf, this_sub_shape));
              }
            }
          }

          //copy values from cache and apply area correction coefficient
          for(int k = 0; k < num_shapes; k++)
          {
            ValueCacheItem<Scalar>& rhs_cache_value = rhs_cache[shape_inxs[k]];
            right_side[k] = sub_area_corr_coef * rhs_cache_value.get();
            rhs_cache_value.mark();
          }

          //solve iff no ortho is used
          if(!use_ortho)
          {
            ludcmp(proj_matrix, num_shapes, indx, d);
            lubksb<Scalar>(proj_matrix, num_shapes, indx, right_side);
          }

          //calculate error
          double error_squared = 0;
          for(int inx_sub = 0; inx_sub < num_sub; inx_sub++)
          {
            Element* this_sub_domain = sub_domains[inx_sub];
            ElemSubTrf this_sub_trf = { sub_trfs[inx_sub], 1 / sub_trfs[inx_sub]->m[0], 1 / sub_trfs[inx_sub]->m[1] };
            ElemGIP this_sub_gip = { gip_points, num_gip_points, sub_rvals[inx_sub] };
            ElemProj elem_proj = { shape_inxs, num_shapes, *(sub_svals[inx_sub]), right_side, quad_order };

            error_squared += evaluate_error_squared_subdomain(this_sub_domain, this_sub_gip, this_sub_trf, elem_proj);
          }
          errors_squared[order_h][order_v] = error_squared * sub_area_corr_coef; //apply area correction coefficient
        }
        while (order_perm.next());

        delete [] proj_matrix;
        delete [] right_side;
        delete [] shape_inxs;
        delete [] indx;
        delete [] d;
      }

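      // A rough usage sketch (not part of this file): a concrete selector derived from
      // ProjBasedSelector, e.g. H1ProjBasedSelector, is typically handed to the adaptivity
      // loop. Exact constructor arguments and the Adapt interface may differ between
      // Hermes versions, and THRESHOLD/STRATEGY stand for user-chosen parameters, so
      // treat the snippet below as an illustration only:
      //
      //   H1ProjBasedSelector<double> selector(H2D_HP_ANISO, 1.0, H2DRS_DEFAULT_ORDER);
      //   Adapt<double> adaptivity(&space);
      //   adaptivity.calc_err_est(&sln, &ref_sln);
      //   bool done = adaptivity.adapt(&selector, THRESHOLD, STRATEGY);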
      template class HERMES_API ProjBasedSelector<double>;
      template class HERMES_API ProjBasedSelector<std::complex<double> >;
    }
  }
}