4 #include "discrete_problem.h"
6 #include "element_to_refine.h"
7 #include "optimum_selector.h"
// Bit-mask of all shape-function types (vertex, edge variants, bubble).
// NOTE(review): the OR-chain is not parenthesized; in an expression such as
// `x & H2DST_ANY`, `&` binds tighter than `|`, so only the first term would be
// masked. Wrap the whole expansion in parentheses when restoring this file.
9 #define H2DST_ANY H2DST_VERTEX | H2DST_HORIZ_EDGE | H2DST_VERT_EDGE | H2DST_TRI_EDGE | H2DST_BUBBLE
// NOTE(review): this file is a corrupted listing — the original source's line
// numbers are fused into the text (the leading "15", "30", ...) and many lines
// are missing between them. Fragments below cannot compile as-is; recover them
// from the pristine source before editing logic.
15 namespace RefinementSelectors
// Fragments: these appear to be the `default:` branches of switch statements
// over a candidate-list / adapt-type value — presumably from small helper
// functions whose bodies are mostly missing here; TODO confirm.
30 throw Hermes::Exceptions::Exception(
"Invalid adapt type %d.", cand_list);
47 default:
throw Hermes::Exceptions::Exception(
"Invalid adapt type %d.", cand_list);
// Unreachable after the throw; presumably kept to silence compiler warnings.
return false;
63 default:
throw Hermes::Exceptions::Exception(
"Invalid adapt type %d.", cand_list);
return false;
// Fragment: OptimumSelector<Scalar> constructor (excerpt; parameter list,
// initializer list, and loop bodies are partially missing from this listing).
67 template<
typename Scalar>
69 max_order,
Shapeset* shapeset,
const Range& vertex_order,
const
70 Range& edge_bubble_order) :
// Defaults: symmetric-mesh optimization on, exponential-DOF scoring off.
72 opt_symmetric_mesh(true),
73 opt_apply_exp_dof(false),
// Presumably guarded by a null-check of `shapeset` (3rd ctor argument) — the
// surrounding if() is missing from this excerpt; TODO confirm.
79 throw Exceptions::NullException(3);
// Allocation of the num_shapes[mode][order_h][order_v][type] counter table;
// intermediate levels of the nested loops are missing here.
82 for(
int i = 0; i < 2; i++)
87 num_shapes[i][j] =
new int*[H2DRS_MAX_ORDER + 2];
88 for(
int k = 0; k < H2DRS_MAX_ORDER + 2; k++)
// Fragment: ~OptimumSelector — tears down the four-level `num_shapes` array
// allocated in the constructor, releasing levels innermost-first so no pointer
// is used after its parent array is freed. (Excerpt; some loop lines missing.)
101 template<
typename Scalar>
106 for(
int i = 0; i < 2; i++)
110 for(
int k = 0; k < H2DRS_MAX_ORDER + 2; k++)
112 delete [] num_shapes[i][j][k];
114 delete [] num_shapes[i][j];
116 delete [] num_shapes[i];
118 delete [] num_shapes;
// Fragment: appears to be add_bubble_shape_index(order_h, order_v, ...) —
// registers every not-yet-seen bubble shape of the given quad order and
// increments per-order and total shape counters. (Excerpt; braces missing.)
122 template<
typename Scalar>
125 int quad_order = H2D_MAKE_QUAD_ORDER(order_h, order_v);
126 const int num_bubbles = shapeset->get_num_bubbles(quad_order, mode);
127 int* bubble_inxs = shapeset->get_bubble_indices(quad_order, mode);
128 for(
int j = 0; j < num_bubbles; j++)
130 int inx_bubble = bubble_inxs[j];
// Skip bubble indices already recorded under another order combination.
131 if(used_shape_index.find(inx_bubble) == used_shape_index.end())
133 used_shape_index[inx_bubble] =
true;
134 indices.push_back(
ShapeInx(order_h, order_v, inx_bubble, H2DST_BUBBLE));
// Count this bubble for every strictly higher (order_h, order_v) pair ...
135 for(
int order_h_i = order_h+1; order_h_i <
H2DRS_MAX_ORDER + 2; order_h_i++)
136 for(
int order_v_i = order_v+1; order_v_i < H2DRS_MAX_ORDER + 2; order_v_i++)
138 num_shapes[mode][order_h_i][order_v_i][H2DSI_BUBBLE]++;
139 num_shapes[mode][order_h_i][order_v_i][H2DSI_ANY]++;
// ... and in slot [0][0], which appears to hold grand totals — TODO confirm.
141 num_shapes[mode][0][0][H2DSI_BUBBLE]++;
142 num_shapes[mode][0][0][H2DSI_ANY]++;
// Fragment: shape-index table construction for one element mode — walks the
// envelope of the vertex and edge/bubble order ranges, appending vertex, edge
// (horizontal/vertical for quads, single kind for triangles) and bubble shapes
// to `indices` while maintaining the num_shapes counters and the per-order
// `next_order` cut points. (Excerpt; many structural lines are missing.)
147 template<
typename Scalar>
150 Hermes::vector<ShapeInx> &indices = shape_indices[mode];
151 int* next_order = this->next_order_shape[mode];
152 int& max_shape_inx = this->max_shape_inx[mode];
153 int num_edges = (mode == HERMES_MODE_QUAD) ? 4 : 3;
154 bool &has_vertex = has_vertex_shape[mode];
155 bool &has_edge = has_edge_shape[mode];
156 bool &has_bubble = has_bubble_shape[mode];
161 has_vertex = has_edge = has_bubble =
false;
164 Range order_range = Range::make_envelope(vertex_order, edge_bubble_order);
// Tracks shapeset indices already appended so a shape is registered once.
167 std::map<int, bool> used_shape_index;
171 int examined_shape = 0;
172 for(
int i = order_range.lower(); i <= order_range.upper(); i++)
// --- vertex shapes ---
175 if(vertex_order.is_in_closed(i))
177 for (
int j = 0; j < num_edges; j++)
179 int inx = shapeset->get_vertex_index(j, mode);
182 used_shape_index[inx] =
true;
183 indices.push_back(
ShapeInx(1, 1, inx, H2DST_VERTEX));
184 if(mode == HERMES_MODE_QUAD)
187 for(
int order_v_i = 2; order_v_i < H2DRS_MAX_ORDER + 2; order_v_i++)
189 num_shapes[mode][order_h_i][order_v_i][H2DSI_VERTEX]++;
190 num_shapes[mode][order_h_i][order_v_i][H2DSI_ANY]++;
192 num_shapes[mode][0][0][H2DSI_VERTEX]++;
193 num_shapes[mode][0][0][H2DSI_ANY]++;
199 num_shapes[mode][order_h_i][0][H2DSI_VERTEX]++;
200 num_shapes[mode][order_h_i][0][H2DSI_ANY]++;
202 num_shapes[mode][0][0][H2DSI_VERTEX]++;
203 num_shapes[mode][0][0][H2DSI_ANY]++;
// --- edge shapes ---
211 if(edge_bubble_order.is_in_closed(i))
214 if(mode == HERMES_MODE_QUAD)
216 for (
int j = 0; j < num_edges; j++)
218 int inx = shapeset->get_edge_index(j, 0, i, HERMES_MODE_QUAD);
221 used_shape_index[inx] =
true;
224 indices.push_back(
ShapeInx(i, 0, inx, H2DST_HORIZ_EDGE));
226 for(
int order_v_i = 1; order_v_i < H2DRS_MAX_ORDER + 2; order_v_i++)
// NOTE(review): these two counters index num_shapes' last dimension with
// H2DST_HORIZ_EDGE, while every parallel branch uses an H2DSI_* constant
// (e.g. H2DSI_VERT_EDGE below). Likely should be H2DSI_HORIZ_EDGE — verify
// against the enum definitions in the original headers.
228 num_shapes[mode][order_h_i][order_v_i][H2DST_HORIZ_EDGE]++;
229 num_shapes[mode][order_h_i][order_v_i][H2DSI_ANY]++;
231 num_shapes[mode][0][0][H2DST_HORIZ_EDGE]++;
232 num_shapes[mode][0][0][H2DSI_ANY]++;
236 indices.push_back(
ShapeInx(0, i, inx, H2DST_VERT_EDGE));
238 for(
int order_v_i = i+1; order_v_i < H2DRS_MAX_ORDER + 2; order_v_i++)
240 num_shapes[mode][order_h_i][order_v_i][H2DSI_VERT_EDGE]++;
241 num_shapes[mode][order_h_i][order_v_i][H2DSI_ANY]++;
243 num_shapes[mode][0][0][H2DSI_VERT_EDGE]++;
244 num_shapes[mode][0][0][H2DSI_ANY]++;
// --- triangle edge shapes ---
252 for (
int j = 0; j < num_edges; j++)
254 int inx = shapeset->get_edge_index(j, 0, i, HERMES_MODE_TRIANGLE);
257 used_shape_index[inx] =
true;
258 indices.push_back(
ShapeInx(i, i, inx, H2DST_TRI_EDGE));
261 num_shapes[mode][order_h_i][0][H2DSI_TRI_EDGE]++;
262 num_shapes[mode][order_h_i][0][H2DSI_ANY]++;
264 num_shapes[mode][0][0][H2DSI_TRI_EDGE]++;
265 num_shapes[mode][0][0][H2DSI_ANY]++;
// --- bubble shapes (quad: all mixed-order combinations; triangle: direct) ---
272 if(mode == HERMES_MODE_QUAD)
278 unsigned num_indices_prev = indices.size();
279 for(
int order_other = edge_bubble_order.lower(); order_other <= order; order_other++)
281 add_bubble_shape_index(order, order_other, used_shape_index, indices, mode);
282 add_bubble_shape_index(order_other, order, used_shape_index, indices, mode);
286 if(num_indices_prev < indices.size())
291 int num_bubbles = shapeset->get_num_bubbles(order, mode);
292 int* bubble_inxs = shapeset->get_bubble_indices(order, mode);
293 for(
int j = 0; j < num_bubbles; j++)
295 int inx_bubble = bubble_inxs[j];
296 if(used_shape_index.find(inx_bubble) == used_shape_index.end())
298 used_shape_index[inx_bubble] =
true;
299 indices.push_back(
ShapeInx(order, order, inx_bubble, H2DST_BUBBLE));
300 for(
int order_h_i = order+1; order_h_i <
H2DRS_MAX_ORDER + 2; order_h_i++)
302 num_shapes[mode][order_h_i][0][H2DSI_BUBBLE]++;
303 num_shapes[mode][order_h_i][0][H2DSI_ANY]++;
305 num_shapes[mode][0][0][H2DSI_BUBBLE]++;
306 num_shapes[mode][0][0][H2DSI_ANY]++;
// Record where shapes of order <= i end, and update the maximum shape index
// over the newly examined entries.
313 next_order[i] = (int)indices.size();
316 while(examined_shape < next_order[i])
318 max_shape_inx = std::max(max_shape_inx, indices[examined_shape].inx);
323 next_order[i] = (int)indices.size();
// Fragment: appears to count/inspect shapes matching an allowed type bit-mask.
// `full_eval` becomes true when the mode actually has shapes of any requested
// kind; the loop then filters shape_indices by type. (Excerpt; body truncated.)
327 template<
typename Scalar>
331 bool full_eval =
false;
332 if((allowed_type_mask & H2DST_VERTEX) != 0)
333 full_eval |= has_vertex_shape[mode];
334 if((allowed_type_mask & (H2DST_HORIZ_EDGE | H2DST_VERT_EDGE | H2DST_TRI_EDGE)) != 0)
335 full_eval |= has_edge_shape[mode];
336 if((allowed_type_mask & H2DST_BUBBLE) != 0)
337 full_eval |= has_bubble_shape[mode];
342 Hermes::vector<ShapeInx>& shapes = shape_indices[mode];
344 typename Hermes::vector<ShapeInx>::const_iterator shape = shapes.begin();
345 while (shape != shapes.end())
347 if(((
int)shape->type & allowed_type_mask) != 0)
// Fragment: append_candidates_split — enumerates all order permutations for the
// sons of one refinement split (via OrderPermutator) and appends a Cand for
// each combination; odometer-style carry across quad_perms. (Excerpt.)
360 template<
typename Scalar>
// Negative quad orders seem to act as a "no candidates" sentinel — confirm.
364 if(last_quad_order < 0 || start_quad_order < 0)
370 const int num_sons = get_refin_sons(split);
375 for(
int i = 0; i < num_sons; i++)
377 quad_orders[i] = start_quad_order;
378 quad_perms[i] =
OrderPermutator(start_quad_order, last_quad_order, iso_p, &quad_orders[i]);
388 candidates.push_back(
Cand(split, quad_orders));
389 }
while (quad_perms[0].next());
392 quad_perms[0].
reset();
// Carry: advance the first son permutator that can still step, resetting the
// exhausted ones before it.
396 while (inx_son < num_sons && !quad_perms[inx_son].next())
398 quad_perms[inx_son].
reset();
401 if(inx_son >= num_sons)
// Fragment: candidate creation — builds P-, H- and anisotropic-refinement
// candidates for element `e`, capped by the supplied max P / max H-adapt quad
// orders and the current minimum order. (Excerpt; switch headers and several
// branches are missing from this listing.)
406 template<
typename Scalar>
409 int order_h =
H2D_GET_H_ORDER(quad_order), order_v = H2D_GET_V_ORDER(quad_order);
410 int max_p_order_h =
H2D_GET_H_ORDER(max_p_quad_order), max_p_order_v = H2D_GET_V_ORDER(max_p_quad_order);
411 int max_ha_order_h =
H2D_GET_H_ORDER(max_ha_quad_order), max_ha_order_v = H2D_GET_V_ORDER(max_ha_quad_order);
412 bool tri = e->is_triangle();
// P-candidates: start one order below current (clamped to current_min_order).
422 int start_order_h = std::max(current_min_order, order_h - 1), start_order_v = std::max(current_min_order, order_v - 1);
423 int start_quad_order = H2D_MAKE_QUAD_ORDER(start_order_h, start_order_v);
428 case H2D_H_ANISO: last_quad_order = start_quad_order;
break;
433 append_candidates_split(quad_order, last_quad_order, H2D_REFINEMENT_P, tri || iso_p);
// H-candidates: bounded by max_ha order and H2DRS_MAX_ORDER_INC increments.
437 start_order_h = std::max(current_min_order, order_h - 1), start_order_v = std::max(current_min_order, order_v - 1);
438 start_quad_order = H2D_MAKE_QUAD_ORDER(start_order_h, start_order_v);
439 last_quad_order = H2D_MAKE_QUAD_ORDER(std::min(max_ha_order_h, std::min(start_order_h +
H2DRS_MAX_ORDER_INC, order_h)), std::min(max_ha_order_v, std::min(start_order_v +
H2DRS_MAX_ORDER_INC, order_v)));
444 last_quad_order = start_quad_order = quad_order;
break;
450 append_candidates_split(start_quad_order, last_quad_order, H2D_REFINEMENT_H, tri || iso_p);
// Anisotropic candidates (quads only, presumably): separate horizontal and
// vertical split order ranges.
457 int start_quad_order_hz = H2D_MAKE_QUAD_ORDER(order_h, std::max(current_min_order, (order_v - 1)));
459 int start_quad_order_vt = H2D_MAKE_QUAD_ORDER(std::max(current_min_order, (order_h - 1)), order_v);
464 last_quad_order_hz = start_quad_order_hz = quad_order;
465 last_quad_order_vt = start_quad_order_vt = quad_order;
// Force isotropic orders by collapsing each (h, v) pair to its minimum.
470 int order = std::min(
H2D_GET_H_ORDER(start_quad_order_hz), H2D_GET_V_ORDER(start_quad_order_hz));
471 start_quad_order_hz = H2D_MAKE_QUAD_ORDER(order, order);
472 order = std::min(
H2D_GET_H_ORDER(start_quad_order_vt), H2D_GET_V_ORDER(start_quad_order_vt));
473 start_quad_order_vt = H2D_MAKE_QUAD_ORDER(order, order);
475 order = std::min(
H2D_GET_H_ORDER(last_quad_order_hz), H2D_GET_V_ORDER(last_quad_order_hz));
476 last_quad_order_hz = H2D_MAKE_QUAD_ORDER(order, order);
477 order = std::min(
H2D_GET_H_ORDER(last_quad_order_vt), H2D_GET_V_ORDER(last_quad_order_vt));
478 last_quad_order_vt = H2D_MAKE_QUAD_ORDER(order, order);
480 append_candidates_split(start_quad_order_hz, last_quad_order_hz, H2D_REFINEMENT_ANISO_H, iso_p);
481 append_candidates_split(start_quad_order_vt, last_quad_order_vt, H2D_REFINEMENT_ANISO_V, iso_p);
// Fragment: walks all candidates and dispatches per split type to an info
// accumulator (info_h / info_p / info_aniso), tracking among other things
// whether element orders are anisotropic. (Excerpt; loop tail missing.)
485 template<
typename Scalar>
488 typename Hermes::vector<Cand>::const_iterator cand = candidates.begin();
489 while (cand != candidates.end())
492 if(cand->split == H2D_REFINEMENT_H) info = &info_h;
493 else if(cand->split == H2D_REFINEMENT_P) info = &info_p;
494 else if(cand->split == H2D_REFINEMENT_ANISO_H || cand->split == H2D_REFINEMENT_ANISO_V) info = &info_aniso;
495 else {
throw Hermes::Exceptions::Exception(
"Invalid candidate type: %d.", cand->split); };
498 const int num_elems = cand->get_num_elems();
499 for(
int i = 0; i < num_elems; i++)
501 int elem_order_h =
H2D_GET_H_ORDER(cand->p[i]), elem_order_v = H2D_GET_V_ORDER(cand->p[i]);
// Detects anisotropic son orders — the consequent is missing from this excerpt.
502 if(elem_order_h != elem_order_v)
// Fragment: DOF estimation per candidate — for each candidate, sums shape
// counts from the num_shapes tables for its split type, subtracting shared
// edge/vertex functions counted twice across son elements. Two switch blocks
// appear here (triangle path, then quad path); their `switch(...)` headers and
// several branches are missing from this listing.
518 template<
typename Scalar>
521 bool tri = e->is_triangle();
523 for (
unsigned i = 0; i < candidates.size(); i++)
525 Cand& c = candidates[i];
// --- triangle branch (excerpt) ---
529 case H2D_REFINEMENT_H:
// `central = 3` presumably indexes the central son of an H-split triangle.
531 const int central = 3;
541 if(has_vertex_shape[HERMES_MODE_TRIANGLE])
546 case H2D_REFINEMENT_P:
551 throw Hermes::Exceptions::Exception(
"Unknown split type \"%d\" at candidate %d (element #%d)", c.
split, i, e->
id);
// --- quad branch (excerpt) ---
557 case H2D_REFINEMENT_H:
560 c.
dofs += num_shapes[HERMES_MODE_QUAD][
H2D_GET_H_ORDER(c.
p[j]) + 1][H2D_GET_V_ORDER(c.
p[j]) + 1][H2DSI_ANY];
// Subtract edge functions shared by son pairs (halved because each shared
// edge is counted in both sons) — TODO confirm against full source.
561 for(
int j = 0; j < 2; j++) {
562 c.
dofs -= num_shapes[HERMES_MODE_QUAD][
H2DRS_ORDER_ANY + 1][std::min(H2D_GET_V_ORDER(c.
p[2*j]), H2D_GET_V_ORDER(c.
p[2*j + 1])) + 1][H2DSI_VERT_EDGE] / 2;
565 if(has_vertex_shape[HERMES_MODE_QUAD])
569 case H2D_REFINEMENT_ANISO_H:
570 c.
dofs = num_shapes[HERMES_MODE_QUAD][
H2D_GET_H_ORDER(c.
p[0]) + 1][H2D_GET_V_ORDER(c.
p[0]) + 1][H2DSI_ANY];
571 c.
dofs += num_shapes[HERMES_MODE_QUAD][
H2D_GET_H_ORDER(c.
p[1]) + 1][H2D_GET_V_ORDER(c.
p[1]) + 1][H2DSI_ANY];
573 if(has_vertex_shape[HERMES_MODE_QUAD])
577 case H2D_REFINEMENT_ANISO_V:
578 c.
dofs = num_shapes[HERMES_MODE_QUAD][
H2D_GET_H_ORDER(c.
p[0]) + 1][H2D_GET_V_ORDER(c.
p[0]) + 1][H2DSI_ANY];
579 c.
dofs += num_shapes[HERMES_MODE_QUAD][
H2D_GET_H_ORDER(c.
p[1]) + 1][H2D_GET_V_ORDER(c.
p[1]) + 1][H2DSI_ANY];
580 c.
dofs -= num_shapes[HERMES_MODE_QUAD][
H2DRS_ORDER_ANY + 1][std::min(H2D_GET_V_ORDER(c.
p[0]), H2D_GET_V_ORDER(c.
p[1])) + 1][H2DSI_VERT_EDGE] / 2;
581 if(has_vertex_shape[HERMES_MODE_QUAD])
585 case H2D_REFINEMENT_P:
586 c.
dofs = num_shapes[HERMES_MODE_QUAD][
H2D_GET_H_ORDER(c.
p[0]) + 1][H2D_GET_V_ORDER(c.
p[0]) + 1][H2DSI_ANY];
590 throw Hermes::Exceptions::Exception(
"Unknown split type \"%d\" at candidate %d", c.
split, i);
// Fragment: candidate evaluation driver — errors first, then DOF estimates,
// then scores (score depends on both error and dofs). (Excerpt; signature and
// braces missing from this listing.)
596 template<
typename Scalar>
599 evaluate_cands_error(e, rsln, avg_error, dev_error);
601 evaluate_cands_dof(e, rsln);
603 evaluate_cands_score(e);
// Fragment: score computation — candidate[0] is the unrefined reference; each
// other candidate scores the error-decrease (in decades, via log10) per unit
// of DOF growth, with DOF growth optionally exponentiated per-term when
// opt_apply_exp_dof is set. (Excerpt; guard around the division is missing.)
606 template<
typename Scalar>
610 Cand& unrefined = candidates[0];
611 const int num_cands = (int)candidates.size();
613 const double unrefined_dofs_exp = std::pow(unrefined.
dofs, conv_exp);
614 for (
int i = 1; i < num_cands; i++)
616 Cand& cand = candidates[i];
619 double delta_dof_exp = std::pow(cand.
dofs - unrefined.
dofs, conv_exp);
620 if(opt_apply_exp_dof)
621 delta_dof_exp = std::pow(cand.
dofs, conv_exp) - unrefined_dofs_exp;
622 candidates[i].score = (log10(unrefined.
error) - log10(cand.
error)) / delta_dof_exp;
// Fallback score of 0 — the condition selecting it is missing from this excerpt.
625 candidates[i].score = 0;
// Fragment: comparator for sorting candidates by descending score (used with
// std::sort below). Strict `>` keeps it a valid strict weak ordering.
629 template<
typename Scalar>
632 return a.score > b.score;
// Fragment: best-candidate selection — sorts candidates[1..] by descending
// score, breaks near-ties (score difference below H2DRS_SCORE_DIFF_ZERO) when
// symmetric-mesh optimization is on, and separately finds the best H-split
// candidate. Outputs the chosen indices via the two out-pointers. (Excerpt.)
635 template<
typename Scalar>
641 const int num_cands = (int)candidates.size();
// Index 0 is the unrefined reference and is excluded from sorting.
643 std::sort(candidates.begin() + 1, candidates.end(), compare_cand_score);
646 int imax = 1, h_imax = 1;
647 if(opt_symmetric_mesh) {
649 while ((imax + 1) < num_cands && std::abs(candidates[imax].score - candidates[imax + 1].score) <
H2DRS_SCORE_DIFF_ZERO)
652 Cand& cand_current = candidates[imax];
653 int imax_end = imax + 2;
// Find the first H-refinement candidate in score order.
662 while (h_imax < num_cands && candidates[h_imax].split != H2D_REFINEMENT_H)
// Zero score (or running off the end) presumably falls back to "no refinement"
// — the consequents are missing from this excerpt.
667 if(imax >= num_cands || candidates[imax].score == 0)
669 if(h_imax >= num_cands || candidates[h_imax].score == 0)
673 *selected_cand = imax;
674 *selected_h_cand = h_imax;
// Fragment: top-level refinement selection for one element — normalizes the
// order for triangles, widens current_max_order if needed, creates and
// evaluates candidates, picks the best (and best-H) candidate, and copies the
// resulting orders into `refinement` with a triangle-uniformity sanity check.
// (Excerpt; several branches and the surrounding signature are missing.)
677 template<
typename Scalar>
681 int order_h =
H2D_GET_H_ORDER(quad_order), order_v = H2D_GET_V_ORDER(quad_order);
682 if(element->is_triangle())
685 quad_order = H2D_MAKE_QUAD_ORDER(order_h, order_v);
689 set_current_order_range(element);
// Never evaluate below the element's current order.
692 if(current_max_order < std::max(order_h, order_v))
693 current_max_order = std::max(order_h, order_v);
696 int inx_cand, inx_h_cand;
697 create_candidates(element, quad_order
698 , H2D_MAKE_QUAD_ORDER(current_max_order, current_max_order)
699 , H2D_MAKE_QUAD_ORDER(current_max_order, current_max_order));
// More than one candidate means refinement alternatives exist to score.
701 if(candidates.size() > 1) {
703 double avg_error, dev_error;
704 evaluate_candidates(element, rsln, &avg_error, &dev_error);
707 select_best_candidate(element, avg_error, dev_error, &inx_cand, &inx_h_cand);
715 Cand& cand = candidates[inx_cand];
716 Cand& cand_h = candidates[inx_h_cand];
717 refinement.split = cand.
split;
718 ElementToRefine::copy_orders(refinement.p, cand.
p);
719 if(candidates[inx_h_cand].split == H2D_REFINEMENT_H) {
720 ElementToRefine::copy_orders(refinement.q, cand_h.
p);
724 ElementToRefine::copy_orders(refinement.q, h_cand_orders);
// Triangles must end up with uniform orders; the V component is then zeroed.
728 if(element->is_triangle())
732 if(!(H2D_GET_V_ORDER(refinement.p[i]) == 0 ||
H2D_GET_H_ORDER(refinement.p[i]) == H2D_GET_V_ORDER(refinement.p[i])))
733 throw Exceptions::Exception(
"Triangle processed but the resulting order (%d, %d) of son %d is not uniform",
H2D_GET_H_ORDER(refinement.p[i]), H2D_GET_V_ORDER(refinement.p[i]), i);
734 refinement.p[i] = H2D_MAKE_QUAD_ORDER(
H2D_GET_H_ORDER(refinement.p[i]), 0);
735 if(!(H2D_GET_V_ORDER(refinement.q[i]) == 0 ||
H2D_GET_H_ORDER(refinement.q[i]) == H2D_GET_V_ORDER(refinement.q[i])))
736 throw Exceptions::Exception(
"Triangle processed but the resulting q-order (%d, %d) of son %d is not uniform",
H2D_GET_H_ORDER(refinement.q[i]), H2D_GET_V_ORDER(refinement.q[i]), i);
737 refinement.q[i] = H2D_MAKE_QUAD_ORDER(
H2D_GET_H_ORDER(refinement.q[i]), 0);
// Fragment: derives son-element orders for a shared-mesh refinement. If caller
// supplies suggested orders, they are copied verbatim; otherwise orders are
// reduced heuristically by split type (halved for H, scaled by 2/3 on the cut
// axis for aniso splits), clamped to >= 1. P-refinement is rejected outright.
// (Excerpt; the switch header and some braces are missing.)
747 template<
typename Scalar>
750 if(refinement == H2D_REFINEMENT_P)
751 throw Exceptions::Exception(
"P-candidate not supported for updating shared orders");
752 const int num_sons = get_refin_sons(refinement);
753 if(suggested_quad_orders != NULL)
755 for(
int i = 0; i < num_sons; i++)
756 tgt_quad_orders[i] = suggested_quad_orders[i];
761 int quad_order = orig_quad_order;
763 int order_h =
H2D_GET_H_ORDER(quad_order), order_v = H2D_GET_V_ORDER(quad_order);
766 case H2D_REFINEMENT_H:
767 order_h = std::max(1, (order_h + 1)/2);
768 order_v = std::max(1, (order_v + 1)/2);
770 case H2D_REFINEMENT_ANISO_H:
771 order_v = std::max(1, 2*(order_v + 1)/3);
773 case H2D_REFINEMENT_ANISO_V:
774 order_h = std::max(1, 2*(order_h + 1)/3);
// Triangles carry only the H component; the V slot is forced to 0.
777 if(element->is_triangle())
778 quad_order = H2D_MAKE_QUAD_ORDER(order_h, 0);
780 quad_order = H2D_MAKE_QUAD_ORDER(order_h, order_v);
// Same resulting order for every son.
782 for(
int i = 0; i < num_sons; i++)
783 tgt_quad_orders[i] = quad_order;
// Fragment: option setter — only the switch's `default:` rejection branch
// survives in this excerpt; the handled option cases are missing.
787 template<
typename Scalar>
794 default:
throw Hermes::Exceptions::Exception(
"Unknown option %d.", (
int)option);
// Fragment: Range constructors. The first template header's definition (lines
// 799-800 of the original, presumably the default constructor) is missing.
// The two-bound constructor marks the range empty when lower > upper.
798 template<
typename Scalar>
801 template<
typename Scalar>
802 OptimumSelector<Scalar>::Range::Range(
const int& lower_bound,
const int& upper_bound) : lower_bound(lower_bound), upper_bound(upper_bound), empty_range(lower_bound > upper_bound)
// Fragment: Range accessors — empty(), lower(), upper(). Each one-line body
// (presumably returning empty_range / lower_bound / upper_bound) is missing
// from this excerpt; only the signatures survive.
806 template<
typename Scalar>
807 bool OptimumSelector<Scalar>::Range::empty()
const
812 template<
typename Scalar>
813 const int& OptimumSelector<Scalar>::Range::lower()
const
818 template<
typename Scalar>
819 const int& OptimumSelector<Scalar>::Range::upper()
const
// Fragment: Range containment predicates. is_in_closed(Range) tests whole-range
// inclusion; is_in_closed(int) tests value in [lower, upper]; is_in_open(int)
// tests value in (lower, upper). (Excerpt; surrounding braces are missing.)
824 template<
typename Scalar>
825 bool OptimumSelector<Scalar>::Range::is_in_closed(
const typename OptimumSelector<Scalar>::Range& range)
const
827 return (range.lower_bound >= lower_bound && range.upper_bound <= upper_bound);
830 template<
typename Scalar>
831 bool OptimumSelector<Scalar>::Range::is_in_closed(
const int& value)
const
833 return (value >= lower_bound && value <= upper_bound);
836 template<
typename Scalar>
837 bool OptimumSelector<Scalar>::Range::is_in_open(
const int& value)
const
839 return (value > lower_bound && value < upper_bound);
// Fragment: enlarge_to_include widens the range to contain `value` (collapsing
// to [value, value] first — presumably when the range was empty; the guarding
// condition is missing from this excerpt). make_envelope returns the smallest
// range covering both arguments.
842 template<
typename Scalar>
843 void OptimumSelector<Scalar>::Range::enlarge_to_include(
const int& value)
846 lower_bound = upper_bound = value;
850 if(lower_bound > value)
852 if(upper_bound < value)
857 template<
typename Scalar>
858 typename OptimumSelector<Scalar>::Range OptimumSelector<Scalar>::Range::make_envelope(
const typename OptimumSelector<Scalar>::Range& a,
const typename OptimumSelector<Scalar>::Range& b)
865 return Range(std::min(a.lower(), b.lower()), std::max(a.upper(), b.upper()));
// Explicit template instantiations for the two supported scalar types.
868 template class HERMES_API OptimumSelector<double>;
869 template class HERMES_API OptimumSelector<std::complex<double> >;