Hermes2D 2.0
discrete_problem.cpp
1 //#define DEBUG_DG_ASSEMBLING
2 //#define DEBUG_DG_ASSEMBLING_ELEMENT 44
3 //#define DEBUG_DG_ASSEMBLING_ISURF 3
4 // This file is part of Hermes2D.
5 //
6 // Hermes2D is free software: you can redistribute it and/or modify
7 // it under the terms of the GNU General Public License as published by
8 // the Free Software Foundation, either version 2 of the License, or
9 // (at your option) any later version.
10 //
11 // Hermes2D is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 //
16 // You should have received a copy of the GNU General Public License
17 // along with Hermes2D. If not, see <http://www.gnu.org/licenses/>.
18 
19 #include "discrete_problem.h"
20 #include "function/exact_solution.h"
21 #include <algorithm>
22 #include <map>
23 #include <string>
24 #include <utility>
25 #include <vector>
26 #include "global.h"
27 #include "integrals/h1.h"
28 #include "quadrature/limit_order.h"
29 #include "mesh/traverse.h"
30 #include "space/space.h"
31 #include "shapeset/precalc.h"
32 #include "mesh/refmap.h"
33 #include "function/solution.h"
34 #include "neighbor.h"
35 #include "api2d.h"
36 
37 using namespace Hermes::Algebra::DenseMatrixOperations;
38 
39 namespace Hermes
40 {
41  namespace Hermes2D
42  {
43  static const std::string H2D_DG_INNER_EDGE = "-1234567";
44 
45  template<typename Scalar>
46  double DiscreteProblem<Scalar>::fake_wt = 1.0;
47 
48  template<typename Scalar>
49  DiscreteProblem<Scalar>::DiscreteProblem(const WeakForm<Scalar>* wf, Hermes::vector<const Space<Scalar> *> spaces) : Hermes::Solvers::DiscreteProblemInterface<Scalar>(), wf(wf)
50  {
51  if(spaces.empty())
52  throw Exceptions::NullException(2);
53  unsigned int first_dof_running = 0;
54  for(unsigned int i = 0; i < spaces.size(); i++)
55  {
56  this->spaces.push_back(spaces.at(i));
57  this->spaces_first_dofs.push_back(first_dof_running);
58  first_dof_running += spaces.at(i)->get_num_dofs();
59  }
60  init();
61  }
62 
63  template<typename Scalar>
64  DiscreteProblem<Scalar>::DiscreteProblem(const WeakForm<Scalar>* wf, const Space<Scalar>* space)
65  : Hermes::Solvers::DiscreteProblemInterface<Scalar>(), wf(wf)
66  {
67  spaces.push_back(space);
68  this->spaces_first_dofs.push_back(0);
69 
70  init();
71  }
72 
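 // Editorial usage sketch (not part of the library source): "wf" stands for a user-defined
 // WeakForm<double> and "space_u"/"space_v"/"space" for user-created Space<double> objects.
 //
 //   Hermes::vector<const Space<double>*> spaces(&space_u, &space_v);
 //   DiscreteProblem<double> dp_system(&wf, spaces);   // multi-space (system) constructor above
 //   DiscreteProblem<double> dp_single(&wf, &space);   // single-space constructor above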
73  template<typename Scalar>
74  DiscreteProblem<Scalar>::DiscreteProblem() : Hermes::Solvers::DiscreteProblemInterface<Scalar>(), wf(NULL)
75  {
76  // Set all attributes for which we don't need to access wf or spaces.
77  // This is important for the destructor to properly detect what needs to be deallocated.
78  // Initialize special variable for Runge-Kutta time integration.
79  RungeKutta = false;
80  RK_original_spaces_count = 0;
81 
82  this->ndof = 0;
83 
84  // Internal variables settings.
85  sp_seq = NULL;
86 
87  // Matrix<Scalar> related settings.
88  have_matrix = false;
89 
90  // There is a special function that sets a DiscreteProblem to be FVM.
91  // The purpose is to keep this constructor cleaner and simpler.
92  this->is_fvm = false;
93 
94  this->DG_matrix_forms_present = false;
95  this->DG_vector_forms_present = false;
96 
97  Geom<Hermes::Ord>* tmp = init_geom_ord();
98  geom_ord = *tmp;
99  delete tmp;
100 
101  current_mat = NULL;
102  current_rhs = NULL;
103  current_block_weights = NULL;
104 
105 
106  cache_element_stored = NULL;
107 
108  this->do_not_use_cache = false;
109 
110  this->is_linear = false;
111  }
112 
113  template<typename Scalar>
114  bool DiscreteProblem<Scalar>::isOkay() const
115  {
116  if(this->wf == NULL)
117  return false;
118 
119  if(this->spaces.size() == 0)
120  return false;
121 
122  // Initial check of meshes and spaces.
123  for(unsigned int space_i = 0; space_i < this->spaces.size(); space_i++)
124  this->spaces[space_i]->check();
125 
126  for(unsigned int space_i = 0; space_i < this->spaces.size(); space_i++)
127  if(!this->spaces[space_i]->is_up_to_date())
128  throw Exceptions::Exception("Space is out of date. If you manually refine it, you have to call assign_dofs().");
129 
130  return true;
131  }
132 
133  template<typename Scalar>
134  void DiscreteProblem<Scalar>::init()
135  {
136  // Initialize special variable for Runge-Kutta time integration.
137  RungeKutta = false;
138  RK_original_spaces_count = 0;
139 
140  this->ndof = Space<Scalar>::get_num_dofs(spaces);
141 
142  // Sanity checks.
143  if(wf == NULL)
144  throw Hermes::Exceptions::Exception("WeakForm<Scalar>* wf can not be NULL in DiscreteProblem<Scalar>::DiscreteProblem.");
145 
146  if(spaces.size() != (unsigned) wf->get_neq())
147  throw Hermes::Exceptions::Exception("Bad number of spaces in DiscreteProblem.");
148  if(spaces.size() == 0)
149  throw Hermes::Exceptions::Exception("Zero number of spaces in DiscreteProblem.");
150 
151  // Internal variables settings.
152  sp_seq = new int[wf->get_neq()];
153  memset(sp_seq, -1, sizeof(int) * wf->get_neq());
154 
155  // Matrix<Scalar> related settings.
156  have_matrix = false;
157 
158  // There is a special function that sets a DiscreteProblem to be FVM.
159  // The purpose is to keep this constructor cleaner and simpler.
160  this->is_fvm = false;
161 
162  this->DG_matrix_forms_present = false;
163  this->DG_vector_forms_present = false;
164 
165  if(!this->wf->mfDG.empty())
166  this->DG_matrix_forms_present = true;
167 
168  if(!this->wf->vfDG.empty())
169  this->DG_vector_forms_present = true;
170 
171  Geom<Hermes::Ord>* tmp = init_geom_ord();
172  geom_ord = *tmp;
173  delete tmp;
174 
175  this->is_linear = false;
176 
177  current_mat = NULL;
178  current_rhs = NULL;
179  current_block_weights = NULL;
180 
181  cache_records_sub_idx = new std::map<uint64_t, CacheRecordPerSubIdx*>**[spaces.size()];
182  cache_records_element = new CacheRecordPerElement**[spaces.size()];
183 
184  this->cache_size = spaces[0]->get_mesh()->get_max_element_id() + 1;
185  for(unsigned int i = 1; i < spaces.size(); i++)
186  {
187  int cache_size_i = spaces[i]->get_mesh()->get_max_element_id() + 1;
188  if(cache_size_i > cache_size)
189  this->cache_size = cache_size_i;
190  }
191 
192  for(unsigned int i = 0; i < spaces.size(); i++)
193  {
194  cache_records_sub_idx[i] = (std::map<uint64_t, CacheRecordPerSubIdx*>**)malloc(this->cache_size * sizeof(std::map<uint64_t, CacheRecordPerSubIdx*>*));
195  memset(cache_records_sub_idx[i], NULL, this->cache_size * sizeof(std::map<uint64_t, CacheRecordPerSubIdx*>*));
196 
197  cache_records_element[i] = (CacheRecordPerElement**)malloc(this->cache_size * sizeof(CacheRecordPerElement*));
198  memset(cache_records_element[i], NULL, this->cache_size * sizeof(CacheRecordPerElement*));
199  }
200  cache_element_stored = NULL;
201 
202  this->do_not_use_cache = false;
203  }
204 
205  template<typename Scalar>
206  void DiscreteProblem<Scalar>::set_time(double time)
207  {
208  Hermes::vector<Space<Scalar>*> spaces;
209  for(unsigned int i = 0; i < this->get_spaces().size(); i++)
210  spaces.push_back(const_cast<Space<Scalar>*>(this->get_space(i)));
211 
211 
212  Space<Scalar>::update_essential_bc_values(spaces, time);
213  const_cast<WeakForm<Scalar>*>(this->wf)->set_current_time(time);
214  }
215 
216  template<typename Scalar>
217  void DiscreteProblem<Scalar>::set_time_step(double time_step)
218  {
219  const_cast<WeakForm<Scalar>*>(this->wf)->set_current_time_step(time_step);
220  }
221 
222  template<typename Scalar>
223  DiscreteProblem<Scalar>::~DiscreteProblem()
224  {
225  if(wf != NULL)
226  memset(sp_seq, -1, sizeof(int) * wf->get_neq());
227 
228  if(sp_seq != NULL) delete [] sp_seq;
229 
230  this->delete_cache();
231  }
232 
233  template<typename Scalar>
234  int DiscreteProblem<Scalar>::get_num_dofs() const
235  {
236  return this->ndof;
237  }
238 
239  template<typename Scalar>
240  Hermes::vector<const Space<Scalar>*> DiscreteProblem<Scalar>::get_spaces() const
241  {
242  return this->spaces;
243  }
244 
245  template<typename Scalar>
246  const WeakForm<Scalar>* DiscreteProblem<Scalar>::get_weak_formulation() const
247  {
248  return this->wf;
249  }
250 
251  template<typename Scalar>
252  void DiscreteProblem<Scalar>::set_weak_formulation(const WeakForm<Scalar>* wf)
253  {
254  if(wf == NULL)
255  throw Hermes::Exceptions::NullException(0);
256 
257  this->wf = wf;
258  this->have_matrix = false;
259 
260  if(!this->wf->mfDG.empty())
261  this->DG_matrix_forms_present = true;
262 
263  if(!this->wf->vfDG.empty())
264  this->DG_vector_forms_present = true;
265  }
266 
267  template<typename Scalar>
268  bool DiscreteProblem<Scalar>::is_matrix_free() const
269  {
270  return wf->is_matrix_free();
271  }
272 
273  template<typename Scalar>
274  bool DiscreteProblem<Scalar>::is_up_to_date() const
275  {
276  // check if we can reuse the matrix structure
277  bool up_to_date = true;
278  if(!have_matrix)
279  up_to_date = false;
280 
281  for (unsigned int i = 0; i < wf->get_neq(); i++)
282  {
283  if(spaces[i]->get_seq() != sp_seq[i])
284  {
285  up_to_date = false;
286  break;
287  }
288  }
289 
290  return up_to_date;
291  }
292 
293  template<typename Scalar>
294  void DiscreteProblem<Scalar>::set_fvm()
295  {
296  this->is_fvm = true;
297  }
298 
299  template<typename Scalar>
300  void DiscreteProblem<Scalar>::delete_cache()
301  {
302  for(unsigned int i = 0; i < spaces.size(); i++)
303  {
304  for(unsigned int j = 0; j < cache_size; j++)
305  {
306  if(this->cache_records_sub_idx[i][j] != NULL)
307  {
308  for(typename std::map<uint64_t, CacheRecordPerSubIdx*>::iterator it = this->cache_records_sub_idx[i][j]->begin(); it != this->cache_records_sub_idx[i][j]->end(); it++)
309  {
310  it->second->clear();
311  delete it->second;
312  }
313 
314  this->cache_records_sub_idx[i][j]->clear();
315  delete this->cache_records_sub_idx[i][j];
316  this->cache_records_sub_idx[i][j] = NULL;
317  }
318  if(this->cache_records_element[i][j] != NULL)
319  {
320  this->cache_records_element[i][j]->clear();
321  delete this->cache_records_element[i][j];
322  this->cache_records_element[i][j] = NULL;
323  }
324  }
325  free(cache_records_sub_idx[i]);
326  free(cache_records_element[i]);
327  }
328  delete [] this->cache_records_sub_idx;
329  delete [] this->cache_records_element;
330  }
331 
332  template<typename Scalar>
333  void DiscreteProblem<Scalar>::set_spaces(Hermes::vector<const Space<Scalar>*> spacesToSet)
334  {
335  for(unsigned int i = 0; i < spacesToSet.size(); i++)
336  spacesToSet[i]->check();
337 
338  if(this->spaces.size() != spacesToSet.size() && this->spaces.size() > 0)
339  throw Hermes::Exceptions::LengthException(0, spacesToSet.size(), this->spaces.size());
340 
341  int originalSize = this->spaces.size();
342 
343  this->spaces = spacesToSet;
344 
345  this->ndof = Space<Scalar>::get_num_dofs(spaces);
346 
348  for(unsigned int i = 0; i < spacesToSet.size(); i++)
349  if(spacesToSet[i]->get_shapeset()->get_num_components() > 1)
350  this->do_not_use_cache = true;
351 
352  have_matrix = false;
353 
354  unsigned int first_dof_running = 0;
355  this->spaces_first_dofs.clear();
356  for(unsigned int i = 0; i < spaces.size(); i++)
357  {
358  this->spaces_first_dofs.push_back(first_dof_running);
359  first_dof_running += spaces.at(i)->get_num_dofs();
360  }
361 
362  if(originalSize == 0)
363  {
364  // Internal variables settings.
365  sp_seq = new int[spaces.size()];
366  memset(sp_seq, -1, sizeof(int) * spaces.size());
367 
368  // Matrix<Scalar> related settings.
369  have_matrix = false;
370 
371  cache_records_sub_idx = new std::map<uint64_t, CacheRecordPerSubIdx*>**[spaces.size()];
372  cache_records_element = new CacheRecordPerElement**[spaces.size()];
373 
374  this->cache_size = spaces[0]->get_mesh()->get_max_element_id() + 1;
375  for(unsigned int i = 1; i < spaces.size(); i++)
376  {
377  int cache_size_i = spaces[i]->get_mesh()->get_max_element_id() + 1;
378  if(cache_size_i > cache_size)
379  this->cache_size = cache_size_i;
380  }
381 
382  for(unsigned int i = 0; i < spaces.size(); i++)
383  {
384  cache_records_sub_idx[i] = (std::map<uint64_t, CacheRecordPerSubIdx*>**)malloc(this->cache_size * sizeof(std::map<uint64_t, CacheRecordPerSubIdx*>*));
385  memset(cache_records_sub_idx[i], NULL, this->cache_size * sizeof(std::map<uint64_t, CacheRecordPerSubIdx*>*));
386 
387  cache_records_element[i] = (CacheRecordPerElement**)malloc(this->cache_size * sizeof(CacheRecordPerElement*));
388  memset(cache_records_element[i], NULL, this->cache_size * sizeof(CacheRecordPerElement*));
389  }
390  cache_element_stored = NULL;
391  }
392  else
393  {
394  int max_size = spacesToSet[0]->get_mesh()->get_max_element_id();
395  for(unsigned int i = 1; i < spacesToSet.size(); i++)
396  {
397  int max_size_i = spacesToSet[i]->get_mesh()->get_max_element_id();
398  if(max_size_i > max_size)
399  max_size = max_size_i;
400  sp_seq[i] = spacesToSet[i]->get_seq();
401  }
402 
403  if(max_size > this->cache_size + 1)
404  {
405  for(unsigned int i = 0; i < this->spaces.size(); i++)
406  {
407  this->cache_records_sub_idx[i] = (std::map<uint64_t, CacheRecordPerSubIdx*>**)realloc(this->cache_records_sub_idx[i], max_size * sizeof(std::map<uint64_t, CacheRecordPerSubIdx*>*));
408  memset(this->cache_records_sub_idx[i] + this->cache_size, NULL, (max_size - this->cache_size) * sizeof(std::map<uint64_t, CacheRecordPerSubIdx*>*));
409 
410  this->cache_records_element[i] = (CacheRecordPerElement**)realloc(this->cache_records_element[i], max_size * sizeof(CacheRecordPerElement*));
411  memset(this->cache_records_element[i] + this->cache_size, NULL, (max_size - this->cache_size) * sizeof(CacheRecordPerElement*));
412  }
413 
414  this->cache_size = max_size;
415  }
416 
417  for(unsigned int i = 0; i < spaces.size(); i++)
418  {
419  for(unsigned int j = 0; j < cache_size; j++)
420  {
421  if(j < spaces[i]->get_mesh()->get_max_element_id())
422  {
423  if(spaces[i]->get_mesh()->get_element(j) == NULL || !spaces[i]->get_mesh()->get_element(j)->active || spaces[i]->get_element_order(spaces[i]->get_mesh()->get_element(j)->id) < 0)
424  {
425  if(this->cache_records_sub_idx[i][j] != NULL)
426  {
427  for(typename std::map<uint64_t, CacheRecordPerSubIdx*>::iterator it = this->cache_records_sub_idx[i][j]->begin(); it != this->cache_records_sub_idx[i][j]->end(); it++)
428  it->second->clear();
429 
430  this->cache_records_sub_idx[i][j]->clear();
431  delete this->cache_records_sub_idx[i][j];
432  this->cache_records_sub_idx[i][j] = NULL;
433  }
434  if(this->cache_records_element[i][j] != NULL)
435  {
436  this->cache_records_element[i][j]->clear();
437  delete this->cache_records_element[i][j];
438  this->cache_records_element[i][j] = NULL;
439  }
440  }
441  }
442  else
443  {
444  if(this->cache_records_sub_idx[i][j] != NULL)
445  {
446  for(typename std::map<uint64_t, CacheRecordPerSubIdx*>::iterator it = this->cache_records_sub_idx[i][j]->begin(); it != this->cache_records_sub_idx[i][j]->end(); it++)
447  it->second->clear();
448 
449  this->cache_records_sub_idx[i][j]->clear();
450  delete this->cache_records_sub_idx[i][j];
451  this->cache_records_sub_idx[i][j] = NULL;
452  }
453  if(this->cache_records_element[i][j] != NULL)
454  {
455  this->cache_records_element[i][j]->clear();
456  delete this->cache_records_element[i][j];
457  this->cache_records_element[i][j] = NULL;
458  }
459  }
460  }
461  }
462  }
463 
464  this->ndof = Space<Scalar>::get_num_dofs(this->spaces);
465  }
466 
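 // Editorial note (not part of the library source): when the spaces are replaced, the cache
 // arrays are reallocated up to the new maximum element id, and the records of elements that
 // no longer exist, are inactive, or have a negative order are cleared above, so stale
 // precalculated data is never reused after a mesh change.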
467  template<typename Scalar>
468  void DiscreteProblem<Scalar>::set_space(const Space<Scalar>* space)
469  {
470  Hermes::vector<const Space<Scalar>*> spaces;
471  spaces.push_back(space);
472  this->set_spaces(spaces);
473  }
474 
475  template<typename Scalar>
476  double DiscreteProblem<Scalar>::block_scaling_coeff(MatrixForm<Scalar>* form) const
477  {
478  if(current_block_weights != NULL)
479  return current_block_weights->get_A(form->i, form->j);
480  return 1.0;
481  }
482 
483  template<typename Scalar>
484  bool DiscreteProblem<Scalar>::form_to_be_assembled(MatrixForm<Scalar>* form, Traverse::State* current_state)
485  {
486  if(current_state->e[form->i] == NULL || current_state->e[form->j] == NULL)
487  return false;
488  if(fabs(form->scaling_factor) < 1e-12)
489  return false;
490 
491  // If a block scaling table is provided, and if the scaling coefficient
492  // A_mn for this block is zero, then the form does not need to be assembled.
493  if(current_block_weights != NULL)
494  if(fabs(current_block_weights->get_A(form->i, form->j)) < 1e-12)
495  return false;
496  return true;
497  }
498 
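 // Editorial sketch (not part of the library source): for a 2x2 system, a block-weights Table
 // with A(0,1) == 0 makes the check below skip every matrix form with i == 0, j == 1, while
 // block_scaling_coeff() scales the remaining blocks by A(i, j). Only get_A()/get_size() are
 // used in this file; the constructor and setter below are assumptions shown for illustration:
 //
 //   Table block_weights(2);            // assumed: square table of size neq
 //   block_weights.set_A(0, 1, 0.0);    // assumed setter; zero weight disables block (0,1)
 //   dp.assemble(&matrix, &rhs, false, &block_weights);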
499  template<typename Scalar>
500  bool DiscreteProblem<Scalar>::form_to_be_assembled(MatrixFormVol<Scalar>* form, Traverse::State* current_state)
501  {
502  if(!form_to_be_assembled((MatrixForm<Scalar>*)form, current_state))
503  return false;
504 
505  // Assemble this form only if one of its areas is HERMES_ANY
506  // or if the element marker coincides with one of the form's areas.
507  bool assemble_this_form = false;
508  for (unsigned int ss = 0; ss < form->areas.size(); ss++)
509  {
510  if(form->areas[ss] == HERMES_ANY)
511  {
512  assemble_this_form = true;
513  break;
514  }
515  else
516  {
517  bool marker_on_space_m = this->spaces[form->i]->get_mesh()->get_element_markers_conversion().get_internal_marker(form->areas[ss]).valid;
518  if(marker_on_space_m)
519  marker_on_space_m = (this->spaces[form->i]->get_mesh()->get_element_markers_conversion().get_internal_marker(form->areas[ss]).marker == current_state->rep->marker);
520 
521  bool marker_on_space_n = this->spaces[form->j]->get_mesh()->get_element_markers_conversion().get_internal_marker(form->areas[ss]).valid;
522  if(marker_on_space_n)
523  marker_on_space_n = (this->spaces[form->j]->get_mesh()->get_element_markers_conversion().get_internal_marker(form->areas[ss]).marker == current_state->rep->marker);
524 
525  if(marker_on_space_m && marker_on_space_n)
526  {
527  assemble_this_form = true;
528  break;
529  }
530  }
531  }
532  if(!assemble_this_form)
533  return false;
534  return true;
535  }
536 
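 // Editorial note (not part of the library source): a form whose areas contain HERMES_ANY is
 // assembled on every element; otherwise the marker of the current state must translate, on the
 // meshes of both spaces form->i and form->j, to one of the markers listed in form->areas.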
537  template<typename Scalar>
538  bool DiscreteProblem<Scalar>::form_to_be_assembled(MatrixFormSurf<Scalar>* form, Traverse::State* current_state)
539  {
540  if(current_state->rep->en[current_state->isurf]->marker == 0)
541  return false;
542 
543  if(form->areas[0] == H2D_DG_INNER_EDGE)
544  return false;
545  if(!form_to_be_assembled((MatrixForm<Scalar>*)form, current_state))
546  return false;
547 
548  bool assemble_this_form = false;
549  for (unsigned int ss = 0; ss < form->areas.size(); ss++)
550  {
551  if(form->areas[ss] == HERMES_ANY)
552  {
553  assemble_this_form = true;
554  break;
555  }
556  else
557  {
558  bool marker_on_space_m = this->spaces[form->i]->get_mesh()->get_boundary_markers_conversion().get_internal_marker(form->areas[ss]).valid;
559  if(marker_on_space_m)
560  marker_on_space_m = (this->spaces[form->i]->get_mesh()->get_boundary_markers_conversion().get_internal_marker(form->areas[ss]).marker == current_state->rep->en[current_state->isurf]->marker);
561 
562  bool marker_on_space_n = this->spaces[form->j]->get_mesh()->get_boundary_markers_conversion().get_internal_marker(form->areas[ss]).valid;
563  if(marker_on_space_n)
564  marker_on_space_n = (this->spaces[form->j]->get_mesh()->get_boundary_markers_conversion().get_internal_marker(form->areas[ss]).marker == current_state->rep->en[current_state->isurf]->marker);
565 
566  if(marker_on_space_m && marker_on_space_n)
567  {
568  assemble_this_form = true;
569  break;
570  }
571  }
572  }
573  if(assemble_this_form == false)
574  return false;
575  return true;
576  }
577 
578  template<typename Scalar>
579  bool DiscreteProblem<Scalar>::form_to_be_assembled(VectorForm<Scalar>* form, Traverse::State* current_state)
580  {
581  if(current_state->e[form->i] == NULL)
582  return false;
583  if(fabs(form->scaling_factor) < 1e-12)
584  return false;
585 
586  return true;
587  }
588 
589  template<typename Scalar>
590  bool DiscreteProblem<Scalar>::form_to_be_assembled(VectorFormVol<Scalar>* form, Traverse::State* current_state)
591  {
592  if(!form_to_be_assembled((VectorForm<Scalar>*)form, current_state))
593  return false;
594 
595  // Assemble this form only if one of its areas is HERMES_ANY
596  // or if the element marker coincides with one of the form's areas.
597  bool assemble_this_form = false;
598  for (unsigned int ss = 0; ss < form->areas.size(); ss++)
599  {
600  if(form->areas[ss] == HERMES_ANY)
601  {
602  assemble_this_form = true;
603  break;
604  }
605  else
606  {
607  bool marker_on_space_m = this->spaces[form->i]->get_mesh()->get_element_markers_conversion().get_internal_marker(form->areas[ss]).valid;
608  if(marker_on_space_m)
609  marker_on_space_m = (this->spaces[form->i]->get_mesh()->get_element_markers_conversion().get_internal_marker(form->areas[ss]).marker == current_state->rep->marker);
610 
611  if(marker_on_space_m)
612  {
613  assemble_this_form = true;
614  break;
615  }
616  }
617  }
618  if(!assemble_this_form)
619  return false;
620  return true;
621  }
622 
623  template<typename Scalar>
624  bool DiscreteProblem<Scalar>::form_to_be_assembled(VectorFormSurf<Scalar>* form, Traverse::State* current_state)
625  {
626  if(current_state->rep->en[current_state->isurf]->marker == 0)
627  return false;
628 
629  if(form->areas[0] == H2D_DG_INNER_EDGE)
630  return false;
631 
632  if(!form_to_be_assembled((VectorForm<Scalar>*)form, current_state))
633  return false;
634 
635  bool assemble_this_form = false;
636  for (unsigned int ss = 0; ss < form->areas.size(); ss++)
637  {
638  if(form->areas[ss] == HERMES_ANY)
639  {
640  assemble_this_form = true;
641  break;
642  }
643  else
644  {
645  bool marker_on_space_m = this->spaces[form->i]->get_mesh()->get_boundary_markers_conversion().get_internal_marker(form->areas[ss]).valid;
646  if(marker_on_space_m)
647  marker_on_space_m = (this->spaces[form->i]->get_mesh()->get_boundary_markers_conversion().get_internal_marker(form->areas[ss]).marker == current_state->rep->en[current_state->isurf]->marker);
648 
649  if(marker_on_space_m)
650  {
651  assemble_this_form = true;
652  break;
653  }
654  }
655  }
656  if(assemble_this_form == false)
657  return false;
658  return true;
659  }
660 
661  template<typename Scalar>
662  void DiscreteProblem<Scalar>::create_sparse_structure(SparseMatrix<Scalar>* mat, Vector<Scalar>* rhs)
663  {
664  this->current_mat = mat;
665  if(rhs != NULL)
666  this->current_rhs = rhs;
667  this->create_sparse_structure();
668  }
669 
670  template<typename Scalar>
671  void DiscreteProblem<Scalar>::create_sparse_structure()
672  {
673  if(is_up_to_date())
674  {
675  if(current_mat != NULL)
676  current_mat->zero();
677  if(current_rhs != NULL)
678  {
679  // If we use e.g. a new NewtonSolver (providing a new Vector) for this instance of DiscreteProblem that has already assembled a system,
680  // we end up with everything up to date, but with an unallocated Vector.
681  if(current_rhs->length() == 0)
682  current_rhs->alloc(this->ndof);
683  else
684  current_rhs->zero();
685  }
686  return;
687  }
688 
689  // For DG, the sparse structure is different as we have to
690  // account for over-edge calculations.
691  bool is_DG = false;
692  for(unsigned int i = 0; i < this->wf->mfsurf.size(); i++)
693  {
694  if(!this->wf->mfDG.empty())
695  {
696  is_DG = true;
697  break;
698  }
699  }
700  for(unsigned int i = 0; i < this->wf->vfsurf.size() && is_DG == false; i++)
701  {
702  if(!this->wf->vfDG.empty())
703  {
704  is_DG = true;
705  break;
706  }
707  }
708 
709  if(current_mat != NULL)
710  {
711  // Spaces have changed: create the matrix from scratch.
712  have_matrix = true;
713  current_mat->free();
714  current_mat->prealloc(this->ndof);
715 
716  AsmList<Scalar>* al = new AsmList<Scalar>[wf->get_neq()];
717  const Mesh** meshes = new const Mesh*[wf->get_neq()];
718  bool **blocks = wf->get_blocks(current_force_diagonal_blocks);
719 
720  // Init multi-mesh traversal.
721  for (unsigned int i = 0; i < wf->get_neq(); i++)
722  meshes[i] = spaces[i]->get_mesh();
723 
724  Traverse trav(true);
725  trav.begin(wf->get_neq(), meshes);
726 
727  if(is_DG)
728  {
729  Hermes::vector<Space<Scalar>*> mutable_spaces;
730  for(unsigned int i = 0; i < this->spaces.size(); i++)
731  {
732  mutable_spaces.push_back(const_cast<Space<Scalar>*>(spaces.at(i)));
733  spaces_first_dofs[i] = 0;
734  }
735  Space<Scalar>::assign_dofs(mutable_spaces);
736  }
737 
738  Traverse::State* current_state;
739  // Loop through all elements.
740  while ((current_state = trav.get_next_state()) != NULL)
741  {
742  // Obtain assembly lists for the element at all spaces.
744  for (unsigned int i = 0; i < wf->get_neq(); i++)
745  if(current_state->e[i] != NULL)
746  if(is_DG)
747  spaces[i]->get_element_assembly_list(current_state->e[i], &(al[i]));
748  else
749  spaces[i]->get_element_assembly_list(current_state->e[i], &(al[i]), spaces_first_dofs[i]);
750 
751  if(is_DG)
752  {
753  // Number of edges ( = number of vertices).
754  int num_edges = current_state->e[0]->nvert;
755 
756  // Allocate an array of arrays of neighboring elements for every mesh x edge.
757  Element **** neighbor_elems_arrays = new Element ***[wf->get_neq()];
758  for(unsigned int i = 0; i < wf->get_neq(); i++)
759  neighbor_elems_arrays[i] = new Element **[num_edges];
760 
761  // The same, only for the numbers of neighboring elements.
762  int ** neighbor_elems_counts = new int *[wf->get_neq()];
763  for(unsigned int i = 0; i < wf->get_neq(); i++)
764  neighbor_elems_counts[i] = new int[num_edges];
765 
766  // Get the neighbors.
767  for(unsigned int el = 0; el < wf->get_neq(); el++)
768  {
769  NeighborSearch<Scalar> ns(current_state->e[el], meshes[el]);
770 
771  // Ignoring errors (and doing nothing) in case the edge is a boundary one.
772  ns.set_ignore_errors(true);
773 
774  for(int ed = 0; ed < num_edges; ed++)
775  {
776  ns.set_active_edge(ed);
777  const Hermes::vector<Element *> *neighbors = ns.get_neighbors();
778 
779  neighbor_elems_counts[el][ed] = ns.get_num_neighbors();
780  neighbor_elems_arrays[el][ed] = new Element *[neighbor_elems_counts[el][ed]];
781  for(int neigh = 0; neigh < neighbor_elems_counts[el][ed]; neigh++)
782  neighbor_elems_arrays[el][ed][neigh] = (*neighbors)[neigh];
783  }
784  }
785 
786  // Pre-add into the stiffness matrix.
787  for (unsigned int m = 0; m < wf->get_neq(); m++)
788  for(unsigned int el = 0; el < wf->get_neq(); el++)
789  for(int ed = 0; ed < num_edges; ed++)
790  for(int neigh = 0; neigh < neighbor_elems_counts[el][ed]; neigh++)
791  if((blocks[m][el] || blocks[el][m]) && current_state->e[m] != NULL)
792  {
793  AsmList<Scalar>*am = &(al[m]);
794  AsmList<Scalar>* an = new AsmList<Scalar>;
795  spaces[el]->get_element_assembly_list(neighbor_elems_arrays[el][ed][neigh], an);
796 
797  // pretend assembling of the element stiffness matrix
798  // register nonzero elements
799  for (unsigned int i = 0; i < am->cnt; i++)
800  if(am->dof[i] >= 0)
801  for (unsigned int j = 0; j < an->cnt; j++)
802  if(an->dof[j] >= 0)
803  {
804  if(blocks[m][el]) current_mat->pre_add_ij(am->dof[i], an->dof[j]);
805  if(blocks[el][m]) current_mat->pre_add_ij(an->dof[j], am->dof[i]);
806  }
807  delete an;
808  }
809 
810  // Deallocate the array of arrays of neighboring elements
811  // for every mesh x edge.
812  for(unsigned int el = 0; el < wf->get_neq(); el++)
813  {
814  for(int ed = 0; ed < num_edges; ed++)
815  delete [] neighbor_elems_arrays[el][ed];
816  delete [] neighbor_elems_arrays[el];
817  }
818  delete [] neighbor_elems_arrays;
819 
820  // The same, only for the numbers of neighboring elements.
821  for(unsigned int el = 0; el < wf->get_neq(); el++)
822  delete [] neighbor_elems_counts[el];
823  delete [] neighbor_elems_counts;
824  }
825 
826  // Go through all equation-blocks of the local stiffness matrix.
827  for (unsigned int m = 0; m < wf->get_neq(); m++)
828  {
829  for (unsigned int n = 0; n < wf->get_neq(); n++)
830  {
831  if(blocks[m][n] && current_state->e[m] != NULL && current_state->e[n] != NULL)
832  {
833  AsmList<Scalar>*am = &(al[m]);
834  AsmList<Scalar>*an = &(al[n]);
835 
836  // Pretend assembling of the element stiffness matrix.
837  for (unsigned int i = 0; i < am->cnt; i++)
838  if(am->dof[i] >= 0)
839  for (unsigned int j = 0; j < an->cnt; j++)
840  if(an->dof[j] >= 0)
841  current_mat->pre_add_ij(am->dof[i], an->dof[j]);
842  }
843  }
844  }
845  }
846 
847  trav.finish();
848  delete [] al;
849  delete [] meshes;
850  delete [] blocks;
851 
852  current_mat->alloc();
853  }
854 
855  // WARNING: unlike Matrix<Scalar>::alloc(), Vector<Scalar>::alloc(ndof) frees the memory occupied
856  // by the previous vector before allocating.
857  if(current_rhs != NULL)
858  current_rhs->alloc(this->ndof);
859 
860  // Save the space seq numbers and the weak form seq number so that we can detect their changes.
861  for (unsigned int i = 0; i < wf->get_neq(); i++)
862  sp_seq[i] = spaces[i]->get_seq();
863  }
864 
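 // Editorial note (not part of the library source): the sparsity pattern is built with the
 // two-pass SparseMatrix<Scalar> protocol that the loops above follow for all coupled
 // (and, for DG, neighboring) degrees of freedom:
 //
 //   current_mat->prealloc(ndof);            // declare the matrix dimension
 //   current_mat->pre_add_ij(row, column);   // register every potentially nonzero entry
 //   current_mat->alloc();                   // allocate the final storage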
865  template<typename Scalar>
866  void DiscreteProblem<Scalar>::assemble(SparseMatrix<Scalar>* mat, Vector<Scalar>* rhs,
867  bool force_diagonal_blocks, Table* block_weights)
868  {
869  Scalar* coeff_vec = NULL;
870  assemble(coeff_vec, mat, rhs, force_diagonal_blocks, block_weights);
871  }
872 
873  template<typename Scalar>
874  void DiscreteProblem<Scalar>::assemble(Vector<Scalar>* rhs,
875  bool force_diagonal_blocks, Table* block_weights)
876  {
877  assemble(NULL, NULL, rhs, force_diagonal_blocks, block_weights);
878  }
879 
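 // Editorial usage sketch (not part of the library source): together with the overloads defined
 // further below, and assuming the default values of force_diagonal_blocks and block_weights
 // declared in the header, a caller can assemble the full pair or just the residual:
 //
 //   dp.assemble(&matrix, &rhs);              // linear problem, no coefficient vector
 //   dp.assemble(coeff_vec, &matrix, &rhs);   // Newton step: linearize around coeff_vec
 //   dp.assemble(coeff_vec, &rhs);            // residual only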
880  template<typename Scalar>
881  void DiscreteProblem<Scalar>::init_assembling(Scalar* coeff_vec, PrecalcShapeset*** pss , PrecalcShapeset*** spss, RefMap*** refmaps, Solution<Scalar>*** u_ext, AsmList<Scalar>*** als, WeakForm<Scalar>** weakforms)
882  {
883  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
884  {
885  pss[i] = new PrecalcShapeset*[wf->get_neq()];
886  for (unsigned int j = 0; j < wf->get_neq(); j++)
887  pss[i][j] = new PrecalcShapeset(spaces[j]->shapeset);
888  }
889  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
890  {
891  spss[i] = new PrecalcShapeset*[wf->get_neq()];
892  for (unsigned int j = 0; j < wf->get_neq(); j++)
893  spss[i][j] = new PrecalcShapeset(pss[i][j]);
894  }
895  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
896  {
897  refmaps[i] = new RefMap*[wf->get_neq()];
898  for (unsigned int j = 0; j < wf->get_neq(); j++)
899  {
900  refmaps[i][j] = new RefMap();
901  refmaps[i][j]->set_quad_2d(&g_quad_2d_std);
902  }
903  }
904 
905  // U_ext functions
906  if(!is_linear)
907  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
908  {
909  if(coeff_vec != NULL)
910  {
911  u_ext[i] = new Solution<Scalar>*[wf->get_neq()];
912  if(i == 0)
913  {
914  int first_dof = 0;
915  for (int j = 0; j < wf->get_neq(); j++)
916  {
917  u_ext[i][j] = new Solution<Scalar>(spaces[j]->get_mesh());
918  Solution<Scalar>::vector_to_solution(coeff_vec, spaces[j], u_ext[i][j], !RungeKutta, first_dof);
919  first_dof += spaces[j]->get_num_dofs();
920  }
921  }
922  else
923  {
924  for (int j = 0; j < wf->get_neq(); j++)
925  {
926  u_ext[i][j] = new Solution<Scalar>(spaces[j]->get_mesh());
927  u_ext[i][j]->copy(u_ext[0][j]);
928  }
929  }
930  }
931  else
932  {
933  u_ext[i] = new Solution<Scalar>*[wf->get_neq()];
934  for (int j = 0; j < wf->get_neq(); j++)
935  {
936  if(spaces[j]->get_shapeset()->get_num_components() == 1)
937  u_ext[i][j] = new ZeroSolution<Scalar>(spaces[j]->get_mesh());
938  else
939  u_ext[i][j] = new ZeroSolutionVector<Scalar>(spaces[j]->get_mesh());
940  }
941  }
942  }
943 
944  // Assembly lists
945  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
946  {
947  als[i] = new AsmList<Scalar>*[wf->get_neq()];
948  for (unsigned int j = 0; j < wf->get_neq(); j++)
949  als[i][j] = new AsmList<Scalar>();
950  }
951 
952  // Weakforms.
953  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
954  {
955  weakforms[i] = this->wf->clone();
956  weakforms[i]->cloneMembers(this->wf);
957  }
958 
959  assert(cache_element_stored == NULL);
960  cache_element_stored = new bool*[this->spaces.size()];
961  for(unsigned int i = 0; i < this->spaces.size(); i++)
962  {
963  cache_element_stored[i] = new bool[this->spaces[i]->get_mesh()->get_max_element_id()];
964  memset(cache_element_stored[i], 0, sizeof(bool) * this->spaces[i]->get_mesh()->get_max_element_id());
965  }
966  }
967 
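 // Editorial note (not part of the library source): everything above is created once per thread
 // (numThreads copies of pss, spss, refmaps, u_ext and als plus a cloned WeakForm), so the
 // OpenMP-parallel loop in assemble() can use them without sharing mutable state across threads.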
968  template<typename Scalar>
969  void DiscreteProblem<Scalar>::deinit_assembling(PrecalcShapeset*** pss , PrecalcShapeset*** spss, RefMap*** refmaps, Solution<Scalar>*** u_ext, AsmList<Scalar>*** als, WeakForm<Scalar>** weakforms)
970  {
971  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
972  {
973  for (unsigned int j = 0; j < wf->get_neq(); j++)
974  delete pss[i][j];
975  delete [] pss[i];
976  }
977  delete [] pss;
978 
979  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
980  {
981  for (unsigned int j = 0; j < wf->get_neq(); j++)
982  delete spss[i][j];
983  delete [] spss[i];
984  }
985  delete [] spss;
986 
987  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
988  {
989  for (unsigned int j = 0; j < wf->get_neq(); j++)
990  delete refmaps[i][j];
991  delete [] refmaps[i];
992  }
993  delete [] refmaps;
994 
995  if(u_ext != NULL)
996  {
997  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
998  {
999  if(u_ext[i] != NULL)
1000  {
1001  for (unsigned int j = 0; j < wf->get_neq(); j++)
1002  delete u_ext[i][j];
1003  delete [] u_ext[i];
1004  }
1005  }
1006  delete [] u_ext;
1007  }
1008 
1009  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
1010  {
1011  for (unsigned int j = 0; j < wf->get_neq(); j++)
1012  delete als[i][j];
1013  delete [] als[i];
1014  }
1015  delete [] als;
1016 
1017  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
1018  {
1019  weakforms[i]->free_ext();
1020  delete weakforms[i];
1021  }
1022  delete [] weakforms;
1023 
1024  for(unsigned int i = 0; i < this->spaces.size(); i++)
1025  delete [] cache_element_stored[i];
1026  delete [] cache_element_stored;
1027  cache_element_stored = NULL;
1028  }
1029 
1030  template<typename Scalar>
1031  void DiscreteProblem<Scalar>::assemble(Scalar* coeff_vec, SparseMatrix<Scalar>* mat, Vector<Scalar>* rhs, bool force_diagonal_blocks, Table* block_weights)
1032  {
1033  // Check.
1034  this->check();
1035  if(this->ndof == 0)
1036  throw Exceptions::Exception("Zero DOFs detected in DiscreteProblem::assemble().");
1037 
1038  // Important: this sets the current caughtException to NULL.
1039  this->caughtException = NULL;
1040 
1041  current_mat = mat;
1042  current_rhs = rhs;
1043  current_force_diagonal_blocks = force_diagonal_blocks;
1044  current_block_weights = block_weights;
1045 
1046  // Check that the block scaling table has the proper dimension.
1047  if(block_weights != NULL)
1048  if(block_weights->get_size() != wf->get_neq())
1049  throw Exceptions::LengthException(6, block_weights->get_size(), wf->get_neq());
1050 
1051  // Create the matrix sparse structure.
1052  create_sparse_structure();
1053 
1054  // Initial check of meshes and spaces.
1055  for(unsigned int ext_i = 0; ext_i < this->wf->ext.size(); ext_i++)
1056  {
1057  if(!this->wf->ext[ext_i]->isOkay())
1058  throw Hermes::Exceptions::Exception("Ext function %d is not okay in assemble().", ext_i);
1059  }
1060 
1061  // Structures into which the cloning will be done.
1062  PrecalcShapeset*** pss = new PrecalcShapeset**[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1063  PrecalcShapeset*** spss = new PrecalcShapeset**[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1064  RefMap*** refmaps = new RefMap**[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1065  Solution<Scalar>*** u_ext = new Solution<Scalar>**[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1066  AsmList<Scalar>*** als = new AsmList<Scalar>**[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1067  WeakForm<Scalar>** weakforms = new WeakForm<Scalar>*[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1068 
1069  // Fill these structures.
1070  init_assembling(coeff_vec, pss, spss, refmaps, u_ext, als, weakforms);
1071 
1072  // Vector of meshes.
1073  Hermes::vector<const Mesh*> meshes;
1074  for(unsigned int space_i = 0; space_i < spaces.size(); space_i++)
1075  meshes.push_back(spaces[space_i]->get_mesh());
1076  for(unsigned int ext_i = 0; ext_i < this->wf->ext.size(); ext_i++)
1077  meshes.push_back(this->wf->ext[ext_i]->get_mesh());
1078  for(unsigned int form_i = 0; form_i < this->wf->get_forms().size(); form_i++)
1079  for(unsigned int ext_i = 0; ext_i < this->wf->get_forms()[form_i]->ext.size(); ext_i++)
1080  if(this->wf->get_forms()[form_i]->ext[ext_i] != NULL)
1081  meshes.push_back(this->wf->get_forms()[form_i]->ext[ext_i]->get_mesh());
1082  for(unsigned int space_i = 0; space_i < spaces.size(); space_i++)
1083  meshes.push_back(spaces[space_i]->get_mesh());
1084 
1085  Traverse trav_master(true);
1086  unsigned int num_states = trav_master.get_num_states(meshes);
1087 
1088  trav_master.begin(meshes.size(), &(meshes.front()));
1089 
1090  Traverse* trav = new Traverse[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1091  Hermes::vector<Transformable *>* fns = new Hermes::vector<Transformable *>[Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads)];
1092  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
1093  {
1094  for (unsigned j = 0; j < spaces.size(); j++)
1095  fns[i].push_back(pss[i][j]);
1096  for (unsigned j = 0; j < this->wf->ext.size(); j++)
1097  {
1098  fns[i].push_back(weakforms[i]->ext[j]);
1099  weakforms[i]->ext[j]->set_quad_2d(&g_quad_2d_std);
1100  }
1101  for(unsigned int form_i = 0; form_i < this->wf->get_forms().size(); form_i++)
1102  {
1103  for(unsigned int ext_i = 0; ext_i < this->wf->get_forms()[form_i]->ext.size(); ext_i++)
1104  if(this->wf->get_forms()[form_i]->ext[ext_i] != NULL)
1105  {
1106  fns[i].push_back(weakforms[i]->get_forms()[form_i]->ext[ext_i]);
1107  weakforms[i]->get_forms()[form_i]->ext[ext_i]->set_quad_2d(&g_quad_2d_std);
1108  }
1109  }
1110  for (unsigned j = 0; j < wf->get_neq(); j++)
1111  {
1112  fns[i].push_back(u_ext[i][j]);
1113  u_ext[i][j]->set_quad_2d(&g_quad_2d_std);
1114  }
1115  trav[i].begin(meshes.size(), &(meshes.front()), &(fns[i].front()));
1116  trav[i].stack = trav_master.stack;
1117  }
1118 
1119  int state_i;
1120 
1121  PrecalcShapeset** current_pss;
1122  PrecalcShapeset** current_spss;
1123  RefMap** current_refmaps;
1124  Solution<Scalar>** current_u_ext;
1125  AsmList<Scalar>** current_als;
1126  WeakForm<Scalar>* current_weakform;
1127 
1128 #define CHUNKSIZE 1
1129  int num_threads_used = Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads);
1130 #pragma omp parallel shared(trav_master, mat, rhs ) private(state_i, current_pss, current_spss, current_refmaps, current_u_ext, current_als, current_weakform) num_threads(num_threads_used)
1131  {
1132 #pragma omp for schedule(dynamic, CHUNKSIZE)
1133  for(state_i = 0; state_i < num_states; state_i++)
1134  {
1135  try
1136  {
1137  Traverse::State current_state;
1138 #pragma omp critical (get_next_state)
1139  current_state = trav[omp_get_thread_num()].get_next_state(&trav_master.top, &trav_master.id);
1140 
1141  current_pss = pss[omp_get_thread_num()];
1142  current_spss = spss[omp_get_thread_num()];
1143  current_refmaps = refmaps[omp_get_thread_num()];
1144  current_u_ext = u_ext[omp_get_thread_num()];
1145  current_als = als[omp_get_thread_num()];
1146  current_weakform = weakforms[omp_get_thread_num()];
1147 
1148  // One state is a collection of (virtual) elements sharing
1149  // the same physical location on (possibly) different meshes.
1150  // This is then the same element of the virtual union mesh.
1151  // The proper sub-element mappings to all the functions of
1152  // this stage are supplied by the function Traverse::get_next_state()
1153  // called above.
1154  assemble_one_state(current_pss, current_spss, current_refmaps, current_u_ext, current_als, &current_state, current_weakform);
1155 
1156  if(DG_matrix_forms_present || DG_vector_forms_present)
1157  assemble_one_DG_state(current_pss, current_spss, current_refmaps, current_u_ext, current_als, &current_state, current_weakform->mfDG, current_weakform->vfDG, trav[omp_get_thread_num()].fn);
1158  }
1159  catch(Hermes::Exceptions::Exception& e)
1160  {
1161  if(this->caughtException == NULL)
1162  this->caughtException = e.clone();
1163  }
1164  catch(std::exception& e)
1165  {
1166  if(this->caughtException == NULL)
1167  this->caughtException = new Hermes::Exceptions::Exception(e.what());
1168  }
1169  }
1170  }
1171 
1172  deinit_assembling(pss, spss, refmaps, u_ext, als, weakforms);
1173 
1174  trav_master.finish();
1175  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
1176  trav[i].finish();
1177 
1178  for(unsigned int i = 0; i < Hermes2DApi.get_integral_param_value(Hermes::Hermes2D::numThreads); i++)
1179  {
1180  fns[i].clear();
1181  }
1182  delete [] fns;
1183  delete [] trav;
1184 
1186  if(current_mat != NULL)
1187  current_mat->finish();
1188  if(current_rhs != NULL)
1189  current_rhs->finish();
1190 
1191  if(DG_matrix_forms_present || DG_vector_forms_present)
1192  {
1193  Element* element_to_set_nonvisited;
1194  for(unsigned int mesh_i = 0; mesh_i < meshes.size(); mesh_i++)
1195  for_all_elements(element_to_set_nonvisited, meshes[mesh_i])
1196  element_to_set_nonvisited->visited = false;
1197  }
1198 
1199  if(this->caughtException != NULL)
1200  throw *(this->caughtException);
1201  }
1202 
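 // Editorial note (not part of the library source): the number of worker threads used by the
 // parallel loop above is read from the API parameter Hermes::Hermes2D::numThreads; the setter
 // name below is an assumption shown for illustration only:
 //
 //   Hermes2DApi.set_integral_param_value(Hermes::Hermes2D::numThreads, 4);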
1203  template<typename Scalar>
1204  void DiscreteProblem<Scalar>::assemble(Scalar* coeff_vec, Vector<Scalar>* rhs,
1205  bool force_diagonal_blocks, Table* block_weights)
1206  {
1207  assemble(coeff_vec, NULL, rhs, force_diagonal_blocks, block_weights);
1208  }
1209 
1210  template<typename Scalar>
1211  DiscreteProblem<Scalar>::CacheRecordPerSubIdx::CacheRecordPerSubIdx() : fnsSurface(NULL)
1212  {
1213  }
1214 
1215  template<typename Scalar>
1216  void DiscreteProblem<Scalar>::CacheRecordPerSubIdx::clear()
1217  {
1218  for(unsigned int i = 0; i < this->asmlistCnt; i++)
1219  {
1220  this->fns[i]->free_fn();
1221  delete this->fns[i];
1222  }
1223  delete [] this->fns;
1224 
1225  delete [] this->jacobian_x_weights;
1226  this->geometry->free();
1227  delete this->geometry;
1228 
1229  if(this->fnsSurface != NULL)
1230  {
1231  for(unsigned int edge_i = 0; edge_i < nvert; edge_i++)
1232  {
1233  if(this->fnsSurface[edge_i] == NULL)
1234  continue;
1235  delete [] this->jacobian_x_weightsSurface[edge_i];
1236  this->geometrySurface[edge_i]->free();
1237  delete this->geometrySurface[edge_i];
1238 
1239  for(unsigned int i = 0; i < this->asmlistSurfaceCnt[edge_i]; i++)
1240  {
1241  this->fnsSurface[edge_i][i]->free_fn();
1242  delete this->fnsSurface[edge_i][i];
1243  }
1244  delete [] this->fnsSurface[edge_i];
1245  }
1246 
1247  delete [] this->fnsSurface;
1248  delete [] this->geometrySurface;
1249  delete [] this->jacobian_x_weightsSurface;
1250 
1251  delete [] this->n_quadrature_pointsSurface;
1252  delete [] this->orderSurface;
1253  delete [] this->asmlistSurfaceCnt;
1254 
1255  this->fnsSurface = NULL;
1256  }
1257  }
1258 
1259  template<typename Scalar>
1260  void DiscreteProblem<Scalar>::CacheRecordPerElement::clear()
1261  {
1262  delete [] this->asmlistIdx;
1263  }
1264 
1265  template<typename Scalar>
1266  bool DiscreteProblem<Scalar>::state_needs_recalculation(AsmList<Scalar>** current_als, Traverse::State* current_state)
1267  {
1268  for(unsigned int i = 0; i < this->spaces.size(); i++)
1269  {
1270  if(current_state->e[i] == NULL)
1271  continue;
1272  if(this->spaces[i]->edata[current_state->e[i]->id].changed_in_last_adaptation)
1273  return true;
1274 
1275  // Check of potential new constraints.
1276  if(!this->do_not_use_cache)
1277  {
1278  if(this->cache_records_element[i][current_state->e[i]->id] != NULL)
1279  {
1280  if(this->cache_records_element[i][current_state->e[i]->id]->asmlistCnt != current_als[i]->cnt)
1281  return true;
1282  else
1283  {
1284  for(unsigned int idx_i = 0; idx_i < current_als[i]->cnt; idx_i++)
1285  if(current_als[i]->idx[idx_i] != this->cache_records_element[i][current_state->e[i]->id]->asmlistIdx[idx_i])
1286  return true;
1287  }
1288  }
1289  else
1290  return true;
1291  }
1292  }
1293  return false;
1294  }
1295 
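 // Editorial note (not part of the library source): cache records are kept per space, keyed
 // first by element id (cache_records_element) and then by the multi-mesh sub-element index
 // sub_idx (cache_records_sub_idx); the check above forces a recalculation only when the element
 // changed in the last adaptation or its assembly list no longer matches the stored one.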
1296  template<typename Scalar>
1297  void DiscreteProblem<Scalar>::calculate_cache_records(PrecalcShapeset** current_pss, PrecalcShapeset** current_spss, RefMap** current_refmaps, Solution<Scalar>** current_u_ext, AsmList<Scalar>** current_als, Traverse::State* current_state,
1298  AsmList<Scalar>** current_alsSurface, WeakForm<Scalar>* current_wf)
1299  {
1300  for(unsigned int space_i = 0; space_i < this->spaces.size(); space_i++)
1301  {
1302  bool new_cache = false;
1303  if(current_state->e[space_i] == NULL)
1304  continue;
1305 
1306  // No sub_idx map for this element.
1307 #pragma omp critical (cache_records_sub_idx_map)
1308  {
1309  if(this->cache_records_sub_idx[space_i][current_state->e[space_i]->id] == NULL)
1310  {
1311  this->cache_records_sub_idx[space_i][current_state->e[space_i]->id] = new std::map<uint64_t, CacheRecordPerSubIdx*>;
1312  new_cache = true;
1313  }
1314  else
1315  {
1316  // If the sub_idx map exists AND contains a record for this sub_idx, we need to delete the record.
1317  typename std::map<uint64_t, CacheRecordPerSubIdx*>::iterator it = this->cache_records_sub_idx[space_i][current_state->e[space_i]->id]->find(current_state->sub_idx[space_i]);
1318  if(it != this->cache_records_sub_idx[space_i][current_state->e[space_i]->id]->end())
1319  (*it).second->clear();
1320  else new_cache = true;
1321  }
1322 
1323  // Insert the new record.
1324  if(new_cache)
1325  this->cache_records_sub_idx[space_i][current_state->e[space_i]->id]->insert(std::pair<uint64_t, CacheRecordPerSubIdx*>(current_state->sub_idx[space_i], new CacheRecordPerSubIdx));
1326  }
1327  }
1328 
1329  // Order calculation.
1330  int order = this->wf->global_integration_order_set ? this->wf->global_integration_order : 0;
1331  if(order == 0)
1332  {
1333  Hermes::vector<MatrixFormVol<Scalar>*> current_mfvol = current_wf->mfvol;
1334  Hermes::vector<VectorFormVol<Scalar>*> current_vfvol = current_wf->vfvol;
1335 
1336  for(int current_mfvol_i = 0; current_mfvol_i < current_mfvol.size(); current_mfvol_i++)
1337  {
1338  if(!form_to_be_assembled(current_mfvol[current_mfvol_i], current_state))
1339  continue;
1340  current_mfvol[current_mfvol_i]->wf = current_wf;
1341  int orderTemp = calc_order_matrix_form(current_mfvol[current_mfvol_i], current_refmaps, current_u_ext, current_state);
1342  if(order < orderTemp)
1343  order = orderTemp;
1344  }
1345 
1346  for(int current_vfvol_i = 0; current_vfvol_i < current_vfvol.size(); current_vfvol_i++)
1347  {
1348  if(!form_to_be_assembled(current_vfvol[current_vfvol_i], current_state))
1349  continue;
1350  current_vfvol[current_vfvol_i]->wf = current_wf;
1351  int orderTemp = calc_order_vector_form(current_vfvol[current_vfvol_i], current_refmaps, current_u_ext, current_state);
1352  if(order < orderTemp)
1353  order = orderTemp;
1354  }
1355 
1356  if(current_state->isBnd)
1357  {
1358  for(int current_mfvol_i = 0; current_mfvol_i < current_mfvol.size(); current_mfvol_i++)
1359  {
1360  if(!form_to_be_assembled(current_mfvol[current_mfvol_i], current_state))
1361  continue;
1362  current_mfvol[current_mfvol_i]->wf = current_wf;
1363  int orderTemp = calc_order_matrix_form(current_mfvol[current_mfvol_i], current_refmaps, current_u_ext, current_state);
1364  if(order < orderTemp)
1365  order = orderTemp;
1366  }
1367  for(int current_vfvol_i = 0; current_vfvol_i < current_vfvol.size(); current_vfvol_i++)
1368  {
1369  if(!form_to_be_assembled(current_vfvol[current_vfvol_i], current_state))
1370  continue;
1371  current_vfvol[current_vfvol_i]->wf = current_wf;
1372  int orderTemp = calc_order_vector_form(current_vfvol[current_vfvol_i], current_refmaps, current_u_ext, current_state);
1373  if(order < orderTemp)
1374  order = orderTemp;
1375  }
1376  }
1377 
1378  // Surface forms.
1379  if(current_state->isBnd && (current_wf->mfsurf.size() > 0 || current_wf->vfsurf.size() > 0))
1380  {
1381  Hermes::vector<MatrixFormSurf<Scalar>*> current_mfsurf = current_wf->mfsurf;
1382  Hermes::vector<VectorFormSurf<Scalar>*> current_vfsurf = current_wf->vfsurf;
1383 
1384  for (current_state->isurf = 0; current_state->isurf < current_state->rep->nvert; current_state->isurf++)
1385  {
1386  if(!current_state->bnd[current_state->isurf])
1387  continue;
1388  for(int current_mfsurf_i = 0; current_mfsurf_i < current_mfsurf.size(); current_mfsurf_i++)
1389  {
1390  if(!form_to_be_assembled(current_mfsurf[current_mfsurf_i], current_state))
1391  continue;
1392  current_mfsurf[current_mfsurf_i]->wf = current_wf;
1393  int orderTemp = calc_order_matrix_form(current_mfsurf[current_mfsurf_i], current_refmaps, current_u_ext, current_state);
1394  if(order < orderTemp)
1395  order = orderTemp;
1396  }
1397 
1398  for(int current_vfsurf_i = 0; current_vfsurf_i < current_vfsurf.size(); current_vfsurf_i++)
1399  {
1400  if(!form_to_be_assembled(current_vfsurf[current_vfsurf_i], current_state))
1401  continue;
1402  current_vfsurf[current_vfsurf_i]->wf = current_wf;
1403  int orderTemp = calc_order_vector_form(current_vfsurf[current_vfsurf_i], current_refmaps, current_u_ext, current_state);
1404  if(order < orderTemp)
1405  order = orderTemp;
1406  }
1407  }
1408  }
1409  }
1410 
1411  // The order is known, so we know how many integration points we need and we can proceed.
1412  // cache record sub idx : new precalcshapeset(spaces->shapeset, element, sub_idx, order, asmlist->idx)
1413  for(unsigned int i = 0; i < this->spaces.size(); i++)
1414  {
1415  if(current_state->e[i] == NULL)
1416  continue;
1417  // Is this thread supposed to compute the perElement record?
1418  bool computingThread = false;
1419  if(!this->cache_element_stored[i][current_state->e[i]->id])
1420  {
1421 #pragma omp critical (cache_for_element_preparation)
1422  if(!this->cache_element_stored[i][current_state->e[i]->id])
1423  {
1424  computingThread = true;
1425  this->cache_element_stored[i][current_state->e[i]->id] = true;
1426  }
1427  }
1428 
1429  if(computingThread)
1430  {
1431  if(this->cache_records_element[i][current_state->e[i]->id] == NULL)
1432  this->cache_records_element[i][current_state->e[i]->id] = new CacheRecordPerElement;
1433  else
1434  this->cache_records_element[i][current_state->e[i]->id]->clear();
1435 
1436  this->cache_records_element[i][current_state->e[i]->id]->asmlistCnt = current_als[i]->cnt;
1437  this->cache_records_element[i][current_state->e[i]->id]->asmlistIdx = new int[current_als[i]->cnt];
1438  for(unsigned int asmlist_i = 0; asmlist_i < current_als[i]->cnt; asmlist_i++)
1439  this->cache_records_element[i][current_state->e[i]->id]->asmlistIdx[asmlist_i] = current_als[i]->idx[asmlist_i];
1440  this->cache_element_stored[i][current_state->e[i]->id] = true;
1441  }
1442 
1443  CacheRecordPerSubIdx* newRecord;
1444 
1445 #pragma omp critical (cache_records_sub_idx_map)
1446  newRecord = (*this->cache_records_sub_idx[i][current_state->e[i]->id]->find(current_state->sub_idx[i])).second;
1447 
1448  newRecord->nvert = current_state->rep->nvert;
1449  newRecord->order = order;
1450 
1451  // Set active element to all test functions.
1452  current_spss[i]->set_active_element(current_state->e[i]);
1453  current_spss[i]->set_master_transform();
1454 
1455  // Set active element to reference mappings.
1456  current_refmaps[i]->force_transform(current_pss[i]->get_transform(), current_pss[i]->get_ctm());
1457  newRecord->fns = new Func<double>*[current_als[i]->cnt];
1458  newRecord->asmlistCnt = current_als[i]->cnt;
1459  for (unsigned int j = 0; j < current_als[i]->cnt; j++)
1460  {
1461  current_spss[i]->set_active_shape(current_als[i]->idx[j]);
1462  newRecord->fns[j] = init_fn(current_spss[i], current_refmaps[i], newRecord->order);
1463  }
1464 
1465  newRecord->n_quadrature_points = init_geometry_points(current_refmaps[i], newRecord->order, newRecord->geometry, newRecord->jacobian_x_weights);
1466 
1467  if(current_state->isBnd && (current_wf->mfsurf.size() > 0 || current_wf->vfsurf.size() > 0))
1468  {
1469  newRecord->fnsSurface = new Func<double>**[newRecord->nvert];
1470  memset(newRecord->fnsSurface, NULL, sizeof(Func<double>**) * newRecord->nvert);
1471 
1472  newRecord->geometrySurface = new Geom<double>*[newRecord->nvert];
1473  newRecord->jacobian_x_weightsSurface = new double*[newRecord->nvert];
1474 
1475  newRecord->n_quadrature_pointsSurface = new int[newRecord->nvert];
1476  newRecord->orderSurface = new int[newRecord->nvert];
1477  newRecord->asmlistSurfaceCnt = new int[newRecord->nvert];
1478 
1479  int order = newRecord->order;
1480  for (current_state->isurf = 0; current_state->isurf < newRecord->nvert; current_state->isurf++)
1481  {
1482  if(!current_state->bnd[current_state->isurf])
1483  continue;
1484  newRecord->n_quadrature_pointsSurface[current_state->isurf] = init_surface_geometry_points(current_refmaps[i], order, current_state, newRecord->geometrySurface[current_state->isurf], newRecord->jacobian_x_weightsSurface[current_state->isurf]);
1485  newRecord->orderSurface[current_state->isurf] = order;
1486  order = newRecord->order;
1487  }
1488 
1489  for (current_state->isurf = 0; current_state->isurf < newRecord->nvert; current_state->isurf++)
1490  {
1491  if(!current_state->bnd[current_state->isurf])
1492  continue;
1493  newRecord->asmlistSurfaceCnt[current_state->isurf] = current_alsSurface[i][current_state->isurf].cnt;
1494 
1495  newRecord->fnsSurface[current_state->isurf] = new Func<double>*[current_alsSurface[i][current_state->isurf].cnt];
1496  for (unsigned int j = 0; j < current_alsSurface[i][current_state->isurf].cnt; j++)
1497  {
1498  current_spss[i]->set_active_shape(current_alsSurface[i][current_state->isurf].idx[j]);
1499  newRecord->fnsSurface[current_state->isurf][j] = init_fn(current_spss[i], current_refmaps[i], newRecord->orderSurface[current_state->isurf]);
1500  }
1501  }
1502  }
1503  }
1504  }
1505 
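 // Editorial note (not part of the library source): a finished CacheRecordPerSubIdx thus stores,
 // for one (element, sub_idx) pair, the integration order, the precalculated test functions fns,
 // the geometry and jacobian-times-weights arrays and, for boundary elements, their per-edge
 // surface counterparts, so assemble_one_state() can reuse them without recomputation.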
1506  template<typename Scalar>
1507  void DiscreteProblem<Scalar>::assemble_one_state(PrecalcShapeset** current_pss, PrecalcShapeset** current_spss, RefMap** current_refmaps, Solution<Scalar>** current_u_ext, AsmList<Scalar>** current_als,
1508  Traverse::State* current_state, WeakForm<Scalar>* current_wf)
1509  {
1510  // Representing space.
1511  int rep_space_i = -1;
1512 
1513  // Get necessary (volumetric) assembly lists.
1514  for(unsigned int space_i = 0; space_i < this->spaces.size(); space_i++)
1515  if(current_state->e[space_i] != NULL)
1516  {
1517  rep_space_i = space_i;
1518  current_refmaps[space_i]->set_active_element(current_state->e[space_i]);
1519  spaces[space_i]->get_element_assembly_list(current_state->e[space_i], current_als[space_i], spaces_first_dofs[space_i]);
1520  }
1521 
1522  if(rep_space_i == -1)
1523  return;
1524 
1525  // Do we have to recalculate the data for this state even if the cache contains the data?
1526  bool changedInLastAdaptation = this->do_not_use_cache ? true : this->state_needs_recalculation(current_als, current_state);
1527 
1528  // Assembly lists for surface forms.
1529  AsmList<Scalar>** current_alsSurface = NULL;
1530  if(current_state->isBnd && (current_wf->mfsurf.size() > 0 || current_wf->vfsurf.size() > 0 || current_wf->mfDG.size() > 0 || current_wf->vfDG.size() > 0))
1531  {
1532  current_alsSurface = new AsmList<Scalar>*[this->spaces.size()];
1533  for(unsigned int space_i = 0; space_i < this->spaces.size(); space_i++)
1534  {
1535  if(current_state->e[space_i] == NULL)
1536  return;
1537  current_alsSurface[space_i] = new AsmList<Scalar>[current_state->rep->nvert];
1538  for (current_state->isurf = 0; current_state->isurf < current_state->rep->nvert; current_state->isurf++)
1539  if(current_state->bnd[current_state->isurf])
1540  spaces[space_i]->get_boundary_assembly_list(current_state->e[space_i], current_state->isurf, &current_alsSurface[space_i][current_state->isurf], spaces_first_dofs[space_i]);
1541  }
1542  }
1543 
1544  // Calculate the cache entries.
1545  CacheRecordPerSubIdx** cacheRecordPerSubIdx = new CacheRecordPerSubIdx*[this->spaces.size()];
1546 
1547  if(changedInLastAdaptation)
1548  this->calculate_cache_records(current_pss, current_spss, current_refmaps, current_u_ext, current_als, current_state, current_alsSurface, current_wf);
1549 
1550  // Store the cache entries.
1551  for(int temp_i = 0; temp_i < this->spaces.size(); temp_i++)
1552  {
1553  if(current_state->e[temp_i] == NULL)
1554  continue;
1555 #pragma omp critical (cache_records_sub_idx_map)
1556  {
1557  typename std::map<uint64_t, CacheRecordPerSubIdx*>::iterator it = this->cache_records_sub_idx[temp_i][current_state->e[temp_i]->id]->find(current_state->sub_idx[temp_i]);
1558  if(it != this->cache_records_sub_idx[temp_i][current_state->e[temp_i]->id]->end())
1559  cacheRecordPerSubIdx[temp_i] = it->second;
1560  }
1561  }
1562 
1563  // Ext functions.
1564  // - order
1565  int order = cacheRecordPerSubIdx[rep_space_i]->order;
1566 
1567  // - u_ext
1568  Func<Scalar>** u_ext = NULL;
1569  int prevNewtonSize = this->wf->get_neq();
1570 
1571  if(!this->is_linear)
1572  {
1573  u_ext = new Func<Scalar>*[prevNewtonSize];
1574  if(current_u_ext != NULL)
1575  for(int u_ext_i = 0; u_ext_i < prevNewtonSize; u_ext_i++)
1576  if(current_u_ext[u_ext_i] != NULL)
1577  u_ext[u_ext_i] = init_fn(current_u_ext[u_ext_i], order);
1578  else
1579  u_ext[u_ext_i] = NULL;
1580  else
1581  for(int u_ext_i = 0; u_ext_i < prevNewtonSize; u_ext_i++)
1582  u_ext[u_ext_i] = NULL;
1583  }
1584 
1585  // - ext
1586  int current_extCount = this->wf->ext.size();
1587  Func<Scalar>** ext = NULL;
1588  if(current_extCount > 0)
1589  {
1590  ext = new Func<Scalar>*[current_extCount];
1591  for(int ext_i = 0; ext_i < current_extCount; ext_i++)
1592  if(current_wf->ext[ext_i] != NULL)
1593  ext[ext_i] = init_fn(current_wf->ext[ext_i], order);
1594  else
1595  ext[ext_i] = NULL;
1596  }
1597 
1598  if(RungeKutta)
1599  for(int ext_i = 0; ext_i < this->RK_original_spaces_count; ext_i++)
1600  u_ext[ext_i]->add(ext[current_extCount - this->RK_original_spaces_count + ext_i]);
1601 
1602  if(current_mat != NULL)
1603  {
1604  for(int current_mfvol_i = 0; current_mfvol_i < wf->mfvol.size(); current_mfvol_i++)
1605  {
1606  MatrixFormVol<Scalar>* mfv = current_wf->mfvol[current_mfvol_i];
1607 
1608  if(!form_to_be_assembled(mfv, current_state))
1609  continue;
1610 
1611  int form_i = mfv->i;
1612  int form_j = mfv->j;
1613  CacheRecordPerSubIdx* CacheRecordPerSubIdxI = cacheRecordPerSubIdx[form_i];
1614  CacheRecordPerSubIdx* CacheRecordPerSubIdxJ = cacheRecordPerSubIdx[form_j];
1615 
1616  assemble_matrix_form(mfv,
1617  CacheRecordPerSubIdxI->order,
1618  CacheRecordPerSubIdxJ->fns,
1619  CacheRecordPerSubIdxI->fns,
1620  ext,
1621  u_ext,
1622  current_als[form_i],
1623  current_als[form_j],
1624  current_state,
1625  CacheRecordPerSubIdxI->n_quadrature_points,
1626  CacheRecordPerSubIdxI->geometry,
1627  CacheRecordPerSubIdxI->jacobian_x_weights);
1628  }
1629  }
1630  if(current_rhs != NULL)
1631  {
1632  for(int current_vfvol_i = 0; current_vfvol_i < wf->vfvol.size(); current_vfvol_i++)
1633  {
1634  VectorFormVol<Scalar>* vfv = current_wf->vfvol[current_vfvol_i];
1635 
1636  if(!form_to_be_assembled(vfv, current_state))
1637  continue;
1638 
1639  int form_i = vfv->i;
1640  CacheRecordPerSubIdx* CacheRecordPerSubIdxI = cacheRecordPerSubIdx[form_i];
1641 
1642  assemble_vector_form(vfv,
1643  CacheRecordPerSubIdxI->order,
1644  CacheRecordPerSubIdxI->fns,
1645  ext,
1646  u_ext,
1647  current_als[form_i],
1648  current_state,
1649  CacheRecordPerSubIdxI->n_quadrature_points,
1650  CacheRecordPerSubIdxI->geometry,
1651  CacheRecordPerSubIdxI->jacobian_x_weights);
1652  }
1653  }
1654 
1655  // Cleanup - u_ext
1656  if(current_u_ext != NULL)
1657  {
1658  for(int u_ext_i = 0; u_ext_i < prevNewtonSize; u_ext_i++)
1659  if(current_u_ext[u_ext_i] != NULL && current_state->e[u_ext_i] != NULL)
1660  {
1661  u_ext[u_ext_i]->free_fn();
1662  delete u_ext[u_ext_i];
1663  }
1664  delete [] u_ext;
1665  }
1666 
1667  // Cleanup - ext
1668  for(int ext_i = 0; ext_i < current_extCount; ext_i++)
1669  if(current_wf->ext[ext_i] != NULL)
1670  {
1671  ext[ext_i]->free_fn();
1672  delete ext[ext_i];
1673  }
1674  delete [] ext;
1675 
1676  // Assemble surface integrals now: loop through surfaces of the element.
1677  if(current_state->isBnd && (current_wf->mfsurf.size() > 0 || current_wf->vfsurf.size() > 0))
1678  {
1679  for (current_state->isurf = 0; current_state->isurf < current_state->rep->nvert; current_state->isurf++)
1680  {
1681  if(!current_state->bnd[current_state->isurf])
1682  continue;
1683 
1684  // Ext functions.
1685  // - order
1686  int orderSurf = cacheRecordPerSubIdx[rep_space_i]->orderSurface[current_state->isurf];
1687  // - u_ext
1688  int prevNewtonSize = this->wf->get_neq();
1689  Func<Scalar>** u_extSurf = NULL;
1690  if(!this->is_linear)
1691  {
1692  u_extSurf = new Func<Scalar>*[prevNewtonSize];
1693  if(current_u_ext != NULL)
1694  for(int u_ext_surf_i = 0; u_ext_surf_i < prevNewtonSize; u_ext_surf_i++)
1695  if(current_u_ext[u_ext_surf_i] != NULL)
1696  u_extSurf[u_ext_surf_i] = current_state->e[u_ext_surf_i] == NULL ? NULL : init_fn(current_u_ext[u_ext_surf_i], orderSurf);
1697  else
1698  u_extSurf[u_ext_surf_i] = NULL;
1699  else
1700  for(int u_ext_surf_i = 0; u_ext_surf_i < prevNewtonSize; u_ext_surf_i++)
1701  u_extSurf[u_ext_surf_i] = NULL;
1702  }
1703  // - ext
1704  int current_extCount = this->wf->ext.size();
1705  Func<Scalar>** extSurf = new Func<Scalar>*[current_extCount];
1706  for(int ext_surf_i = 0; ext_surf_i < current_extCount; ext_surf_i++)
1707  if(current_wf->ext[ext_surf_i] != NULL)
1708  extSurf[ext_surf_i] = current_state->e[ext_surf_i] == NULL ? NULL : init_fn(current_wf->ext[ext_surf_i], orderSurf);
1709  else
1710  extSurf[ext_surf_i] = NULL;
1711 
1712  if(RungeKutta)
1713  for(int ext_surf_i = 0; ext_surf_i < this->RK_original_spaces_count; ext_surf_i++)
1714  u_extSurf[ext_surf_i]->add(extSurf[current_extCount - this->RK_original_spaces_count + ext_surf_i]);
1715 
1716  if(current_mat != NULL)
1717  {
1718  for(int current_mfsurf_i = 0; current_mfsurf_i < wf->mfsurf.size(); current_mfsurf_i++)
1719  {
1720  if(!form_to_be_assembled(current_wf->mfsurf[current_mfsurf_i], current_state))
1721  continue;
1722 
1723  int form_i = current_wf->mfsurf[current_mfsurf_i]->i;
1724  int form_j = current_wf->mfsurf[current_mfsurf_i]->j;
1725  CacheRecordPerSubIdx* CacheRecordPerSubIdxI = cacheRecordPerSubIdx[form_i];
1726  CacheRecordPerSubIdx* CacheRecordPerSubIdxJ = cacheRecordPerSubIdx[form_j];
1727 
1728  assemble_matrix_form(current_wf->mfsurf[current_mfsurf_i],
1729  CacheRecordPerSubIdxI->orderSurface[current_state->isurf],
1730  CacheRecordPerSubIdxJ->fnsSurface[current_state->isurf],
1731  CacheRecordPerSubIdxI->fnsSurface[current_state->isurf],
1732  extSurf,
1733  u_extSurf,
1734  &current_alsSurface[form_i][current_state->isurf],
1735  &current_alsSurface[form_j][current_state->isurf],
1736  current_state,
1737  CacheRecordPerSubIdxI->n_quadrature_pointsSurface[current_state->isurf],
1738  CacheRecordPerSubIdxI->geometrySurface[current_state->isurf],
1739  CacheRecordPerSubIdxI->jacobian_x_weightsSurface[current_state->isurf]);
1740  }
1741  }
1742 
1743  if(current_rhs != NULL)
1744  {
1745  for(int current_vfsurf_i = 0; current_vfsurf_i < wf->vfsurf.size(); current_vfsurf_i++)
1746  {
1747  if(!form_to_be_assembled(current_wf->vfsurf[current_vfsurf_i], current_state))
1748  continue;
1749 
1750  int form_i = current_wf->vfsurf[current_vfsurf_i]->i;
1751  CacheRecordPerSubIdx* CacheRecordPerSubIdxI = cacheRecordPerSubIdx[form_i];
1752 
1753  assemble_vector_form(current_wf->vfsurf[current_vfsurf_i],
1754  CacheRecordPerSubIdxI->orderSurface[current_state->isurf],
1755  CacheRecordPerSubIdxI->fnsSurface[current_state->isurf],
1756  extSurf,
1757  u_extSurf,
1758  &current_alsSurface[form_i][current_state->isurf],
1759  current_state,
1760  CacheRecordPerSubIdxI->n_quadrature_pointsSurface[current_state->isurf],
1761  CacheRecordPerSubIdxI->geometrySurface[current_state->isurf],
1762  CacheRecordPerSubIdxI->jacobian_x_weightsSurface[current_state->isurf]);
1763  }
1764  }
1765 
1766  if(current_u_ext != NULL)
1767  {
1768  for(int u_ext_surf_i = 0; u_ext_surf_i < prevNewtonSize; u_ext_surf_i++)
1769  if(current_u_ext[u_ext_surf_i] != NULL)
1770  {
1771  u_extSurf[u_ext_surf_i]->free_fn();
1772  delete u_extSurf[u_ext_surf_i];
1773  }
1774  delete [] u_extSurf;
1775  }
1776 
1777  for(int ext_surf_i = 0; ext_surf_i < current_extCount; ext_surf_i++)
1778  if(current_wf->ext[ext_surf_i] != NULL)
1779  {
1780  extSurf[ext_surf_i]->free_fn();
1781  delete extSurf[ext_surf_i];
1782  }
1783  delete [] extSurf;
1784  }
1785 
1786  for(unsigned int i = 0; i < this->spaces.size(); i++)
1787  if(current_state->e[i] != NULL)
1788  delete [] current_alsSurface[i];
1789  }
1790 
1791  if(cacheRecordPerSubIdx != NULL)
1792  delete [] cacheRecordPerSubIdx;
1793  if(current_alsSurface != NULL)
1794  delete [] current_alsSurface;
1795  }
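
 /* Illustrative note (not part of the library): the critical section in assemble_one_state()
    above looks up the per-sub-element cache record in a plain std::map keyed by the
    sub-element index. A minimal standalone sketch of that pattern follows; Record, cache
    and lookup_record are hypothetical names, not library symbols.

    #include <cstddef>
    #include <stdint.h>
    #include <map>

    struct Record { int order; };                        // stand-in for CacheRecordPerSubIdx

    // One such map exists per space component and element id; the key is the sub-element
    // index of the current Traverse::State, so every refinement configuration keeps its
    // own precalculated data.
    std::map<uint64_t, Record*> cache;

    Record* lookup_record(uint64_t sub_idx)
    {
      Record* found = NULL;
    #pragma omp critical (cache_records_sub_idx_map)     // several threads assemble at once
      {
        std::map<uint64_t, Record*>::iterator it = cache.find(sub_idx);
        if(it != cache.end())
          found = it->second;
      }
      return found;
    }
 */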
1796 
1797  template<typename Scalar>
1798  int DiscreteProblem<Scalar>::calc_order_matrix_form(MatrixForm<Scalar> *form, RefMap** current_refmaps, Solution<Scalar>** current_u_ext, Traverse::State* current_state)
1799  {
1800  int order;
1801 
1802  if(is_fvm)
1803  order = current_refmaps[form->i]->get_inv_ref_order();
1804  else
1805  {
1806  // Order of solutions from the previous Newton iteration, etc.
1807  Func<Hermes::Ord>** u_ext_ord = new Func<Hermes::Ord>*[RungeKutta ? RK_original_spaces_count : this->wf->get_neq() - form->u_ext_offset];
1808  Func<Hermes::Ord>** ext_ord = NULL;
1809  int ext_size = std::max(form->ext.size(), form->wf->ext.size());
1810  if(ext_size > 0)
1811  ext_ord = new Func<Hermes::Ord>*[ext_size];
1812  init_ext_orders(form, u_ext_ord, ext_ord, current_u_ext, current_state);
1813 
1814  // Order of shape functions.
1815  int max_order_j = this->spaces[form->j]->get_element_order(current_state->e[form->j]->id);
1816  int max_order_i = this->spaces[form->i]->get_element_order(current_state->e[form->i]->id);
1817  if(H2D_GET_V_ORDER(max_order_i) > H2D_GET_H_ORDER(max_order_i))
1818  max_order_i = H2D_GET_V_ORDER(max_order_i);
1819  else
1820  max_order_i = H2D_GET_H_ORDER(max_order_i);
1821  if(H2D_GET_V_ORDER(max_order_j) > H2D_GET_H_ORDER(max_order_j))
1822  max_order_j = H2D_GET_V_ORDER(max_order_j);
1823  else
1824  max_order_j = H2D_GET_H_ORDER(max_order_j);
1825 
1826  for (unsigned int k = 0; k < current_state->rep->nvert; k++)
1827  {
1828  int eo = this->spaces[form->i]->get_edge_order(current_state->e[form->i], k);
1829  if(eo > max_order_i)
1830  max_order_i = eo;
1831  eo = this->spaces[form->j]->get_edge_order(current_state->e[form->j], k);
1832  if(eo > max_order_j)
1833  max_order_j = eo;
1834  }
1835 
1836  Func<Hermes::Ord>* ou = init_fn_ord(max_order_j + (spaces[form->j]->get_shapeset()->get_num_components() > 1 ? 1 : 0));
1837  Func<Hermes::Ord>* ov = init_fn_ord(max_order_i + (spaces[form->i]->get_shapeset()->get_num_components() > 1 ? 1 : 0));
1838 
1839  // Total order of the matrix form.
1840  Hermes::Ord o = form->ord(1, &fake_wt, u_ext_ord, ou, ov, &geom_ord, ext_ord);
1841 
1842  adjust_order_to_refmaps(form, order, &o, current_refmaps);
1843 
1844  // Cleanup.
1845  deinit_ext_orders(form, u_ext_ord, ext_ord);
1846  delete [] u_ext_ord;
1847  ou->free_ord();
1848  delete ou;
1849  ov->free_ord();
1850  delete ov;
1851  }
1852  return order;
1853  }
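
 /* Worked example for the order selection above (illustrative only, the numbers are made
    up): on an anisotropic quad with trial-space element order H = 2, V = 4 and test-space
    order H = 3, V = 3, the directional maxima are max_order_j = 4 and max_order_i = 3
    (edge orders can only raise them further). For an integrand that behaves like u*v,
    Hermes::Ord(4) * Hermes::Ord(3) = Hermes::Ord(7); adjust_order_to_refmaps() then adds
    the inverse reference-map order and clamps the result with limit_order(). */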
1854 
1855  template<typename Scalar>
1856  void DiscreteProblem<Scalar>::assemble_matrix_form(MatrixForm<Scalar>* form, int order, Func<double>** base_fns, Func<double>** test_fns, Func<Scalar>** ext, Func<Scalar>** u_ext,
1857  AsmList<Scalar>* current_als_i, AsmList<Scalar>* current_als_j, Traverse::State* current_state, int n_quadrature_points, Geom<double>* geometry, double* jacobian_x_weights)
1858  {
1859  bool surface_form = (dynamic_cast<MatrixFormVol<Scalar>*>(form) == NULL);
1860 
1861  double block_scaling_coefficient = this->block_scaling_coeff(form);
1862 
1863  bool tra = (form->i != form->j) && (form->sym != 0);
1864  bool sym = (form->i == form->j) && (form->sym == 1);
1865 
1866  // Assemble the local stiffness matrix for this form.
1867  Scalar **local_stiffness_matrix = new_matrix<Scalar>(std::max(current_als_i->cnt, current_als_j->cnt));
1868 
1869  Func<Scalar>** local_ext = ext;
1870  // If the user supplied custom ext functions for this form.
1871  if(form->ext.size() > 0)
1872  {
1873  int local_ext_count = form->ext.size();
1874  local_ext = new Func<Scalar>*[local_ext_count];
1875  for(int ext_i = 0; ext_i < local_ext_count; ext_i++)
1876  if(form->ext[ext_i] != NULL)
1877  local_ext[ext_i] = current_state->e[ext_i] == NULL ? NULL : init_fn(form->ext[ext_i], order);
1878  else
1879  local_ext[ext_i] = NULL;
1880  }
1881 
1882  // Account for the previous time level solution, previously inserted at the back of ext.
1883  if(RungeKutta)
1884  u_ext += form->u_ext_offset;
1885 
1886  // Actual form-specific calculation.
1887  for (unsigned int i = 0; i < current_als_i->cnt; i++)
1888  {
1889  if(current_als_i->dof[i] < 0)
1890  continue;
1891 
1892  if((!tra || surface_form) && current_als_i->dof[i] < 0)
1893  continue;
1894  if(std::abs(current_als_i->coef[i]) < 1e-12)
1895  continue;
1896  if(!sym)
1897  {
1898  for (unsigned int j = 0; j < current_als_j->cnt; j++)
1899  {
1900  if(current_als_j->dof[j] >= 0)
1901  {
1902  // Is this necessary, i.e. is there a coefficient smaller than 1e-12?
1903  if(std::abs(current_als_j->coef[j]) < 1e-12)
1904  continue;
1905 
1906  Func<double>* u = base_fns[j];
1907  Func<double>* v = test_fns[i];
1908 
1909  if(surface_form)
1910  local_stiffness_matrix[i][j] = 0.5 * block_scaling_coefficient * form->value(n_quadrature_points, jacobian_x_weights, u_ext, u, v, geometry, local_ext) * form->scaling_factor * current_als_j->coef[j] * current_als_i->coef[i];
1911  else
1912  local_stiffness_matrix[i][j] = block_scaling_coefficient * form->value(n_quadrature_points, jacobian_x_weights, u_ext, u, v, geometry, local_ext) * form->scaling_factor * current_als_j->coef[j] * current_als_i->coef[i];
1913  }
1914  }
1915  }
1916  // Symmetric block.
1917  else
1918  {
1919  for (unsigned int j = 0; j < current_als_j->cnt; j++)
1920  {
1921  if(j < i && current_als_j->dof[j] >= 0)
1922  continue;
1923  if(current_als_j->dof[j] >= 0)
1924  {
1925  // Is this necessary, i.e. is there a coefficient smaller than 1e-12?
1926  if(std::abs(current_als_j->coef[j]) < 1e-12)
1927  continue;
1928 
1929  Func<double>* u = base_fns[j];
1930  Func<double>* v = test_fns[i];
1931 
1932  Scalar val = block_scaling_coefficient * form->value(n_quadrature_points, jacobian_x_weights, u_ext, u, v, geometry, local_ext) * form->scaling_factor * current_als_j->coef[j] * current_als_i->coef[i];
1933 
1934  local_stiffness_matrix[i][j] = local_stiffness_matrix[j][i] = val;
1935  }
1936  }
1937  }
1938  }
1939 
1940  // Insert the local stiffness matrix into the global one.
1941 
1942  current_mat->add(current_als_i->cnt, current_als_j->cnt, local_stiffness_matrix, current_als_i->dof, current_als_j->dof);
1943 
1944  // Insert also the off-diagonal (anti-)symmetric block, if required.
1945  if(tra)
1946  {
1947  if(form->sym < 0)
1948  chsgn(local_stiffness_matrix, current_als_i->cnt, current_als_j->cnt);
1949  transpose(local_stiffness_matrix, current_als_i->cnt, current_als_j->cnt);
1950 
1951  current_mat->add(current_als_j->cnt, current_als_i->cnt, local_stiffness_matrix, current_als_j->dof, current_als_i->dof);
1952  }
1953 
1954  if(form->ext.size() > 0)
1955  {
1956  for(int ext_i = 0; ext_i < form->ext.size(); ext_i++)
1957  if(form->ext[ext_i] != NULL)
1958  {
1959  local_ext[ext_i]->free_fn();
1960  delete local_ext[ext_i];
1961  }
1962  delete [] local_ext;
1963  }
1964 
1965  if(RungeKutta)
1966  u_ext -= form->u_ext_offset;
1967 
1968  // Cleanup.
1969  delete [] local_stiffness_matrix;
1970  }
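
 /* The insertion above, reduced to a standalone sketch with dense matrices (add_block is a
    hypothetical helper, not the library API). It shows the two ingredients used by
    assemble_matrix_form(): scattering a local block by DOF numbers while skipping fixed
    DOFs (negative numbers), and re-adding the (possibly sign-flipped) transposed block
    when form->sym != 0 and form->i != form->j.

    #include <vector>

    void add_block(std::vector<std::vector<double> >& global,
                   const std::vector<std::vector<double> >& local,
                   const std::vector<int>& row_dofs, const std::vector<int>& col_dofs)
    {
      for(unsigned int i = 0; i < row_dofs.size(); i++)
      {
        if(row_dofs[i] < 0)                  // fixed (Dirichlet) DOF: no equation assembled
          continue;
        for(unsigned int j = 0; j < col_dofs.size(); j++)
        {
          if(col_dofs[j] < 0)                // fixed (Dirichlet) DOF: skipped
            continue;
          global[row_dofs[i]][col_dofs[j]] += local[i][j];
        }
      }
    }

    For a form with form->sym == 1 on a diagonal block, only the triangle j >= i is
    evaluated above and mirrored via local[i][j] = local[j][i]; an off-diagonal
    (anti)symmetric block is added once more with the row and column assembly lists
    swapped. */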
1971 
1972  template<typename Scalar>
1973  int DiscreteProblem<Scalar>::calc_order_vector_form(VectorForm<Scalar> *form, RefMap** current_refmaps, Solution<Scalar>** current_u_ext, Traverse::State* current_state)
1974  {
1975  int order;
1976 
1977  if(is_fvm)
1978  order = current_refmaps[form->i]->get_inv_ref_order();
1979  else
1980  {
1981  // Order of solutions from the previous Newton iteration, etc.
1982  Func<Hermes::Ord>** u_ext_ord = new Func<Hermes::Ord>*[RungeKutta ? RK_original_spaces_count : this->wf->get_neq() - form->u_ext_offset];
1983  Func<Hermes::Ord>** ext_ord = NULL;
1984  int ext_size = std::max(form->ext.size(), form->wf->ext.size());
1985  if(ext_size > 0)
1986  ext_ord = new Func<Hermes::Ord>*[ext_size];
1987  init_ext_orders(form, u_ext_ord, ext_ord, current_u_ext, current_state);
1988 
1989  // Order of shape functions.
1990  int max_order_i = this->spaces[form->i]->get_element_order(current_state->e[form->i]->id);
1991  if(H2D_GET_V_ORDER(max_order_i) > H2D_GET_H_ORDER(max_order_i))
1992  max_order_i = H2D_GET_V_ORDER(max_order_i);
1993  else
1994  max_order_i = H2D_GET_H_ORDER(max_order_i);
1995 
1996  for (unsigned int k = 0; k < current_state->rep->nvert; k++)
1997  {
1998  int eo = this->spaces[form->i]->get_edge_order(current_state->e[form->i], k);
1999  if(eo > max_order_i)
2000  max_order_i = eo;
2001  }
2002  Func<Hermes::Ord>* ov = init_fn_ord(max_order_i + (spaces[form->i]->get_shapeset()->get_num_components() > 1 ? 1 : 0));
2003 
2004  // Total order of the vector form.
2005  Hermes::Ord o = form->ord(1, &fake_wt, u_ext_ord, ov, &geom_ord, ext_ord);
2006 
2007  adjust_order_to_refmaps(form, order, &o, current_refmaps);
2008 
2009  // Cleanup.
2010  deinit_ext_orders(form, u_ext_ord, ext_ord);
2011  delete [] u_ext_ord;
2012  ov->free_ord();
2013  delete ov;
2014  }
2015  return order;
2016  }
2017 
2018  template<typename Scalar>
2019  void DiscreteProblem<Scalar>::assemble_vector_form(VectorForm<Scalar>* form, int order, Func<double>** test_fns, Func<Scalar>** ext, Func<Scalar>** u_ext,
2020  AsmList<Scalar>* current_als_i, Traverse::State* current_state, int n_quadrature_points, Geom<double>* geometry, double* jacobian_x_weights)
2021  {
2022  bool surface_form = (dynamic_cast<VectorFormVol<Scalar>*>(form) == NULL);
2023 
2024  Func<Scalar>** local_ext = ext;
2025 
2026  // If the user supplied custom ext functions for this form.
2027  if(form->ext.size() > 0)
2028  {
2029  int local_ext_count = form->ext.size();
2030  local_ext = new Func<Scalar>*[local_ext_count];
2031  for(int ext_i = 0; ext_i < local_ext_count; ext_i++)
2032  if(form->ext[ext_i] != NULL)
2033  local_ext[ext_i] = init_fn(form->ext[ext_i], order);
2034  else
2035  local_ext[ext_i] = NULL;
2036  }
2037 
2038  // Account for the previous time level solution, previously inserted at the back of ext.
2039  if(RungeKutta)
2040  u_ext += form->u_ext_offset;
2041 
2042  // Actual form-specific calculation.
2043  for (unsigned int i = 0; i < current_als_i->cnt; i++)
2044  {
2045  if(current_als_i->dof[i] < 0)
2046  continue;
2047 
2048  // Is this necessary, i.e. is there a coefficient smaller than 1e-12?
2049  if(std::abs(current_als_i->coef[i]) < 1e-12)
2050  continue;
2051 
2052  Func<double>* v = test_fns[i];
2053 
2054  Scalar val;
2055  if(surface_form)
2056  val = 0.5 * form->value(n_quadrature_points, jacobian_x_weights, u_ext, v, geometry, local_ext) * form->scaling_factor * current_als_i->coef[i];
2057  else
2058  val = form->value(n_quadrature_points, jacobian_x_weights, u_ext, v, geometry, local_ext) * form->scaling_factor * current_als_i->coef[i];
2059 
2060  current_rhs->add(current_als_i->dof[i], val);
2061  }
2062 
2063  if(form->ext.size() > 0)
2064  {
2065  for(int ext_i = 0; ext_i < form->ext.size(); ext_i++)
2066  if(form->ext[ext_i] != NULL)
2067  {
2068  local_ext[ext_i]->free_fn();
2069  delete local_ext[ext_i];
2070  }
2071  delete [] local_ext;
2072  }
2073 
2074  if(RungeKutta)
2075  u_ext -= form->u_ext_offset;
2076  }
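
 /* Minimal standalone sketch of the right-hand-side scatter performed above (add_rhs_entry
    is a hypothetical helper, not the library API): once the form value has been integrated
    over the element or edge, it is scaled by the form's scaling_factor and by the
    assembly-list coefficient of the test function, and accumulated into that test
    function's DOF entry.

    void add_rhs_entry(double* rhs, int dof, double integrated_value,
                       double scaling_factor, double coef)
    {
      if(dof < 0)                            // fixed (Dirichlet) DOF: no equation for it
        return;
      rhs[dof] += integrated_value * scaling_factor * coef;
    }
 */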
2077 
2078  template<typename Scalar>
2079  int DiscreteProblem<Scalar>::init_geometry_points(RefMap* reference_mapping, int order, Geom<double>*& geometry, double*& jacobian_x_weights)
2080  {
2081  double3* pt = reference_mapping->get_quad_2d()->get_points(order, reference_mapping->get_active_element()->get_mode());
2082  int np = reference_mapping->get_quad_2d()->get_num_points(order, reference_mapping->get_active_element()->get_mode());
2083 
2084  // Init geometry and jacobian*weights.
2085  geometry = init_geom_vol(reference_mapping, order);
2086  double* jac = NULL;
2087  if(!reference_mapping->is_jacobian_const())
2088  jac = reference_mapping->get_jacobian(order);
2089  jacobian_x_weights = new double[np];
2090  for(int i = 0; i < np; i++)
2091  {
2092  if(reference_mapping->is_jacobian_const())
2093  jacobian_x_weights[i] = pt[i][2] * reference_mapping->get_const_jacobian();
2094  else
2095  jacobian_x_weights[i] = pt[i][2] * jac[i];
2096  }
2097  return np;
2098  }
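
 /* What jacobian_x_weights represents (illustrative sketch; integrate_on_element is a
    hypothetical helper): a physical-domain integral is approximated as

      integral over K of f dx  ~=  sum_k  w_k * |J(x_k)| * f(x_k),

    so the reference-element weight and the (possibly constant) Jacobian are premultiplied
    once here and reused for every form evaluated on this element:

    double integrate_on_element(int np, const double* jacobian_x_weights,
                                const double* f_at_quad_points)
    {
      double result = 0.0;
      for(int k = 0; k < np; k++)
        result += jacobian_x_weights[k] * f_at_quad_points[k];
      return result;
    }
 */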
2099 
2100  template<typename Scalar>
2101  int DiscreteProblem<Scalar>::init_surface_geometry_points(RefMap* reference_mapping, int& order, Traverse::State* current_state, Geom<double>*& geometry, double*& jacobian_x_weights)
2102  {
2103  int eo = reference_mapping->get_quad_2d()->get_edge_points(current_state->isurf, order, reference_mapping->get_active_element()->get_mode());
2104  double3* pt = reference_mapping->get_quad_2d()->get_points(eo, reference_mapping->get_active_element()->get_mode());
2105  int np = reference_mapping->get_quad_2d()->get_num_points(eo, reference_mapping->get_active_element()->get_mode());
2106 
2107  // Init geometry and jacobian*weights.
2108  double3* tan;
2109  geometry = init_geom_surf(reference_mapping, current_state->isurf, current_state->rep->en[current_state->isurf]->marker, eo, tan);
2110  jacobian_x_weights = new double[np];
2111  for(int i = 0; i < np; i++)
2112  jacobian_x_weights[i] = pt[i][2] * tan[i][2];
2113  order = eo;
2114  return np;
2115  }
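
 /* For edge integrals the role of the Jacobian is played by the norm of the edge tangent
    returned together with the surface geometry (tan[i][2] above), so an edge integral is
    approximated analogously as sum_k w_k * |t(x_k)| * f(x_k); the sketch shown after
    init_geometry_points() applies unchanged. (Illustrative note, not part of the
    library.) */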
2116 
2117  template<typename Scalar>
2118  void DiscreteProblem<Scalar>::init_ext_orders(Form<Scalar> *form, Func<Hermes::Ord>** oi, Func<Hermes::Ord>** oext, Solution<Scalar>** current_u_ext, Traverse::State* current_state)
2119  {
2120  unsigned int prev_size = RungeKutta ? RK_original_spaces_count : this->wf->get_neq() - form->u_ext_offset;
2121  bool surface_form = (current_state->isurf > -1);
2122 
2123  if(current_u_ext != NULL)
2124  for(int i = 0; i < prev_size; i++)
2125  if(current_u_ext[i + form->u_ext_offset] != NULL)
2126  if(surface_form)
2127  oi[i] = init_fn_ord(current_u_ext[i + form->u_ext_offset]->get_edge_fn_order(current_state->isurf) + (current_u_ext[i + form->u_ext_offset]->get_num_components() > 1 ? 1 : 0));
2128  else
2129  oi[i] = init_fn_ord(current_u_ext[i + form->u_ext_offset]->get_fn_order() + (current_u_ext[i + form->u_ext_offset]->get_num_components() > 1 ? 1 : 0));
2130  else
2131  oi[i] = init_fn_ord(0);
2132  else
2133  for(int i = 0; i < prev_size; i++)
2134  oi[i] = init_fn_ord(0);
2135 
2136  if(form->ext.size() > 0)
2137  {
2138  for (int i = 0; i < form->ext.size(); i++)
2139  if(surface_form)
2140  oext[i] = init_fn_ord(form->ext[i]->get_edge_fn_order(current_state->isurf) + (form->ext[i]->get_num_components() > 1 ? 1 : 0));
2141  else
2142  oext[i] = init_fn_ord(form->ext[i]->get_fn_order() + (form->ext[i]->get_num_components() > 1 ? 1 : 0));
2143  }
2144 
2145  else
2146  {
2147  for (int i = 0; i < form->wf->ext.size(); i++)
2148  if(surface_form)
2149  oext[i] = init_fn_ord(form->wf->ext[i]->get_edge_fn_order(current_state->isurf) + (form->wf->ext[i]->get_num_components() > 1 ? 1 : 0));
2150  else
2151  oext[i] = init_fn_ord(form->wf->ext[i]->get_fn_order() + (form->wf->ext[i]->get_num_components() > 1 ? 1 : 0));
2152  }
2153  }
2154 
2155  template<typename Scalar>
2156  void DiscreteProblem<Scalar>::deinit_ext_orders(Form<Scalar> *form, Func<Hermes::Ord>** oi, Func<Hermes::Ord>** oext)
2157  {
2158  unsigned int prev_size = RungeKutta ? RK_original_spaces_count : this->wf->get_neq() - form->u_ext_offset;
2159  for(int i = 0; i < prev_size; i++)
2160  {
2161  oi[i]->free_ord();
2162  delete oi[i];
2163  }
2164 
2165  if(oext != NULL)
2166  {
2167  if(form->ext.size() > 0)
2168  for (int i = 0; i < form->ext.size(); i++)
2169  {
2170  oext[i]->free_ord();
2171  delete oext[i];
2172  }
2173  else
2174  for (int i = 0; i < form->wf->ext.size(); i++)
2175  {
2176  oext[i]->free_ord();
2177  delete oext[i];
2178  }
2179 
2180  delete [] oext;
2181  }
2182  }
2183 
2184  template<typename Scalar>
2185  void DiscreteProblem<Scalar>::adjust_order_to_refmaps(Form<Scalar> *form, int& order, Hermes::Ord* o, RefMap** current_refmaps)
2186  {
2187  // Increase due to reference map.
2188  int coordinate = (dynamic_cast<VectorForm<Scalar>*>(form) == NULL) ? (static_cast<MatrixForm<Scalar>*>(form)->i) : (static_cast<VectorForm<Scalar>*>(form)->i);
2189  order = current_refmaps[coordinate]->get_inv_ref_order();
2190  order += o->get_order();
2191  limit_order(order, current_refmaps[coordinate]->get_active_element()->get_mode());
2192  }
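
 /* Worked example (illustrative only, numbers made up): if the form reports polynomial
    order 7 and the reference map of the corresponding space has inverse order 2 (e.g. a
    curved element), the requested quadrature order is 7 + 2 = 9; limit_order() may still
    clamp this to the maximum order supported by the quadrature tables for the element
    mode. */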
2193 
2194  template<typename Scalar>
2195  void DiscreteProblem<Scalar>::assemble_one_DG_state(PrecalcShapeset** current_pss, PrecalcShapeset** current_spss, RefMap** current_refmaps, Solution<Scalar>** current_u_ext, AsmList<Scalar>** current_als,
2196  Traverse::State* current_state, Hermes::vector<MatrixFormDG<Scalar>*> current_mfDG, Hermes::vector<VectorFormDG<Scalar>*> current_vfDG, Transformable** fn)
2197  {
2198  // Determine the minimum mesh seq.
2199  unsigned int min_dg_mesh_seq = 0;
2200  for(unsigned int i = 0; i < spaces.size(); i++)
2201  if(spaces[i]->get_mesh()->get_seq() < min_dg_mesh_seq || i == 0)
2202  min_dg_mesh_seq = spaces[i]->get_mesh()->get_seq();
2203 
2204  // Create neighbor pss's and refmaps.
2205  std::map<unsigned int, PrecalcShapeset *> npss;
2206  std::map<unsigned int, PrecalcShapeset *> nspss;
2207  std::map<unsigned int, RefMap *> nrefmap;
2208 
2209  // Initialize neighbor precalc shapesets and refmaps.
2210  // This is only needed when there are matrix DG forms present.
2211  if(DG_matrix_forms_present)
2212  {
2213  for (unsigned int i = 0; i < spaces.size(); i++)
2214  {
2215  PrecalcShapeset* new_ps = new PrecalcShapeset(spaces[i]->shapeset);
2216  npss.insert(std::pair<unsigned int, PrecalcShapeset*>(i, new_ps));
2217  PrecalcShapeset* new_pss = new PrecalcShapeset(npss[i]);
2218  nspss.insert(std::pair<unsigned int, PrecalcShapeset*>(i, new_pss));
2219  RefMap* new_rm = new RefMap();
2220  new_rm->set_quad_2d(&g_quad_2d_std);
2221  nrefmap.insert(std::pair<unsigned int, RefMap*>(i, new_rm));
2222  }
2223  }
2224 
2225  bool** processed = new bool*[current_state->rep->nvert];
2226  LightArray<NeighborSearch<Scalar>*>** neighbor_searches = new LightArray<NeighborSearch<Scalar>*>*[current_state->rep->nvert];
2227  (5);
2228  unsigned int* num_neighbors = new unsigned int[current_state->rep->nvert];
2229 
2230  bool intra_edge_passed_DG[H2D_MAX_NUMBER_VERTICES];
2231  for(int a = 0; a < H2D_MAX_NUMBER_VERTICES; a++)
2232  intra_edge_passed_DG[a] = false;
2233 
2234 #pragma omp critical (DG)
2235  {
2236  for(unsigned int i = 0; i < current_state->num; i++)
2237  current_state->e[i]->visited = true;
2238 
2239  for(current_state->isurf = 0; current_state->isurf < current_state->rep->nvert; current_state->isurf++)
2240  {
2241  if(current_state->rep->en[current_state->isurf]->marker == 0)
2242  {
2243  neighbor_searches[current_state->isurf] = new LightArray<NeighborSearch<Scalar>*>(5);
2244 
2245  if(!init_neighbors((*neighbor_searches[current_state->isurf]), current_state, min_dg_mesh_seq))
2246  {
2247  intra_edge_passed_DG[current_state->isurf] = true;
2248  continue;
2249  }
2250  // Create a multimesh tree.
2251  NeighborNode* root = new NeighborNode(NULL, 0);
2252  build_multimesh_tree(root, (*neighbor_searches[current_state->isurf]));
2253 
2254 #ifdef DEBUG_DG_ASSEMBLING
2255 #pragma omp critical (debug_DG)
2256  {
2257  int id = 0;
2258  bool pass = true;
2259  if(DEBUG_DG_ASSEMBLING_ELEMENT != -1)
2260  {
2261  for(unsigned int i = 0; i < (*neighbor_searches[current_state->isurf]).get_size(); i++)
2262  if((*neighbor_searches[current_state->isurf]).present(i))
2263  if((*neighbor_searches[current_state->isurf]).get(i)->central_el->id == DEBUG_DG_ASSEMBLING_ELEMENT)
2264  pass = false;
2265  }
2266  else
2267  pass = false;
2268 
2269  if(!pass)
2270  if(DEBUG_DG_ASSEMBLING_ISURF != -1)
2271  if(current_state->isurf != DEBUG_DG_ASSEMBLING_ISURF)
2272  pass = true;
2273 
2274  if(!pass)
2275  {
2276  for(unsigned int i = 0; i < (*neighbor_searches[current_state->isurf]).get_size(); i++)
2277  {
2278  if((*neighbor_searches[current_state->isurf]).present(i))
2279  {
2280  NeighborSearch<Scalar>* ns = (*neighbor_searches[current_state->isurf]).get(i);
2281  std::cout << (std::string)"The " << ++id << (std::string)"-th Neighbor search:: " << (std::string)"Central element: " << ns->central_el->id << (std::string)", Isurf: " << current_state->isurf << (std::string)", Original sub_idx: " << ns->original_central_el_transform << std::endl;
2282  for(int j = 0; j < ns->n_neighbors; j++)
2283  {
2284  std::cout << '\t' << (std::string)"The " << j << (std::string)"-th neighbor element: " << ns->neighbors[j]->id << std::endl;
2285  if(ns->central_transformations.present(j))
2286  {
2287  std::cout << '\t' << (std::string)"Central transformations: " << std::endl;
2288  for(int k = 0; k < ns->central_transformations.get(j)->num_levels; k++)
2289  std::cout << '\t' << '\t' << ns->central_transformations.get(j)->transf[k] << std::endl;
2290  }
2291  if(ns->neighbor_transformations.present(j))
2292  {
2293  std::cout << '\t' << (std::string)"Neighbor transformations: " << std::endl;
2294  for(int k = 0; k < ns->neighbor_transformations.get(j)->num_levels; k++)
2295  std::cout << '\t' << '\t' << ns->neighbor_transformations.get(j)->transf[k] << std::endl;
2296  }
2297  }
2298  }
2299  }
2300  }
2301  }
2302 #endif
2303 
2304  // Update all NeighborSearches according to the multimesh tree.
2305  // After this, all NeighborSearches in neighbor_searches should have the same count
2306  // of neighbors and proper set of transformations
2307  // for the central and the neighbor element(s) alike.
2308  // Also check that every NeighborSearch has the same number of neighbor elements.
2309  num_neighbors[current_state->isurf] = 0;
2310  for(unsigned int i = 0; i < (*neighbor_searches[current_state->isurf]).get_size(); i++)
2311  {
2312  if((*neighbor_searches[current_state->isurf]).present(i))
2313  {
2314  NeighborSearch<Scalar>* ns = (*neighbor_searches[current_state->isurf]).get(i);
2315  update_neighbor_search(ns, root);
2316  if(num_neighbors[current_state->isurf] == 0)
2317  num_neighbors[current_state->isurf] = ns->n_neighbors;
2318  if(ns->n_neighbors != num_neighbors[current_state->isurf])
2319  throw Hermes::Exceptions::Exception("Num_neighbors of different NeighborSearches not matching in DiscreteProblem<Scalar>::assemble_surface_integrals().");
2320  }
2321  }
2322 
2323  // Delete the multimesh tree.
2324  delete root;
2325 
2326  processed[current_state->isurf] = new bool[num_neighbors[current_state->isurf]];
2327 
2328  for(unsigned int neighbor_i = 0; neighbor_i < num_neighbors[current_state->isurf]; neighbor_i++)
2329  {
2330  // If the active segment has already been processed (when the neighbor element was assembled), it is skipped.
2331  // We test all neighbor searches, because in the case of an intra-element edge, the neighboring element (the same as the central one)
2332  // will be marked as visited even though the edge was not calculated.
2333  processed[current_state->isurf][neighbor_i] = true;
2334  for(unsigned int i = 0; i < (*neighbor_searches[current_state->isurf]).get_size(); i++)
2335  {
2336  if((*neighbor_searches[current_state->isurf]).present(i))
2337  {
2338  if(!(*neighbor_searches[current_state->isurf]).get(i)->neighbors.at(neighbor_i)->visited)
2339  {
2340  processed[current_state->isurf][neighbor_i] = false;
2341  break;
2342  }
2343  }
2344  }
2345  }
2346  }
2347  }
2348  }
2349 
2350  for(current_state->isurf = 0; current_state->isurf < current_state->rep->nvert; current_state->isurf++)
2351  {
2352  if(intra_edge_passed_DG[current_state->isurf])
2353  continue;
2354  if(current_state->rep->en[current_state->isurf]->marker != 0)
2355  continue;
2356 
2357  for(unsigned int neighbor_i = 0; neighbor_i < num_neighbors[current_state->isurf]; neighbor_i++)
2358  {
2359  if(!DG_vector_forms_present && processed[current_state->isurf][neighbor_i])
2360  continue;
2361 
2362  assemble_DG_one_neighbor(processed[current_state->isurf][neighbor_i], neighbor_i, current_pss, current_spss, current_refmaps, current_u_ext, current_als,
2363  current_state, current_mfDG, current_vfDG, fn,
2364  npss, nspss, nrefmap, (*neighbor_searches[current_state->isurf]), min_dg_mesh_seq);
2365  }
2366 
2367  // Delete the neighbor_searches array.
2368  for(unsigned int i = 0; i < (*neighbor_searches[current_state->isurf]).get_size(); i++)
2369  if((*neighbor_searches[current_state->isurf]).present(i))
2370  delete (*neighbor_searches[current_state->isurf]).get(i);
2371  delete neighbor_searches[current_state->isurf];
2372  delete [] processed[current_state->isurf];
2373  }
2374 
2375  delete [] processed;
2376  delete [] neighbor_searches;
2377  delete [] num_neighbors;
2378 
2379  // Deinitialize neighbor pss's, refmaps.
2380  if(DG_matrix_forms_present)
2381  {
2382  for(std::map<unsigned int, PrecalcShapeset *>::iterator it = nspss.begin(); it != nspss.end(); it++)
2383  delete it->second;
2384  for(std::map<unsigned int, PrecalcShapeset *>::iterator it = npss.begin(); it != npss.end(); it++)
2385  delete it->second;
2386  for(std::map<unsigned int, RefMap *>::iterator it = nrefmap.begin(); it != nrefmap.end(); it++)
2387  delete it->second;
2388  }
2389  }
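
 /* The edge de-duplication used above, reduced to a standalone sketch (Element_ and
    edge_needs_assembling are hypothetical names): every element of the current state is
    first marked visited inside the critical section, and an inner edge is then skipped
    when all NeighborSearches report its neighbor as already visited, so each inner edge
    is assembled exactly once (unless DG vector forms force a second visit).

    struct Element_ { bool visited; };

    // Roughly corresponds to computing the 'processed' flag above for a single mesh.
    bool edge_needs_assembling(Element_* central, Element_* neighbor)
    {
      central->visited = true;        // the loop over current_state->e[i]
      return !neighbor->visited;      // skip if the other side already assembled the edge
    }
 */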
2390 
2391  template<typename Scalar>
2392  void DiscreteProblem<Scalar>::assemble_DG_one_neighbor(bool edge_processed, unsigned int neighbor_i,
2393  PrecalcShapeset** current_pss, PrecalcShapeset** current_spss, RefMap** current_refmaps, Solution<Scalar>** current_u_ext, AsmList<Scalar>** current_als,
2394  Traverse::State* current_state, Hermes::vector<MatrixFormDG<Scalar>*> current_mfDG, Hermes::vector<VectorFormDG<Scalar>*> current_vfDG, Transformable** fn,
2395  std::map<unsigned int, PrecalcShapeset *> npss, std::map<unsigned int, PrecalcShapeset *> nspss, std::map<unsigned int, RefMap *> nrefmap,
2396  LightArray<NeighborSearch<Scalar>*>& neighbor_searches, unsigned int min_dg_mesh_seq)
2397  {
2398  // Set the active segment in all NeighborSearches
2399  for(unsigned int i = 0; i < neighbor_searches.get_size(); i++)
2400  {
2401  if(neighbor_searches.present(i))
2402  {
2403  NeighborSearch<Scalar>* ns = neighbor_searches.get(i);
2404  ns->active_segment = neighbor_i;
2405  ns->neighb_el = ns->neighbors[neighbor_i];
2406  ns->neighbor_edge = ns->neighbor_edges[neighbor_i];
2407  }
2408  }
2409 
2410  // Push all the necessary transformations to all functions of this stage.
2411  // The important thing is that the transformations to the current subelement are already there.
2412  for(unsigned int fns_i = 0; fns_i < current_state->num; fns_i++)
2413  {
2414  const Mesh * mesh_i;
2415  if(dynamic_cast<PrecalcShapeset*>(fn[fns_i]) != NULL)
2416  mesh_i = spaces[fns_i]->get_mesh();
2417  else
2418  mesh_i = (dynamic_cast<MeshFunction<Scalar>*>(fn[fns_i]))->get_mesh();
2419  NeighborSearch<Scalar>* ns = neighbor_searches.get(mesh_i->get_seq() - min_dg_mesh_seq);
2420  if(ns->central_transformations.present(neighbor_i))
2421  ns->central_transformations.get(neighbor_i)->apply_on(fn[fns_i]);
2422  }
2423 
2424  // For neighbor pss's.
2425  if(current_mat != NULL && DG_matrix_forms_present && !edge_processed)
2426  {
2427  for(unsigned int idx_i = 0; idx_i < spaces.size(); idx_i++)
2428  {
2429  NeighborSearch<Scalar>* ns = neighbor_searches.get(spaces[idx_i]->get_mesh()->get_seq() - min_dg_mesh_seq);
2430  npss[idx_i]->set_active_element((*ns->get_neighbors())[neighbor_i]);
2431  if(ns->neighbor_transformations.present(neighbor_i))
2432  ns->neighbor_transformations.get(neighbor_i)->apply_on(npss[idx_i]);
2433  }
2434  }
2435 
2436  // Also push the transformations to the slave psss and refmaps.
2437  for (unsigned int i = 0; i < spaces.size(); i++)
2438  {
2439  current_spss[i]->set_master_transform();
2440  current_refmaps[i]->force_transform(current_pss[i]->get_transform(), current_pss[i]->get_ctm());
2441 
2442  // Neighbor.
2443  if(current_mat != NULL && DG_matrix_forms_present && !edge_processed)
2444  {
2445  nspss[i]->set_active_element(npss[i]->get_active_element());
2446  nspss[i]->set_master_transform();
2447  nrefmap[i]->set_active_element(npss[i]->get_active_element());
2448  nrefmap[i]->force_transform(npss[i]->get_transform(), npss[i]->get_ctm());
2449  }
2450  }
2451 
2452  /***/
2453  // The computation takes place here.
2454  if(current_mat != NULL && DG_matrix_forms_present && !edge_processed)
2455  {
2456  for(int current_mfsurf_i = 0; current_mfsurf_i < wf->mfDG.size(); current_mfsurf_i++)
2457  {
2458  if(!form_to_be_assembled((MatrixForm<Scalar>*)current_mfDG[current_mfsurf_i], current_state))
2459  continue;
2460 
2461  int order = 20;
2462  int order_base = 20;
2463 
2464  MatrixFormDG<Scalar>* mfs = current_mfDG[current_mfsurf_i];
2465 
2466  int m = mfs->i;
2467  int n = mfs->j;
2468 
2469  // Create the extended shapeset on the union of the central element and its current neighbor.
2470  typename NeighborSearch<Scalar>::ExtendedShapeset* ext_asmlist_u = neighbor_searches.get(spaces[n]->get_mesh()->get_seq() - min_dg_mesh_seq)->create_extended_asmlist(spaces[n], current_als[n]);
2471  typename NeighborSearch<Scalar>::ExtendedShapeset* ext_asmlist_v = neighbor_searches.get(spaces[m]->get_mesh()->get_seq() - min_dg_mesh_seq)->create_extended_asmlist(spaces[m], current_als[m]);
2472 
2473  NeighborSearch<Scalar>* nbs_u = neighbor_searches.get(spaces[n]->get_mesh()->get_seq() - min_dg_mesh_seq);
2474  NeighborSearch<Scalar>* nbs_v = neighbor_searches.get(spaces[m]->get_mesh()->get_seq() - min_dg_mesh_seq);
2475 
2476  nbs_u->set_quad_order(order);
2477  nbs_v->set_quad_order(order);
2478 
2479  // Init geometry.
2480  int n_quadrature_points;
2481  Geom<double>* geometry = NULL;
2482  double* jacobian_x_weights = NULL;
2483  n_quadrature_points = init_surface_geometry_points(current_refmaps[mfs->i], order_base, current_state, geometry, jacobian_x_weights);
2484 
2485  Geom<double>* e = new InterfaceGeom<double>(geometry, nbs_u->neighb_el->marker,
2486  nbs_u->neighb_el->id, nbs_u->neighb_el->get_diameter());
2487 
2488  // Values of the previous Newton iteration, shape functions and external functions in quadrature points.
2489  int prev_size = wf->get_neq() - mfs->u_ext_offset;
2490  Func<Scalar>** prev = new Func<Scalar>*[prev_size];
2491  if(current_u_ext != NULL)
2492  {
2493  for (int i = 0; i < prev_size; i++)
2494  {
2495  if(current_u_ext[i + mfs->u_ext_offset] != NULL)
2496  {
2497  neighbor_searches.get(current_u_ext[i]->get_mesh()->get_seq() - min_dg_mesh_seq)->set_quad_order(order);
2498  prev[i] = neighbor_searches.get(current_u_ext[i]->get_mesh()->get_seq() - min_dg_mesh_seq)->init_ext_fn(current_u_ext[i]);
2499  }
2500  else
2501  prev[i] = NULL;
2502  }
2503  }
2504  else
2505  for (int i = 0; i < prev_size; i++)
2506  prev[i] = NULL;
2507 
2508  Func<Scalar>** ext = init_ext_fns(mfs->wf->ext, neighbor_searches, order, min_dg_mesh_seq);
2509 
2510  // Precalc shapeset and refmaps used for the evaluation.
2511  PrecalcShapeset* fu;
2512  PrecalcShapeset* fv;
2513  RefMap* ru;
2514  RefMap* rv;
2515  bool support_neigh_u, support_neigh_v;
2516 
2517  Scalar **local_stiffness_matrix = new_matrix<Scalar>(std::max(ext_asmlist_u->cnt, ext_asmlist_v->cnt));
2518  for (int i = 0; i < ext_asmlist_v->cnt; i++)
2519  {
2520  if(ext_asmlist_v->dof[i] < 0)
2521  continue;
2522  // Choose the correct shapeset for the test function.
2523  if(!ext_asmlist_v->has_support_on_neighbor(i))
2524  {
2525  current_spss[m]->set_active_shape(ext_asmlist_v->central_al->idx[i]);
2526  fv = current_spss[m];
2527  rv = current_refmaps[m];
2528  support_neigh_v = false;
2529  }
2530  else
2531  {
2532  nspss[m]->set_active_shape(ext_asmlist_v->neighbor_al->idx[i - ext_asmlist_v->central_al->cnt]);
2533  fv = nspss[m];
2534  rv = nrefmap[m];
2535  support_neigh_v = true;
2536  }
2537  for (int j = 0; j < ext_asmlist_u->cnt; j++)
2538  {
2539  // Choose the correct shapeset for the solution function.
2540  if(!ext_asmlist_u->has_support_on_neighbor(j))
2541  {
2542  current_pss[n]->set_active_shape(ext_asmlist_u->central_al->idx[j]);
2543  fu = current_pss[n];
2544  ru = current_refmaps[n];
2545  support_neigh_u = false;
2546  }
2547  else
2548  {
2549  npss[n]->set_active_shape(ext_asmlist_u->neighbor_al->idx[j - ext_asmlist_u->central_al->cnt]);
2550  fu = npss[n];
2551  ru = nrefmap[n];
2552  support_neigh_u = true;
2553  }
2554 
2555  if(ext_asmlist_u->dof[j] >= 0)
2556  {
2557  // Values of the previous Newton iteration, shape functions and external functions in quadrature points.
2558  DiscontinuousFunc<double>* u = new DiscontinuousFunc<double>(init_fn(fu, ru, nbs_u->get_quad_eo(support_neigh_u)),
2559  support_neigh_u, nbs_u->neighbor_edge.orientation);
2560  DiscontinuousFunc<double>* v = new DiscontinuousFunc<double>(init_fn(fv, rv, nbs_v->get_quad_eo(support_neigh_v)),
2561  support_neigh_v, nbs_v->neighbor_edge.orientation);
2562 
2563  Scalar res = mfs->value(n_quadrature_points, jacobian_x_weights, prev, u, v, e, ext) * mfs->scaling_factor;
2564 
2565  u->free_fn();
2566  delete u;
2567  v->free_fn();
2568  delete v;
2569 
2570  Scalar val = block_scaling_coeff(mfs) * 0.5 * res * (support_neigh_u ? ext_asmlist_u->neighbor_al->coef[j - ext_asmlist_u->central_al->cnt]: ext_asmlist_u->central_al->coef[j])
2571  * (support_neigh_v ? ext_asmlist_v->neighbor_al->coef[i - ext_asmlist_v->central_al->cnt]: ext_asmlist_v->central_al->coef[i]);
2572  local_stiffness_matrix[i][j] = val;
2573  }
2574  }
2575  }
2576 
2577 
2578  current_mat->add(ext_asmlist_v->cnt, ext_asmlist_u->cnt, local_stiffness_matrix, ext_asmlist_v->dof, ext_asmlist_u->dof);
2579 
2580  delete [] local_stiffness_matrix;
2581 
2582  // Clean up.
2583  for (int i = 0; i < prev_size; i++)
2584  {
2585  if(prev[i] != NULL)
2586  {
2587  prev[i]->free_fn();
2588  delete prev[i];
2589  }
2590  }
2591 
2592  delete [] prev;
2593 
2594  if(ext != NULL)
2595  {
2596  for(unsigned int i = 0; i < mfs->wf->ext.size(); i++)
2597  {
2598  ext[i]->free_fn();
2599  delete ext[i];
2600  }
2601  delete [] ext;
2602  }
2603 
2604  e->free();
2605  delete e;
2606 
2607  delete [] jacobian_x_weights;
2608 
2609  delete ext_asmlist_u;
2610  delete ext_asmlist_v;
2611  }
2612  }
2613 
2614  if(current_rhs != NULL && DG_vector_forms_present)
2615  {
2616  for (unsigned int ww = 0; ww < wf->vfDG.size(); ww++)
2617  {
2618  int order = 20;
2619  int order_base = 20;
2620 
2621  VectorFormDG<Scalar>* vfs = current_vfDG[ww];
2622  if(vfs->areas[0] != H2D_DG_INNER_EDGE)
2623  continue;
2624  int m = vfs->i;
2625 
2626  if(!form_to_be_assembled((VectorForm<Scalar>*)vfs, current_state))
2627  continue;
2628 
2629  Func<Scalar>** ext = init_ext_fns(vfs->wf->ext, neighbor_searches, order, min_dg_mesh_seq);
2630 
2631  NeighborSearch<Scalar>* nbs_v = (neighbor_searches.get(spaces[m]->get_mesh()->get_seq() - min_dg_mesh_seq));
2632 
2633  // Init geometry and jacobian*weights.
2634  // Init geometry.
2635  int n_quadrature_points;
2636  Geom<double>* geometry = NULL;
2637  double* jacobian_x_weights = NULL;
2638  n_quadrature_points = init_surface_geometry_points(current_refmaps[vfs->i], order_base, current_state, geometry, jacobian_x_weights);
2639 
2640  Geom<double>* e = new InterfaceGeom<double>(geometry, nbs_v->neighb_el->marker,
2641  nbs_v->neighb_el->id, nbs_v->neighb_el->get_diameter());
2642 
2643  // Values of the previous Newton iteration, shape functions and external functions in quadrature points.
2644  int prev_size = wf->get_neq() - vfs->u_ext_offset;
2645  Func<Scalar>** prev = new Func<Scalar>*[prev_size];
2646  if(current_u_ext != NULL)
2647  for (int i = 0; i < prev_size; i++)
2648  if(current_u_ext[i + vfs->u_ext_offset] != NULL)
2649  {
2650  neighbor_searches.get(current_u_ext[i]->get_mesh()->get_seq() - min_dg_mesh_seq)->set_quad_order(order);
2651  prev[i] = neighbor_searches.get(current_u_ext[i]->get_mesh()->get_seq() - min_dg_mesh_seq)->init_ext_fn(current_u_ext[i]);
2652  }
2653  else
2654  prev[i] = NULL;
2655  else
2656  for (int i = 0; i < prev_size; i++)
2657  prev[i] = NULL;
2658 
2659  // Here we use the standard pss, possibly just transformed by NeighborSearch.
2660  for (unsigned int dof_i = 0; dof_i < current_als[m]->cnt; dof_i++)
2661  {
2662  if(current_als[m]->dof[dof_i] < 0)
2663  continue;
2664  current_spss[m]->set_active_shape(current_als[m]->idx[dof_i]);
2665 
2666  Func<double>* v = init_fn(current_spss[m], current_refmaps[m], nbs_v->get_quad_eo());
2667 
2668 
2669  current_rhs->add(current_als[m]->dof[dof_i], 0.5 * vfs->value(n_quadrature_points, jacobian_x_weights, prev, v, e, ext) * vfs->scaling_factor * current_als[m]->coef[dof_i]);
2670 
2671  v->free_fn();
2672  delete v;
2673  }
2674 
2675  // Clean up.
2676  for (int i = 0; i < prev_size; i++)
2677  {
2678  if(prev[i] != NULL)
2679  {
2680  prev[i]->free_fn();
2681  delete prev[i];
2682  }
2683  }
2684 
2685  delete [] prev;
2686 
2687  if(ext != NULL)
2688  {
2689  for(unsigned int i = 0; i < vfs->wf->ext.size(); i++)
2690  ext[i]->free_fn();
2691  delete [] ext;
2692  }
2693 
2694  e->free();
2695  delete e;
2696  delete [] jacobian_x_weights;
2697  }
2698  }
2699 
2700  // This is just cleaning up after ourselves.
2701  // Clear the transformations from the RefMaps and all functions.
2702  for(unsigned int fns_i = 0; fns_i < current_state->num; fns_i++)
2703  {
2704  const Mesh * mesh_i;
2705  if(dynamic_cast<PrecalcShapeset*>(fn[fns_i]) != NULL)
2706  mesh_i = spaces[fns_i]->get_mesh();
2707  else
2708  mesh_i = (dynamic_cast<MeshFunction<Scalar>*>(fn[fns_i]))->get_mesh();
2709 
2710  fn[fns_i]->set_transform(neighbor_searches.get(mesh_i->get_seq() - min_dg_mesh_seq)->original_central_el_transform);
2711  }
2712 
2713  // Also clear the transformations from the slave psss and refmaps.
2714  for (unsigned int i = 0; i < spaces.size(); i++)
2715  {
2716  current_spss[i]->set_master_transform();
2717  current_refmaps[i]->force_transform(current_pss[i]->get_transform(), current_pss[i]->get_ctm());
2718  }
2719  }
2720 
2721  template<typename Scalar>
2722  Func<Scalar>** DiscreteProblem<Scalar>::init_ext_fns(Hermes::vector<MeshFunction<Scalar>*> ext,
2723  LightArray<NeighborSearch<Scalar>*>& neighbor_searches, int order, unsigned int min_dg_mesh_seq)
2724  {
2725  Func<Scalar>** ext_fns = new Func<Scalar>*[ext.size()];
2726  for(unsigned int j = 0; j < ext.size(); j++)
2727  {
2728  neighbor_searches.get(ext[j]->get_mesh()->get_seq() - min_dg_mesh_seq)->set_quad_order(order);
2729  ext_fns[j] = neighbor_searches.get(ext[j]->get_mesh()->get_seq() - min_dg_mesh_seq)->init_ext_fn(ext[j]);
2730  }
2731 
2732  return ext_fns;
2733  }
2734 
2735  template<typename Scalar>
2736  bool DiscreteProblem<Scalar>::init_neighbors(LightArray<NeighborSearch<Scalar>*>& neighbor_searches,
2737  Traverse::State* current_state, unsigned int min_dg_mesh_seq)
2738  {
2739  // Initialize the NeighborSearches.
2740  for(unsigned int i = 0; i < spaces.size(); i++)
2741  {
2742  if(i > 0 && spaces[i - 1]->get_mesh()->get_seq() == spaces[i]->get_mesh()->get_seq())
2743  continue;
2744  else
2745  if(!neighbor_searches.present(spaces[i]->get_mesh()->get_seq() - min_dg_mesh_seq))
2746  {
2747  NeighborSearch<Scalar>* ns = new NeighborSearch<Scalar>(current_state->e[i], spaces[i]->get_mesh());
2748  ns->original_central_el_transform = current_state->sub_idx[i];
2749  neighbor_searches.add(ns, spaces[i]->get_mesh()->get_seq() - min_dg_mesh_seq);
2750  }
2751  }
2752 
2753  // Calculate respective neighbors.
2754  // Also clear the initial_sub_idxs from the central element transformations
2755  // of NeighborSearches with multiple neighbors.
2756  // If all DG meshes have this edge as intra-edge, pass.
2757  bool DG_intra = false;
2758  for(unsigned int i = 0; i < spaces.size(); i++)
2759  {
2760  if(!(i > 0 && spaces[i]->get_mesh()->get_seq() - min_dg_mesh_seq == spaces[i-1]->get_mesh()->get_seq() - min_dg_mesh_seq))
2761  {
2762  if(neighbor_searches.get(spaces[i]->get_mesh()->get_seq() - min_dg_mesh_seq)->set_active_edge_multimesh(current_state->isurf) && spaces[i]->get_type() == HERMES_L2_SPACE)
2763  DG_intra = true;
2764  neighbor_searches.get(spaces[i]->get_mesh()->get_seq() - min_dg_mesh_seq)->clear_initial_sub_idx();
2765  }
2766  }
2767  return DG_intra;
2768  }
2769 
2770  template<typename Scalar>
2771  void DiscreteProblem<Scalar>::build_multimesh_tree(NeighborNode* root,
2772  LightArray<NeighborSearch<Scalar>*>& neighbor_searches)
2773  {
2774  for(unsigned int i = 0; i < neighbor_searches.get_size(); i++)
2775  if(neighbor_searches.present(i))
2776  {
2777  NeighborSearch<Scalar>* ns = neighbor_searches.get(i);
2778  if(ns->n_neighbors == 1 &&
2779  (ns->central_transformations.get_size() == 0 || ns->central_transformations.get(0)->num_levels == 0))
2780  continue;
2781  for(unsigned int j = 0; j < ns->n_neighbors; j++)
2782  if(ns->central_transformations.present(j))
2783  insert_into_multimesh_tree(root, ns->central_transformations.get(j)->transf, ns->central_transformations.get(j)->num_levels);
2784  }
2785  }
2786 
2787  template<typename Scalar>
2788  void DiscreteProblem<Scalar>::insert_into_multimesh_tree(NeighborNode* node,
2789  unsigned int* transformations,
2790  unsigned int transformation_count)
2791  {
2792  // If we are already in the leaf.
2793  if(transformation_count == 0)
2794  return;
2795  // Both sons are null. We have to add a new node. Let us do it for the left son of the node.
2796  if(node->get_left_son() == NULL && node->get_right_son() == NULL)
2797  {
2798  node->set_left_son(new NeighborNode(node, transformations[0]));
2799  insert_into_multimesh_tree(node->get_left_son(), transformations + 1, transformation_count - 1);
2800  }
2801  // At least the left son is not null (it is impossible for only the right one to be non-null, because
2802  // the left one always gets into the tree first, as seen above).
2803  else
2804  {
2805  // The existing left son matches, so we continue through it.
2806  if(node->get_left_son()->get_transformation() == transformations[0])
2807  insert_into_multimesh_tree(node->get_left_son(), transformations + 1, transformation_count - 1);
2808  // Otherwise, if the right son exists, check whether it matches; if not, report an error.
2809  else if(node->get_right_son() != NULL)
2810  {
2811  if(node->get_right_son()->get_transformation() == transformations[0])
2812  insert_into_multimesh_tree(node->get_right_son(), transformations + 1, transformation_count - 1);
2813  else
2814  throw Hermes::Exceptions::Exception("More than two possible sons in insert_into_multimesh_tree().");
2815  }
2816  // If the right son does not exist and the left one did not match, create a right son and continue through it.
2817  else
2818  {
2819  node->set_right_son(new NeighborNode(node, transformations[0]));
2820  insert_into_multimesh_tree(node->get_right_son(), transformations + 1, transformation_count - 1);
2821  }
2822  }
2823  }
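
 /* A standalone sketch of the same "at most two sons" path tree (Node_ and insert_path are
    hypothetical names, not library types). Every central-element transformation path is
    inserted digit by digit; needing a third distinct child at any level is an error,
    mirroring the exception thrown above.

    #include <cstddef>
    #include <stdexcept>

    struct Node_
    {
      unsigned int transformation;
      Node_* left;
      Node_* right;
      Node_(unsigned int t) : transformation(t), left(NULL), right(NULL) {}
    };

    void insert_path(Node_* node, const unsigned int* path, unsigned int count)
    {
      if(count == 0)                                     // the whole path is already stored
        return;
      if(node->left == NULL)
        node->left = new Node_(path[0]);
      if(node->left->transformation == path[0])
        return insert_path(node->left, path + 1, count - 1);
      if(node->right == NULL)
        node->right = new Node_(path[0]);
      if(node->right->transformation == path[0])
        return insert_path(node->right, path + 1, count - 1);
      throw std::runtime_error("more than two possible sons");
    }
 */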
2824 
2825  template<typename Scalar>
2826  Hermes::vector<Hermes::vector<unsigned int>*> DiscreteProblem<Scalar>::get_multimesh_neighbors_transformations(NeighborNode* multimesh_tree)
2827  {
2828  // Initialize the vector.
2829  Hermes::vector<Hermes::vector<unsigned int>*> running_transformations;
2830  // Prepare the first neighbor's vector.
2831  running_transformations.push_back(new Hermes::vector<unsigned int>);
2832  // Fill the vector.
2833  traverse_multimesh_tree(multimesh_tree, running_transformations);
2834  return running_transformations;
2835  }
2836 
2837  template<typename Scalar>
2838  void DiscreteProblem<Scalar>::traverse_multimesh_tree(NeighborNode* node,
2839  Hermes::vector<Hermes::vector<unsigned int>*>& running_transformations)
2840  {
2841  // If we are in the root.
2842  if(node->get_transformation() == 0)
2843  {
2844  if(node->get_left_son() != NULL)
2845  traverse_multimesh_tree(node->get_left_son(), running_transformations);
2846  if(node->get_right_son() != NULL)
2847  traverse_multimesh_tree(node->get_right_son(), running_transformations);
2848  // Delete the vector prepared by the last accessed leaf.
2849  delete running_transformations.back();
2850  running_transformations.pop_back();
2851  return;
2852  }
2853  // If we are in a leaf.
2854  if(node->get_left_son() == NULL && node->get_right_son() == NULL)
2855  {
2856  // Create a vector for the new neighbor.
2857  Hermes::vector<unsigned int>* new_neighbor_transformations = new Hermes::vector<unsigned int>;
2858  // Copy there the whole path except for this leaf.
2859  for(unsigned int i = 0; i < running_transformations.back()->size(); i++)
2860  new_neighbor_transformations->push_back((*running_transformations.back())[i]);
2861  // Insert this leaf into the current running transformation, thus complete it.
2862  running_transformations.back()->push_back(node->get_transformation());
2863  // Make the new_neighbor_transformations the current running transformation.
2864  running_transformations.push_back(new_neighbor_transformations);
2865  return;
2866  }
2867  else
2868  {
2869  running_transformations.back()->push_back(node->get_transformation());
2870  if(node->get_left_son() != NULL)
2871  traverse_multimesh_tree(node->get_left_son(), running_transformations);
2872  if(node->get_right_son() != NULL)
2873  traverse_multimesh_tree(node->get_right_son(), running_transformations);
2874  running_transformations.back()->pop_back();
2875  return;
2876  }
2877  return;
2878  }
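
 /* What the traversal above computes, as a standalone sketch (collect_paths is a
    hypothetical helper): a depth-first walk that records the transformation path from the
    root to every leaf, one vector per leaf, i.e. one per resulting neighbor. It is meant
    to be called on each son of the root, since the root's transformation (0) is not part
    of any path.

    #include <cstddef>
    #include <vector>

    struct Node_
    {
      unsigned int transformation;
      Node_* left;
      Node_* right;
    };

    void collect_paths(const Node_* node, std::vector<unsigned int>& current,
                       std::vector<std::vector<unsigned int> >& paths)
    {
      current.push_back(node->transformation);
      if(node->left == NULL && node->right == NULL)      // leaf: one complete path
        paths.push_back(current);
      if(node->left != NULL)
        collect_paths(node->left, current, paths);
      if(node->right != NULL)
        collect_paths(node->right, current, paths);
      current.pop_back();
    }
 */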
2879 
2880  template<typename Scalar>
2881  void DiscreteProblem<Scalar>::update_neighbor_search(NeighborSearch<Scalar>* ns, NeighborNode* multimesh_tree)
2882  {
2883  // This has to be done, because we pass ns by reference and the number of neighbors is changing.
2884  unsigned int num_neighbors = ns->get_num_neighbors();
2885 
2886  for(unsigned int i = 0; i < num_neighbors; i++)
2887  {
2888  // Find the node corresponding to this neighbor in the tree.
2889  NeighborNode* node;
2890  if(ns->central_transformations.present(i))
2891  node = find_node(ns->central_transformations.get(i)->transf, ns->central_transformations.get(i)->num_levels, multimesh_tree);
2892  else
2893  node = multimesh_tree;
2894 
2895  // Update the NeighborSearch.
2896  int added = update_ns_subtree(ns, node, i);
2897  i += added;
2898  num_neighbors += added;
2899  }
2900  }
2901 
2902  template<typename Scalar>
2903  NeighborNode* DiscreteProblem<Scalar>::find_node(unsigned int* transformations,
2904  unsigned int transformation_count,
2905  NeighborNode* node)
2906  {
2907  // If there are no transformations left.
2908  if(transformation_count == 0)
2909  return node;
2910  else
2911  {
2912  if(node->get_left_son() != NULL)
2913  {
2914  if(node->get_left_son()->get_transformation() == transformations[0])
2915  return find_node(transformations + 1, transformation_count - 1, node->get_left_son());
2916  }
2917  if(node->get_right_son() != NULL)
2918  {
2919  if(node->get_right_son()->get_transformation() == transformations[0])
2920  return find_node(transformations + 1, transformation_count - 1, node->get_right_son());
2921  }
2922  }
2923  // We should always be able to empty the transformations array.
2924  throw
2925  Hermes::Exceptions::Exception("Transformation of a central element not found in the multimesh tree.");
2926  return NULL;
2927  }
2928 
2929  template<typename Scalar>
2930  int DiscreteProblem<Scalar>::update_ns_subtree(NeighborSearch<Scalar>* ns,
2931  NeighborNode* node, unsigned int ith_neighbor)
2932  {
2933  int current_count = ns->get_num_neighbors();
2934 
2935  // No subtree => no work.
2936  // Also check the assertion that if one son is null, then so is the other.
2937  if(node->get_left_son() == NULL)
2938  {
2939  if(node->get_right_son() != NULL)
2940  throw Hermes::Exceptions::Exception("Only one son (right) not null in DiscreteProblem<Scalar>::update_ns_subtree.");
2941  return 0;
2942  }
2943 
2944  // Key part.
2945  // Begin with storing the info about the current neighbor.
2946  Element* neighbor = ns->neighbors[ith_neighbor];
2947  typename NeighborSearch<Scalar>::NeighborEdgeInfo edge_info = ns->neighbor_edges[ith_neighbor];
2948 
2949  // Initialize the vector for central transformations.
2950  Hermes::vector<Hermes::vector<unsigned int>*> running_central_transformations;
2951  // Prepare the first new neighbor's vector. Push back the current transformations (in case of GO_DOWN neighborhood).
2952  running_central_transformations.push_back(new Hermes::vector<unsigned int>);
2953  if(ns->central_transformations.present(ith_neighbor))
2954  ns->central_transformations.get(ith_neighbor)->copy_to(running_central_transformations.back());
2955 
2956  // Initialize the vector for neighbor transformations.
2957  Hermes::vector<Hermes::vector<unsigned int>*> running_neighbor_transformations;
2958  // Prepare the first new neighbor's vector. Push back the current transformations (in case of GO_UP/NO_TRF neighborhood).
2959  running_neighbor_transformations.push_back(new Hermes::vector<unsigned int>);
2960  if(ns->neighbor_transformations.present(ith_neighbor))
2961  ns->neighbor_transformations.get(ith_neighbor)->copy_to(running_neighbor_transformations.back());
2962 
2963  // Delete the current neighbor.
2964  ns->delete_neighbor(ith_neighbor);
2965 
2966  // Move down the subtree.
2967  if(node->get_left_son() != NULL)
2968  traverse_multimesh_subtree(node->get_left_son(), running_central_transformations,
2969  running_neighbor_transformations, edge_info, ns->active_edge,
2970  ns->central_el->get_mode());
2971  if(node->get_right_son() != NULL)
2972  traverse_multimesh_subtree(node->get_right_son(), running_central_transformations,
2973  running_neighbor_transformations, edge_info, ns->active_edge,
2974  ns->central_el->get_mode());
2975 
2976  // Delete the last neighbor's info (this is a dead end caused by the function traverse_multimesh_subtree).
2977  delete running_central_transformations.back();
2978  running_central_transformations.pop_back();
2979  delete running_neighbor_transformations.back();
2980  running_neighbor_transformations.pop_back();
2981 
2982  // Insert new neighbors.
2983  for(unsigned int i = 0; i < running_central_transformations.size(); i++)
2984  {
2985  ns->neighbors.push_back(neighbor);
2986  ns->neighbor_edges.push_back(edge_info);
2987 
2988  if(!ns->central_transformations.present(ns->n_neighbors))
2989  ns->central_transformations.add(new typename NeighborSearch<Scalar>::Transformations, ns->n_neighbors);
2990  if(!ns->neighbor_transformations.present(ns->n_neighbors))
2991  ns->neighbor_transformations.add(new typename NeighborSearch<Scalar>::Transformations, ns->n_neighbors);
2992  ns->central_transformations.get(ns->n_neighbors)->copy_from(*running_central_transformations[i]);
2993  ns->neighbor_transformations.get(ns->n_neighbors)->copy_from(*running_neighbor_transformations[i]);
2994 
2995  ns->n_neighbors++;
2996  }
2997 
2998  for(unsigned int i = 0; i < running_central_transformations.size(); i++)
2999  delete running_central_transformations[i];
3000  for(unsigned int i = 0; i < running_neighbor_transformations.size(); i++)
3001  delete running_neighbor_transformations[i];
3002 
3003  // Return the net change in the number of neighbors.
3004  return ns->get_num_neighbors() - current_count;
3005  }
3006 
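    // --- Editor's sketch: illustration only, not part of the library. ---------------
    // update_ns_subtree above follows a "replace one coarse neighbor by several finer
    // ones" pattern: remember the neighbor's data, delete its entry, let the recursive
    // traversal collect one transformation path per replacement, append one new entry
    // per collected path, and report the change in the neighbor count. The standalone
    // helper below (hypothetical name, plain std::vector instead of the Hermes
    // containers) shows the same bookkeeping in isolation.
    namespace
    {
      int replace_entry_with_refinements(std::vector<int>& items, unsigned int ith,
        const std::vector<int>& refinements)
      {
        const int old_count = static_cast<int>(items.size());
        items.erase(items.begin() + ith);                      // Delete the coarse entry.
        for(unsigned int i = 0; i < refinements.size(); i++)   // Append its refinements.
          items.push_back(refinements[i]);
        return static_cast<int>(items.size()) - old_count;     // Net change, as returned above.
      }
    }
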
3007  template<typename Scalar>
3008  void DiscreteProblem<Scalar>::traverse_multimesh_subtree(NeighborNode* node,
3009  Hermes::vector<Hermes::vector<unsigned int>*>& running_central_transformations,
3010  Hermes::vector<Hermes::vector<unsigned int>*>& running_neighbor_transformations,
3011  const typename NeighborSearch<Scalar>::NeighborEdgeInfo& edge_info, const int& active_edge, const int& mode)
3012  {
3013  // If we are at a leaf.
3014  if(node->get_left_son() == NULL && node->get_right_son() == NULL)
3015  {
3016  // Create vectors for the new neighbor.
3017  Hermes::vector<unsigned int>* new_neighbor_central_transformations = new Hermes::vector<unsigned int>;
3018  Hermes::vector<unsigned int>* new_neighbor_neighbor_transformations = new Hermes::vector<unsigned int>;
3019 
3020  // Copy the whole path so far (everything except this leaf) into them.
3021  for(unsigned int i = 0; i < running_central_transformations.back()->size(); i++)
3022  new_neighbor_central_transformations->push_back((*running_central_transformations.back())[i]);
3023  for(unsigned int i = 0; i < running_neighbor_transformations.back()->size(); i++)
3024  new_neighbor_neighbor_transformations->push_back((*running_neighbor_transformations.back())[i]);
3025 
3026  // Insert this leaf's transformation into the current running central transformation, completing it.
3027  running_central_transformations.back()->push_back(node->get_transformation());
3028 
3029  // Make the new_neighbor_central_transformations the current running central transformation.
3030  running_central_transformations.push_back(new_neighbor_central_transformations);
3031 
3032  // Take care of the neighbor transformation.
3033  // Insert the appropriate edge info for this leaf into the current running neighbor transformation, completing it.
3034  if(mode == HERMES_MODE_TRIANGLE)
3035  if((active_edge == 0 && node->get_transformation() == 0) ||
3036  (active_edge == 1 && node->get_transformation() == 1) ||
3037  (active_edge == 2 && node->get_transformation() == 2))
3038  running_neighbor_transformations.back()->push_back((!edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % 3));
3039  else
3040  running_neighbor_transformations.back()->push_back((edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % 3));
3041  // Quads.
3042  else
3043  if((active_edge == 0 && (node->get_transformation() == 0 || node->get_transformation() == 6)) ||
3044  (active_edge == 1 && (node->get_transformation() == 1 || node->get_transformation() == 4)) ||
3045  (active_edge == 2 && (node->get_transformation() == 2 || node->get_transformation() == 7)) ||
3046  (active_edge == 3 && (node->get_transformation() == 3 || node->get_transformation() == 5)))
3047  running_neighbor_transformations.back()->push_back((!edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % H2D_MAX_NUMBER_EDGES));
3048  else
3049  running_neighbor_transformations.back()->push_back((edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % H2D_MAX_NUMBER_EDGES));
3050 
3051  // Make the new_neighbor_neighbor_transformations the current running neighbor transformation.
3052  running_neighbor_transformations.push_back(new_neighbor_neighbor_transformations);
3053 
3054  return;
3055  }
3056  else
3057  {
3058  // Insert this node's transformation into the current running central transformation (it is popped again below).
3059  running_central_transformations.back()->push_back(node->get_transformation());
3060 
3061  // Insert the appropriate edge info for this node into the current running neighbor transformation.
3062  // Triangles.
3063  if(mode == HERMES_MODE_TRIANGLE)
3064  if((active_edge == 0 && node->get_transformation() == 0) ||
3065  (active_edge == 1 && node->get_transformation() == 1) ||
3066  (active_edge == 2 && node->get_transformation() == 2))
3067  running_neighbor_transformations.back()->push_back((!edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % 3));
3068  else
3069  running_neighbor_transformations.back()->push_back((edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % 3));
3070  // Quads.
3071  else
3072  if((active_edge == 0 && (node->get_transformation() == 0 || node->get_transformation() == 6)) ||
3073  (active_edge == 1 && (node->get_transformation() == 1 || node->get_transformation() == 4)) ||
3074  (active_edge == 2 && (node->get_transformation() == 2 || node->get_transformation() == 7)) ||
3075  (active_edge == 3 && (node->get_transformation() == 3 || node->get_transformation() == 5)))
3076  running_neighbor_transformations.back()->push_back((!edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % H2D_MAX_NUMBER_EDGES));
3077  else
3078  running_neighbor_transformations.back()->push_back((edge_info.orientation ? edge_info.local_num_of_edge : (edge_info.local_num_of_edge + 1) % H2D_MAX_NUMBER_EDGES));
3079 
3080  // Move down.
3081  if(node->get_left_son() != NULL)
3082  traverse_multimesh_subtree(node->get_left_son(), running_central_transformations, running_neighbor_transformations,
3083  edge_info, active_edge, mode);
3084  if(node->get_right_son() != NULL)
3085  traverse_multimesh_subtree(node->get_right_son(), running_central_transformations, running_neighbor_transformations,
3086  edge_info, active_edge, mode);
3087 
3088  // Take this transformation out again (backtrack before returning to the parent).
3089  running_central_transformations.back()->pop_back();
3090  running_neighbor_transformations.back()->pop_back();
3091  return;
3092  }
3093  return;
3094  }
3095 
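    // --- Editor's sketch: illustration only, not part of the library. ---------------
    // traverse_multimesh_subtree above uses the classic depth-first "push on the way
    // down, pop on the way back" technique: each inner node extends the running path,
    // each leaf completes one path, and the pop before returning restores the state
    // for the sibling subtree. The same technique, stripped of the edge/orientation
    // bookkeeping, looks like this (the TrfNode struct and collect_paths name are
    // hypothetical; the caller passes a non-null root and empty containers):
    namespace
    {
      struct TrfNode { unsigned int trf; TrfNode* left; TrfNode* right; };

      void collect_paths(TrfNode* node, std::vector<unsigned int>& running,
        std::vector<std::vector<unsigned int> >& paths)
      {
        running.push_back(node->trf);                 // Extend the current path.
        if(node->left == NULL && node->right == NULL)
          paths.push_back(running);                   // A leaf completes one path.
        else
        {
          if(node->left != NULL)
            collect_paths(node->left, running, paths);
          if(node->right != NULL)
            collect_paths(node->right, running, paths);
        }
        running.pop_back();                           // Backtrack before returning.
      }
    }
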
3096  NeighborNode::NeighborNode(NeighborNode* parent, unsigned int transformation) : parent(parent), transformation(transformation)
3097  {
3098  left_son = right_son = NULL;
3099  }
3100  NeighborNode::~NeighborNode()
3101  {
3102  if(left_son != NULL)
3103  {
3104  delete left_son;
3105  left_son = NULL;
3106  }
3107  if(right_son != NULL)
3108  {
3109  delete right_son;
3110  right_son = NULL;
3111  }
3112  }
3113  void NeighborNode::set_left_son(NeighborNode* left_son)
3114  {
3115  this->left_son = left_son;
3116  }
3117  void NeighborNode::set_right_son(NeighborNode* right_son)
3118  {
3119  this->right_son = right_son;
3120  }
3121  void NeighborNode::set_transformation(unsigned int transformation)
3122  {
3123  this->transformation = transformation;
3124  }
3125  NeighborNode* NeighborNode::get_left_son()
3126  {
3127  return left_son;
3128  }
3129  NeighborNode* NeighborNode::get_right_son()
3130  {
3131  return right_son;
3132  }
3133  unsigned int NeighborNode::get_transformation()
3134  {
3135  return this->transformation;
3136  }
3137 
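    // --- Editor's sketch: illustration only, not part of the library. ---------------
    // The NeighborNode destructor above frees both subtrees, so only the root of a
    // multimesh tree ever has to be deleted explicitly. The same ownership pattern in
    // a standalone, hypothetical form (OwningNode is not a Hermes type; note that
    // plain delete on a NULL pointer is already a safe no-op, which is why the
    // explicit NULL checks in ~NeighborNode are a stylistic choice rather than a
    // requirement):
    namespace
    {
      struct OwningNode
      {
        OwningNode* left;
        OwningNode* right;
        OwningNode() : left(NULL), right(NULL) {}
        ~OwningNode() { delete left; delete right; }  // Recursively frees the subtree.
      };

      void owning_node_example()
      {
        OwningNode* root = new OwningNode();
        root->left = new OwningNode();
        root->left->right = new OwningNode();
        delete root;                                  // Frees the entire subtree.
      }
    }
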
3138  template class HERMES_API DiscreteProblem<double>;
3139  template class HERMES_API DiscreteProblem<std::complex<double> >;
3140  }
3141 }