PRAgMaTIc (master) - Mesh.h
1 /* Copyright (C) 2010 Imperial College London and others.
2  *
3  * Please see the AUTHORS file in the main source directory for a
4  * full list of copyright holders.
5  *
6  * Gerard Gorman
7  * Applied Modelling and Computation Group
8  * Department of Earth Science and Engineering
9  * Imperial College London
10  *
11  * g.gorman@imperial.ac.uk
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials provided
21  * with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
24  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
25  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
30  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
32  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
33  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
34  * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #ifndef MESH_H
39 #define MESH_H
40 
41 #include <algorithm>
42 #include <vector>
43 #include <set>
44 #include <stack>
45 #include <cmath>
46 #include <stdint.h>
47 
48 #ifdef HAVE_BOOST_UNORDERED_MAP_HPP
49 #include <boost/unordered_map.hpp>
50 #endif
51 
52 #ifdef _OPENMP
53 #include <omp.h>
54 #endif
55 
56 #include "mpi_tools.h"
57 
58 #include "PragmaticTypes.h"
59 #include "PragmaticMinis.h"
60 
61 #include "ElementProperty.h"
62 #include "MetricTensor.h"
63 #include "HaloExchange.h"
64 
70 template<typename real_t> class Mesh{
71  public:
72 
81  Mesh(int NNodes, int NElements, const index_t *ENList, const real_t *x, const real_t *y){
82 #ifdef HAVE_MPI
83  _mpi_comm = MPI_COMM_WORLD;
84 #endif
85  _init(NNodes, NElements, ENList, x, y, NULL, NULL, NULL);
86  }
87 
88 #ifdef HAVE_MPI
89 
100  Mesh(int NNodes, int NElements, const index_t *ENList,
101  const real_t *x, const real_t *y, const index_t *lnn2gnn,
102  const index_t *owner_range, MPI_Comm mpi_comm){
103  _mpi_comm = mpi_comm;
104  _init(NNodes, NElements, ENList, x, y, NULL, lnn2gnn, owner_range);
105  }
106 #endif
107 
117  Mesh(int NNodes, int NElements, const index_t *ENList,
118  const real_t *x, const real_t *y, const real_t *z){
119 #ifdef HAVE_MPI
120  _mpi_comm = MPI_COMM_WORLD;
121 #endif
122  _init(NNodes, NElements, ENList, x, y, z, NULL, NULL);
123  }
124 
125 #ifdef HAVE_MPI
126 
138  Mesh(int NNodes, int NElements, const index_t *ENList,
139  const real_t *x, const real_t *y, const real_t *z, const index_t *lnn2gnn,
140  const index_t *owner_range, MPI_Comm mpi_comm){
141  _mpi_comm = mpi_comm;
142  _init(NNodes, NElements, ENList, x, y, z, lnn2gnn, owner_range);
143  }
144 #endif
145 
147  ~Mesh(){
148  delete property;
149  }
150 
152  index_t append_vertex(const real_t *x, const double *m){
153  for(size_t i=0;i<ndims;i++)
154  _coords[ndims*NNodes+i] = x[i];
155 
156  for(size_t i=0;i<msize;i++)
157  metric[msize*NNodes+i] = m[i];
158 
159  ++NNodes;
160 
161  return get_number_nodes()-1;
162  }
163 
165  void erase_vertex(const index_t nid){
166  NNList[nid].clear();
167  NEList[nid].clear();
168  node_owner[nid] = rank;
169  lnn2gnn[nid] = -1;
170  }
171 
173  index_t append_element(const index_t *n){
174  if(_ENList.size() < (NElements+1)*nloc){
175  _ENList.resize(2*NElements*nloc);
176  boundary.resize(2*NElements*nloc);
177  }
178 
179  for(size_t i=0;i<nloc;i++)
180  _ENList[nloc*NElements+i] = n[i];
181 
182  ++NElements;
183 
184  return get_number_elements()-1;
185  }
186 
188  index_t append_element(const index_t *n, const int *b){
189  if(_ENList.size() < (NElements+1)*nloc){
190  _ENList.resize(2*NElements*nloc);
191  boundary.resize(2*NElements*nloc);
192  }
193 
194  for(size_t i=0;i<nloc;i++){
195  _ENList[nloc*NElements+i] = n[i];
196  boundary[nloc*NElements+i] = b[i];
197  }
198 
199  ++NElements;
200 
201  return get_number_elements()-1;
202  }
203 
204  void create_boundary(){
205  assert(boundary.size()==0);
206 
207  size_t NNodes = get_number_nodes();
208  size_t NElements = get_number_elements();
209 
210  if(ndims==2){
211  // Initialise the boundary array
212  boundary.resize(NElements*3);
213  std::fill(boundary.begin(), boundary.end(), -2);
214 
215  // Create node-element adjacency list.
216  std::vector< std::set<int> > NEList(NNodes);
217  for(size_t i=0;i<NElements;i++){
218  if(_ENList[i*3]==-1)
219  continue;
220 
221  for(size_t j=0;j<3;j++)
222  NEList[_ENList[i*3+j]].insert(i);
223  }
224 
225  // Check neighbourhood of each element
226  for(size_t i=0;i<NElements;i++){
227  if(_ENList[i*3]==-1)
228  continue;
229 
230  for(int j=0;j<3;j++){
231  int n1 = _ENList[i*3+(j+1)%3];
232  int n2 = _ENList[i*3+(j+2)%3];
233 
234  if(is_owned_node(n1)||is_owned_node(n2)){
235  std::set<int> neighbours;
236  set_intersection(NEList[n1].begin(), NEList[n1].end(),
237  NEList[n2].begin(), NEList[n2].end(),
238  inserter(neighbours, neighbours.begin()));
239 
240  if(neighbours.size()==2){
241  if(*neighbours.begin()==(int)i)
242  boundary[i*3+j] = *neighbours.rbegin();
243  else
244  boundary[i*3+j] = *neighbours.begin();
245  }
246  }else{
247  // This is a halo facet.
248  boundary[i*3+j] = -1;
249  }
250  }
251  }
252  }else{ // ndims==3
253  // Initialise the boundary array
254  boundary.resize(NElements*4);
255  std::fill(boundary.begin(), boundary.end(), -2);
256 
257  // Create node-element adjacency list.
258  std::vector< std::set<int> > NEList(NNodes);
259  for(size_t i=0;i<NElements;i++){
260  if(_ENList[i*4]==-1)
261  continue;
262 
263  for(size_t j=0;j<4;j++)
264  NEList[_ENList[i*4+j]].insert(i);
265  }
266 
267  // Check neighbourhood of each element
268  for(size_t i=0;i<NElements;i++){
269  if(_ENList[i*4]==-1)
270  continue;
271 
272  for(int j=0;j<4;j++){
273  int n1 = _ENList[i*4+(j+1)%4];
274  int n2 = _ENList[i*4+(j+2)%4];
275  int n3 = _ENList[i*4+(j+3)%4];
276 
277  if(is_owned_node(n1)||is_owned_node(n2)||is_owned_node(n3)){
278  std::set<int> edge_neighbours;
279  set_intersection(NEList[n1].begin(), NEList[n1].end(),
280  NEList[n2].begin(), NEList[n2].end(),
281  inserter(edge_neighbours, edge_neighbours.begin()));
282 
283  std::set<int> neighbours;
284  set_intersection(NEList[n3].begin(), NEList[n3].end(),
285  edge_neighbours.begin(), edge_neighbours.end(),
286  inserter(neighbours, neighbours.begin()));
287 
288  if(neighbours.size()==2){
289  if(*neighbours.begin()==(int)i)
290  boundary[i*4+j] = *neighbours.rbegin();
291  else
292  boundary[i*4+j] = *neighbours.begin();
293  }
294  }else{
295  // This is a halo facet.
296  boundary[i*4+j] = -1;
297  }
298  }
299  }
300  }
301  for(std::vector<int>::iterator it=boundary.begin();it!=boundary.end();++it)
302  if(*it==-2)
303  *it = 1;
304  else if(*it>=0)
305  *it = 0;
306  }
307 
308 
309  void set_boundary(int nfacets, const int *facets, const int *ids){
310  assert(boundary.size()==0);
311  create_boundary();
312 
313  // Create a map of facets to ids.
314  std::map< std::set<int>, int> facet2id;
315  for(int i=0;i<nfacets;i++){
316  std::set<int> facet;
317  for(int j=0;j<ndims;j++){
318  facet.insert(facets[i*ndims+j]);
319  }
320  assert(facet2id.find(facet)==facet2id.end());
321  facet2id[facet] = ids[i];
322  }
323 
324  // Sweep through boundary and set ids.
325  size_t NElements = get_number_elements();
326  for(int i=0;i<NElements;i++){
327  for(int j=0;j<nloc;j++){
328  if(boundary[i*nloc+j]==1){
329  std::set<int> facet;
330  for(int k=1;k<nloc;k++){
331  facet.insert(_ENList[i*nloc+(j+k)%nloc]);
332  }
333  assert(facet2id.find(facet)!=facet2id.end());
334  boundary[i*nloc+j] = facet2id[facet];
335  }
336  }
337  }
338  }
339 
341  void erase_element(const index_t eid){
342  const index_t *n = get_element(eid);
343 
344  for(size_t i=0; i<nloc; ++i)
345  NEList[n[i]].erase(eid);
346 
347  _ENList[eid*nloc] = -1;
348  }
349 
351  void invert_element(size_t eid){
352  int tmp = _ENList[eid*nloc];
353  _ENList[eid*nloc] = _ENList[eid*nloc+1];
354  _ENList[eid*nloc+1] = tmp;
355  }
356 
358  const index_t *get_element(size_t eid) const{
359  return &(_ENList[eid*nloc]);
360  }
361 
363  void get_element(size_t eid, index_t *ele) const{
364  for(size_t i=0;i<nloc;i++)
365  ele[i] = _ENList[eid*nloc+i];
366  }
367 
369  size_t get_number_nodes() const{
370  return NNodes;
371  }
372 
374  size_t get_number_elements() const{
375  return NElements;
376  }
377 
379  size_t get_number_dimensions() const{
380  return ndims;
381  }
382 
384  const real_t *get_coords(index_t nid) const{
385  return &(_coords[nid*ndims]);
386  }
387 
389  void get_coords(index_t nid, real_t *x) const{
390  for(size_t i=0;i<ndims;i++)
391  x[i] = _coords[nid*ndims+i];
392  return;
393  }
394 
396  const double *get_metric(index_t nid) const{
397  assert(metric.size()>0);
398  return &(metric[nid*msize]);
399  }
400 
402  void get_metric(index_t nid, double *m) const{
403  assert(metric.size()>0);
404  for(size_t i=0;i<msize;i++)
405  m[i] = metric[nid*msize+i];
406  return;
407  }
408 
410  bool is_halo_node(index_t nid) const{
411  return (node_owner[nid]!= rank || send_halo.count(nid)>0);
412  }
413 
415  bool is_owned_node(index_t nid) const{
416  return node_owner[nid] == rank;
417  }
418 
420  double get_lmean(){
421  int NNodes = get_number_nodes();
422  double total_length=0;
423  int nedges=0;
424 // #pragma omp parallel reduction(+:total_length,nedges)
425  {
426 #pragma omp for schedule(static)
427  for(int i=0;i<NNodes;i++){
428  if(is_owned_node(i) && (NNList[i].size()>0))
429  for(typename std::vector<index_t>::const_iterator it=NNList[i].begin();it!=NNList[i].end();++it){
430  if(i<*it){ // Ensure that every edge length is only calculated once.
431  double length = calc_edge_length(i, *it);
432 #pragma omp atomic
433  total_length += length;
434 #pragma omp atomic
435  nedges++;
436  }
437  }
438  }
439  }
440 
441 #ifdef HAVE_MPI
442  if(num_processes>1){
443  MPI_Allreduce(MPI_IN_PLACE, &total_length, 1, MPI_DOUBLE, MPI_SUM, _mpi_comm);
444  MPI_Allreduce(MPI_IN_PLACE, &nedges, 1, MPI_INT, MPI_SUM, _mpi_comm);
445  }
446 #endif
447 
448  double mean = total_length/nedges;
449 
450  return mean;
451  }
452 
454  double calculate_perimeter(){
455  int NElements = get_number_elements();
456  if(ndims==2){
457  long double total_length=0;
458 
459  if(num_processes>1){
460 #ifdef HAVE_MPI
461  for(int i=0;i<NElements;i++){
462  if(_ENList[i*nloc] < 0)
463  continue;
464 
465  for(int j=0;j<3;j++){
466  int n1 = _ENList[i*nloc+(j+1)%3];
467  int n2 = _ENList[i*nloc+(j+2)%3];
468 
469  if(boundary[i*nloc+j]>0 && (std::min(node_owner[n1], node_owner[n2])==rank)){
470  long double dx = (_coords[n1*2 ]-_coords[n2*2 ]);
471  long double dy = (_coords[n1*2+1]-_coords[n2*2+1]);
472 
473  total_length += sqrt(dx*dx+dy*dy);
474  }
475  }
476  }
477 
478  MPI_Allreduce(MPI_IN_PLACE, &total_length, 1, MPI_LONG_DOUBLE, MPI_SUM, _mpi_comm);
479 #endif
480  }else{
481  for(int i=0;i<NElements;i++){
482  if(_ENList[i*nloc] < 0)
483  continue;
484 
485  for(int j=0;j<3;j++){
486  if(boundary[i*nloc+j]>0){
487  int n1 = _ENList[i*nloc+(j+1)%3];
488  int n2 = _ENList[i*nloc+(j+2)%3];
489 
490  long double dx = (_coords[n1*2 ]-_coords[n2*2 ]);
491  long double dy = (_coords[n1*2+1]-_coords[n2*2+1]);
492 
493  total_length += sqrt(dx*dx+dy*dy);
494  }
495  }
496  }
497  }
498 
499  return total_length;
500  }else{
501  std::cerr<<"ERROR: calculate_perimeter() cannot be used for 3D. Use calculate_area() instead if you want the total surface area.\n";
502  return -1;
503  }
504  }
505 
506 
508  double calculate_area(){
509  int NElements = get_number_elements();
510  long double total_area=0;
511 
512  if(ndims==2){
513  if(num_processes>1){
514 #pragma omp parallel for reduction(+:total_area)
515  for(int i=0;i<NElements;i++){
516  const index_t *n=get_element(i);
517  if(n[0] < 0)
518  continue;
519 
520  // Don't sum if it's not ours
521  if(std::min(node_owner[n[0]], std::min(node_owner[n[1]], node_owner[n[2]]))!=rank)
522  continue;
523 
524  const double *x1 = get_coords(n[0]);
525  const double *x2 = get_coords(n[1]);
526  const double *x3 = get_coords(n[2]);
527 
528  // Use Heron's Formula
529  long double a;
530  {
531  long double dx = (x1[0]-x2[0]);
532  long double dy = (x1[1]-x2[1]);
533  a = sqrt(dx*dx+dy*dy);
534  }
535  long double b;
536  {
537  long double dx = (x1[0]-x3[0]);
538  long double dy = (x1[1]-x3[1]);
539  b = sqrt(dx*dx+dy*dy);
540  }
541  long double c;
542  {
543  long double dx = (x2[0]-x3[0]);
544  long double dy = (x2[1]-x3[1]);
545  c = sqrt(dx*dx+dy*dy);
546  }
547  long double s = 0.5*(a+b+c);
548 
549  total_area += sqrt(s*(s-a)*(s-b)*(s-c));
550  }
551 
552  MPI_Allreduce(MPI_IN_PLACE, &total_area, 1, MPI_LONG_DOUBLE, MPI_SUM, _mpi_comm);
553  }else{
554 #pragma omp parallel for reduction(+:total_area)
555  for(int i=0;i<NElements;i++){
556  const index_t *n=get_element(i);
557  if(n[0] < 0)
558  continue;
559 
560  const double *x1 = get_coords(n[0]);
561  const double *x2 = get_coords(n[1]);
562  const double *x3 = get_coords(n[2]);
563 
564  // Use Heron's Formula
565  long double a;
566  {
567  long double dx = (x1[0]-x2[0]);
568  long double dy = (x1[1]-x2[1]);
569  a = sqrt(dx*dx+dy*dy);
570  }
571  long double b;
572  {
573  long double dx = (x1[0]-x3[0]);
574  long double dy = (x1[1]-x3[1]);
575  b = sqrt(dx*dx+dy*dy);
576  }
577  long double c;
578  {
579  long double dx = (x2[0]-x3[0]);
580  long double dy = (x2[1]-x3[1]);
581  c = sqrt(dx*dx+dy*dy);
582  }
583  long double s = 0.5*(a+b+c);
584 
585  total_area += sqrt(s*(s-a)*(s-b)*(s-c));
586  }
587  }
588  }else{ // 3D
589  if(num_processes>1){
590 #pragma omp parallel for reduction(+:total_area)
591  for(int i=0;i<NElements;i++){
592  const index_t *n=get_element(i);
593  if(n[0] < 0)
594  continue;
595 
596  for(int j=0;j<4;j++){
597  if(boundary[i*nloc+j]<=0)
598  continue;
599 
600  int n1 = n[(j+1)%4];
601  int n2 = n[(j+2)%4];
602  int n3 = n[(j+3)%4];
603 
604  // Don't sum if it's not ours
605  if(std::min(node_owner[n1], std::min(node_owner[n2], node_owner[n3]))!=rank)
606  continue;
607 
608  const double *x1 = get_coords(n1);
609  const double *x2 = get_coords(n2);
610  const double *x3 = get_coords(n3);
611 
612  // Use Heron's Formula
613  long double a;
614  {
615  long double dx = (x1[0]-x2[0]);
616  long double dy = (x1[1]-x2[1]);
617  long double dz = (x1[2]-x2[2]);
618  a = sqrt(dx*dx+dy*dy+dz*dz);
619  }
620  long double b;
621  {
622  long double dx = (x1[0]-x3[0]);
623  long double dy = (x1[1]-x3[1]);
624  long double dz = (x1[2]-x3[2]);
625  b = sqrt(dx*dx+dy*dy+dz*dz);
626  }
627  long double c;
628  {
629  long double dx = (x2[0]-x3[0]);
630  long double dy = (x2[1]-x3[1]);
631  long double dz = (x2[2]-x3[2]);
632  c = sqrt(dx*dx+dy*dy+dz*dz);
633  }
634  long double s = 0.5*(a+b+c);
635 
636  total_area += sqrt(s*(s-a)*(s-b)*(s-c));
637  }
638  }
639 
640  MPI_Allreduce(MPI_IN_PLACE, &total_area, 1, MPI_LONG_DOUBLE, MPI_SUM, _mpi_comm);
641  }else{
642 #pragma omp parallel for reduction(+:total_area)
643  for(int i=0;i<NElements;i++){
644  const index_t *n=get_element(i);
645  if(n[0] < 0)
646  continue;
647 
648  for(int j=0;j<4;j++){
649  if(boundary[i*nloc+j]<=0)
650  continue;
651 
652  int n1 = n[(j+1)%4];
653  int n2 = n[(j+2)%4];
654  int n3 = n[(j+3)%4];
655 
656  const double *x1 = get_coords(n1);
657  const double *x2 = get_coords(n2);
658  const double *x3 = get_coords(n3);
659 
660  // Use Heron's Formula
661  long double a;
662  {
663  long double dx = (x1[0]-x2[0]);
664  long double dy = (x1[1]-x2[1]);
665  long double dz = (x1[2]-x2[2]);
666  a = sqrt(dx*dx+dy*dy+dz*dz);
667  }
668  long double b;
669  {
670  long double dx = (x1[0]-x3[0]);
671  long double dy = (x1[1]-x3[1]);
672  long double dz = (x1[2]-x3[2]);
673  b = sqrt(dx*dx+dy*dy+dz*dz);
674  }
675  long double c;
676  {
677  long double dx = (x2[0]-x3[0]);
678  long double dy = (x2[1]-x3[1]);
679  long double dz = (x2[2]-x3[2]);
680  c = sqrt(dx*dx+dy*dy+dz*dz);
681  }
682  long double s = 0.5*(a+b+c);
683 
684  total_area += sqrt(s*(s-a)*(s-b)*(s-c));
685  }
686  }
687  }
688  }
689  return total_area;
690  }
691 
693  double calculate_volume(){
694  int NElements = get_number_elements();
695  long double total_volume=0;
696 
697  if(ndims==2){
698  std::cerr<<"ERROR: Cannot calculate volume in 2D\n";
699  }else{ // 3D
700  if(num_processes>1){
701 #pragma omp parallel for reduction(+:total_volume)
702  for(int i=0;i<NElements;i++){
703  const index_t *n=get_element(i);
704  if(n[0] < 0)
705  continue;
706 
707  // Don't sum if it's not ours
708  if(std::min(std::min(node_owner[n[0]], node_owner[n[1]]), std::min(node_owner[n[2]], node_owner[n[3]]))!=rank)
709  continue;
710 
711  const double *x0 = get_coords(n[0]);
712  const double *x1 = get_coords(n[1]);
713  const double *x2 = get_coords(n[2]);
714  const double *x3 = get_coords(n[3]);
715 
716  long double x01 = (x0[0] - x1[0]);
717  long double x02 = (x0[0] - x2[0]);
718  long double x03 = (x0[0] - x3[0]);
719 
720  long double y01 = (x0[1] - x1[1]);
721  long double y02 = (x0[1] - x2[1]);
722  long double y03 = (x0[1] - x3[1]);
723 
724  long double z01 = (x0[2] - x1[2]);
725  long double z02 = (x0[2] - x2[2]);
726  long double z03 = (x0[2] - x3[2]);
727 
728  total_volume += (-x03*(z02*y01 - z01*y02) + x02*(z03*y01 - z01*y03) - x01*(z03*y02 - z02*y03));
729  }
730 
731  MPI_Allreduce(MPI_IN_PLACE, &total_volume, 1, MPI_LONG_DOUBLE, MPI_SUM, _mpi_comm);
732  }else{
733 #pragma omp parallel for reduction(+:total_volume)
734  for(int i=0;i<NElements;i++){
735  const index_t *n=get_element(i);
736  if(n[0] < 0)
737  continue;
738 
739  const double *x0 = get_coords(n[0]);
740  const double *x1 = get_coords(n[1]);
741  const double *x2 = get_coords(n[2]);
742  const double *x3 = get_coords(n[3]);
743 
744  long double x01 = (x0[0] - x1[0]);
745  long double x02 = (x0[0] - x2[0]);
746  long double x03 = (x0[0] - x3[0]);
747 
748  long double y01 = (x0[1] - x1[1]);
749  long double y02 = (x0[1] - x2[1]);
750  long double y03 = (x0[1] - x3[1]);
751 
752  long double z01 = (x0[2] - x1[2]);
753  long double z02 = (x0[2] - x2[2]);
754  long double z03 = (x0[2] - x3[2]);
755 
756  total_volume += (-x03*(z02*y01 - z01*y02) + x02*(z03*y01 - z01*y03) - x01*(z03*y02 - z02*y03));
757  }
758  }
759  }
760  return total_volume/6;
761  }
762 
764  double get_qmean() const{
765  double sum=0;
766  int nele=0;
767 
768 #pragma omp parallel reduction(+:sum, nele)
769  {
770 #pragma omp for schedule(static)
771  for(size_t i=0;i<NElements;i++){
772  const index_t *n=get_element(i);
773  if(n[0]<0)
774  continue;
775 
776  double q;
777  if(ndims==2){
778  q = property->lipnikov(get_coords(n[0]), get_coords(n[1]), get_coords(n[2]),
779  get_metric(n[0]), get_metric(n[1]), get_metric(n[2]));
780  }else{
781  q = property->lipnikov(get_coords(n[0]), get_coords(n[1]), get_coords(n[2]), get_coords(n[3]),
782  get_metric(n[0]), get_metric(n[1]), get_metric(n[2]), get_metric(n[3]));
783  }
784 
785  sum+=q;
786  nele++;
787  }
788  }
789 
790  if(num_processes>1){
791  MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, _mpi_comm);
792  MPI_Allreduce(MPI_IN_PLACE, &nele, 1, MPI_INT, MPI_SUM, _mpi_comm);
793  }
794 
795  double mean = sum/nele;
796 
797  return mean;
798  }
799 
801  void print_quality() const{
802 #pragma omp parallel
803  {
804 #pragma omp for schedule(static)
805  for(size_t i=0;i<NElements;i++){
806  const index_t *n=get_element(i);
807  if(n[0]<0)
808  continue;
809 
810  double q;
811  if(ndims==2){
812  q = property->lipnikov(get_coords(n[0]), get_coords(n[1]), get_coords(n[2]),
813  get_metric(n[0]), get_metric(n[1]), get_metric(n[2]));
814  }else{
815  q = property->lipnikov(get_coords(n[0]), get_coords(n[1]), get_coords(n[2]), get_coords(n[3]),
816  get_metric(n[0]), get_metric(n[1]), get_metric(n[2]), get_metric(n[3]));
817  }
818 #pragma omp critical
819  std::cout<<"Quality[ele="<<i<<"] = "<<q<<std::endl;
820  }
821  }
822  }
823 
825  double get_qmin() const{
826  if(ndims==2)
827  return get_qmin_2d();
828  else
829  return get_qmin_3d();
830  }
831 
832  double get_qmin_2d() const{
833  double qmin=1; // Where 1 is ideal.
834 
835  for(size_t i=0;i<NElements;i++){
836  const index_t *n=get_element(i);
837  if(n[0]<0)
838  continue;
839 
840  qmin = std::min(qmin, property->lipnikov(get_coords(n[0]), get_coords(n[1]), get_coords(n[2]),
841  get_metric(n[0]), get_metric(n[1]), get_metric(n[2])));
842  }
843 
844  if(num_processes>1)
845  MPI_Allreduce(MPI_IN_PLACE, &qmin, 1, MPI_DOUBLE, MPI_MIN, _mpi_comm);
846 
847  return qmin;
848  }
849 
850  double get_qmin_3d() const{
851  double qmin=1; // Where 1 is ideal.
852 
853  for(size_t i=0;i<NElements;i++){
854  const index_t *n=get_element(i);
855  if(n[0]<0)
856  continue;
857 
858  qmin = std::min(qmin, property->lipnikov(get_coords(n[0]), get_coords(n[1]), get_coords(n[2]), get_coords(n[3]),
859  get_metric(n[0]), get_metric(n[1]), get_metric(n[2]), get_metric(n[3])));
860  }
861 
862  if(num_processes>1)
863  MPI_Allreduce(MPI_IN_PLACE, &qmin, 1, MPI_DOUBLE, MPI_MIN, _mpi_comm);
864 
865  return qmin;
866  }
867 
868 #ifdef HAVE_MPI
869  MPI_Comm get_mpi_comm() const{
871  return _mpi_comm;
872  }
873 #endif
874 
876  std::set<index_t> get_node_patch(index_t nid) const{
877  assert(nid<(index_t)NNodes);
878  std::set<index_t> patch;
879  for(typename std::vector<index_t>::const_iterator it=NNList[nid].begin();it!=NNList[nid].end();++it)
880  patch.insert(patch.end(), *it);
881  return patch;
882  }
883 
885  std::set<index_t> get_node_patch(index_t nid, size_t min_patch_size){
886  std::set<index_t> patch = get_node_patch(nid);
887 
888  if(patch.size()<min_patch_size){
889  std::set<index_t> front = patch, new_front;
890  for(;;){
891  for(typename std::set<index_t>::const_iterator it=front.begin();it!=front.end();it++){
892  for(typename std::vector<index_t>::const_iterator jt=NNList[*it].begin();jt!=NNList[*it].end();jt++){
893  if(patch.find(*jt)==patch.end()){
894  new_front.insert(*jt);
895  patch.insert(*jt);
896  }
897  }
898  }
899 
900  if(patch.size()>=std::min(min_patch_size, NNodes))
901  break;
902 
903  front.swap(new_front);
904  }
905  }
906 
907  return patch;
908  }
909 
911  real_t calc_edge_length(index_t nid0, index_t nid1) const{
912  real_t length=-1.0;
913  if(ndims==2){
914  double m[3];
915  m[0] = (metric[nid0*3 ]+metric[nid1*3 ])*0.5;
916  m[1] = (metric[nid0*3+1]+metric[nid1*3+1])*0.5;
917  m[2] = (metric[nid0*3+2]+metric[nid1*3+2])*0.5;
918 
919  length = ElementProperty<real_t>::length2d(get_coords(nid0), get_coords(nid1), m);
920  }else{
921  double m[6];
922  m[0] = (metric[nid0*msize ]+metric[nid1*msize ])*0.5;
923  m[1] = (metric[nid0*msize+1]+metric[nid1*msize+1])*0.5;
924  m[2] = (metric[nid0*msize+2]+metric[nid1*msize+2])*0.5;
925 
926  m[3] = (metric[nid0*msize+3]+metric[nid1*msize+3])*0.5;
927  m[4] = (metric[nid0*msize+4]+metric[nid1*msize+4])*0.5;
928 
929  m[5] = (metric[nid0*msize+5]+metric[nid1*msize+5])*0.5;
930 
931  length = ElementProperty<real_t>::length3d(get_coords(nid0), get_coords(nid1), m);
932  }
933  return length;
934  }
935 
936  real_t maximal_edge_length() const{
937  double L_max = 0;
938 
939  for(index_t i=0;i<(index_t) NNodes;i++){
940  for(typename std::vector<index_t>::const_iterator it=NNList[i].begin();it!=NNList[i].end();++it){
941  if(i<*it){ // Ensure that every edge length is only calculated once.
942  L_max = std::max(L_max, calc_edge_length(i, *it));
943  }
944  }
945  }
946 
947 #ifdef HAVE_MPI
948  if(num_processes>1)
949  MPI_Allreduce(MPI_IN_PLACE, &L_max, 1, MPI_DOUBLE, MPI_MAX, _mpi_comm);
950 #endif
951 
952  return L_max;
953  }
954 
958  void defragment(){
959  // Discover which vertices and elements are active.
960  std::vector<index_t> active_vertex_map(NNodes);
961 
962 #pragma omp parallel for schedule(static)
963  for(size_t i=0;i<NNodes;i++){
964  active_vertex_map[i] = -1;
965  NNList[i].clear();
966  NEList[i].clear();
967  }
968 
969  // Identify active elements.
970  std::vector<index_t> active_element;
971 
972  active_element.reserve(NElements);
973 
974  std::map<index_t, std::set<int> > new_send_set, new_recv_set;
975  for(size_t e=0;e<NElements;e++){
976  index_t nid = _ENList[e*nloc];
977 
978  // Check if deleted.
979  if(nid<0)
980  continue;
981 
982  // Check if wholly owned by another process or if halo node.
983  bool local=false, halo_element=false;
984  for(size_t j=0;j<nloc;j++){
985  nid = _ENList[e*nloc+j];
986  if(recv_halo.count(nid)){
987  halo_element = true;
988  }else{
989  local = true;
990  }
991  }
992  if(!local)
993  continue;
994 
995  // Need these mesh entities.
996  active_element.push_back(e);
997 
998  std::set<int> neigh;
999  for(size_t j=0;j<nloc;j++){
1000  nid = _ENList[e*nloc+j];
1001  active_vertex_map[nid]=0;
1002 
1003  if(halo_element){
1004  for(int k=0;k<num_processes;k++){
1005  if(recv_map[k].count(lnn2gnn[nid])){
1006  new_recv_set[k].insert(nid);
1007  neigh.insert(k);
1008  }
1009  }
1010  }
1011  }
1012  for(size_t j=0;j<nloc;j++){
1013  nid = _ENList[e*nloc+j];
1014  for(std::set<int>::iterator kt=neigh.begin();kt!=neigh.end();++kt){
1015  if(send_map[*kt].count(lnn2gnn[nid]))
1016  new_send_set[*kt].insert(nid);
1017  }
1018  }
1019  }
1020 
1021  // Create a new numbering.
1022  index_t cnt=0;
1023  for(size_t i=0;i<NNodes;i++){
1024  if(active_vertex_map[i]<0)
1025  continue;
1026 
1027  active_vertex_map[i] = cnt++;
1028  }
1029 
1030  // Renumber elements
1031  int active_nelements = active_element.size();
1032  std::map< std::set<index_t>, index_t > ordered_elements;
1033  for(int i=0;i<active_nelements;i++){
1034  index_t old_eid = active_element[i];
1035  std::set<index_t> sorted_element;
1036  for(size_t j=0;j<nloc;j++){
1037  index_t new_nid = active_vertex_map[_ENList[old_eid*nloc+j]];
1038  sorted_element.insert(new_nid);
1039  }
1040  if(ordered_elements.find(sorted_element)==ordered_elements.end()){
1041  ordered_elements[sorted_element] = old_eid;
1042  }else{
1043  std::cerr<<"dup! "
1044  <<active_vertex_map[_ENList[old_eid*nloc]]<<" "
1045  <<active_vertex_map[_ENList[old_eid*nloc+1]]<<" "
1046  <<active_vertex_map[_ENList[old_eid*nloc+2]]<<std::endl;
1047  }
1048  }
1049  std::vector<index_t> element_renumber;
1050  element_renumber.reserve(ordered_elements.size());
1051  for(typename std::map< std::set<index_t>, index_t >::const_iterator it=ordered_elements.begin();it!=ordered_elements.end();++it){
1052  element_renumber.push_back(it->second);
1053  }
1054 
1055  // Compress data structures.
1056  NNodes = cnt;
1057  NElements = ordered_elements.size();
1058 
1059  std::vector<index_t> defrag_ENList(NElements*nloc);
1060  std::vector<real_t> defrag_coords(NNodes*ndims);
1061  std::vector<double> defrag_metric(NNodes*msize);
1062  std::vector<int> defrag_boundary(NElements*nloc);
1063 
1064  // This first touch is to bind memory locally.
1065 #pragma omp parallel
1066  {
1067 #pragma omp for schedule(static)
1068  for(int i=0;i<(int)NElements;i++){
1069  defrag_ENList[i*nloc] = 0;
1070  defrag_boundary[i*nloc] = 0;
1071  }
1072 
1073 #pragma omp for schedule(static)
1074  for(int i=0;i<(int)NNodes;i++){
1075  defrag_coords[i*ndims] = 0.0;
1076  defrag_metric[i*msize] = 0.0;
1077  }
1078  }
1079 
1080  // Second sweep writes elements with new numbering.
1081  for(int i=0;i<NElements;i++){
1082  index_t old_eid = element_renumber[i];
1083  index_t new_eid = i;
1084  for(size_t j=0;j<nloc;j++){
1085  index_t new_nid = active_vertex_map[_ENList[old_eid*nloc+j]];
1086  assert(new_nid<(index_t)NNodes);
1087  defrag_ENList[new_eid*nloc+j] = new_nid;
1088  defrag_boundary[new_eid*nloc+j] = boundary[old_eid*nloc+j];
1089  }
1090  }
1091 
1092  // Second sweep writes node data with new numbering.
1093  for(size_t old_nid=0;old_nid<active_vertex_map.size();++old_nid){
1094  index_t new_nid = active_vertex_map[old_nid];
1095  if(new_nid<0)
1096  continue;
1097 
1098  for(size_t j=0;j<ndims;j++)
1099  defrag_coords[new_nid*ndims+j] = _coords[old_nid*ndims+j];
1100  for(size_t j=0;j<msize;j++)
1101  defrag_metric[new_nid*msize+j] = metric[old_nid*msize+j];
1102  }
1103 
1104  memcpy(&_ENList[0], &defrag_ENList[0], NElements*nloc*sizeof(index_t));
1105  memcpy(&boundary[0], &defrag_boundary[0], NElements*nloc*sizeof(int));
1106  memcpy(&_coords[0], &defrag_coords[0], NNodes*ndims*sizeof(real_t));
1107  memcpy(&metric[0], &defrag_metric[0], NNodes*msize*sizeof(double));
1108 
1109  // Renumber halo, fix lnn2gnn and node_owner.
1110  if(num_processes>1){
1111  std::vector<index_t> defrag_lnn2gnn(NNodes);
1112  std::vector<int> defrag_owner(NNodes);
1113 
1114  for(size_t old_nid=0;old_nid<active_vertex_map.size();++old_nid){
1115  index_t new_nid = active_vertex_map[old_nid];
1116  if(new_nid<0)
1117  continue;
1118 
1119  defrag_lnn2gnn[new_nid] = lnn2gnn[old_nid];
1120  defrag_owner[new_nid] = node_owner[old_nid];
1121  }
1122 
1123  lnn2gnn.swap(defrag_lnn2gnn);
1124  node_owner.swap(defrag_owner);
1125 
1126  for(int k=0;k<num_processes;k++){
1127  std::vector<int> new_halo;
1128  send_map[k].clear();
1129  for(std::vector<int>::iterator jt=send[k].begin();jt!=send[k].end();++jt){
1130  if(new_send_set[k].count(*jt)){
1131  index_t new_lnn = active_vertex_map[*jt];
1132  new_halo.push_back(new_lnn);
1133  send_map[k][lnn2gnn[new_lnn]] = new_lnn;
1134  }
1135  }
1136  send[k].swap(new_halo);
1137  }
1138 
1139  for(int k=0;k<num_processes;k++){
1140  std::vector<int> new_halo;
1141  recv_map[k].clear();
1142  for(std::vector<int>::iterator jt=recv[k].begin();jt!=recv[k].end();++jt){
1143  if(new_recv_set[k].count(*jt)){
1144  index_t new_lnn = active_vertex_map[*jt];
1145  new_halo.push_back(new_lnn);
1146  recv_map[k][lnn2gnn[new_lnn]] = new_lnn;
1147  }
1148  }
1149  recv[k].swap(new_halo);
1150  }
1151 
1152  {
1153  send_halo.clear();
1154  for(int k=0;k<num_processes;k++){
1155  for(std::vector<int>::iterator jt=send[k].begin();jt!=send[k].end();++jt){
1156  send_halo.insert(*jt);
1157  }
1158  }
1159  }
1160 
1161  {
1162  recv_halo.clear();
1163  for(int k=0;k<num_processes;k++){
1164  for(std::vector<int>::iterator jt=recv[k].begin();jt!=recv[k].end();++jt){
1165  recv_halo.insert(*jt);
1166  }
1167  }
1168  }
1169  }else{
1170  for(size_t i=0; i<NNodes; ++i){
1171  lnn2gnn[i] = i;
1172  node_owner[i] = 0;
1173  }
1174  }
1175 
1176 #pragma omp parallel
1177  create_adjacency();
1178  }
1179 
1181  bool verify() const{
1182  bool state = true;
1183 
1184  std::vector<int> mpi_node_owner(NNodes, rank);
1185  if(num_processes>1)
1186  for(int p=0;p<num_processes;p++)
1187  for(std::vector<int>::const_iterator it=recv[p].begin();it!=recv[p].end();++it){
1188  mpi_node_owner[*it] = p;
1189  }
1190  std::vector<int> mpi_ele_owner(NElements, rank);
1191  if(num_processes>1)
1192  for(size_t i=0;i<NElements;i++){
1193  if(_ENList[i*nloc]<0)
1194  continue;
1195  int owner = mpi_node_owner[_ENList[i*nloc]];
1196  for(size_t j=1;j<nloc;j++)
1197  owner = std::min(owner, mpi_node_owner[_ENList[i*nloc+j]]);
1198  mpi_ele_owner[i] = owner;
1199  }
1200 
1201  // Check for the correctness of NNList and NEList.
1202  std::vector< std::set<index_t> > local_NEList(NNodes);
1203  std::vector< std::set<index_t> > local_NNList(NNodes);
1204  for(size_t i=0; i<NElements; i++){
1205  if(_ENList[i*nloc]<0)
1206  continue;
1207 
1208  for(size_t j=0;j<nloc;j++){
1209  index_t nid_j = _ENList[i*nloc+j];
1210 
1211  local_NEList[nid_j].insert(i);
1212  for(size_t k=j+1;k<nloc;k++){
1213  index_t nid_k = _ENList[i*nloc+k];
1214  local_NNList[nid_j].insert(nid_k);
1215  local_NNList[nid_k].insert(nid_j);
1216  }
1217  }
1218  }
1219  {
1220  if(rank==0) std::cout<<"VERIFY: NNList..................";
1221  if(NNList.size()==0){
1222  if(rank==0) std::cout<<"empty\n";
1223  }else{
1224  bool valid_nnlist=true;
1225  for(size_t i=0;i<NNodes;i++){
1226  size_t active_cnt=0;
1227  for(size_t j=0;j<NNList[i].size();j++){
1228  if(NNList[i][j]>=0)
1229  active_cnt++;
1230  }
1231  if(active_cnt!=local_NNList[i].size()){
1232  valid_nnlist=false;
1233 
1234  std::cerr<<std::endl<<"active_cnt!=local_NNList[i].size() "<<active_cnt<<", "<<local_NNList[i].size()<<std::endl;
1235  std::cerr<<"NNList["
1236  <<i
1237  <<"("
1238  <<get_coords(i)[0]<<", "
1239  <<get_coords(i)[1]<<", ";
1240  if(ndims==3) std::cerr<<get_coords(i)[2]<<", ";
1241  std::cerr<<send_halo.count(i)<<", "<<recv_halo.count(i)<<")] = ";
1242  for(size_t j=0;j<NNList[i].size();j++)
1243  std::cerr<<NNList[i][j]<<"("<<NNList[NNList[i][j]].size()<<", "
1244  <<send_halo.count(NNList[i][j])<<", "
1245  <<recv_halo.count(NNList[i][j])<<") ";
1246  std::cerr<<std::endl;
1247  std::cerr<<"local_NNList["<<i<<"] = ";
1248  for(typename std::set<index_t>::iterator kt=local_NNList[i].begin();kt!=local_NNList[i].end();++kt)
1249  std::cerr<<*kt<<" ";
1250  std::cerr<<std::endl;
1251 
1252  state = false;
1253  }
1254  }
1255  if(rank==0){
1256  if(valid_nnlist){
1257  std::cout<<"pass\n";
1258  }else{
1259  state = false;
1260  std::cout<<"warn\n";
1261  }
1262  }
1263  }
1264  }
1265  {
1266  if(rank==0) std::cout<<"VERIFY: NEList..................";
1267  std::string result="pass\n";
1268  if(NEList.size()==0){
1269  result = "empty\n";
1270  }else{
1271  for(size_t i=0;i<NNodes;i++){
1272  if(local_NEList[i].size()!=NEList[i].size()){
1273  result = "fail (NEList[i].size()!=local_NEList[i].size())\n";
1274  state = false;
1275  break;
1276  }
1277  if(local_NEList[i].size()==0)
1278  continue;
1279  if(local_NEList[i]!=NEList[i]){
1280  result = "fail (local_NEList[i]!=NEList[i])\n";
1281  state = false;
1282  break;
1283  }
1284  }
1285  }
1286  if(rank==0) std::cout<<result;
1287  }
1288  if(ndims==2){
1289  long double area=0, min_ele_area=0, max_ele_area=0;
1290 
1291  size_t i=0;
1292  for(;i<NElements;i++){
1293  const index_t *n=get_element(i);
1294  if((mpi_ele_owner[i]!=rank) || (n[0]<0))
1295  continue;
1296 
1297  area = property->area(get_coords(n[0]),
1298  get_coords(n[1]),
1299  get_coords(n[2]));
1300  min_ele_area = area;
1301  max_ele_area = area;
1302  i++; break;
1303  }
1304  for(;i<NElements;i++){
1305  const index_t *n=get_element(i);
1306  if((mpi_ele_owner[i]!=rank) || (n[0]<0))
1307  continue;
1308 
1309  long double larea = property->area(get_coords(n[0]),
1310  get_coords(n[1]),
1311  get_coords(n[2]));
1312  if(pragmatic_isnan(larea)){
1313  std::cerr<<"ERROR: Bad element "<<n[0]<<", "<<n[1]<<", "<<n[2]<<std::endl;
1314  }
1315 
1316  area += larea;
1317  min_ele_area = std::min(min_ele_area, larea);
1318  max_ele_area = std::max(max_ele_area, larea);
1319  }
1320 
1321 #ifdef HAVE_MPI
1322  MPI_Allreduce(MPI_IN_PLACE, &area, 1, MPI_LONG_DOUBLE, MPI_SUM, get_mpi_comm());
1323  MPI_Allreduce(MPI_IN_PLACE, &min_ele_area, 1, MPI_LONG_DOUBLE, MPI_MIN, get_mpi_comm());
1324  MPI_Allreduce(MPI_IN_PLACE, &max_ele_area, 1, MPI_LONG_DOUBLE, MPI_MAX, get_mpi_comm());
1325 #endif
1326 
1327  if(rank==0){
1328  std::cout<<"VERIFY: total area ............"<<area<<std::endl;
1329  std::cout<<"VERIFY: minimum element area...."<<min_ele_area<<std::endl;
1330  std::cout<<"VERIFY: maximum element area...."<<max_ele_area<<std::endl;
1331  }
1332  }else{
1333  long double volume=0, min_ele_vol=0, max_ele_vol=0;
1334  size_t i=0;
1335  for(;i<NElements;i++){
1336  const index_t *n=get_element(i);
1337  if((mpi_ele_owner[i]!=rank) || (n[0]<0))
1338  continue;
1339 
1340  volume = property->volume(get_coords(n[0]),
1341  get_coords(n[1]),
1342  get_coords(n[2]),
1343  get_coords(n[3]));
1344  min_ele_vol = volume;
1345  max_ele_vol = volume;
1346  i++; break;
1347  }
1348  for(;i<NElements;i++){
1349  const index_t *n=get_element(i);
1350  if((mpi_ele_owner[i]!=rank) || (n[0]<0))
1351  continue;
1352 
1353  long double lvolume = property->volume(get_coords(n[0]),
1354  get_coords(n[1]),
1355  get_coords(n[2]),
1356  get_coords(n[3]));
1357  volume += lvolume;
1358  min_ele_vol = std::min(min_ele_vol, lvolume);
1359  max_ele_vol = std::max(max_ele_vol, lvolume);
1360  }
1361 
1362 #ifdef HAVE_MPI
1363  MPI_Allreduce(MPI_IN_PLACE, &volume, 1, MPI_LONG_DOUBLE, MPI_SUM, get_mpi_comm());
1364  MPI_Allreduce(MPI_IN_PLACE, &min_ele_vol, 1, MPI_LONG_DOUBLE, MPI_MIN, get_mpi_comm());
1365  MPI_Allreduce(MPI_IN_PLACE, &max_ele_vol, 1, MPI_LONG_DOUBLE, MPI_MAX, get_mpi_comm());
1366 #endif
1367 
1368  if(rank==0){
1369  std::cout<<"VERIFY: total volume.............."<<volume<<std::endl;
1370  std::cout<<"VERIFY: minimum element volume...."<<min_ele_vol<<std::endl;
1371  std::cout<<"VERIFY: maximum element volume...."<<max_ele_vol<<std::endl;
1372  }
1373  }
1374  double qmean = get_qmean();
1375  double qmin = get_qmin();
1376  if(rank==0){
1377  std::cout<<"VERIFY: mean quality...."<<qmean<<std::endl;
1378  std::cout<<"VERIFY: min quality...."<<qmin<<std::endl;
1379  }
1380 
1381 #ifdef HAVE_MPI
1382  int false_cnt = state?0:1;
1383  if(num_processes>1){
1384  MPI_Allreduce(MPI_IN_PLACE, &false_cnt, 1, MPI_INT, MPI_SUM, _mpi_comm);
1385  }
1386  state = (false_cnt == 0);
1387 #endif
1388 
1389  return state;
1390  }
1391 
1392  void send_all_to_all(std::vector< std::vector<index_t> > send_vec,
1393  std::vector< std::vector<index_t> > *recv_vec) {
1394  int ierr, recv_size, tag = 123456;
1395  std::vector<MPI_Status> status(num_processes);
1396  std::vector<MPI_Request> send_req(num_processes);
1397  std::vector<MPI_Request> recv_req(num_processes);
1398 
1399  for (int proc=0; proc<num_processes; proc++) {
1400  if (proc == rank) {send_req[proc] = MPI_REQUEST_NULL; continue;}
1401 
1402  ierr = MPI_Isend(send_vec[proc].data(), send_vec[proc].size(), MPI_INDEX_T,
1403  proc, tag, _mpi_comm, &send_req[proc]); assert(ierr==0);
1404  }
1405 
1406  /* Receive send list from remote proc */
1407  for (int proc=0; proc<num_processes; proc++) {
1408  if (proc == rank) {recv_req[proc] = MPI_REQUEST_NULL; continue;}
1409 
1410  ierr = MPI_Probe(proc, tag, _mpi_comm, &(status[proc])); assert(ierr==0);
1411  ierr = MPI_Get_count(&(status[proc]), MPI_INT, &recv_size); assert(ierr==0);
1412  (*recv_vec)[proc].resize(recv_size);
1413  MPI_Irecv((*recv_vec)[proc].data(), recv_size, MPI_INT, proc,
1414  tag, _mpi_comm, &recv_req[proc]); assert(ierr==0);
1415  }
1416 
1417  MPI_Waitall(num_processes, &(send_req[0]), &(status[0]));
1418  MPI_Waitall(num_processes, &(recv_req[0]), &(status[0]));
1419  }
1420 
1421  private:
1422  template<typename _real_t, int _dim> friend class MetricField;
1423  template<typename _real_t, int _dim> friend class Smooth;
1424  template<typename _real_t, int _dim> friend class Swapping;
1425  template<typename _real_t, int _dim> friend class Coarsen;
1426  template<typename _real_t, int _dim> friend class Refine;
1427  template<typename _real_t> friend class DeferredOperations;
1428  template<typename _real_t> friend class VTKTools;
1429  template<typename _real_t> friend class CUDATools;
1430 
1431  void _init(int _NNodes, int _NElements, const index_t *globalENList,
1432  const real_t *x, const real_t *y, const real_t *z,
1433  const index_t *lnn2gnn, const index_t *owner_range){
1434  num_processes = 1;
1435  rank=0;
1436 
1437  NElements = _NElements;
1438  NNodes = _NNodes;
1439 
1440 #ifdef HAVE_MPI
1441  MPI_Comm_size(_mpi_comm, &num_processes);
1442  MPI_Comm_rank(_mpi_comm, &rank);
1443 
1444  // Assign the correct MPI data type to MPI_INDEX_T and MPI_REAL_T
1445  mpi_type_wrapper<index_t> mpi_index_t_wrapper;
1446  MPI_INDEX_T = mpi_index_t_wrapper.mpi_type;
1447  mpi_type_wrapper<real_t> mpi_real_t_wrapper;
1448  MPI_REAL_T = mpi_real_t_wrapper.mpi_type;
1449 #endif
1450 
1451  nthreads = pragmatic_nthreads();
1452 
1453  if(z==NULL){
1454  nloc = 3;
1455  ndims = 2;
1456  msize = 3;
1457  }else{
1458  nloc = 4;
1459  ndims = 3;
1460  msize = 6;
1461  }
1462 
1463  // From the globalENList, create the halo and a local ENList if num_processes>1.
1464  const index_t *ENList;
1465 #ifdef HAVE_BOOST_UNORDERED_MAP_HPP
1466  boost::unordered_map<index_t, index_t> gnn2lnn;
1467 #else
1468  std::map<index_t, index_t> gnn2lnn;
1469 #endif
1470  if(num_processes==1){
1471  ENList = globalENList;
1472  }else{
1473 #ifdef HAVE_MPI
1474  assert(lnn2gnn!=NULL);
1475  for(size_t i=0;i<(size_t)NNodes;i++){
1476  gnn2lnn[lnn2gnn[i]] = i;
1477  }
1478 
1479  std::vector< std::set<index_t> > recv_set(num_processes);
1480  index_t *localENList = new index_t[NElements*nloc];
1481  for(size_t i=0;i<(size_t)NElements*nloc;i++){
1482  index_t gnn = globalENList[i];
1483  for(int j=0;j<num_processes;j++){
1484  if(gnn<owner_range[j+1]){
1485  if(j!=rank)
1486  recv_set[j].insert(gnn);
1487  break;
1488  }
1489  }
1490  localENList[i] = gnn2lnn[gnn];
1491  }
1492  std::vector<int> recv_size(num_processes);
1493  recv.resize(num_processes);
1494  recv_map.resize(num_processes);
1495  for(int j=0;j<num_processes;j++){
1496  for(typename std::set<int>::const_iterator it=recv_set[j].begin();it!=recv_set[j].end();++it){
1497  recv[j].push_back(*it);
1498  }
1499  recv_size[j] = recv[j].size();
1500  }
1501  std::vector<int> send_size(num_processes);
1502  MPI_Alltoall(&(recv_size[0]), 1, MPI_INT,
1503  &(send_size[0]), 1, MPI_INT, _mpi_comm);
1504 
1505  // Setup non-blocking receives
1506  send.resize(num_processes);
1507  send_map.resize(num_processes);
1508  std::vector<MPI_Request> request(num_processes*2);
1509  for(int i=0;i<num_processes;i++){
1510  if((i==rank)||(send_size[i]==0)){
1511  request[i] = MPI_REQUEST_NULL;
1512  }else{
1513  send[i].resize(send_size[i]);
1514  MPI_Irecv(&(send[i][0]), send_size[i], MPI_INDEX_T, i, 0, _mpi_comm, &(request[i]));
1515  }
1516  }
1517 
1518  // Non-blocking sends.
1519  for(int i=0;i<num_processes;i++){
1520  if((i==rank)||(recv_size[i]==0)){
1521  request[num_processes+i] = MPI_REQUEST_NULL;
1522  }else{
1523  MPI_Isend(&(recv[i][0]), recv_size[i], MPI_INDEX_T, i, 0, _mpi_comm, &(request[num_processes+i]));
1524  }
1525  }
1526 
1527  std::vector<MPI_Status> status(num_processes*2);
1528  MPI_Waitall(num_processes, &(request[0]), &(status[0]));
1529  MPI_Waitall(num_processes, &(request[num_processes]), &(status[num_processes]));
1530 
1531  for(int j=0;j<num_processes;j++){
1532  for(int k=0;k<recv_size[j];k++){
1533  index_t gnn = recv[j][k];
1534  index_t lnn = gnn2lnn[gnn];
1535  recv_map[j][gnn] = lnn;
1536  recv[j][k] = lnn;
1537  }
1538 
1539  for(int k=0;k<send_size[j];k++){
1540  index_t gnn = send[j][k];
1541  index_t lnn = gnn2lnn[gnn];
1542  send_map[j][gnn] = lnn;
1543  send[j][k] = lnn;
1544  }
1545  }
1546 
1547  ENList = localENList;
1548 #endif
1549  }
1550 
1551  _ENList.resize(NElements*nloc);
1552  _coords.resize(NNodes*ndims);
1553  metric.resize(NNodes*msize);
1554  NNList.resize(NNodes);
1555  NEList.resize(NNodes);
1556  node_owner.resize(NNodes);
1557  this->lnn2gnn.resize(NNodes);
1558 
1559  // TODO I don't know whether this method makes sense anymore.
1560  // Enforce first-touch policy
1561 #pragma omp parallel
1562  {
1563 #pragma omp for schedule(static)
1564  for(int i=0;i<(int)NElements;i++){
1565  for(size_t j=0;j<nloc;j++){
1566  _ENList[i*nloc+j] = ENList[i*nloc+j];
1567  }
1568  }
1569  if(ndims==2){
1570 #pragma omp for schedule(static)
1571  for(int i=0;i<(int)NNodes;i++){
1572  _coords[i*2 ] = x[i];
1573  _coords[i*2+1] = y[i];
1574  }
1575  }else{
1576 #pragma omp for schedule(static)
1577  for(int i=0;i<(int)NNodes;i++){
1578  _coords[i*3 ] = x[i];
1579  _coords[i*3+1] = y[i];
1580  _coords[i*3+2] = z[i];
1581  }
1582  }
1583 
1584 #pragma omp single nowait
1585  {
1586  if(num_processes>1){
1587  // Take into account renumbering for halo.
1588  for(int j=0;j<num_processes;j++){
1589  for(size_t k=0;k<recv[j].size();k++){
1590  recv_halo.insert(recv[j][k]);
1591  }
1592  for(size_t k=0;k<send[j].size();k++){
1593  send_halo.insert(send[j][k]);
1594  }
1595  }
1596  }
1597  }
1598 
1599  // Set the orientation of elements.
1600 #pragma omp single
1601  {
1602  const int *n=get_element(0);
1603  assert(n[0]>=0);
1604 
1605  if(ndims==2)
1606  property = new ElementProperty<real_t>(get_coords(n[0]),
1607  get_coords(n[1]),
1608  get_coords(n[2]));
1609  else
1610  property = new ElementProperty<real_t>(get_coords(n[0]),
1611  get_coords(n[1]),
1612  get_coords(n[2]),
1613  get_coords(n[3]));
1614  }
1615 
1616  if(ndims==2){
1617 #pragma omp for schedule(static)
1618  for(size_t i=1;i<(size_t)NElements;i++){
1619  const int *n=get_element(i);
1620  assert(n[0]>=0);
1621 
1622  double volarea = property->area(get_coords(n[0]),
1623  get_coords(n[1]),
1624  get_coords(n[2]));
1625 
1626  if(volarea<0)
1627  invert_element(i);
1628  }
1629  }else{
1630 #pragma omp for schedule(static)
1631  for(size_t i=1;i<(size_t)NElements;i++){
1632  const int *n=get_element(i);
1633  assert(n[0]>=0);
1634 
1635  double volarea = property->volume(get_coords(n[0]),
1636  get_coords(n[1]),
1637  get_coords(n[2]),
1638  get_coords(n[3]));
1639 
1640  if(volarea<0)
1641  invert_element(i);
1642  }
1643  }
1644 
1645  // create_adjacency is meant to be called from inside a parallel region
1646  create_adjacency();
1647  }
1648 
1649  create_global_node_numbering();
1650  }
1651 
1653  void create_adjacency(){
1654  int tid = pragmatic_thread_id();
1655 
1656 #pragma omp for schedule(static)
1657  for(size_t i=0;i<NNodes;i++){
1658  NNList[i].clear();
1659  NEList[i].clear();
1660  }
1661 
1662  for(size_t i=0; i<NElements; i++){
1663  if(_ENList[i*nloc]<0)
1664  continue;
1665 
1666  for(size_t j=0;j<nloc;j++){
1667  index_t nid_j = _ENList[i*nloc+j];
1668  if((nid_j%nthreads)==tid){
1669  NEList[nid_j].insert(NEList[nid_j].end(), i);
1670  for(size_t k=0;k<nloc;k++){
1671  if(j!=k){
1672  index_t nid_k = _ENList[i*nloc+k];
1673  NNList[nid_j].push_back(nid_k);
1674  }
1675  }
1676  }
1677  }
1678  }
1679 
1680 #pragma omp barrier
1681 
1682  // Finalise
1683 #pragma omp for schedule(static)
1684  for(size_t i=0;i<NNodes;i++){
1685  if(NNList[i].empty())
1686  continue;
1687 
1688  std::vector<index_t> *nnset = new std::vector<index_t>();
1689 
1690  std::sort(NNList[i].begin(),NNList[i].end());
1691  std::unique_copy(NNList[i].begin(), NNList[i].end(), std::inserter(*nnset, nnset->begin()));
1692 
1693  NNList[i].swap(*nnset);
1694  delete nnset;
1695  }
1696  }
1697 
1698  void trim_halo(){
1699  std::set<index_t> recv_halo_temp, send_halo_temp;
1700 
1701  // Traverse all vertices V in all recv[i] vectors. Vertices in send[i] belong by definition to *this* MPI process,
1702  // so all elements adjacent to them either belong exclusively to *this* process or cross partitions.
1703  for(int i=0;i<num_processes;i++){
1704  if(recv[i].size()==0)
1705  continue;
1706 
1707  std::vector<index_t> recv_temp;
1708 #ifdef HAVE_BOOST_UNORDERED_MAP_HPP
1709  boost::unordered_map<index_t, index_t> recv_map_temp;
1710 #else
1711  std::map<index_t, index_t> recv_map_temp;
1712 #endif
1713 
1714  for(typename std::vector<index_t>::const_iterator vit = recv[i].begin(); vit != recv[i].end(); ++vit){
1715  // For each vertex, traverse a copy of the vertex's NEList.
1716  // We need a copy because erase_element modifies the original NEList.
1717  std::set<index_t> NEList_copy = NEList[*vit];
1718  for(typename std::set<index_t>::const_iterator eit = NEList_copy.begin(); eit != NEList_copy.end(); ++eit){
1719  // Check whether all vertices comprising the element belong to another MPI process.
1720  std::vector<index_t> n(nloc);
1721  get_element(*eit, &n[0]);
1722  if(n[0] < 0)
1723  continue;
1724 
1725  // If one of the vertices belongs to *this* partition, the element should be retained.
1726  bool to_be_deleted = true;
1727  for(size_t j=0; j<nloc; ++j)
1728  if(is_owned_node(n[j])){
1729  to_be_deleted = false;
1730  break;
1731  }
1732 
1733  if(to_be_deleted){
1734  erase_element(*eit);
1735 
1736  // Now check whether one of the edges must be deleted
1737  for(size_t j=0; j<nloc; ++j){
1738  for(size_t k=j+1; k<nloc; ++k){
1739  std::set<index_t> intersection;
1740  std::set_intersection(NEList[n[j]].begin(), NEList[n[j]].end(), NEList[n[k]].begin(), NEList[n[k]].end(),
1741  std::inserter(intersection, intersection.begin()));
1742 
1743  // If these two vertices have no element in common anymore,
1744  // then the corresponding edge does not exist, so update NNList.
1745  if(intersection.empty()){
1746  typename std::vector<index_t>::iterator it;
1747  it = std::find(NNList[n[j]].begin(), NNList[n[j]].end(), n[k]);
1748  NNList[n[j]].erase(it);
1749  it = std::find(NNList[n[k]].begin(), NNList[n[k]].end(), n[j]);
1750  NNList[n[k]].erase(it);
1751  }
1752  }
1753  }
1754  }
1755  }
1756 
1757  // If this vertex is no longer part of any element, then it is safe to be removed.
1758  if(NEList[*vit].empty()){
1759  // Update NNList of all neighbours
1760  for(typename std::vector<index_t>::const_iterator neigh_it = NNList[*vit].begin(); neigh_it != NNList[*vit].end(); ++neigh_it){
1761  typename std::vector<index_t>::iterator it = std::find(NNList[*neigh_it].begin(), NNList[*neigh_it].end(), *vit);
1762  NNList[*neigh_it].erase(it);
1763  }
1764 
1765  erase_vertex(*vit);
1766  }else{
1767  // We will keep this vertex, so put it into recv_halo_temp.
1768  recv_temp.push_back(*vit);
1769  recv_map_temp[lnn2gnn[*vit]] = *vit;
1770  recv_halo_temp.insert(*vit);
1771  }
1772  }
1773 
1774  recv[i].swap(recv_temp);
1775  recv_map[i].swap(recv_map_temp);
1776  }
1777 
1778  // Once all recv[i] have been traversed, update recv_halo.
1779  recv_halo.swap(recv_halo_temp);
1780 
1781  // Traverse all vertices V in all send[i] vectors.
1782  // If none of V's neighbours are owned by the i-th MPI process, it means that the i-th process
1783  // has removed V from its recv_halo, so remove the vertex from send[i].
1784  for(int i=0;i<num_processes;i++){
1785  if(send[i].size()==0)
1786  continue;
1787 
1788  std::vector<index_t> send_temp;
1789 #ifdef HAVE_BOOST_UNORDERED_MAP_HPP
1790  boost::unordered_map<index_t, index_t> send_map_temp;
1791 #else
1792  std::map<index_t, index_t> send_map_temp;
1793 #endif
1794 
1795  for(typename std::vector<index_t>::const_iterator vit = send[i].begin(); vit != send[i].end(); ++vit){
1796  bool to_be_deleted = true;
1797  for(typename std::vector<index_t>::const_iterator neigh_it = NNList[*vit].begin(); neigh_it != NNList[*vit].end(); ++neigh_it)
1798  if(node_owner[*neigh_it] == i){
1799  to_be_deleted = false;
1800  break;
1801  }
1802 
1803  if(!to_be_deleted){
1804  send_temp.push_back(*vit);
1805  send_map_temp[lnn2gnn[*vit]] = *vit;
1806  send_halo_temp.insert(*vit);
1807  }
1808  }
1809 
1810  send[i].swap(send_temp);
1811  send_map[i].swap(send_map_temp);
1812  }
1813 
1814  // Once all send[i] have been traversed, update send_halo.
1815  send_halo.swap(send_halo_temp);
1816  }
1817 
1818  void create_global_node_numbering(){
1819  if(num_processes>1){
1820 #ifdef HAVE_MPI
1821  // Calculate the global numbering offset for this partition.
1822  int gnn_offset;
1823  int NPNodes = NNodes - recv_halo.size();
1824  MPI_Scan(&NPNodes, &gnn_offset, 1, MPI_INT, MPI_SUM, get_mpi_comm());
1825  gnn_offset-=NPNodes;
1826 
1827  // Write global node numbering and ownership for nodes assigned to local process.
1828  for(index_t i=0; i < (index_t) NNodes; i++){
1829  if(recv_halo.count(i)){
1830  lnn2gnn[i] = 0;
1831  }else{
1832  lnn2gnn[i] = gnn_offset++;
1833  node_owner[i] = rank;
1834  }
1835  }
1836 
1837  // Update GNN's for the halo nodes.
1838  halo_update<int, 1>(_mpi_comm, send, recv, lnn2gnn);
1839 
1840  // Finish writing node ownerships.
1841  for(int i=0;i<num_processes;i++){
1842  for(std::vector<int>::const_iterator it=recv[i].begin();it!=recv[i].end();++it){
1843  node_owner[*it] = i;
1844  }
1845  }
1846 #endif
1847  }else{
1848  memset(&node_owner[0], 0, NNodes*sizeof(int));
1849  for(index_t i=0; i < (index_t) NNodes; i++)
1850  lnn2gnn[i] = i;
1851  }
1852  }
1853 
1854  void create_gappy_global_numbering(size_t pNElements){
1855 #ifdef HAVE_MPI
1856  // We expect to have NElements_predict/2 nodes in the partition,
1857  // so let's reserve 10 times more space for global node numbers.
1858  index_t gnn_reserve = 5*pNElements;
1859  MPI_Scan(&gnn_reserve, &gnn_offset, 1, MPI_INDEX_T, MPI_SUM, _mpi_comm);
1860  gnn_offset -= gnn_reserve;
1861 
1862  for(size_t i=0; i<NNodes; ++i){
1863  if(node_owner[i] == rank)
1864  lnn2gnn[i] = gnn_offset+i;
1865  else
1866  lnn2gnn[i] = -1;
1867  }
1868 
1869  halo_update<int, 1>(_mpi_comm, send, recv, lnn2gnn);
1870 
1871  for(int i=0;i<num_processes;i++){
1872  send_map[i].clear();
1873  for(std::vector<int>::const_iterator it=send[i].begin();it!=send[i].end();++it){
1874  assert(node_owner[*it]==rank);
1875  send_map[i][lnn2gnn[*it]] = *it;
1876  }
1877 
1878  recv_map[i].clear();
1879  for(std::vector<int>::const_iterator it=recv[i].begin();it!=recv[i].end();++it){
1880  node_owner[*it] = i;
1881  recv_map[i][lnn2gnn[*it]] = *it;
1882  }
1883  }
1884 #endif
1885  }
1886 
1887  void update_gappy_global_numbering(std::vector<size_t>& recv_cnt, std::vector<size_t>& send_cnt){
1888 #ifdef HAVE_MPI
1889  // MPI_Requests for all non-blocking communications.
1890  std::vector<MPI_Request> request(num_processes*2);
1891 
1892  // Setup non-blocking receives.
1893  std::vector< std::vector<index_t> > recv_buff(num_processes);
1894  for(int i=0;i<num_processes;i++){
1895  if(recv_cnt[i]==0){
1896  request[i] = MPI_REQUEST_NULL;
1897  }else{
1898  recv_buff[i].resize(recv_cnt[i]);
1899  MPI_Irecv(&(recv_buff[i][0]), recv_buff[i].size(), MPI_INDEX_T, i, 0, _mpi_comm, &(request[i]));
1900  }
1901  }
1902 
1903  // Non-blocking sends.
1904  std::vector< std::vector<index_t> > send_buff(num_processes);
1905  for(int i=0;i<num_processes;i++){
1906  if(send_cnt[i]==0){
1907  request[num_processes+i] = MPI_REQUEST_NULL;
1908  }else{
1909  for(typename std::vector<index_t>::const_iterator it=send[i].end()-send_cnt[i];it!=send[i].end();++it)
1910  send_buff[i].push_back(lnn2gnn[*it]);
1911 
1912  MPI_Isend(&(send_buff[i][0]), send_buff[i].size(), MPI_INDEX_T, i, 0, _mpi_comm, &(request[num_processes+i]));
1913  }
1914  }
1915 
1916  std::vector<MPI_Status> status(num_processes*2);
1917  MPI_Waitall(num_processes, &(request[0]), &(status[0]));
1918  MPI_Waitall(num_processes, &(request[num_processes]), &(status[num_processes]));
1919 
1920  for(int i=0;i<num_processes;i++){
1921  int k=0;
1922  for(typename std::vector<index_t>::const_iterator it=recv[i].end()-recv_cnt[i];it!=recv[i].end();++it, ++k)
1923  lnn2gnn[*it] = recv_buff[i][k];
1924  }
1925 #endif
1926  }
1927 
1928  size_t ndims, nloc, msize;
1929  std::vector<index_t> _ENList;
1930  std::vector<real_t> _coords;
1931 
1932  size_t NNodes, NElements;
1933 
1934  // Boundary Label
1935  std::vector<int> boundary;
1936 
1937  // Adjacency lists
1938  std::vector< std::set<index_t> > NEList;
1939  std::vector< std::vector<index_t> > NNList;
1940 
1941  ElementProperty<real_t> *property;
1942 
1943  // Metric tensor field.
1944  std::vector<double> metric;
1945 
1946  // Parallel support.
1947  int rank, num_processes, nthreads;
1948  std::vector< std::vector<index_t> > send, recv;
1949 #ifdef HAVE_BOOST_UNORDERED_MAP_HPP
1950  std::vector< boost::unordered_map<index_t, index_t> > send_map, recv_map;
1951 #else
1952  std::vector< std::map<index_t, index_t> > send_map, recv_map;
1953 #endif
1954  std::set<index_t> send_halo, recv_halo;
1955  std::vector<int> node_owner;
1956  std::vector<index_t> lnn2gnn;
1957 
1958 #ifdef HAVE_MPI
1959  MPI_Comm _mpi_comm;
1960  index_t gnn_offset;
1961 
1962  // MPI data type for index_t and real_t
1963  MPI_Datatype MPI_INDEX_T;
1964  MPI_Datatype MPI_REAL_T;
1965 #endif
1966 };
1967 
1968 #endif
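A minimal driver sketch follows (it is not part of Mesh.h). It assumes the PRAgMaTIc headers are on the include path, that index_t is the int typedef referenced below, and that HAVE_MPI is the same configuration macro used throughout the listing; for an MPI build it initialises MPI itself. It builds a one-triangle 2D mesh and queries a few of the quantities computed by the members in this file.

#include <iostream>
#include <vector>

#ifdef HAVE_MPI
#include <mpi.h>
#endif

#include "Mesh.h"

int main(int argc, char **argv){
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);   // the constructors use MPI_COMM_WORLD when HAVE_MPI is defined
#endif

  // One unit right triangle: 3 vertices, 1 element (illustrative data only).
  std::vector<double> x(3), y(3);
  x[0]=0.0; y[0]=0.0;
  x[1]=1.0; y[1]=0.0;
  x[2]=0.0; y[2]=1.0;

  std::vector<int> enlist(3);           // element-node list, nloc=3 for triangles; assumes index_t==int
  enlist[0]=0; enlist[1]=1; enlist[2]=2;

  // 2D constructor: NNodes, NElements, ENList, x, y.
  Mesh<double> mesh(3, 1, &enlist[0], &x[0], &y[0]);

  // Label facets: boundary facets become 1, interior facets 0.
  mesh.create_boundary();

  std::cout<<"nodes     : "<<mesh.get_number_nodes()<<"\n"
           <<"elements  : "<<mesh.get_number_elements()<<"\n"
           <<"area      : "<<mesh.calculate_area()<<"\n"       // Heron's formula per element
           <<"perimeter : "<<mesh.calculate_perimeter()<<"\n"; // sums boundary facet lengths

#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}

For the single triangle above the expected output is 3 nodes, 1 element, area 0.5 and perimeter 2+sqrt(2); metric-dependent queries such as get_qmean() or get_lmean() additionally require the metric field to be set, which this sketch does not do.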
Cross-references (Doxygen member and class briefs; line numbers refer to the listing above):

void get_coords(index_t nid, real_t *x) const - Return copy of the coordinate. (Definition: Mesh.h:389)
void get_element(size_t eid, index_t *ele) const - Return copy of element-node list. (Definition: Mesh.h:363)
void create_boundary() (Definition: Mesh.h:204)
void get_metric(index_t nid, double *m) const - Return copy of metric. (Definition: Mesh.h:402)
const real_t *get_coords(index_t nid) const - Return positions vector. (Definition: Mesh.h:384)
Coarsen - Performs 2D mesh coarsening. (Definition: Coarsen.h:59)
~Mesh() - Default destructor. (Definition: Mesh.h:147)
static double length2d(const real_t x0[], const real_t x1[], const double m[])
index_t - typedef for int
double calculate_volume() - Calculate volume. (Definition: Mesh.h:693)
static double length3d(const real_t x0[], const real_t x1[], const double m[])
bool verify() const - Verify that the mesh and its metadata are correct. (Definition: Mesh.h:1181)
ElementProperty - Calculates a number of element properties.
void invert_element(size_t eid) - Flip orientation of element. (Definition: Mesh.h:351)
double get_qmean() const - Get the element mean quality in metric space. (Definition: Mesh.h:764)
Swapping - Performs edge/face swapping. (Definition: Swapping.h:56)
Mesh - Manages mesh data. (Definition: Mesh.h:70)
double get_qmin() const - Get the element minimum quality in metric space. (Definition: Mesh.h:825)
real_t calc_edge_length(index_t nid0, index_t nid1) const - Calculates the edge length in metric space. (Definition: Mesh.h:911)
double calculate_perimeter() - Calculate perimeter. (Definition: Mesh.h:454)
void erase_element(const index_t eid) - Erase an element. (Definition: Mesh.h:341)
MetricField - Constructs the metric tensor field which encodes anisotropic edge size information. (Definition: MetricField.h:73)
size_t get_number_elements() const - Return the number of elements in the mesh. (Definition: Mesh.h:374)
void print_quality() const - Print out the qualities. Useful if you want to plot a histogram of element qualities. (Definition: Mesh.h:801)
int pragmatic_nthreads()
real_t maximal_edge_length() const (Definition: Mesh.h:936)
index_t append_vertex(const real_t *x, const double *m) - Add a new vertex. (Definition: Mesh.h:152)
double get_qmin_3d() const (Definition: Mesh.h:850)
double get_qmin_2d() const (Definition: Mesh.h:832)
double calculate_area() - Calculate area. (Definition: Mesh.h:508)
double get_lmean() - Get the mean edge length in metric space. (Definition: Mesh.h:420)
#define pragmatic_isnan
const MPI_Datatype mpi_type (Definition: mpi_tools.h:46)
Mesh(int NNodes, int NElements, const index_t *ENList, const real_t *x, const real_t *y, const real_t *z) (Definition: Mesh.h:117)
const double *get_metric(index_t nid) const - Return metric at that vertex. (Definition: Mesh.h:396)
size_t get_number_dimensions() const - Return the number of spatial dimensions. (Definition: Mesh.h:379)
void erase_vertex(const index_t nid) - Erase a vertex. (Definition: Mesh.h:165)
VTKTools - Toolkit for importing and exporting VTK files. This is mostly used as part of the internal unit testing. (Definition: VTKTools.h:95)
Mesh(int NNodes, int NElements, const index_t *ENList, const real_t *x, const real_t *y) (Definition: Mesh.h:81)
std::set<index_t> get_node_patch(index_t nid) const - Return the node ids connected to the specified node id. (Definition: Mesh.h:876)
size_t get_number_nodes() const - Return the number of nodes in the mesh. (Definition: Mesh.h:369)
Smooth - Applies Laplacian smoothing in metric space. (Definition: Smooth.h:67)
std::set<index_t> get_node_patch(index_t nid, size_t min_patch_size) - Grow a node patch around a node id until it reaches a minimum size. (Definition: Mesh.h:885)
int pragmatic_thread_id()
void set_boundary(int nfacets, const int *facets, const int *ids) (Definition: Mesh.h:309)
index_t append_element(const index_t *n) - Add a new element. (Definition: Mesh.h:173)
bool is_halo_node(index_t nid) const - Returns true if the node is in any of the partitioned elements. (Definition: Mesh.h:410)
bool is_owned_node(index_t nid) const - Returns true if the node is assigned to the local partition. (Definition: Mesh.h:415)
void send_all_to_all(std::vector< std::vector<index_t> > send_vec, std::vector< std::vector<index_t> > *recv_vec) (Definition: Mesh.h:1392)
void defragment() (Definition: Mesh.h:958)
const index_t *get_element(size_t eid) const - Return a pointer to the element-node list. (Definition: Mesh.h:358)
Refine - Performs 2D mesh refinement. (Definition: Refine.h:56)
index_t append_element(const index_t *n, const int *b) - Add a new element and boundary. (Definition: Mesh.h:188)