openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
SF_mesh_utils.h
1 // ----------------------------------------------------------------------------
2 // openCARP is an open cardiac electrophysiology simulator.
3 //
4 // Copyright (C) 2020 openCARP project
5 //
6 // This program is licensed under the openCARP Academic Public License (APL)
7 // v1.0: You can use and redistribute it and/or modify it in non-commercial
8 // academic environments under the terms of APL as published by the openCARP
9 // project v1.0, or (at your option) any later version. Commercial use requires
10 // a commercial license (info@opencarp.org).
11 //
12 // This program is distributed without any warranty; see the openCARP APL for
13 // more details.
14 //
15 // You should have received a copy of the openCARP APL along with this program
16 // and can find it online: http://www.opencarp.org/license
17 // ----------------------------------------------------------------------------
18 
27 #ifndef _SF_MESH_UTILS_H
28 #define _SF_MESH_UTILS_H
29 
30 #include <algorithm>
31 #include <iostream>
32 #include <string>
33 
34 #include "asciiPlotter.hpp"
35 
36 #include "SF_container.h"
37 #include "SF_network.h"
38 #include "SF_parallel_layout.h"
39 #include "SF_numbering.h"
40 #include "SF_sort.h"
41 #include "SF_mesh_io.h"
42 #include "SF_vector.h"
43 
44 namespace SF {
45 
46 
55 template<class T, class S>
56 inline void permute_mesh(const meshdata<T, S> & inmesh, meshdata<T, S> & outmesh, const vector<T> & perm)
57 {
58  assert(perm.size() == inmesh.l_numelem);
59 
60  outmesh.comm = inmesh.comm;
61 
62  outmesh.g_numelem = inmesh.g_numelem;
63  outmesh.l_numelem = inmesh.l_numelem;
64 
65  // copy numberings
66  outmesh.nbr = inmesh.nbr;
67 
68  const vector<T> & ref_eidx_in = inmesh.get_numbering(NBR_ELEM_REF);
69  vector<T> & ref_eidx_out = outmesh.get_numbering(NBR_ELEM_REF);
70 
71  size_t numelem = outmesh.l_numelem, numcon = inmesh.con.size();
72  bool twoFib = inmesh.she.size() == (inmesh.l_numelem*3);
73 
74  outmesh.dsp .resize(numelem+1);
75  outmesh.tag .resize(numelem);
76  outmesh.type.resize(numelem);
77  outmesh.fib .resize(numelem*3);
78  if(twoFib)
79  outmesh.she .resize(numelem*3);
80 
81  outmesh.con.resize(numcon);
82 
83  vector<T> cnt(numelem);
84  T* elem = outmesh.con.data();
85 
86  for(size_t i=0; i<numelem; i++) {
87  outmesh.tag[i] = inmesh.tag[perm[i]];
88  ref_eidx_out[i] = ref_eidx_in[perm[i]];
89  outmesh.type[i] = inmesh.type[perm[i]];
90 
91  outmesh.fib[i*3+0] = inmesh.fib[perm[i]*3+0];
92  outmesh.fib[i*3+1] = inmesh.fib[perm[i]*3+1];
93  outmesh.fib[i*3+2] = inmesh.fib[perm[i]*3+2];
94 
95  if(twoFib) {
96  outmesh.she[i*3+0] = inmesh.she[perm[i]*3+0];
97  outmesh.she[i*3+1] = inmesh.she[perm[i]*3+1];
98  outmesh.she[i*3+2] = inmesh.she[perm[i]*3+2];
99  }
100 
101  int esize = inmesh.dsp[perm[i]+1] - inmesh.dsp[perm[i]];
102  cnt[i] = esize;
103 
104  T estart = inmesh.dsp[perm[i]];
105  for(int j=0; j<esize; j++) elem[j] = inmesh.con[estart+j];
106  elem += esize;
107  }
108 
109  dsp_from_cnt(cnt, outmesh.dsp);
110 
111  // check consistency
112  assert(inmesh.dsp[inmesh.l_numelem] == outmesh.dsp[outmesh.l_numelem]);
113 }
114 
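A minimal usage sketch (not part of this header; assumes an int/double instantiation of meshdata whose reference numberings are set up): reorder the local elements so they are sorted by their tag value.

inline void example_sort_elements_by_tag(const SF::meshdata<int, double> & mesh,
                                         SF::meshdata<int, double> & sorted)
{
  SF::vector<int> key, perm;
  key.assign(mesh.tag.begin(), mesh.tag.end());  // copy the element tags as sort keys
  SF::interval(perm, 0, key.size());             // identity permutation 0 .. l_numelem-1
  SF::binary_sort_copy(key, perm);               // sort the keys, mirroring each swap into perm
  SF::permute_mesh(mesh, sorted, perm);          // sorted now holds the reordered element data
}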
124 template<class T, class S>
125 inline void redistribute_elements(meshdata<T, S> & mesh, meshdata<T, S> & sendbuff, vector<T> & part)
126 {
127  MPI_Comm comm = mesh.comm;
128 
129  int size, rank;
130  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
131 
132  // first, we reorder the elements into contiguous blocks so we can
133  // communicate them more easily
134  vector<T> perm;
135  interval(perm, 0, part.size());
136  binary_sort_copy(part, perm);
137 
138  permute_mesh(mesh, sendbuff, perm);
139 
140  // now we set up the communication graphs (i.e. cnt and dsp for sending and receiving)
141  commgraph<size_t> elem_grph, con_grph;
142  // configure element exchange with the partitioning information.
143  elem_grph.configure(part, comm);
144 
145  // configure connectivity exchange based on element exchange
146  con_grph.resize(size);
147  con_grph.sdsp[0] = 0;
148  for(int i=0; i<size; i++) con_grph.sdsp[i+1] = sendbuff.dsp[elem_grph.sdsp[i+1]];
149  cnt_from_dsp(con_grph.sdsp, con_grph.scnt);
150  MPI_Alltoall(con_grph.scnt.data(), sizeof(size_t), MPI_BYTE, con_grph.rcnt.data(), sizeof(size_t), MPI_BYTE, comm);
151  dsp_from_cnt(con_grph.rcnt, con_grph.rdsp);
152 
153  int twoFib = sendbuff.l_numelem > 0 ? sendbuff.she.size() == sendbuff.l_numelem*3 : 0;
154  MPI_Allreduce(MPI_IN_PLACE, &twoFib, 1, MPI_INT, MPI_MAX, comm);
155 
156  // resize meshdata to fit received data
157  vector<T> & ref_eidx = mesh.get_numbering(NBR_ELEM_REF);
158  vector<T> & ref_eidx_sbuff = sendbuff.get_numbering(NBR_ELEM_REF);
159 
160  size_t recv_size = sum(elem_grph.rcnt);
161  mesh.l_numelem = recv_size;
162  mesh.dsp .resize(recv_size+1);
163  mesh.tag .resize(recv_size);
164  ref_eidx.resize(recv_size);
165  mesh.type.resize(recv_size);
166  mesh.fib .resize(recv_size*3);
167  if(twoFib)
168  mesh.she .resize(recv_size*3);
169 
170  // we need auxiliary mesh counts
171  vector<T> sendmesh_cnt(sendbuff.l_numelem), mesh_cnt(mesh.l_numelem);
172  cnt_from_dsp(sendbuff.dsp, sendmesh_cnt);
173 
174  MPI_Exchange(elem_grph, sendmesh_cnt, mesh_cnt, comm);
175  dsp_from_cnt(mesh_cnt, mesh.dsp);
176 
177  MPI_Exchange(elem_grph, sendbuff.tag, mesh.tag, comm);
178  MPI_Exchange(elem_grph, ref_eidx_sbuff, ref_eidx, comm);
179  MPI_Exchange(elem_grph, sendbuff.type, mesh.type, comm);
180 
181  elem_grph.scale(3); // fiber data has three values per element
182  MPI_Exchange(elem_grph, sendbuff.fib, mesh.fib, comm);
183  if(twoFib)
184  MPI_Exchange(elem_grph, sendbuff.she, mesh.she, comm);
185 
186  // the only thing left is to communicate connectivity
187  {
188  // we map the sendbuff connectivity to global reference indexing
189  vector<T> & rnod = sendbuff.get_numbering(NBR_REF);
190  for(size_t i=0; i<sendbuff.con.size(); i++) sendbuff.con[i] = rnod[sendbuff.con[i]];
191  }
192  recv_size = sum(con_grph.rcnt);
193  mesh.con.resize(recv_size);
194  MPI_Exchange(con_grph, sendbuff.con, mesh.con, comm);
195 
196  mesh.localize(NBR_REF);
197 }
205 template<class T, class S>
206 inline void redistribute_elements(meshdata<T, S> & mesh, vector<T> & part)
207 {
208  meshdata<T, S> sendbuff;
209  redistribute_elements(mesh, sendbuff, part);
210 }
211 
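A hedged usage sketch (the helper name and the int/double instantiation are illustrative): send every local element to the neighbouring rank; only element data moves, vertex data is untouched.

inline void example_rotate_elements(SF::meshdata<int, double> & mesh)
{
  int size, rank;
  MPI_Comm_size(mesh.comm, &size);
  MPI_Comm_rank(mesh.comm, &rank);

  // one destination rank per local element; purely illustrative partitioning
  SF::vector<int> part(mesh.l_numelem, (rank + 1) % size);
  SF::redistribute_elements(mesh, part);
}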
221 template<class T, class S>
222 inline void redistribute_mesh(meshdata<T, S> & mesh, vector<T> & part)
223 {
224  // the main idea is to compute the unique node set of the element block we send to
225  // each process. That way we know which coordinates to communicate.
226 
227  meshdata<T, S> sendmesh;
228  redistribute_elements(mesh, sendmesh, part);
229  sendmesh.xyz.assign(mesh.xyz.begin(), mesh.xyz.end());
230  // part and sendmesh have been sorted in redistribute_elements.
231 
232  MPI_Comm comm = mesh.comm;
233  int size, rank;
234  MPI_Comm_size(comm, &size);
235  MPI_Comm_rank(comm, &rank);
236 
237  // we use commgraphs to store the layouts of the elements, connectivities and nodes
238  commgraph<T> elem_layout, con_layout, nod_layout;
239  elem_layout.resize(size);
240  con_layout.resize(size);
241  nod_layout.resize(size);
242 
243  // compute layout of the elements
244  count(part, elem_layout.scnt);
245  dsp_from_cnt(elem_layout.scnt, elem_layout.sdsp);
246 
247  // compute layout of the connectivities using the element layout
248  con_layout.scnt.zero();
249  for(int pid=0; pid<size; pid++)
250  for(T i=elem_layout.sdsp[pid]; i<elem_layout.sdsp[pid+1]; i++)
251  con_layout.scnt[pid] += sendmesh.dsp[i+1] - sendmesh.dsp[i];
252  dsp_from_cnt(con_layout.scnt, con_layout.sdsp);
253 
254  // finally compute the nodes we need to send to each rank and their layout
255  vector<T> nod_sbuff(sendmesh.con.size());
256  nod_layout.sdsp[0] = 0; // initialize first entry of displacements
257 
258  for(int pid=0; pid<size; pid++)
259  {
260  T con_start = con_layout.sdsp[pid], con_end = con_layout.sdsp[pid+1];
261  vector<T> nod(con_end - con_start);
262  // copy connectivity block
263  vec_assign(nod.data(), sendmesh.con.data() + con_start, nod.size());
264  // get unique node set for block
265  binary_sort(nod); unique_resize(nod);
266 
267  // add block size to layout
268  nod_layout.scnt[pid] = nod.size();
269  nod_layout.sdsp[pid+1] = nod_layout.sdsp[pid] + nod_layout.scnt[pid];
270  // copy node set to send buffer
271  vec_assign(nod_sbuff.data() + nod_layout.sdsp[pid], nod.data(), nod.size());
272  }
273  nod_sbuff.resize(nod_layout.sdsp[nod_layout.sdsp.size()-1]);
274  // finish up nod_layout
275  MPI_Alltoall(nod_layout.scnt.data(), sizeof(T), MPI_BYTE, nod_layout.rcnt.data(), sizeof(T), MPI_BYTE, comm);
276  dsp_from_cnt(nod_layout.rcnt, nod_layout.rdsp);
277 
278  vector<T> nod_lidx(nod_sbuff);
279  global_to_local(sendmesh.get_numbering(NBR_REF), nod_lidx, false, true);
280 
281  // fill coordinates send buffer
282  vector<S> xyz_sbuff(nod_sbuff.size() * 3);
283  for(size_t i=0; i<nod_lidx.size(); i++)
284  {
285  T lidx = nod_lidx[i];
286  xyz_sbuff[i*3+0] = sendmesh.xyz[lidx*3+0];
287  xyz_sbuff[i*3+1] = sendmesh.xyz[lidx*3+1];
288  xyz_sbuff[i*3+2] = sendmesh.xyz[lidx*3+2];
289  }
290 
291  size_t rsize = sum(nod_layout.rcnt);
292  vector<T> nod_rbuff(rsize);
293  vector<S> xyz_rbuff(rsize*3);
294 
295  MPI_Exchange(nod_layout, nod_sbuff, nod_rbuff, comm);
296  nod_layout.scale(3);
297  MPI_Exchange(nod_layout, xyz_sbuff, xyz_rbuff, comm);
298 
299  // map received indices to local indexing
300  global_to_local(mesh.get_numbering(NBR_REF), nod_rbuff, false, true);
301 
302  // find unique coordinates
303  vector<T> acc_cnt(nod_rbuff.size(), 1), acc_dsp,
304  acc_col(nod_rbuff.size());
305 
306  interval(acc_col, 0, nod_rbuff.size());
307  binary_sort_copy(nod_rbuff, acc_col);
308  unique_accumulate(nod_rbuff, acc_cnt);
309  acc_dsp.resize(acc_cnt.size()+1);
310  dsp_from_cnt(acc_cnt, acc_dsp);
311 
312  // generate new coordinates vector
313  mesh.xyz.resize(acc_cnt.size()*3);
314  for(size_t i=0; i<acc_cnt.size(); i++)
315  {
316  T pidx = acc_col[acc_dsp[i]];
317  mesh.xyz[i*3+0] = xyz_rbuff[pidx*3+0];
318  mesh.xyz[i*3+1] = xyz_rbuff[pidx*3+1];
319  mesh.xyz[i*3+2] = xyz_rbuff[pidx*3+2];
320  }
321 }
322 
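A usage sketch under the same assumptions (hypothetical helper): with redistribute_mesh() the vertex coordinates follow the elements as well.

inline void example_repartition(SF::meshdata<int, double> & mesh)
{
  int size;
  MPI_Comm_size(mesh.comm, &size);

  const SF::vector<int> & eidx = mesh.get_numbering(SF::NBR_ELEM_REF);
  SF::vector<int> part(mesh.l_numelem);
  for(size_t i = 0; i < part.size(); i++)
    part[i] = eidx[i] % size;            // illustrative round-robin partitioning
  SF::redistribute_mesh(mesh, part);     // afterwards mesh.xyz holds only the locally used nodes
}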
323 
333 template<class T, class S>
334 inline void write_mesh_parallel(const meshdata<T, S> & mesh, bool binary, std::string basename)
335 {
336  const MPI_Comm comm = mesh.comm;
337 
338  int size, rank;
339  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
340 
341  // we duplicate the mesh so that we can safely redistribute it
342  meshdata<T, S> wmesh = mesh;
343  vector<T> & ref_eidx = wmesh.get_numbering(NBR_ELEM_REF);
344 
345  T elem_gmin = global_min(ref_eidx, comm);
346  T elem_gmax = global_max(ref_eidx, comm);
347 
348  // block size
349  T elem_bsize = (elem_gmax - elem_gmin) / size + 1;
350 
351  // redistribute elements linearly ascending by their reference element index --------------------
352  vector<T> dest(wmesh.l_numelem);
353  for(size_t i=0; i<dest.size(); i++)
354  dest[i] = (ref_eidx[i] - elem_gmin) / elem_bsize;
355 
356  // redistribute elements of write mesh.
357  // WARNING: vertex coordinates are not redistributed. Therefore, the
358  // vertices will be written based on the original mesh.
359  redistribute_elements(wmesh, dest);
360 
361  {
362  // temporary datastructs
363  vector<T> perm, eidx;
364  meshdata<T, S> tmesh = wmesh;
365 
366  // generate permutation
367  eidx = wmesh.get_numbering(NBR_ELEM_REF);
368  interval(perm, 0, eidx.size());
369  binary_sort_copy(eidx, perm);
370 
371  // permute local mesh to locally ascending element indices
372  permute_mesh(tmesh, wmesh, perm);
373  }
374 
375  vector<T> nbr_orig; // the global vertex numbering of the original mesh
376  if(mesh.nbr.count(NBR_SUBMESH)) {
377  // mesh is a submesh. generate submesh numbering for the redistributed
378  // mesh and map connectivity to it
379 
380  // copy original numbering for later
381  nbr_orig = mesh.get_numbering(NBR_SUBMESH);
382 
383  // generate new numbering on write mesh
384  submesh_numbering<T, S> submesh_nbr;
385  submesh_nbr(wmesh);
386  const vector<T> & nbr = wmesh.get_numbering(NBR_SUBMESH);
387  for(size_t i=0; i<wmesh.con.size(); i++) wmesh.con[i] = nbr[wmesh.con[i]];
388  }
389  else {
390  // mesh is a reference mesh. just map connectivity.
391 
392  // copy original numbering for later
393  nbr_orig = mesh.get_numbering(NBR_REF);
394 
395  const vector<T> & nbr = wmesh.get_numbering(NBR_REF);
396  for(size_t i=0; i<wmesh.con.size(); i++) wmesh.con[i] = nbr[wmesh.con[i]];
397  }
398 
399  // write .elem and .lon files
400  write_elements(wmesh, binary, basename);
401 
402  // now we need to redistribute and write the vertex coordinates
403  const vector<T> & alg_nod = mesh.pl.algebraic_nodes();
404  vector<T> xyz_cnt(alg_nod.size(), 3), xyz_idx(alg_nod.size()), srt_cnt, srt_idx;
405  vector<S> xyz(alg_nod.size()*3), srt_xyz;
406 
407  for(size_t i=0; i<alg_nod.size(); i++) {
408  T loc = alg_nod[i];
409 
410  xyz[i*3+0] = mesh.xyz[loc*3+0];
411  xyz[i*3+1] = mesh.xyz[loc*3+1];
412  xyz[i*3+2] = mesh.xyz[loc*3+2];
413 
414  xyz_idx[i] = nbr_orig[loc];
415  }
416 
417  sort_parallel(comm, xyz_idx, xyz_cnt, xyz, srt_idx, srt_cnt, srt_xyz);
418 
419  // write header with root
420  FILE* pts_fd;
421  std::string pts_file = binary ? basename + ".bpts" : basename + ".pts";
422 
423  if(rank == 0) {
424  pts_fd = fopen(pts_file.c_str(), "w");
425  if(!pts_fd) {
426  fprintf(stderr, "Error: could not open file: %s. Aborting!", pts_file.c_str());
427  exit(1);
428  }
429  write_pts_header(pts_fd, binary, mesh.g_numpts);
430  fclose(pts_fd);
431  }
432 
433  // write vertices sequentially rank after rank
434  for(int pid=0; pid < size; pid++) {
435  if(pid == rank) {
436  pts_fd = fopen(pts_file.c_str(), "a");
437  if(!pts_fd) {
438  fprintf(stderr, "Error: could not open file: %s. Aborting!", pts_file.c_str());
439  exit(1);
440  }
441  write_pts_block(pts_fd, binary, srt_xyz);
442  fclose(pts_fd);
443  }
444  MPI_Barrier(comm);
445  }
446 }
447 
448 
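A minimal call sketch (the output path is hypothetical): write the distributed mesh as ASCII .elem/.lon/.pts files without gathering it on one rank; passing true instead selects the binary variants (e.g. .bpts).

inline void example_write(const SF::meshdata<int, double> & mesh)
{
  SF::write_mesh_parallel(mesh, false, "./output/heart");
}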
455 template<class T, class S>
456 inline void gather_mesh(const meshdata<T, S> & locmesh, meshdata<T, S> & globmesh)
457 {
458  MPI_Comm comm = locmesh.comm;
459 
460  int size, rank;
461  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
462 
463  // gather mesh data using the redistribute_mesh function
464  vector<T> part(locmesh.l_numelem, 0); // all elements to rank 0
465  globmesh = locmesh;
466  redistribute_mesh(globmesh, part);
467 }
468 
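Sketch (same assumptions as above): gather the distributed mesh on rank 0, e.g. for serial post-processing.

inline void example_gather_on_root(const SF::meshdata<int, double> & mesh)
{
  SF::meshdata<int, double> global;
  SF::gather_mesh(mesh, global);
  // on rank 0, global now holds all elements and coordinates; the other ranks end up empty
}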
472 template<class T, class S>
473 inline void print_DD_info(const meshdata<T, S> & mesh)
474 {
475  MPI_Comm comm = mesh.comm;
476 
477  int size, rank;
478  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
479 
480  vector<size_t> npoint(size), nelem(size), ninterf(size), nidx(size);
481 
482  size_t intf_size = mesh.pl.interface().size(),
483  idx_size = mesh.pl.num_algebraic_idx();
484 
485  MPI_Gather(&mesh.l_numpts, sizeof(size_t), MPI_BYTE, npoint.data(), sizeof(size_t), MPI_BYTE, 0, comm);
486  MPI_Gather(&mesh.l_numelem, sizeof(size_t), MPI_BYTE, nelem.data(), sizeof(size_t), MPI_BYTE, 0, comm);
487  MPI_Gather(&intf_size, sizeof(size_t), MPI_BYTE, ninterf.data(), sizeof(size_t), MPI_BYTE, 0, comm);
488  MPI_Gather(&idx_size, sizeof(size_t), MPI_BYTE, nidx.data(), sizeof(size_t), MPI_BYTE, 0, comm);
489 
490  vector<int> mult(mesh.l_numpts, 1);
491  mesh.pl.reduce(mult, "sum");
492 
493  int hist_size = 16;
494  vector<int> mult_hist(hist_size, 0), global_mult_hist(hist_size, 0);
495  for(auto m : mult) mult_hist[m]++;
496 
497  MPI_Reduce(mult_hist.data(), global_mult_hist.data(), hist_size, MPI_INT, MPI_SUM, 0, comm);
498 
499  if(!rank) {
500  printf("===== Parallel mesh statistics =====\n");
501 
502  printf("#pid\t#nodes\t#elems\t#interf\t#alg\n");
503  for(int pid = 0; pid < size; pid++)
504  printf("%d\t%ld\t%ld\t%ld\t%ld\n", pid, (long int)npoint[pid], (long int)nelem[pid],
505  (long int)ninterf[pid], (long int)nidx[pid]);
506  printf("\n");
507 
508  std::cout << "Multiplicities :" << std::endl;
509  for(int i = 2; i < hist_size && global_mult_hist[i] > 0; i++)
510  std::cout << i << ": " << global_mult_hist[i] << std::endl;
511  }
512 }
513 
514 
523 template<class T, class S>
524 inline void extract_mesh(const vector<bool> & keep,
525  const meshdata<T, S> & mesh,
526  meshdata<T, S> & submesh)
527 {
528  assert(keep.size() == mesh.l_numelem);
529 
530  size_t num_extr_elem = 0, num_extr_entr = 0;
531  bool twoFib = ( mesh.she.size() == (mesh.l_numelem*3) );
532 
533  submesh.l_numpts = submesh.g_numpts = 0;
534  submesh.comm = mesh.comm;
535 
536  const vector<T> & mesh_ref_eidx = mesh.get_numbering(NBR_ELEM_REF);
537  vector<T> & sub_ref_eidx = submesh.register_numbering(NBR_ELEM_REF);
538 
539  for(size_t i=0; i<mesh.l_numelem; i++) {
540  if(keep[i]) {
541  num_extr_elem++;
542  num_extr_entr += mesh.dsp[i+1] - mesh.dsp[i];
543  }
544  }
545 
546  submesh.l_numelem = num_extr_elem;
547 
548  vector<T> cnt(num_extr_elem);
549  submesh.dsp.resize(num_extr_elem+1);
550  submesh.tag.resize(num_extr_elem);
551  sub_ref_eidx.resize(num_extr_elem);
552  submesh.type.resize(num_extr_elem);
553 
554  submesh.fib.resize(num_extr_elem*3);
555  if(twoFib)
556  submesh.she.resize(num_extr_elem*3);
557 
558  submesh.con.resize(num_extr_entr);
559  // reference numbering
560  const vector<T> & rnod = mesh.get_numbering(NBR_REF);
561 
562  for(size_t ridx_ele=0, ridx_con=0, widx_ele=0, widx_con=0; ridx_ele<mesh.l_numelem; ridx_ele++)
563  {
564  ridx_con = mesh.dsp[ridx_ele];
565 
566  if(keep[ridx_ele]) {
567  // element data
568  cnt[widx_ele] = mesh.dsp[ridx_ele + 1] - mesh.dsp[ridx_ele];
569  submesh.tag [widx_ele] = mesh.tag [ridx_ele];
570  sub_ref_eidx[widx_ele] = mesh_ref_eidx[ridx_ele];
571  submesh.type[widx_ele] = mesh.type [ridx_ele];
572 
573  // fibers
574  submesh.fib[widx_ele*3+0] = mesh.fib[ridx_ele*3+0];
575  submesh.fib[widx_ele*3+1] = mesh.fib[ridx_ele*3+1];
576  submesh.fib[widx_ele*3+2] = mesh.fib[ridx_ele*3+2];
577  if(twoFib) {
578  submesh.she[widx_ele*3+0] = mesh.she[ridx_ele*3+0];
579  submesh.she[widx_ele*3+1] = mesh.she[ridx_ele*3+1];
580  submesh.she[widx_ele*3+2] = mesh.she[ridx_ele*3+2];
581  }
582 
583  // connectivity
584  for(int j=0; j<cnt[widx_ele]; j++)
585  submesh.con[widx_con++] = rnod[mesh.con[ridx_con++]];
586 
587  widx_ele++;
588  }
589  }
590  dsp_from_cnt(cnt, submesh.dsp);
591 
592  unsigned long int gnumele = submesh.l_numelem;
593  MPI_Allreduce(MPI_IN_PLACE, &gnumele, 1, MPI_UNSIGNED_LONG, MPI_SUM, submesh.comm);
594  submesh.g_numelem = gnumele;
595 
596  submesh.localize(NBR_REF);
597 }
598 
599 
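A hedged sketch (tag value 1 is hypothetical): build the keep mask from the element tags, extract, and rebalance afterwards since some ranks may end up without elements.

inline void example_extract_tag_one(const SF::meshdata<int, double> & mesh,
                                    SF::meshdata<int, double> & submesh)
{
  SF::vector<bool> keep(mesh.l_numelem, false);
  for(size_t i = 0; i < mesh.l_numelem; i++)
    keep[i] = (mesh.tag[i] == 1);          // keep only elements carrying tag 1

  SF::extract_mesh(keep, mesh, submesh);   // submesh connectivity is localized w.r.t. NBR_REF
  SF::rebalance_mesh(submesh);             // fix ranks that received zero elements
}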
605 template<class T, class S>
606 inline void rebalance_mesh(meshdata<T, S> & mesh)
607 {
608  MPI_Comm comm = mesh.comm;
609 
610  // we check if some ranks have 0 local elems
611  short errflag = 0;
612  if(mesh.l_numelem == 0) errflag = 1;
613  MPI_Allreduce(MPI_IN_PLACE, &errflag, 1, MPI_SHORT, MPI_SUM, comm);
614 
615  // we redistribute the elements if necessary
616  if(errflag) {
617  int size, rank;
618  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
619 
620  vector<T> part(mesh.l_numelem);
621  vector<size_t> elem_counts(size), layout;
622  MPI_Allgather(&mesh.l_numelem, sizeof(size_t), MPI_BYTE, elem_counts.data(),
623  sizeof(size_t), MPI_BYTE, comm);
624 
625  dsp_from_cnt(elem_counts, layout);
626 
627  for(size_t i=0; i<mesh.l_numelem; i++)
628  part[i] = (layout[rank] + i) % size;
629 
630  redistribute_elements(mesh, part);
631  }
632 }
633 
634 
643 template<class T, class S>
644 inline void extract_myocardium(const meshdata<T, S> & mesh, meshdata<T, S> & submesh)
645 {
646  MPI_Comm comm = mesh.comm;
647  vector<bool> keep(mesh.l_numelem);
648 
649  // fill keep based on empty fiber definitions
650  for(size_t i=0; i<mesh.l_numelem; i++) {
651  S l1 = mesh.fib[i*3+0];
652  S l2 = mesh.fib[i*3+1];
653  S l3 = mesh.fib[i*3+2];
654 
655  if( l1*l1 + l2*l2 + l3*l3 )
656  keep[i] = true;
657  else
658  keep[i] = false;
659  }
660 
661  extract_mesh(keep, mesh, submesh);
662  rebalance_mesh(submesh);
663 
664  // get unique tag set
665  submesh.extr_tag.insert(submesh.tag.begin(), submesh.tag.end());
666  {
667  // we have to collect the global list of tags of this submesh
668  vector<T> tags, glob_tags;
669  tags.assign(submesh.extr_tag.begin(), submesh.extr_tag.end()); // local list
670  make_global(tags, glob_tags, mesh.comm); // now globalized
671 
672  submesh.extr_tag.insert(glob_tags.begin(), glob_tags.end());
673  }
674 }
675 
676 
686 template<class T, class S>
687 inline void extract_tagbased(const meshdata<T, S> & mesh, meshdata<T, S> & submesh)
688 {
689  if(submesh.extr_tag.size())
690  {
691  vector<bool> keep(mesh.l_numelem, false);
692 
693  // fill keep based on the element tags
694  for(size_t i=0; i<mesh.l_numelem; i++) {
695  if( submesh.extr_tag.count(mesh.tag[i]) )
696  keep[i] = true;
697  }
698 
699  extract_mesh(keep, mesh, submesh);
700  }
701  else {
702  // if the tags are not provided, we assume that the whole mesh is copied into the
703  // submesh
704  std::string oldname = submesh.name;
705  // copy reference mesh
706  submesh = mesh;
707  // revert name to old name
708  submesh.name = oldname;
709  // compute tags
710  submesh.extr_tag.insert(submesh.tag.begin(), submesh.tag.end());
711  {
712  // we have to collect the global list of tags of this submesh
713  vector<T> tags, glob_tags;
714  tags.assign(submesh.extr_tag.begin(), submesh.extr_tag.end()); // local list
715  make_global(tags, glob_tags, mesh.comm); // now globalized
716 
717  submesh.extr_tag.insert(glob_tags.begin(), glob_tags.end());
718  }
719  }
720 
721  rebalance_mesh(submesh);
722 }
723 
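Sketch (tag values 3 and 4 are hypothetical region labels): the tags to extract are passed in via the extr_tag member of the target mesh before the call.

inline void example_extract_regions(const SF::meshdata<int, double> & mesh,
                                    SF::meshdata<int, double> & submesh)
{
  const int wanted[2] = {3, 4};                 // element tags of the wanted regions
  submesh.extr_tag.insert(wanted, wanted + 2);  // select the tags to extract
  SF::extract_tagbased(mesh, submesh);          // submesh now holds only those regions
}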
729 template<class T, class S>
730 inline void print_mesh_graph(meshdata<T,S> & mesh)
731 {
732  int rank, size;
733  MPI_Comm_size(mesh.comm, &size); MPI_Comm_rank(mesh.comm, &rank);
734 
735  vector<T> n2n_cnt, n2n_con;
736  nodal_connectivity_graph(mesh, n2n_cnt, n2n_con);
737  matrixgraph_plotter<vector<T> > plotter(30, 60);
738 
739  for(int pid=0; pid<size; pid++)
740  {
741  if(pid == rank) {
742  std::cout << "\n\n Rank " << rank << ": \n" << std::endl;
743  plotter.print(n2n_cnt, n2n_con, '*');
744  }
745  MPI_Barrier(mesh.comm);
746  }
747 }
748 
762 template<class T, class S> inline
763 void inter_domain_mapping(const meshdata<T,S> & mesh_a, const meshdata<T,S> & mesh_b,
764  const SF_nbr snbr, index_mapping<T> & a_to_b)
765 {
766  assert(mesh_a.comm == mesh_b.comm);
767  MPI_Comm comm = mesh_a.comm;
768 
769  int size, rank;
770  MPI_Comm_size(comm, &size);
771  MPI_Comm_rank(comm, &rank);
772 
773  // reference and submesh numberings of both meshes
774  const vector<T> & mesh_a_rnbr = mesh_a.get_numbering(NBR_REF);
775  const vector<T> & mesh_a_snbr = mesh_a.get_numbering(snbr);
776  const vector<T> & mesh_b_rnbr = mesh_b.get_numbering(NBR_REF);
777  const vector<T> & mesh_b_snbr = mesh_b.get_numbering(snbr);
778  // send- and receive-buffer for the reference and submesh numberings
779  vector<T> mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, mesh_a_snbr_sbuff, mesh_a_snbr_rbuff;
780  vector<T> mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff;
781 
782  // communicate reference- and submesh numbering of mesh a ----------------------------
783  vector<T> dest(mesh_a_rnbr.size()), perm_a, perm_b;
784  for(size_t i=0; i<dest.size(); i++) dest[i] = mesh_a_rnbr[i] % size;
785 
786  interval(perm_a, 0, mesh_a_rnbr.size());
787  binary_sort_copy(dest, perm_a);
788 
789  mesh_a_rnbr_sbuff.resize(dest.size()); mesh_a_snbr_sbuff.resize(dest.size());
790  for(size_t i=0; i<dest.size(); i++) {
791  mesh_a_rnbr_sbuff[i] = mesh_a_rnbr[perm_a[i]];
792  //mesh_a_snbr_sbuff[i] = mesh_a_snbr[perm_a[i]];
793  }
794 
795  commgraph<size_t> grph_a;
796  grph_a.configure(dest, comm);
797  size_t rcv_size = sum(grph_a.rcnt);
798 
799  mesh_a_rnbr_rbuff.resize(rcv_size);
800  mesh_a_snbr_rbuff.resize(rcv_size);
801 
802  MPI_Exchange(grph_a, mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, comm);
803  //MPI_Exchange(grph_a, mesh_a_snbr_sbuff, mesh_a_snbr_rbuff, comm);
804 
805  // communicate reference- and submesh numbering of mesh b ----------------------------
806  dest.resize(mesh_b_rnbr.size());
807  for(size_t i=0; i<dest.size(); i++) dest[i] = mesh_b_rnbr[i] % size;
808 
809  interval(perm_b, 0, mesh_b_rnbr.size());
810  binary_sort_copy(dest, perm_b);
811 
812  mesh_b_rnbr_sbuff.resize(dest.size()); mesh_b_snbr_sbuff.resize(dest.size());
813  for(size_t i=0; i<dest.size(); i++) {
814  mesh_b_rnbr_sbuff[i] = mesh_b_rnbr[perm_b[i]];
815  mesh_b_snbr_sbuff[i] = mesh_b_snbr[perm_b[i]];
816  }
817 
818  commgraph<size_t> grph_b;
819  grph_b.configure(dest, comm);
820  rcv_size = sum(grph_b.rcnt);
821 
822  mesh_b_rnbr_rbuff.resize(rcv_size);
823  mesh_b_snbr_rbuff.resize(rcv_size);
824 
825  MPI_Exchange(grph_b, mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, comm);
826  MPI_Exchange(grph_b, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff, comm);
827 
828  // generate mapping ------------------------------------------------------------------
829  // We do this by first setting up a map between reference index and submesh index for
830  // mesh b. This allows us to check fast if a reference index is in b and do the mapping.
831  hashmap::unordered_map<T,T> ref_to_sub_b;
832  for(size_t i=0; i<mesh_b_rnbr_rbuff.size(); i++) {
833  T ref_idx = mesh_b_rnbr_rbuff[i];
834  T sub_idx = mesh_b_snbr_rbuff[i];
835 
836  if(ref_to_sub_b.count(ref_idx) && ref_to_sub_b[ref_idx] != sub_idx)
837  fprintf(stderr, "inter_domain_mapping error: Mismatching multiple mappings: %d : %d \n",
838  ref_to_sub_b[ref_idx], sub_idx);
839  ref_to_sub_b[ref_idx] = sub_idx;
840  }
841 
842  // Now we either map the reference indices of mesh a to the submesh index of b, or, if
843  // the reference index is not in b, set it to -1
844  for(size_t i=0; i<mesh_a_rnbr_rbuff.size(); i++) {
845  auto it = ref_to_sub_b.find(mesh_a_rnbr_rbuff[i]);
846  if(it != ref_to_sub_b.end())
847  mesh_a_snbr_rbuff[i] = it->second;
848  else
849  mesh_a_snbr_rbuff[i] = -1;
850  }
851 
852  // now we simply communicate back by transposing grph_a
853  grph_a.transpose();
854  MPI_Exchange(grph_a, mesh_a_snbr_rbuff, mesh_a_snbr_sbuff, comm);
855 
856  size_t num_mapped = 0;
857  for(size_t i=0; i<mesh_a_snbr_sbuff.size(); i++)
858  if(mesh_a_snbr_sbuff[i] > -1) num_mapped++;
859 
860  // the submesh indices that could be mapped
861  vector<T> snbr_a(num_mapped), snbr_b(num_mapped);
862  binary_sort_copy(perm_a, mesh_a_snbr_sbuff);
863 
864  for(size_t i=0, idx=0; i<mesh_a_snbr_sbuff.size(); i++) {
865  if(mesh_a_snbr_sbuff[i] > -1) {
866  snbr_a[idx] = mesh_a_snbr[i];
867  snbr_b[idx] = mesh_a_snbr_sbuff[i];
868  idx++;
869  }
870  }
871 
872  // now we can set up the a-to-b mapping
873  a_to_b.assign(snbr_a, snbr_b);
874 }
875 
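Usage sketch (assumes mesh_a and mesh_b were extracted from the same reference mesh and share a communicator): map the NBR_SUBMESH indices of mesh_a onto those of mesh_b.

template<class T, class S> inline
void example_build_mapping(const SF::meshdata<T,S> & mesh_a,
                           const SF::meshdata<T,S> & mesh_b,
                           SF::index_mapping<T> & a_to_b)
{
  SF::inter_domain_mapping(mesh_a, mesh_b, SF::NBR_SUBMESH, a_to_b);
  // nodes of mesh_a that are not present in mesh_b are simply absent from the mapping
}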
876 template<class T> inline
877 void insert_surf_tri(T n1, T n2, T n3, size_t eidx,
878  triple<T> & surf,
879  tri_sele<T> & sele,
880  hashmap::unordered_map<triple<T>, tri_sele<T> > & surfmap)
881 {
882  sele.v1 = n1, sele.v2 = n2, sele.v3 = n3; sele.eidx = eidx;
883  sort_triple(n1, n2, n3, surf.v1, surf.v2, surf.v3);
884 
885  auto it = surfmap.find(surf);
886  if(it != surfmap.end()) surfmap.erase(it);
887  else surfmap[surf] = sele;
888 }
889 
890 template<class T> inline
891 void insert_surf_quad(T n1, T n2, T n3, T n4, size_t eidx,
892  vector<T> & buff,
893  quadruple<T> & surf,
894  quad_sele<T> & sele,
895  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & surfmap)
896 {
897  buff[0] = n1, buff[1] = n2, buff[2] = n3, buff[3] = n4;
898  binary_sort(buff);
899 
900  sele.v1 = n1, sele.v2 = n2, sele.v3 = n3, sele.v4 = n4, sele.eidx = eidx;
901  surf.v1 = buff[0], surf.v2 = buff[1], surf.v3 = buff[2], surf.v4 = buff[3];
902 
903  auto it = surfmap.find(surf);
904  if(it != surfmap.end()) surfmap.erase(it);
905  else surfmap[surf] = sele;
906 }
907 
908 template<class T> inline
909 void insert_surf_tet(const T* nod,
910  const size_t eidx,
911  hashmap::unordered_map<triple<T>, tri_sele<T> > & surfmap)
912 {
913  // surfaces are (2,3,1) , (1,4,2) , (2,4,3) , (1,3,4)
914  struct triple<T> surf;
915  struct tri_sele<T> sele;
916 
917  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3];
918 
919  insert_surf_tri(n2, n3, n1, eidx, surf, sele, surfmap);
920  insert_surf_tri(n1, n4, n2, eidx, surf, sele, surfmap);
921  insert_surf_tri(n2, n4, n3, eidx, surf, sele, surfmap);
922  insert_surf_tri(n1, n3, n4, eidx, surf, sele, surfmap);
923 }
924 
925 template<class T> inline
926 void insert_surf_pyr(const T* nod, const size_t eidx, vector<T> & buff,
927  hashmap::unordered_map<triple<T>, tri_sele<T> > & surfmap,
928  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & qsurfmap)
929 {
930  // surfaces are (1,5,2) , (2,5,3) , (3,5,4) , (4,5,1) , (1,2,3,4)
931  triple<T> surf; quadruple<T> qsurf;
932  tri_sele<T> sele; quad_sele<T> qsele;
933 
934  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4];
935 
936  insert_surf_tri(n1, n5, n2, eidx, surf, sele, surfmap);
937  insert_surf_tri(n2, n5, n3, eidx, surf, sele, surfmap);
938  insert_surf_tri(n3, n5, n4, eidx, surf, sele, surfmap);
939  insert_surf_tri(n4, n5, n1, eidx, surf, sele, surfmap);
940  insert_surf_quad(n1, n2, n3, n4, eidx, buff, qsurf, qsele, qsurfmap);
941 }
942 
943 template<class T> inline
944 void insert_surf_pri(const T* nod, const size_t eidx, vector<T> & buff,
945  hashmap::unordered_map<triple<T>, tri_sele<T> > & surfmap,
946  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & qsurfmap)
947 {
948  struct triple<T> surf;
949  struct quadruple<T> qsurf;
950  struct tri_sele<T> sele;
951  struct quad_sele<T> qsele;
952 
953  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4], n6 = nod[5];
954 
955  // surfaces are (1,2,3) , (4,5,6) , (1,2,6,4) , (2,3,5,6) , (3,1,4,5)
956  insert_surf_tri(n1, n2, n3, eidx, surf, sele, surfmap);
957  insert_surf_tri(n4, n5, n6, eidx, surf, sele, surfmap);
958  insert_surf_quad(n1, n2, n6, n4, eidx, buff, qsurf, qsele, qsurfmap);
959  insert_surf_quad(n2, n3, n5, n6, eidx, buff, qsurf, qsele, qsurfmap);
960  insert_surf_quad(n3, n1, n4, n5, eidx, buff, qsurf, qsele, qsurfmap);
961 }
962 
963 template<class T> inline
964 void insert_surf_hex(const T* nod, const size_t eidx, vector<T> & buff,
965  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & surfmap)
966 {
967  // surfaces are (1,2,3,4) , (3,2,8,7) , (4,3,7,6) , (1,4,6,5) , (2,1,5,8), (6,7,8,5)
968  struct quadruple<T> qsurf;
969  struct quad_sele<T> qsele;
970 
971  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3],
972  n5 = nod[4], n6 = nod[5], n7 = nod[6], n8 = nod[7];
973 
974  insert_surf_quad(n1, n2, n3, n4, eidx, buff, qsurf, qsele, surfmap);
975  insert_surf_quad(n3, n2, n8, n7, eidx, buff, qsurf, qsele, surfmap);
976  insert_surf_quad(n4, n3, n7, n6, eidx, buff, qsurf, qsele, surfmap);
977  insert_surf_quad(n1, n4, n6, n5, eidx, buff, qsurf, qsele, surfmap);
978  insert_surf_quad(n2, n1, n5, n8, eidx, buff, qsurf, qsele, surfmap);
979  insert_surf_quad(n6, n7, n8, n5, eidx, buff, qsurf, qsele, surfmap);
980 }
981 
982 template<class T, class S> inline
983 void search_for_surface(const meshdata<T,S> & mesh, const SF_nbr numbering,
984  const hashmap::unordered_map<triple<T>, tri_sele<T> > & search_tri,
985  const hashmap::unordered_map<quadruple<T>, quad_sele<T> > & search_quad,
986  hashmap::unordered_map<triple<T>, tri_sele<T> > & found_tri,
987  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & found_quad)
988 {
989  const T* con = mesh.con.data();
990  const T* nbr = mesh.get_numbering(numbering).data();
991 
992  hashmap::unordered_map<triple<T>, tri_sele<T> > tri_surf;
993  hashmap::unordered_map<quadruple<T>, quad_sele<T> > quad_surf;
994 
995  vector<T> nodvec(mesh.con.size()); T* nod = nodvec.data();
996  for(size_t i=0; i<nodvec.size(); i++)
997  nod[i] = nbr[con[i]];
998 
999  const vector<T> & ref_eidx = mesh.get_numbering(NBR_ELEM_REF);
1000  vector<T> qbuff(4); // buffer if quad nodes need to be sorted
1001 
1002  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++) {
1003  tri_surf.clear();
1004  quad_surf.clear();
1005 
1006  switch(mesh.type[eidx]) {
1007  case Tri: {
1008  triple<T> tri;
1009  tri_sele<T> trie;
1010  insert_surf_tri(nod[0], nod[1], nod[2], ref_eidx[eidx], tri, trie, tri_surf);
1011  break;
1012  }
1013 
1014  case Quad: {
1015  quadruple<T> qd;
1016  quad_sele<T> qde;
1017  insert_surf_quad(nod[0], nod[1], nod[2], nod[3],
1018  ref_eidx[eidx], qbuff, qd, qde, quad_surf);
1019  break;
1020  }
1021 
1022  case Tetra:
1023  insert_surf_tet(nod, ref_eidx[eidx], tri_surf);
1024  break;
1025 
1026  case Pyramid:
1027  insert_surf_pyr(nod, ref_eidx[eidx], qbuff, tri_surf, quad_surf);
1028  break;
1029 
1030  case Prism:
1031  insert_surf_pri(nod, ref_eidx[eidx], qbuff, tri_surf, quad_surf);
1032  break;
1033 
1034  case Hexa:
1035  insert_surf_hex(nod, ref_eidx[eidx], qbuff, quad_surf);
1036  break;
1037 
1038  default: break;
1039  }
1040 
1041  for(auto it = tri_surf.begin(); it != tri_surf.end(); ++it) {
1042  auto ft = search_tri.find(it->first);
1043  if(ft != search_tri.end() && !found_tri.count(ft->first)) {
1044  // now we also look for the same orientation
1045  T iv1 = mesh.pl.localize(it->second.v1);
1046  T iv2 = mesh.pl.localize(it->second.v2);
1047  T iv3 = mesh.pl.localize(it->second.v3);
1048  SF::Point ip1 = {mesh.xyz[iv1*3+0], mesh.xyz[iv1*3+1], mesh.xyz[iv1*3+2]};
1049  SF::Point ip2 = {mesh.xyz[iv2*3+0], mesh.xyz[iv2*3+1], mesh.xyz[iv2*3+2]};
1050  SF::Point ip3 = {mesh.xyz[iv3*3+0], mesh.xyz[iv3*3+1], mesh.xyz[iv3*3+2]};
1051  SF::Point e12 = normalize(ip2 - ip1);
1052  SF::Point e13 = normalize(ip3 - ip1);
1053  SF::Point in = normalize(cross(e12, e13));
1054 
1055  T fv1 = mesh.pl.localize(ft->second.v1);
1056  T fv2 = mesh.pl.localize(ft->second.v2);
1057  T fv3 = mesh.pl.localize(ft->second.v3);
1058  SF::Point fp1 = {mesh.xyz[fv1*3+0], mesh.xyz[fv1*3+1], mesh.xyz[fv1*3+2]};
1059  SF::Point fp2 = {mesh.xyz[fv2*3+0], mesh.xyz[fv2*3+1], mesh.xyz[fv2*3+2]};
1060  SF::Point fp3 = {mesh.xyz[fv3*3+0], mesh.xyz[fv3*3+1], mesh.xyz[fv3*3+2]};
1061  e12 = normalize(fp2 - fp1);
1062  e13 = normalize(fp3 - fp1);
1063  SF::Point fn = normalize(cross(e12, e13));
1064 
1065  if (inner_prod(in, fn) > 0.9) {
1066  tri_sele<T> found = ft->second; found.eidx = eidx;
1067  found_tri[ft->first] = found;
1068  }
1069  }
1070  }
1071 
1072  for(auto it = quad_surf.begin(); it != quad_surf.end(); ++it) {
1073  auto ft = search_quad.find(it->first);
1074  if(ft != search_quad.end() && !found_quad.count(ft->first)) {
1075  // now we also look for the same orientation
1076  T iv1 = mesh.pl.localize(it->second.v1);
1077  T iv2 = mesh.pl.localize(it->second.v2);
1078  T iv3 = mesh.pl.localize(it->second.v3);
1079  SF::Point ip1 = {mesh.xyz[iv1*3+0], mesh.xyz[iv1*3+1], mesh.xyz[iv1*3+2]};
1080  SF::Point ip2 = {mesh.xyz[iv2*3+0], mesh.xyz[iv2*3+1], mesh.xyz[iv2*3+2]};
1081  SF::Point ip3 = {mesh.xyz[iv3*3+0], mesh.xyz[iv3*3+1], mesh.xyz[iv3*3+2]};
1082  SF::Point e12 = normalize(ip2 - ip1);
1083  SF::Point e13 = normalize(ip3 - ip1);
1084  SF::Point in = normalize(cross(e12, e13));
1085 
1086  T fv1 = mesh.pl.localize(ft->second.v1);
1087  T fv2 = mesh.pl.localize(ft->second.v2);
1088  T fv3 = mesh.pl.localize(ft->second.v3);
1089  SF::Point fp1 = {mesh.xyz[fv1*3+0], mesh.xyz[fv1*3+1], mesh.xyz[fv1*3+2]};
1090  SF::Point fp2 = {mesh.xyz[fv2*3+0], mesh.xyz[fv2*3+1], mesh.xyz[fv2*3+2]};
1091  SF::Point fp3 = {mesh.xyz[fv3*3+0], mesh.xyz[fv3*3+1], mesh.xyz[fv3*3+2]};
1092  e12 = normalize(fp2 - fp1);
1093  e13 = normalize(fp3 - fp1);
1094  SF::Point fn = normalize(cross(e12, e13));
1095 
1096  if (inner_prod(in, fn) > 0.9) {
1097  quad_sele<T> found = ft->second; found.eidx = eidx;
1098  found_quad[ft->first] = found;
1099  }
1100  }
1101  }
1102 
1103  nod += mesh.dsp[eidx+1] - mesh.dsp[eidx];
1104  }
1105 }
1106 
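The orientation test above compares the unit normals of the candidate face and the search face; a standalone sketch of the same computation (hypothetical coordinates, not part of this header):

inline bool example_same_orientation()
{
  // a triangle and a copy with flipped winding
  SF::Point p1 = {0.0, 0.0, 0.0}, p2 = {1.0, 0.0, 0.0}, p3 = {0.0, 1.0, 0.0};

  SF::Point e12 = SF::normalize(p2 - p1), e13 = SF::normalize(p3 - p1);
  SF::Point n_a = SF::normalize(SF::cross(e12, e13));   // normal of (p1,p2,p3)
  SF::Point n_b = SF::normalize(SF::cross(e13, e12));   // normal of (p1,p3,p2)

  // search_for_surface() accepts a face only if the unit normals are nearly parallel
  return SF::inner_prod(n_a, n_b) > 0.9;                // false here: the winding differs
}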
1107 template<class T, class S> inline
1108 void compute_surface(const meshdata<T,S> & mesh, const SF_nbr numbering,
1109  const hashmap::unordered_set<T> & tags,
1110  hashmap::unordered_map<triple<T>, tri_sele<T> > & tri_surf,
1111  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & quad_surf)
1112 {
1113  const T* con = mesh.con.data();
1114  const T* nbr = mesh.get_numbering(numbering).data();
1115 
1116  bool have_tags = tags.size() > 0;
1117 
1118  vector<T> nodvec(mesh.con.size()); T* nod = nodvec.data();
1119  for(size_t i=0; i<nodvec.size(); i++)
1120  nod[i] = nbr[con[i]];
1121 
1122  vector<T> qbuff(4); // buffer if quad nodes need to be sorted
1123 
1124  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++) {
1125  if(have_tags == false || tags.count(mesh.tag[eidx])) {
1126  switch(mesh.type[eidx]) {
1127  case Tri: {
1128  triple<T> tri;
1129  tri_sele<T> trie;
1130  insert_surf_tri(nod[0], nod[1], nod[2], eidx, tri, trie, tri_surf);
1131  break;
1132  }
1133 
1134  case Quad: {
1135  quadruple<T> qd;
1136  quad_sele<T> qde;
1137  insert_surf_quad(nod[0], nod[1], nod[2], nod[3],
1138  eidx, qbuff, qd, qde, quad_surf);
1139  break;
1140  }
1141 
1142  case Tetra:
1143  insert_surf_tet(nod, eidx, tri_surf);
1144  break;
1145 
1146  case Pyramid:
1147  insert_surf_pyr(nod, eidx, qbuff, tri_surf, quad_surf);
1148  break;
1149 
1150  case Prism:
1151  insert_surf_pri(nod, eidx, qbuff, tri_surf, quad_surf);
1152  break;
1153 
1154  case Hexa:
1155  insert_surf_hex(nod, eidx, qbuff, quad_surf);
1156  break;
1157 
1158  default: break;
1159  }
1160  }
1161 
1162  nod += mesh.dsp[eidx+1] - mesh.dsp[eidx];
1163  }
1164 }
1165 
1166 template<class T, class S> inline
1167 void compute_surface(const meshdata<T,S> & mesh, const SF_nbr numbering,
1168  hashmap::unordered_map<triple<T>, tri_sele<T> > & tri_surf,
1169  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & quad_surf)
1170 {
1171  hashmap::unordered_set<T> tags;
1172  compute_surface(mesh, numbering, tags, tri_surf, quad_surf);
1173 }
1174 
1175 template<class V> inline
1176 void get_hashmap_duplicates(const vector<V> & data, const MPI_Comm comm,
1177  vector<bool> & is_dup)
1178 {
1179  int size, rank;
1180  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
1181 
1182  size_t dsize = data.size();
1183  is_dup.assign(dsize, false);
1184 
1185  vector<int> perm, dest(dsize);
1186 
1187  // set up destinations
1188  for(size_t i=0; i<dsize; i++)
1189  dest[i] = hashmap::hash_ops<V>::hash(data[i]) % size;
1190 
1191  commgraph<size_t> grph;
1192  grph.configure(dest, comm);
1193  size_t nrecv = sum(grph.rcnt);
1194 
1195  interval(perm, 0, dsize);
1196  binary_sort_copy(dest, perm);
1197 
1198  // fill send buffer and communicate
1199  vector<V> sbuff(dsize), rbuff(nrecv);
1200  for(size_t i=0; i<dsize; i++)
1201  sbuff[i] = data[perm[i]];
1202  MPI_Exchange(grph, sbuff, rbuff, comm);
1203 
1204  // now check duplicates. values occurring multiple times should go into dup, those occurring
1205  // only once go into not_dup
1206  hashmap::unordered_set<V> not_dup, dup;
1207  vector<bool> cdup(rbuff.size());
1208 
1209  for(const V & val : rbuff) {
1210  if(not_dup.count(val) == 0 && dup.count(val) == 0)
1211  not_dup.insert(val);
1212  else {
1213  not_dup.erase(val);
1214  dup.insert(val);
1215  }
1216  }
1217 
1218  for(size_t i=0; i<nrecv; i++) {
1219  V val = rbuff[i];
1220  bool d = dup.count(val);
1221 
1222  // just a debug check
1223  assert(d == (not_dup.count(val) == 0));
1224 
1225  cdup[i] = d;
1226  }
1227 
1228  // now we communicate cdup back
1229  grph.transpose();
1230  MPI_Exchange(grph, cdup, is_dup, comm);
1231 
1232  // permute is_dup back to input permutation
1233  binary_sort_copy(perm, is_dup);
1234 }
1235 
1237 template<class K, class V> inline
1238 void remove_duplicates(hashmap::unordered_map<K,V> & map, const MPI_Comm comm)
1239 {
1240  vector<K> check_vec (map.size());
1241  vector<bool> is_dup (map.size());
1242 
1243  size_t idx=0;
1244  for(const auto & v : map) check_vec[idx++] = v.first;
1245 
1246  get_hashmap_duplicates(check_vec, comm, is_dup);
1247 
1248  for(size_t i=0; i<is_dup.size(); i++)
1249  if(is_dup[i])
1250  map.erase(check_vec[i]);
1251 }
1253 template<class K> inline
1254 void remove_duplicates(hashmap::unordered_set<K> & set, const MPI_Comm comm)
1255 {
1256  vector<K> check_vec (set.size());
1257  vector<bool> is_dup (set.size());
1258 
1259  check_vec.assign(set.begin(), set.end());
1260  get_hashmap_duplicates(check_vec, comm, is_dup);
1261 
1262  for(size_t i=0; i<is_dup.size(); i++)
1263  if(is_dup[i])
1264  set.erase(check_vec[i]);
1265 }
1266 
1267 template<class T> inline
1268 void remove_parallel_duplicates(hashmap::unordered_map<triple<T>, tri_sele<T>> & tri_surf,
1269  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & quad_surf,
1270  MPI_Comm comm)
1271 {
1272  long int g_num_tri = tri_surf.size();
1273  long int g_num_quad = quad_surf.size();
1274  MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
1275  MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
1276 
1277  if(g_num_tri)
1278  remove_duplicates(tri_surf, comm);
1279  if(g_num_quad)
1280  remove_duplicates(quad_surf, comm);
1281 }
1282 
1283 
1284 template<class T, class S> inline
1285 void convert_surface_mesh(hashmap::unordered_map<triple<T>, tri_sele<T>> & tri_surf,
1286  hashmap::unordered_map<quadruple<T>, quad_sele<T> > & quad_surf,
1287  meshdata<T,S> & surfmesh,
1288  vector<T> & elem_orig)
1289 {
1290  MPI_Comm comm = surfmesh.comm;
1291 
1292  long int g_num_tri = tri_surf.size();
1293  long int g_num_quad = quad_surf.size();
1294  MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
1295  MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
1296 
1297  surfmesh.g_numelem = g_num_tri + g_num_quad;
1298  surfmesh.l_numelem = tri_surf.size() + quad_surf.size();
1299 
1300  vector<T> cnt(surfmesh.l_numelem);
1301  surfmesh.type.resize(surfmesh.l_numelem);
1302  surfmesh.con.resize(tri_surf.size() * 3 + quad_surf.size() * 4);
1303  elem_orig.resize(surfmesh.l_numelem);
1304 
1305  size_t idx = 0, cidx = 0;
1306  for(const auto & v : tri_surf) {
1307  cnt[idx] = 3;
1308  surfmesh.type[idx] = Tri;
1309  surfmesh.con[cidx + 0] = v.second.v1;
1310  surfmesh.con[cidx + 1] = v.second.v2;
1311  surfmesh.con[cidx + 2] = v.second.v3;
1312 
1313  elem_orig[idx] = v.second.eidx;
1314 
1315  idx += 1;
1316  cidx += 3;
1317  }
1318 
1319  for(const auto & v : quad_surf) {
1320  cnt[idx] = 4;
1321  surfmesh.type[idx] = Quad;
1322  surfmesh.con[cidx + 0] = v.second.v1;
1323  surfmesh.con[cidx + 1] = v.second.v2;
1324  surfmesh.con[cidx + 2] = v.second.v3;
1325  surfmesh.con[cidx + 3] = v.second.v4;
1326 
1327  elem_orig[idx] = v.second.eidx;
1328 
1329  idx += 1;
1330  cidx += 4;
1331  }
1332 
1333  dsp_from_cnt(cnt, surfmesh.dsp);
1334 }
1335 
1340 template<class T, class S> inline
1341 void convert_mesh_surface(const meshdata<T,S> & surfmesh,
1342  hashmap::unordered_map<triple<T>, tri_sele<T>> & tri_surf,
1343  hashmap::unordered_map<quadruple<T>, quad_sele<T>> & quad_surf)
1344 {
1345  vector<T> qbuff(4); // buffer if quad nodes need to be sorted
1346  quadruple<T> qd; quad_sele<T> qde;
1347  triple<T> tri; tri_sele<T> trie;
1348 
1349  for(size_t eidx=0; eidx<surfmesh.l_numelem; eidx++) {
1350  int esize = surfmesh.dsp[eidx+1] - surfmesh.dsp[eidx];
1351 
1352  const T* nod = surfmesh.con.data() + surfmesh.dsp[eidx];
1353  if(esize == 4) {
1354  insert_surf_quad(nod[0], nod[1], nod[2], nod[3], eidx, qbuff, qd, qde, quad_surf);
1355  }
1356  else {
1357  insert_surf_tri(nod[0], nod[1], nod[2], eidx, tri, trie, tri_surf);
1358  }
1359  }
1360 }
1361 
1372 template<class T, class S> inline
1373 void compute_surface_mesh(const meshdata<T,S> & mesh, const SF_nbr numbering,
1374  const hashmap::unordered_set<T> & tags,
1375  meshdata<T,S> & surfmesh)
1376 {
1377  hashmap::unordered_map<triple<T>, tri_sele<T> > tri_surf;
1378  hashmap::unordered_map<quadruple<T>, quad_sele<T> > quad_surf;
1379  vector<T> elem_orig; // local element indices of "mesh"
1380 
1381  compute_surface(mesh, numbering, tags, tri_surf, quad_surf);
1382 
1383  surfmesh.comm = mesh.comm;
1384  remove_parallel_duplicates(tri_surf, quad_surf, surfmesh.comm);
1385  convert_surface_mesh(tri_surf, quad_surf, surfmesh, elem_orig);
1386 
1387  surfmesh.tag.resize(size_t(surfmesh.l_numelem));
1388  surfmesh.fib.resize(size_t(surfmesh.l_numelem * 3));
1389 
1390  if(mesh.she.size())
1391  surfmesh.she.resize(surfmesh.l_numelem * 3);
1392 
1393  for(size_t eidx = 0; eidx < elem_orig.size(); eidx++) {
1394  T orig = elem_orig[eidx];
1395 
1396  surfmesh.tag[eidx] = mesh.tag[orig];
1397  surfmesh.fib[eidx*3+0] = mesh.fib[orig*3+0];
1398  surfmesh.fib[eidx*3+1] = mesh.fib[orig*3+1];
1399  surfmesh.fib[eidx*3+2] = mesh.fib[orig*3+2];
1400 
1401  if(mesh.she.size()) {
1402  surfmesh.she[eidx*3+0] = mesh.she[orig*3+0];
1403  surfmesh.she[eidx*3+1] = mesh.she[orig*3+1];
1404  surfmesh.she[eidx*3+2] = mesh.she[orig*3+2];
1405  }
1406  }
1407 }
1408 
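Usage sketch (same assumptions as the other examples; an empty tag set means all elements are considered): compute the boundary surface of a distributed mesh.

inline void example_compute_surface(const SF::meshdata<int, double> & mesh,
                                    SF::meshdata<int, double> & surfmesh)
{
  hashmap::unordered_set<int> tags;       // empty: take all element tags into account
  SF::compute_surface_mesh(mesh, SF::NBR_REF, tags, surfmesh);
  // surfmesh now holds the Tri/Quad faces that belong to exactly one element globally
}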
1418 template<class T, class S> inline
1419 void compute_surface_mesh(const meshdata<T,S> & mesh, const SF_nbr numbering,
1420  meshdata<T,S> & surfmesh)
1421 {
1422  hashmap::unordered_set<T> tags;
1423  compute_surface_mesh(mesh, numbering, tags, surfmesh);
1424 }
1425 
1426 template<class T, class S> inline
1427 void read_surface_mesh(const meshdata<T,S> & mesh,
1428  meshdata<T,S> & surface,
1429  std::string filename)
1430 {
1431  int rank; MPI_Comm_rank(mesh.comm, &rank);
1432  FILE *fd = NULL;
1433 
1434  size_t numele = 0;
1435  size_t numcon = 0;
1436  bool twoFib = false;
1437  bool read_bin = false;
1438 
1439  if(rank == 0)
1440  {
1441  fd = fopen(filename.c_str(), "r");
1442  if(fd)
1443  SF::read_headers(fd, NULL, read_bin, numele, twoFib);
1444  }
1445  MPI_Bcast(&numele, sizeof(size_t), MPI_BYTE, 0, mesh.comm);
1446  surface.g_numelem = numele;
1447 
1448  // We read the whole surface at once; this could be rewritten into multiple reads to
1449  // save memory
1450  if(rank == 0) {
1451  SF::read_elem_block(fd, read_bin, 0, numele, surface);
1452 
1453  numcon = surface.con.size();
1454 
1455  if(numele != surface.l_numelem) {
1456  std::cerr << "Error: Incomplete surface file! Aborting!" << std::endl;
1457  fclose(fd);
1458  exit(1);
1459  }
1460  }
1461 
1462  surface.l_numelem = numele;
1463 
1464  surface.tag.resize(surface.l_numelem);
1465  MPI_Bcast(surface.tag.data(), surface.tag.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
1466 
1467  surface.dsp.resize(surface.l_numelem + 1);
1468  MPI_Bcast(surface.dsp.data(), surface.dsp.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
1469 
1470  surface.type.resize(surface.l_numelem);
1471  MPI_Bcast(surface.type.data(), surface.type.size()*sizeof(elem_t), MPI_BYTE, 0, mesh.comm);
1472 
1473  vector<T> & ref_eidx = surface.register_numbering(NBR_ELEM_REF);
1474  ref_eidx.resize(surface.l_numelem);
1475 
1476  MPI_Bcast(ref_eidx.data(), surface.l_numelem*sizeof(T), MPI_BYTE, 0, mesh.comm);
1477 
1478  MPI_Bcast(&numcon, sizeof(size_t), MPI_BYTE, 0, mesh.comm);
1479  surface.con.resize(numcon);
1480  MPI_Bcast(surface.con.data(), surface.con.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
1481 
1482  // now we have to restrict the surface elements to those in the local partition
1483  {
1484  size_t initial_gnumelem = surface.g_numelem;
1485 
1486  hashmap::unordered_map<triple<T>, tri_sele<T>> search_tri, found_tri;
1487  hashmap::unordered_map<quadruple<T>, quad_sele<T>> search_quad, found_quad;
1488 
1489  convert_mesh_surface(surface, search_tri, search_quad);
1490  search_for_surface (mesh, SF::NBR_REF, search_tri, search_quad, found_tri, found_quad);
1491 
1492  vector<T> elem_orig;
1493  convert_surface_mesh(found_tri, found_quad, surface, elem_orig);
1494  mesh.pl.localize(surface.con);
1495 
1496  surface.tag.resize(size_t(surface.l_numelem));
1497  surface.fib.resize(size_t(surface.l_numelem * 3));
1498 
1499  if(mesh.she.size())
1500  surface.she.resize(surface.l_numelem * 3);
1501 
1502  for(size_t eidx = 0; eidx < elem_orig.size(); eidx++) {
1503  T orig = elem_orig[eidx];
1504 
1505  surface.tag[eidx] = mesh.tag[orig];
1506  surface.fib[eidx*3+0] = mesh.fib[orig*3+0];
1507  surface.fib[eidx*3+1] = mesh.fib[orig*3+1];
1508  surface.fib[eidx*3+2] = mesh.fib[orig*3+2];
1509 
1510  if(mesh.she.size()) {
1511  surface.she[eidx*3+0] = mesh.she[orig*3+0];
1512  surface.she[eidx*3+1] = mesh.she[orig*3+1];
1513  surface.she[eidx*3+2] = mesh.she[orig*3+2];
1514  }
1515  }
1516 
1517  // we want to remember the original g_numelem, so that the next consistency check is
1518  // actually useful.
1519  surface.g_numelem = initial_gnumelem;
1520  }
1521 
1522  {
1523  long int numele_check = surface.l_numelem;
1524  MPI_Allreduce(MPI_IN_PLACE, &numele_check, 1, MPI_LONG, MPI_SUM, mesh.comm);
1525  if(numele_check != surface.g_numelem) {
1526  if(rank == 0)
1527  fprintf(stderr, "ERROR: Bad partitioning of surface %s!"
1528  " Global elem sum should be %ld, but is %ld!\n\n",
1529  filename.c_str(), surface.g_numelem, numele_check);
1530  }
1531  }
1532 
1533  // copy indexing from parent mesh
1534  SF::vector<T> & nod_ref = surface.register_numbering(SF::NBR_REF);
1535  SF::vector<T> & nod_petsc = surface.register_numbering(SF::NBR_PETSC);
1536  nod_ref = mesh.get_numbering(SF::NBR_REF);
1537  nod_petsc = mesh.get_numbering(SF::NBR_PETSC);
1538 
1539  // copy coords from parent mesh
1540  surface.l_numpts = mesh.l_numpts;
1541  surface.g_numpts = mesh.g_numpts;
1542  surface.xyz = mesh.xyz;
1543 }
1544 
1545 template<class T>
1546 inline void restrict_to_set(vector<T> & v, const hashmap::unordered_set<T> & set)
1547 {
1548  size_t widx = 0;
1549 
1550  for(size_t i=0; i<v.size(); i++)
1551  if(set.count(v[i]))
1552  v[widx++] = v[i];
1553 
1554  v.resize(widx);
1555 }
1556 template<class T>
1557 inline void restrict_to_set(vector<T> & v, const vector<T> & vset)
1558 {
1559  hashmap::unordered_set<T> set;
1560  set.insert(vset.begin(), vset.end());
1561 
1562  restrict_to_set(v, set);
1563 }
1564 
1565 }
1566 
1567 #endif
Basic containers.
void sort_triple(const T in1, const T in2, const T in3, T &out1, T &out2, T &out3)
sort the "in" triple into the "out" triple
Definition: SF_container.h:881
Functions related to mesh IO.
Functions related to network communication.
Classes related to mesh node renumbering.
Classes and algorithms related to the layout of distributed meshes.
Various sorting algorithms.
The vector class and related algorithms.
Class used to plot functions on the terminal.
The class holds the communication graph for an MPI_Exchange() call.
Definition: SF_container.h:626
vector< T > rcnt
Number of elements received from each rank.
Definition: SF_container.h:630
vector< T > scnt
Number of elements sent to each rank.
Definition: SF_container.h:628
void resize(size_t size)
Resize all vectors to size.
Definition: SF_container.h:634
void configure(const vector< V > &dest, MPI_Comm comm)
Set up the communication graph.
Definition: SF_container.h:667
vector< T > sdsp
Displacements w.r.t. scnt.
Definition: SF_container.h:629
vector< T > rdsp
Displacements w.r.t. rcnt.
Definition: SF_container.h:631
void transpose()
transpose comm graph (receive becomes send, and vice versa)
Definition: SF_container.h:651
void scale(V fac)
scale comm graph layout data
Definition: SF_container.h:641
Index mapping class. This is a bijective mapping.
Definition: SF_container.h:208
void assign(const vector< T > &a, const vector< T > &b)
Set up the index mapping between a and b.
Definition: SF_container.h:235
The mesh storage class. It contains both element and vertex data.
Definition: SF_container.h:384
overlapping_layout< T > pl
nodal parallel layout
Definition: SF_container.h:417
size_t g_numpts
global number of points
Definition: SF_container.h:388
vector< T > dsp
connectivity starting index of each element
Definition: SF_container.h:404
vector< S > she
sheet direction
Definition: SF_container.h:409
vector< S > fib
fiber direction
Definition: SF_container.h:408
size_t l_numelem
local number of elements
Definition: SF_container.h:387
vector< elem_t > type
element type
Definition: SF_container.h:406
std::map< SF_nbr, vector< T > > nbr
container for different numberings
Definition: SF_container.h:414
std::string name
the mesh name
Definition: SF_container.h:395
vector< T > & register_numbering(SF_nbr nbr_type)
Register a new numbering to the mesh and return the associated index vector.
Definition: SF_container.h:432
vector< T > con
Definition: SF_container.h:400
vector< S > xyz
node coordinates
Definition: SF_container.h:415
size_t l_numpts
local number of points
Definition: SF_container.h:389
size_t g_numelem
global number of elements
Definition: SF_container.h:386
void localize(SF_nbr nbr_type)
Localize the connectivity data w.r.t. a given numbering.
Definition: SF_container.h:484
MPI_Comm comm
the parallel mesh is defined on a MPI world
Definition: SF_container.h:392
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
Definition: SF_container.h:452
vector< T > tag
element tag
Definition: SF_container.h:405
hashmap::unordered_set< int > extr_tag
the element tags based on which the mesh has been extracted
Definition: SF_container.h:412
The abstract numbering class.
Definition: SF_numbering.h:48
Functor class applying a submesh renumbering.
Definition: SF_numbering.h:68
A vector storing arbitrary data.
Definition: SF_vector.h:43
size_t size() const
The current size of the vector.
Definition: SF_vector.h:104
void resize(size_t n)
Resize a vector.
Definition: SF_vector.h:209
const T * end() const
Pointer to the vector's end.
Definition: SF_vector.h:128
void assign(InputIterator s, InputIterator e)
Assign a memory range.
Definition: SF_vector.h:161
const T * begin() const
Pointer to the vector's start.
Definition: SF_vector.h:116
T * data()
Pointer to the vector's start.
Definition: SF_vector.h:91
iterator find(const K &key)
Search for key. Return iterator.
Definition: hashmap.hpp:593
hm_int count(const K &key) const
Check if key exists.
Definition: hashmap.hpp:579
size_t size() const
Definition: hashmap.hpp:687
hm_int erase(const K &key)
Erase by key.
Definition: hashmap.hpp:565
Custom unordered_set implementation.
Definition: hashmap.hpp:706
size_t size() const
Definition: hashmap.hpp:1066
hm_int erase(const K &key)
Definition: hashmap.hpp:978
hm_int count(const K &key) const
Definition: hashmap.hpp:992
void insert(InputIterator first, InputIterator last)
Definition: hashmap.hpp:962
Ascii matrix graph plotter.
void print(const VEC &cnt, const VEC &col, char s)
Print a matrix graph to stdout.
Definition: dense_mat.hpp:34
void extract_tagbased(const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh based on element tags.
void cnt_from_dsp(const vector< T > &dsp, vector< T > &cnt)
Compute counts from displacements.
Definition: SF_vector.h:319
void dsp_from_cnt(const vector< T > &cnt, vector< T > &dsp)
Compute displacements from counts.
Definition: SF_vector.h:310
void compute_surface_mesh(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, meshdata< T, S > &surfmesh)
Compute the surface of a given mesh.
void print_DD_info(const meshdata< T, S > &mesh)
Print some basic information on the domain decomposition of a mesh.
void get_hashmap_duplicates(const vector< V > &data, const MPI_Comm comm, vector< bool > &is_dup)
void write_pts_block(FILE *&fd, bool write_binary, const vector< S > &xyz)
Write a chunk of points to a file.
Definition: SF_mesh_io.h:817
void read_surface_mesh(const meshdata< T, S > &mesh, meshdata< T, S > &surface, std::string filename)
void interval(vector< T > &vec, size_t start, size_t end)
Create an integer interval between start and end.
Definition: SF_vector.h:350
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
Definition: SF_network.h:225
void unique_accumulate(vector< T > &_P, vector< S > &_A)
Definition: SF_sort.h:409
void convert_surface_mesh(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, meshdata< T, S > &surfmesh, vector< T > &elem_orig)
void rebalance_mesh(meshdata< T, S > &mesh)
Rebalance the parallel distribution of a mesh, if a local size is 0.
void read_headers(FILE *ele_fd, FILE *fib_fd, bool read_binary, size_t &numelem, bool &twoFib)
Read the header from the element and fiber files.
Definition: SF_mesh_io.h:90
void extract_mesh(const vector< bool > &keep, const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh from a given mesh.
void search_for_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_map< triple< T >, tri_sele< T > > &search_tri, const hashmap::unordered_map< quadruple< T >, quad_sele< T > > &search_quad, hashmap::unordered_map< triple< T >, tri_sele< T > > &found_tri, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &found_quad)
void permute_mesh(const meshdata< T, S > &inmesh, meshdata< T, S > &outmesh, const vector< T > &perm)
Permute the element data of a mesh based on a given permutation.
Definition: SF_mesh_utils.h:56
double inner_prod(const Point &a, const Point &b)
Definition: SF_container.h:90
void sort_parallel(MPI_Comm comm, const vector< T > &idx, vector< T > &out_idx)
Sort index values parallel ascending across the ranks.
void write_elements(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write the element data (elements and fibers) of a CARP mesh.
Definition: SF_mesh_io.h:674
void binary_sort_copy(vector< T > &_V, vector< S > &_W)
Definition: SF_sort.h:296
T sum(const vector< T > &vec)
Compute sum of a vector's entries.
Definition: SF_vector.h:340
void insert_surf_pyr(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void unique_resize(vector< T > &_P)
Definition: SF_sort.h:348
void count(const vector< T > &data, vector< S > &cnt)
Count number of occurrences of indices.
Definition: SF_vector.h:332
void write_pts_header(FILE *&pts_fd, bool binary, size_t numpts)
Write the header of the points file.
Definition: SF_mesh_io.h:183
T global_min(const vector< T > &vec, MPI_Comm comm)
Compute the global minimum of a distributed vector.
Definition: SF_network.h:126
void read_elem_block(FILE *&fd, bool read_binary, size_t bstart, size_t bsize, meshdata< T, S > &mesh)
Read a block of size bsize from an CARP element file.
Definition: SF_mesh_io.h:210
void insert_surf_hex(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_elements(meshdata< T, S > &mesh, meshdata< T, S > &sendbuff, vector< T > &part)
Redistribute the element data of a parallel mesh among the ranks based on a partitioning.
Point normalize(const Point &vect)
Definition: SF_container.h:116
void gather_mesh(const meshdata< T, S > &locmesh, meshdata< T, S > &globmesh)
Gather a mesh on rank 0.
void restrict_to_set(vector< T > &v, const hashmap::unordered_set< T > &set)
void compute_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, hashmap::unordered_map< triple< T >, tri_sele< T > > &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf)
void global_to_local(const vector< T > &glob, vector< T > &data, bool sortedData, bool doWarn)
Definition: SF_sort.h:540
void insert_surf_pri(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void vec_assign(S *lhs, const V *rhs, size_t size)
Assign the values in rhs to lhs. The data-type of rhs is cast to the type of lhs.
Definition: SF_vector.h:371
void MPI_Exchange(commgraph< T > &grph, vector< S > &send, vector< S > &recv, MPI_Comm comm)
Exchange data in parallel over MPI.
Definition: SF_network.h:47
void insert_surf_tri(T n1, T n2, T n3, size_t eidx, triple< T > &surf, tri_sele< T > &sele, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)
void remove_parallel_duplicates(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, MPI_Comm comm)
void convert_mesh_surface(const meshdata< T, S > &surfmesh, hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T >> &quad_surf)
void binary_sort(vector< T > &_V)
Definition: SF_sort.h:284
T global_max(const vector< T > &vec, MPI_Comm comm)
Compute the global maximum of a distributed vector.
Definition: SF_network.h:156
Point cross(const Point &a, const Point &b)
cross product
Definition: SF_container.h:84
void write_mesh_parallel(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write a parallel mesh to harddisk without gathering it on one rank.
elem_t
element type enum
Definition: SF_container.h:53
@ Tri
Definition: SF_container.h:60
@ Prism
Definition: SF_container.h:58
@ Pyramid
Definition: SF_container.h:57
@ Tetra
Definition: SF_container.h:54
@ Quad
Definition: SF_container.h:59
@ Hexa
Definition: SF_container.h:55
void insert_surf_quad(T n1, T n2, T n3, T n4, size_t eidx, vector< T > &buff, quadruple< T > &surf, quad_sele< T > &sele, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_mesh(meshdata< T, S > &mesh, vector< T > &part)
Redistribute both element and vertex data of a mesh.
void print_mesh_graph(meshdata< T, S > &mesh)
One-by-one each process prints the graph of a given mesh.
void inter_domain_mapping(const meshdata< T, S > &mesh_a, const meshdata< T, S > &mesh_b, const SF_nbr snbr, index_mapping< T > &a_to_b)
Submesh index mapping between different domains/meshes.
void nodal_connectivity_graph(const meshdata< T, S > &mesh, vector< T > &n2n_cnt, vector< T > &n2n_con)
Compute the node-to-node connectivity.
Definition: SF_container.h:571
void extract_myocardium(const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract the myocardium submesh.
SF_nbr
Enumeration encoding the different supported numberings.
Definition: SF_container.h:188
@ NBR_PETSC
PETSc numbering of nodes.
Definition: SF_container.h:191
@ NBR_ELEM_REF
The element numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:192
@ NBR_REF
The nodal numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:189
@ NBR_SUBMESH
Submesh nodal numbering: The globally ascending sorted reference indices are reindexed.
Definition: SF_container.h:190
void remove_duplicates(hashmap::unordered_map< K, V > &map, const MPI_Comm comm)
remove parallel duplicates from a hashmap::unordered_map
void insert_surf_tet(const T *nod, const size_t eidx, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)
Point and vector struct.
Definition: SF_container.h:65
Base hashing class.
Definition: hashmap.hpp:88