openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
SF_mesh_utils.h
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // openCARP is an open cardiac electrophysiology simulator.
3 //
4 // Copyright (C) 2020 openCARP project
5 //
6 // This program is licensed under the openCARP Academic Public License (APL)
7 // v1.0: You can use and redistribute it and/or modify it in non-commercial
8 // academic environments under the terms of APL as published by the openCARP
9 // project v1.0, or (at your option) any later version. Commercial use requires
10 // a commercial license (info@opencarp.org).
11 //
12 // This program is distributed without any warranty; see the openCARP APL for
13 // more details.
14 //
15 // You should have received a copy of the openCARP APL along with this program
16 // and can find it online: http://www.opencarp.org/license
17 // ----------------------------------------------------------------------------
18 
27 #ifndef _SF_MESH_UTILS_H
28 #define _SF_MESH_UTILS_H
29 
30 #include <algorithm>
31 #include <iostream>
32 #include <string>
33 
34 #include "asciiPlotter.hpp"
35 
36 #include "SF_container.h"
37 #include "SF_network.h"
38 #include "SF_parallel_layout.h"
39 #include "SF_numbering.h"
40 #include "SF_sort.h"
41 #include "SF_mesh_io.h"
42 #include "SF_vector.h"
43 
44 namespace SF {
45 
46 
55 template<class T, class S>
56 inline void permute_mesh(const meshdata<T, S> & inmesh, meshdata<T, S> & outmesh, const vector<T> & perm)
57 {
58  assert(perm.size() == inmesh.l_numelem);
59 
60  outmesh.comm = inmesh.comm;
61 
62  outmesh.g_numelem = inmesh.g_numelem;
63  outmesh.l_numelem = inmesh.l_numelem;
64 
65  // copy numberings
66  outmesh.nbr = inmesh.nbr;
67 
68  const vector<T> & ref_eidx_in = inmesh.get_numbering(NBR_ELEM_REF);
69  vector<T> & ref_eidx_out = outmesh.get_numbering(NBR_ELEM_REF);
70 
71  size_t numelem = outmesh.l_numelem, numcon = inmesh.con.size();
72  int nFib = 0;
73  if(inmesh.fib.size() > 0) {
74  nFib = 1;
75  if(inmesh.she.size() == inmesh.fib.size())
76  nFib = 2;
77  }
78 
79  outmesh.dsp .resize(numelem+1);
80  outmesh.tag .resize(numelem);
81  outmesh.type.resize(numelem);
82  if(nFib>=1)
83  outmesh.fib .resize(numelem*3);
84  if(nFib==2)
85  outmesh.she .resize(numelem*3);
86 
87  outmesh.con.resize(numcon);
88 
89  vector<T> cnt(numelem);
90  T* elem = outmesh.con.data();
91 
92  for(size_t i=0; i<numelem; i++) {
93  outmesh.tag[i] = inmesh.tag[perm[i]];
94  ref_eidx_out[i] = ref_eidx_in[perm[i]];
95  outmesh.type[i] = inmesh.type[perm[i]];
96 
97  if(nFib>=1) {
98  outmesh.fib[i*3+0] = inmesh.fib[perm[i]*3+0];
99  outmesh.fib[i*3+1] = inmesh.fib[perm[i]*3+1];
100  outmesh.fib[i*3+2] = inmesh.fib[perm[i]*3+2];
101  }
102 
103  if(nFib==2) {
104  outmesh.she[i*3+0] = inmesh.she[perm[i]*3+0];
105  outmesh.she[i*3+1] = inmesh.she[perm[i]*3+1];
106  outmesh.she[i*3+2] = inmesh.she[perm[i]*3+2];
107  }
108 
109  int esize = inmesh.dsp[perm[i]+1] - inmesh.dsp[perm[i]];
110  cnt[i] = esize;
111 
112  T estart = inmesh.dsp[perm[i]];
113  for(int j=0; j<esize; j++) elem[j] = inmesh.con[estart+j];
114  elem += esize;
115  }
116 
117  dsp_from_cnt(cnt, outmesh.dsp);
118 
119  // check consistency
120  assert(inmesh.dsp[inmesh.l_numelem] == outmesh.dsp[outmesh.l_numelem]);
121 }
122 
/**
* @brief Redistribute the elements of a mesh across the ranks of its communicator.
*
* Element i is sent to rank part[i]. Tags, types, element reference numbering,
* fiber/sheet data and connectivity are exchanged with MPI. The connectivity is
* communicated in global reference indexing and localized again on the
* receiving side. Collective over mesh.comm.
*
* @param mesh     [in,out] The mesh to redistribute; holds the received elements on return.
* @param sendbuff [out]    Scratch mesh holding the permuted send-side element data.
* @param part     [in,out] Target rank of each local element; sorted ascending on return.
*/
template<class T, class S>
inline void redistribute_elements(meshdata<T, S> & mesh, meshdata<T, S> & sendbuff, vector<T> & part)
{
  MPI_Comm comm = mesh.comm;

  int size, rank;
  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

  // first, we reorder the elements into contiguous blocks so we can
  // communicate them more easily
  vector<T> perm;
  interval(perm, 0, part.size());
  binary_sort_copy(part, perm);  // sorts part, applying the same swaps to perm

  permute_mesh(mesh, sendbuff, perm);

  // now we set up the communication graphs (i.e. cnt and dsp for sending and receiving)
  commgraph<size_t> elem_grph, con_grph;
  // configure element exchange with the partitioning information.
  elem_grph.configure(part, comm);

  // configure connectivity exchange based on element exchange: the connectivity
  // block destined for rank i spans the dsp range of the elements sent to rank i
  con_grph.resize(size);
  con_grph.sdsp[0] = 0;
  for(int i=0; i<size; i++) con_grph.sdsp[i+1] = sendbuff.dsp[elem_grph.sdsp[i+1]];
  cnt_from_dsp(con_grph.sdsp, con_grph.scnt);
  MPI_Alltoall(con_grph.scnt.data(), sizeof(size_t), MPI_BYTE, con_grph.rcnt.data(), sizeof(size_t), MPI_BYTE, comm);
  dsp_from_cnt(con_grph.rcnt, con_grph.rdsp);

  // fiber setup: 0 = no fibers, 1 = fibers only, 2 = fibers + sheets.
  // a global max reduction keeps the choice consistent on ranks with 0 local elements
  int nFib=0;
  if((sendbuff.l_numelem > 0) && (sendbuff.fib.size() == sendbuff.l_numelem*3))
    nFib = 1;
  if((sendbuff.l_numelem > 0) && (sendbuff.she.size() == sendbuff.l_numelem*3))
    nFib = 2;
  MPI_Allreduce(MPI_IN_PLACE, &nFib, 1, MPI_INT, MPI_MAX, comm);

  // resize meshdata to fit received data
  vector<T> & ref_eidx = mesh.get_numbering(NBR_ELEM_REF);
  vector<T> & ref_eidx_sbuff = sendbuff.get_numbering(NBR_ELEM_REF);

  size_t recv_size = sum(elem_grph.rcnt);
  mesh.l_numelem = recv_size;
  mesh.dsp .resize(recv_size+1);
  mesh.tag .resize(recv_size);
  ref_eidx.resize(recv_size);
  mesh.type.resize(recv_size);
  if(nFib>=1)
    mesh.fib .resize(recv_size*3);
  if(nFib==2)
    mesh.she .resize(recv_size*3);

  // we need auxiliary mesh counts (nodes per element) to rebuild mesh.dsp
  vector<T> sendmesh_cnt(sendbuff.l_numelem), mesh_cnt(mesh.l_numelem);
  cnt_from_dsp(sendbuff.dsp, sendmesh_cnt);

  MPI_Exchange(elem_grph, sendmesh_cnt, mesh_cnt, comm);
  dsp_from_cnt(mesh_cnt, mesh.dsp);

  MPI_Exchange(elem_grph, sendbuff.tag, mesh.tag, comm);
  MPI_Exchange(elem_grph, ref_eidx_sbuff, ref_eidx, comm);
  MPI_Exchange(elem_grph, sendbuff.type, mesh.type, comm);

  elem_grph.scale(3); // fiber data has three values per element
  if(nFib >= 1)
    MPI_Exchange(elem_grph, sendbuff.fib, mesh.fib, comm);
  if(nFib >= 2)
    MPI_Exchange(elem_grph, sendbuff.she, mesh.she, comm);

  // the only thing left is to communicate connectivity
  {
    // we map the sendbuff connectivity to global reference indexing
    vector<T> & rnod = sendbuff.get_numbering(NBR_REF);
    for(size_t i=0; i<sendbuff.con.size(); i++) sendbuff.con[i] = rnod[sendbuff.con[i]];
  }
  recv_size = sum(con_grph.rcnt);
  mesh.con.resize(recv_size);
  MPI_Exchange(con_grph, sendbuff.con, mesh.con, comm);

  // map the received global reference connectivity back to local indexing
  mesh.localize(NBR_REF);
}
219 template<class T, class S>
220 inline void redistribute_elements(meshdata<T, S> & mesh, vector<T> & part)
221 {
222  meshdata<T, S> sendbuff;
223  redistribute_elements(mesh, sendbuff, part);
224 }
225 
/**
* @brief Redistribute a full mesh (elements and vertex coordinates) according to a partitioning.
*
* Element i is sent to rank part[i] via redistribute_elements(). Afterwards the
* unique node set required by each destination rank is computed and the
* corresponding coordinates are communicated and deduplicated.
* Collective over mesh.comm.
*
* @param mesh [in,out] The mesh to redistribute.
* @param part [in,out] Target rank of each local element; sorted ascending on return.
*/
template<class T, class S>
inline void redistribute_mesh(meshdata<T, S> & mesh, vector<T> & part)
{
  // the main idea is to compute the unique node set of the element block we send to
  // each process. Therefore we know which coordinates to communicate.

  meshdata<T, S> sendmesh;
  redistribute_elements(mesh, sendmesh, part);
  sendmesh.xyz.assign(mesh.xyz.begin(), mesh.xyz.end());
  // part and sendmesh have been sorted in redistribute_elements.

  MPI_Comm comm = mesh.comm;
  int size, rank;
  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  // we use commgraphs to store the layouts of the elements, connectivities and nodes
  commgraph<T> elem_layout, con_layout, nod_layout;
  elem_layout.resize(size);
  con_layout.resize(size);
  nod_layout.resize(size);

  // compute layout of the elements
  count(part, elem_layout.scnt);
  dsp_from_cnt(elem_layout.scnt, elem_layout.sdsp);

  // compute layout of the connectivities using the element layout
  con_layout.scnt.zero();
  for(int pid=0; pid<size; pid++)
    for(T i=elem_layout.sdsp[pid]; i<elem_layout.sdsp[pid+1]; i++)
      con_layout.scnt[pid] += sendmesh.dsp[i+1] - sendmesh.dsp[i];
  dsp_from_cnt(con_layout.scnt, con_layout.sdsp);

  // finally compute the nodes we need to send to each rank and their layout
  vector<T> nod_sbuff(sendmesh.con.size());
  nod_layout.sdsp[0] = 0; // initialize first entry of displacements

  for(int pid=0; pid<size; pid++)
  {
    T con_start = con_layout.sdsp[pid], con_end = con_layout.sdsp[pid+1];
    vector<T> nod(con_end - con_start);
    // copy connectivity block
    vec_assign(nod.data(), sendmesh.con.data() + con_start, nod.size());
    // get unique node set for block
    binary_sort(nod); unique_resize(nod);

    // add block size to layout
    nod_layout.scnt[pid] = nod.size();
    nod_layout.sdsp[pid+1] = nod_layout.sdsp[pid] + nod_layout.scnt[pid];
    // copy node set to send buffer
    vec_assign(nod_sbuff.data() + nod_layout.sdsp[pid], nod.data(), nod.size());
  }
  // shrink the send buffer to the summed unique-set sizes
  nod_sbuff.resize(nod_layout.sdsp[nod_layout.sdsp.size()-1]);
  // finish up nod_layout
  MPI_Alltoall(nod_layout.scnt.data(), sizeof(T), MPI_BYTE, nod_layout.rcnt.data(), sizeof(T), MPI_BYTE, comm);
  dsp_from_cnt(nod_layout.rcnt, nod_layout.rdsp);

  // map the node send buffer to local indices so we can gather the coordinates
  vector<T> nod_lidx(nod_sbuff);
  global_to_local(sendmesh.get_numbering(NBR_REF), nod_lidx, false, true);

  // fill coordinates send buffer
  vector<S> xyz_sbuff(nod_sbuff.size() * 3);
  for(size_t i=0; i<nod_lidx.size(); i++)
  {
    T lidx = nod_lidx[i];
    xyz_sbuff[i*3+0] = sendmesh.xyz[lidx*3+0];
    xyz_sbuff[i*3+1] = sendmesh.xyz[lidx*3+1];
    xyz_sbuff[i*3+2] = sendmesh.xyz[lidx*3+2];
  }

  size_t rsize = sum(nod_layout.rcnt);
  vector<T> nod_rbuff(rsize);
  vector<S> xyz_rbuff(rsize*3);

  MPI_Exchange(nod_layout, nod_sbuff, nod_rbuff, comm);
  nod_layout.scale(3); // coordinates have three values per node
  MPI_Exchange(nod_layout, xyz_sbuff, xyz_rbuff, comm);

  // map received indices to local indexing
  global_to_local(mesh.get_numbering(NBR_REF), nod_rbuff, false, true);

  // find unique coordinates: a node may arrive from several ranks, so
  // deduplicate by sorting and accumulating equal indices
  vector<T> acc_cnt(nod_rbuff.size(), 1), acc_dsp,
            acc_col(nod_rbuff.size());

  interval(acc_col, 0, nod_rbuff.size());
  binary_sort_copy(nod_rbuff, acc_col);
  unique_accumulate(nod_rbuff, acc_cnt);
  acc_dsp.resize(acc_cnt.size()+1);
  dsp_from_cnt(acc_cnt, acc_dsp);

  // generate new coordinates vector: take the first received copy of each node
  mesh.xyz.resize(acc_cnt.size()*3);
  for(size_t i=0; i<acc_cnt.size(); i++)
  {
    T pidx = acc_col[acc_dsp[i]];
    mesh.xyz[i*3+0] = xyz_rbuff[pidx*3+0];
    mesh.xyz[i*3+1] = xyz_rbuff[pidx*3+1];
    mesh.xyz[i*3+2] = xyz_rbuff[pidx*3+2];
  }
}
336 
337 
/**
* @brief Write a distributed mesh to disk in parallel.
*
* Elements are redistributed into linearly ascending blocks of the reference
* element numbering and written via write_elements(); vertex coordinates are
* globally sorted by their original numbering and appended to the .pts/.bpts
* file rank after rank. Collective over mesh.comm.
*
* @param mesh     The mesh to write.
* @param binary   Whether to write binary (.bpts) or ASCII (.pts) vertex data.
* @param basename Path + basename of the output files.
*/
template<class T, class S>
inline void write_mesh_parallel(const meshdata<T, S> & mesh, bool binary, std::string basename)
{
  const MPI_Comm comm = mesh.comm;

  int size, rank;
  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

  // we duplicate the mesh so that we can safely redistribute it
  meshdata<T, S> wmesh = mesh;
  vector<T> & ref_eidx = wmesh.get_numbering(NBR_ELEM_REF);

  T elem_gmin = global_min(ref_eidx, comm);
  T elem_gmax = global_max(ref_eidx, comm);

  // block size
  T elem_bsize = (elem_gmax - elem_gmin) / size + 1;

  // redistribute elements linearly ascending after their reference element index --------------------
  vector<T> dest(wmesh.l_numelem);
  for(size_t i=0; i<dest.size(); i++)
    dest[i] = (ref_eidx[i] - elem_gmin) / elem_bsize;

  // redistribute elements of write mesh.
  // WARNING: vertex coordinates are not redistributed. therefore the
  // vertices will be redistributed based on the original mesh.
  redistribute_elements(wmesh, dest);

  {
    // temporary datastructs
    vector<T> perm, eidx;
    meshdata<T, S> tmesh = wmesh;

    // generate permutation
    eidx = wmesh.get_numbering(NBR_ELEM_REF);
    interval(perm, 0, eidx.size());
    binary_sort_copy(eidx, perm);

    // permute local mesh to locally ascending element indices
    permute_mesh(tmesh, wmesh, perm);
  }

  vector<T> nbr_orig; // the global vertex numbering of the original mesh
  if(mesh.nbr.count(NBR_SUBMESH)) {
    // mesh is a submesh. generate submesh numbering for the redistributed
    // mesh and map connectivity to it

    // copy original numbering for later
    nbr_orig = mesh.get_numbering(NBR_SUBMESH);

    // generate new numbering on write mesh
    submesh_numbering<T, S> submesh_nbr;
    submesh_nbr(wmesh);
    const vector<T> & nbr = wmesh.get_numbering(NBR_SUBMESH);
    for(size_t i=0; i<wmesh.con.size(); i++) wmesh.con[i] = nbr[wmesh.con[i]];
  }
  else {
    // mesh is a reference mesh. just map connectivity.

    // copy original numbering for later
    nbr_orig = mesh.get_numbering(NBR_REF);

    const vector<T> & nbr = wmesh.get_numbering(NBR_REF);
    for(size_t i=0; i<wmesh.con.size(); i++) wmesh.con[i] = nbr[wmesh.con[i]];
  }

  // write .elem and .lon files
  write_elements(wmesh, binary, basename);

  // now we need to redistribute and write the vertex coordinates.
  // each rank contributes the coordinates of its algebraic nodes, keyed by the
  // original global numbering; a parallel sort brings them into globally
  // ascending order for writing.
  const vector<T> & alg_nod = mesh.pl.algebraic_nodes();
  vector<T> xyz_cnt(alg_nod.size(), 3), xyz_idx(alg_nod.size()), srt_cnt, srt_idx;
  vector<S> xyz(alg_nod.size()*3), srt_xyz;

  for(size_t i=0; i<alg_nod.size(); i++) {
    T loc = alg_nod[i];

    xyz[i*3+0] = mesh.xyz[loc*3+0];
    xyz[i*3+1] = mesh.xyz[loc*3+1];
    xyz[i*3+2] = mesh.xyz[loc*3+2];

    xyz_idx[i] = nbr_orig[loc];
  }

  sort_parallel(comm, xyz_idx, xyz_cnt, xyz, srt_idx, srt_cnt, srt_xyz);

  // write header with root
  FILE* pts_fd;
  std::string pts_file = binary ? basename + ".bpts" : basename + ".pts";

  if(rank == 0) {
    pts_fd = fopen(pts_file.c_str(), "w");
    if(!pts_fd) {
      fprintf(stderr, "Error: could not open file: %s. Aborting!", pts_file.c_str());
      exit(1);
    }
    write_pts_header(pts_fd, binary, mesh.g_numpts);
    fclose(pts_fd);
  }

  // write vertices sequentially rank after rank; the barrier keeps the
  // append order deterministic
  for(int pid=0; pid < size; pid++) {
    if(pid == rank) {
      pts_fd = fopen(pts_file.c_str(), "a");
      if(!pts_fd) {
        fprintf(stderr, "Error: could not open file: %s. Aborting!", pts_file.c_str());
        exit(1);
      }
      write_pts_block(pts_fd, binary, srt_xyz);
      fclose(pts_fd);
    }
    MPI_Barrier(comm);
  }
}
461 
462 
469 template<class T, class S>
470 inline void gather_mesh(const meshdata<T, S> & locmesh, meshdata<T, S> & globmesh)
471 {
472  MPI_Comm comm = locmesh.comm;
473 
474  int size, rank;
475  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
476 
477  // gather mesh data using the redistribute_mesh function
478  vector<T> part(locmesh.l_numelem, 0); // all elements to rank 0
479  globmesh = locmesh;
480  redistribute_mesh(globmesh, part);
481 }
482 
486 template<class T, class S>
487 inline void print_DD_info(const meshdata<T, S> & mesh)
488 {
489  MPI_Comm comm = mesh.comm;
490 
491  int size, rank;
492  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
493 
494  vector<size_t> npoint(size), nelem(size), ninterf(size), nidx(size);
495 
496  size_t intf_size = mesh.pl.interface().size(),
497  idx_size = mesh.pl.num_algebraic_idx();
498 
499  MPI_Gather(&mesh.l_numpts, sizeof(size_t), MPI_BYTE, npoint.data(), sizeof(size_t), MPI_BYTE, 0, comm);
500  MPI_Gather(&mesh.l_numelem, sizeof(size_t), MPI_BYTE, nelem.data(), sizeof(size_t), MPI_BYTE, 0, comm);
501  MPI_Gather(&intf_size, sizeof(size_t), MPI_BYTE, ninterf.data(), sizeof(size_t), MPI_BYTE, 0, comm);
502  MPI_Gather(&idx_size, sizeof(size_t), MPI_BYTE, nidx.data(), sizeof(size_t), MPI_BYTE, 0, comm);
503 
504  vector<int> mult(mesh.l_numpts, 1);
505  mesh.pl.reduce(mult, "sum");
506 
507  int hist_size = 16;
508  vector<int> mult_hist(hist_size, 0), global_mult_hist(hist_size, 0);
509  for(auto m : mult) mult_hist[m]++;
510 
511  MPI_Reduce(mult_hist.data(), global_mult_hist.data(), hist_size, MPI_INT, MPI_SUM, 0, comm);
512 
513  if(!rank) {
514  printf("===== Parallel mesh statistics =====\n");
515 
516  printf("#pid\t#nodes\t#elems\t#interf\t#alg\n");
517  for(int pid = 0; pid < size; pid++)
518  printf("%d\t%ld\t%ld\t%ld\t%ld\n", pid, (long int)npoint[pid], (long int)nelem[pid],
519  (long int)ninterf[pid], (long int)nidx[pid]);
520  printf("\n");
521 
522  std::cout << "Multiplicities :" << std::endl;
523  for(int i = 2; i < hist_size && global_mult_hist[i] > 0; i++)
524  std::cout << i << ": " << global_mult_hist[i] << std::endl;
525  }
526 }
527 
528 
/**
* @brief Extract all elements flagged in keep from mesh into submesh.
*
* Per-element data (tags, types, fibers, sheets, element reference numbering)
* is copied for the kept elements. The connectivity is first stored in global
* reference indexing and localized at the end; submesh.g_numelem is computed
* with a global reduction. Collective over mesh.comm.
*
* @param keep    Per-local-element flag: true = copy element into submesh.
* @param mesh    The mesh to extract from.
* @param submesh The mesh receiving the extracted elements.
*/
template<class T, class S>
inline void extract_mesh(const vector<bool> & keep,
                         const meshdata<T, S> & mesh,
                         meshdata<T, S> & submesh)
{
  assert(keep.size() == mesh.l_numelem);

  size_t num_extr_elem = 0, num_extr_entr = 0;
  // fiber setup: 0 = no fibers, 1 = fibers only, 2 = fibers + sheets
  int nFib = 0;
  if( mesh.fib.size() == (mesh.l_numelem*3) ) {
    nFib = 1;
    if( mesh.she.size() == (mesh.l_numelem*3) )
      nFib = 2;
  }

  submesh.l_numpts = submesh.g_numpts = 0;
  submesh.comm = mesh.comm;

  const vector<T> & mesh_ref_eidx = mesh.get_numbering(NBR_ELEM_REF);
  vector<T> & sub_ref_eidx = submesh.register_numbering(NBR_ELEM_REF);

  // first pass: count extracted elements and connectivity entries
  for(size_t i=0; i<mesh.l_numelem; i++) {
    if(keep[i]) {
      num_extr_elem++;
      num_extr_entr += mesh.dsp[i+1] - mesh.dsp[i];
    }
  }

  submesh.l_numelem = num_extr_elem;

  vector<T> cnt(num_extr_elem);
  submesh.dsp.resize(num_extr_elem+1);
  submesh.tag.resize(num_extr_elem);
  sub_ref_eidx.resize(num_extr_elem);
  submesh.type.resize(num_extr_elem);
  if(nFib>=1)
    submesh.fib.resize(num_extr_elem*3);
  if(nFib==2)
    submesh.she.resize(num_extr_elem*3);

  submesh.con.resize(num_extr_entr);
  // reference numbering
  const vector<T> & rnod = mesh.get_numbering(NBR_REF);

  // second pass: copy the data; ridx_* read from mesh, widx_* write into submesh
  for(size_t ridx_ele=0, ridx_con=0, widx_ele=0, widx_con=0; ridx_ele<mesh.l_numelem; ridx_ele++)
  {
    ridx_con = mesh.dsp[ridx_ele];

    if(keep[ridx_ele]) {
      // element data
      cnt[widx_ele] = mesh.dsp[ridx_ele + 1] - mesh.dsp[ridx_ele];
      submesh.tag [widx_ele] = mesh.tag [ridx_ele];
      sub_ref_eidx[widx_ele] = mesh_ref_eidx[ridx_ele];
      submesh.type[widx_ele] = mesh.type [ridx_ele];

      // fibers
      if(nFib>=1) {
        submesh.fib[widx_ele*3+0] = mesh.fib[ridx_ele*3+0];
        submesh.fib[widx_ele*3+1] = mesh.fib[ridx_ele*3+1];
        submesh.fib[widx_ele*3+2] = mesh.fib[ridx_ele*3+2];
      }
      if(nFib==2) {
        submesh.she[widx_ele*3+0] = mesh.she[ridx_ele*3+0];
        submesh.she[widx_ele*3+1] = mesh.she[ridx_ele*3+1];
        submesh.she[widx_ele*3+2] = mesh.she[ridx_ele*3+2];
      }

      // connectivity, mapped to global reference indices
      for(int j=0; j<cnt[widx_ele]; j++)
        submesh.con[widx_con++] = rnod[mesh.con[ridx_con++]];

      widx_ele++;
    }
  }
  dsp_from_cnt(cnt, submesh.dsp);

  // global element count of the submesh
  unsigned long int gnumele = submesh.l_numelem;
  MPI_Allreduce(MPI_IN_PLACE, &gnumele, 1, MPI_UNSIGNED_LONG, MPI_SUM, submesh.comm);
  submesh.g_numelem = gnumele;

  // map the global reference connectivity back to local indexing
  submesh.localize(NBR_REF);
}
619 
620 
626 template<class T, class S>
627 inline void rebalance_mesh(meshdata<T, S> & mesh)
628 {
629  MPI_Comm comm = mesh.comm;
630 
631  // we check if some ranks have 0 local elems
632  short errflag = 0;
633  if(mesh.l_numelem == 0) errflag = 1;
634  MPI_Allreduce(MPI_IN_PLACE, &errflag, 1, MPI_SHORT, MPI_SUM, comm);
635 
636  // we redistribute the elements if necessary
637  if(errflag) {
638  int size, rank;
639  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
640 
641  vector<T> part(mesh.l_numelem);
642  vector<size_t> elem_counts(size), layout;
643  MPI_Allgather(&mesh.l_numelem, sizeof(size_t), MPI_BYTE, elem_counts.data(),
644  sizeof(size_t), MPI_BYTE, comm);
645 
646  dsp_from_cnt(elem_counts, layout);
647 
648  for(size_t i=0; i<mesh.l_numelem; i++)
649  part[i] = (layout[rank] + i) % size;
650 
651  redistribute_elements(mesh, part);
652  }
653 }
654 
655 
664 template<class T, class S>
665 inline void extract_myocardium(const meshdata<T, S> & mesh, meshdata<T, S> & submesh, bool require_fibers=true)
666 {
667  MPI_Comm comm = mesh.comm;
668  vector<bool> keep(mesh.l_numelem);
669 
670  // fill keep based on empty fiber definitions
671  for(size_t i=0; i<mesh.l_numelem; i++) {
672  S l1 = 1,l2=0,l3=0;
673  if(require_fibers){
674  l1 = mesh.fib[i*3+0];
675  l2 = mesh.fib[i*3+1];
676  l3 = mesh.fib[i*3+2];
677  }
678 
679  if( l1*l1 + l2*l2 + l3*l3 )
680  keep[i] = true;
681  else
682  keep[i] = false;
683  }
684 
685  extract_mesh(keep, mesh, submesh);
686  rebalance_mesh(submesh);
687 
688  // get unique tag set
689  submesh.extr_tag.insert(submesh.tag.begin(), submesh.tag.end());
690  {
691  // we have to collect the global list of tags of this submesh
692  vector<T> tags, glob_tags;
693  tags.assign(submesh.extr_tag.begin(), submesh.extr_tag.end()); // local list
694  make_global(tags, glob_tags, mesh.comm); // now globalized
695 
696  submesh.extr_tag.insert(glob_tags.begin(), glob_tags.end());
697  }
698 }
699 
700 
710 template<class T, class S>
711 inline void extract_tagbased(const meshdata<T, S> & mesh, meshdata<T, S> & submesh)
712 {
713  if(submesh.extr_tag.size())
714  {
715  vector<bool> keep(mesh.l_numelem, false);
716 
717  // fill keep based on empty fiber definitions
718  for(size_t i=0; i<mesh.l_numelem; i++) {
719  if( submesh.extr_tag.count(mesh.tag[i]) )
720  keep[i] = true;
721  }
722 
723  extract_mesh(keep, mesh, submesh);
724  }
725  else {
726  // if the tags are not provided, we assume that the whole mesh is copied into the
727  // submesh
728  std::string oldname = submesh.name;
729  // copy reference mesh
730  submesh = mesh;
731  // revert name to old name
732  submesh.name = oldname;
733  // compute tags
734  submesh.extr_tag.insert(submesh.tag.begin(), submesh.tag.end());
735  {
736  // we have to collect the global list of tags of this submesh
737  vector<T> tags, glob_tags;
738  tags.assign(submesh.extr_tag.begin(), submesh.extr_tag.end()); // local list
739  make_global(tags, glob_tags, mesh.comm); // now globalized
740 
741  submesh.extr_tag.insert(glob_tags.begin(), glob_tags.end());
742  }
743  }
744 
745  rebalance_mesh(submesh);
746 }
747 
753 template<class T, class S>
754 inline void print_mesh_graph(meshdata<T,S> & mesh)
755 {
756  int rank, size;
757  MPI_Comm_size(mesh.comm, &size); MPI_Comm_rank(mesh.comm, &rank);
758 
759  vector<T> n2n_cnt, n2n_con;
760  nodal_connectivity_graph(mesh, n2n_cnt, n2n_con);
761  matrixgraph_plotter<vector<T> > plotter(30, 60);
762 
763  for(int pid=0; pid<size; pid++)
764  {
765  if(pid == rank) {
766  std::cout << "\n\n Rank " << rank << ": \n" << std::endl;
767  plotter.print(n2n_cnt, n2n_con, '*');
768  }
769  MPI_Barrier(mesh.comm);
770  }
771 }
772 
/**
* @brief Compute a node index mapping between two meshes that share a common
*        reference numbering.
*
* The reference indices are distributed across the ranks (ref_idx % size) so
* that each rank can match the numbering of mesh_a against mesh_b without
* holding the full meshes. Nodes of mesh_a not present in mesh_b are skipped.
* Collective over the (shared) communicator.
*
* @param mesh_a The mesh mapped from.
* @param mesh_b The mesh mapped to.
* @param snbr   The numbering in which the mapping is expressed.
* @param a_to_b [out] The resulting index mapping from mesh_a to mesh_b.
*/
template<class T, class S> inline
void inter_domain_mapping(const meshdata<T,S> & mesh_a, const meshdata<T,S> & mesh_b,
                          const SF_nbr snbr, index_mapping<T> & a_to_b)
{
  assert(mesh_a.comm == mesh_b.comm);
  MPI_Comm comm = mesh_a.comm;

  int size, rank;
  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  // reference and submesh numberings of both meshes
  const vector<T> & mesh_a_rnbr = mesh_a.get_numbering(NBR_REF);
  const vector<T> & mesh_a_snbr = mesh_a.get_numbering(snbr);
  const vector<T> & mesh_b_rnbr = mesh_b.get_numbering(NBR_REF);
  const vector<T> & mesh_b_snbr = mesh_b.get_numbering(snbr);
  // send- and receive-buffer for the reference and submesh numberings
  vector<T> mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, mesh_a_snbr_sbuff, mesh_a_snbr_rbuff;
  vector<T> mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff;

  // communicate reference- and submesh numbering of mesh a ----------------------------
  // each node is sent to the rank given by its reference index modulo comm size
  vector<T> dest(mesh_a_rnbr.size()), perm_a, perm_b;
  for(size_t i=0; i<dest.size(); i++) dest[i] = mesh_a_rnbr[i] % size;

  interval(perm_a, 0, mesh_a_rnbr.size());
  binary_sort_copy(dest, perm_a);

  mesh_a_rnbr_sbuff.resize(dest.size()); mesh_a_snbr_sbuff.resize(dest.size());
  for(size_t i=0; i<dest.size(); i++) {
    mesh_a_rnbr_sbuff[i] = mesh_a_rnbr[perm_a[i]];
    //mesh_a_snbr_sbuff[i] = mesh_a_snbr[perm_a[i]];
  }

  commgraph<size_t> grph_a;
  grph_a.configure(dest, comm);
  size_t rcv_size = sum(grph_a.rcnt);

  mesh_a_rnbr_rbuff.resize(rcv_size);
  mesh_a_snbr_rbuff.resize(rcv_size);

  MPI_Exchange(grph_a, mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, comm);
  //MPI_Exchange(grph_a, mesh_a_snbr_sbuff, mesh_a_snbr_rbuff, comm);

  // communicate reference- and submesh numbering of mesh b ----------------------------
  dest.resize(mesh_b_rnbr.size());
  for(size_t i=0; i<dest.size(); i++) dest[i] = mesh_b_rnbr[i] % size;

  interval(perm_b, 0, mesh_b_rnbr.size());
  binary_sort_copy(dest, perm_b);

  mesh_b_rnbr_sbuff.resize(dest.size()); mesh_b_snbr_sbuff.resize(dest.size());
  for(size_t i=0; i<dest.size(); i++) {
    mesh_b_rnbr_sbuff[i] = mesh_b_rnbr[perm_b[i]];
    mesh_b_snbr_sbuff[i] = mesh_b_snbr[perm_b[i]];
  }

  commgraph<size_t> grph_b;
  grph_b.configure(dest, comm);
  rcv_size = sum(grph_b.rcnt);

  mesh_b_rnbr_rbuff.resize(rcv_size);
  mesh_b_snbr_rbuff.resize(rcv_size);

  MPI_Exchange(grph_b, mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, comm);
  MPI_Exchange(grph_b, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff, comm);

  // generate mapping ------------------------------------------------------------------
  // We do this by first setting up a map between reference index and submesh index for
  // mesh b. This allows us to check fast if a reference index is in b and do the mapping.
  hashmap::unordered_map<T,T> ref_to_sub_b;
  for(size_t i=0; i<mesh_b_rnbr_rbuff.size(); i++) {
    T ref_idx = mesh_b_rnbr_rbuff[i];
    T sub_idx = mesh_b_snbr_rbuff[i];

    // a reference index must always map to the same submesh index
    if(ref_to_sub_b.count(ref_idx) && ref_to_sub_b[ref_idx] != sub_idx)
      fprintf(stderr, "inter_domain_mapping error: Missmatching multiple mappings: %d : %d \n",
              ref_to_sub_b[ref_idx], sub_idx);
    ref_to_sub_b[ref_idx] = sub_idx;
  }

  // Now we either map the reference indices of mesh a to the submesh index of b, or, if
  // the reference index is not in b, set it to -1
  for(size_t i=0; i<mesh_a_rnbr_rbuff.size(); i++) {
    auto it = ref_to_sub_b.find(mesh_a_rnbr_rbuff[i]);
    if(it != ref_to_sub_b.end())
      mesh_a_snbr_rbuff[i] = it->second;
    else
      mesh_a_snbr_rbuff[i] = -1;
  }

  // now we simply communicate back by transposing grph_a
  grph_a.transpose();
  MPI_Exchange(grph_a, mesh_a_snbr_rbuff, mesh_a_snbr_sbuff, comm);

  size_t num_mapped = 0;
  for(size_t i=0; i<mesh_a_snbr_sbuff.size(); i++)
    if(mesh_a_snbr_sbuff[i] > -1) num_mapped++;

  // the submesh indices that could be mapped
  vector<T> snbr_a(num_mapped), snbr_b(num_mapped);
  // undo the sort permutation so entry i corresponds to local node i again
  binary_sort_copy(perm_a, mesh_a_snbr_sbuff);

  for(size_t i=0, idx=0; i<mesh_a_snbr_sbuff.size(); i++) {
    if(mesh_a_snbr_sbuff[i] > -1) {
      snbr_a[idx] = mesh_a_snbr[i];
      snbr_b[idx] = mesh_a_snbr_sbuff[i];
      idx++;
    }
  }

  // now we can set up the a-to-b mapping
  a_to_b.assign(snbr_a, snbr_b);
}
899 
900 template<class T> inline
901 void insert_surf_tri(T n1, T n2, T n3, size_t eidx,
902  triple<T> & surf,
903  tri_sele<T> & sele,
905 {
906  sele.v1 = n1, sele.v2 = n2, sele.v3 = n3; sele.eidx = eidx;
907  sort_triple(n1, n2, n3, surf.v1, surf.v2, surf.v3);
908 
909  auto it = surfmap.find(surf);
910  if(it != surfmap.end()) surfmap.erase(it);
911  else surfmap[surf] = sele;
912 }
913 
914 template<class T> inline
915 void insert_surf_quad(T n1, T n2, T n3, T n4, size_t eidx,
916  vector<T> & buff,
917  quadruple<T> & surf,
918  quad_sele<T> & sele,
920 {
921  buff[0] = n1, buff[1] = n2, buff[2] = n3, buff[3] = n4;
922  binary_sort(buff);
923 
924  sele.v1 = n1, sele.v2 = n2, sele.v3 = n3, sele.v4 = n4, sele.eidx = eidx;
925  surf.v1 = buff[0], surf.v2 = buff[1], surf.v3 = buff[2], surf.v4 = buff[3];
926 
927  auto it = surfmap.find(surf);
928  if(it != surfmap.end()) surfmap.erase(it);
929  else surfmap[surf] = sele;
930 }
931 
932 template<class T> inline
933 void insert_surf_tet(const T* nod,
934  const size_t eidx,
936 {
937  // surfaces are (2,3,1) , (1,4,2) , (2,4,3) , (1,3,4)
938  struct triple<T> surf;
939  struct tri_sele<T> sele;
940 
941  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3];
942 
943  insert_surf_tri(n2, n3, n1, eidx, surf, sele, surfmap);
944  insert_surf_tri(n1, n4, n2, eidx, surf, sele, surfmap);
945  insert_surf_tri(n2, n4, n3, eidx, surf, sele, surfmap);
946  insert_surf_tri(n1, n3, n4, eidx, surf, sele, surfmap);
947 }
948 
949 template<class T> inline
950 void insert_surf_pyr(const T* nod, const size_t eidx, vector<T> & buff,
953 {
954  // surfaces are (1,5,2) , (2,5,3) , (3,5,4) , (4,5,1) , (1,2,3,4)
955  triple<T> surf; quadruple<T> qsurf;
956  tri_sele<T> sele; quad_sele<T> qsele;
957 
958  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4];
959 
960  insert_surf_tri(n1, n5, n2, eidx, surf, sele, surfmap);
961  insert_surf_tri(n2, n5, n3, eidx, surf, sele, surfmap);
962  insert_surf_tri(n3, n5, n4, eidx, surf, sele, surfmap);
963  insert_surf_tri(n4, n5, n1, eidx, surf, sele, surfmap);
964  insert_surf_quad(n1, n2, n3, n4, eidx, buff, qsurf, qsele, qsurfmap);
965 }
966 
967 template<class T> inline
968 void insert_surf_pri(const T* nod, const size_t eidx, vector<T> & buff,
971 {
972  struct triple<T> surf;
973  struct quadruple<T> qsurf;
974  struct tri_sele<T> sele;
975  struct quad_sele<T> qsele;
976 
977  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4], n6 = nod[5];
978 
979  // surfaces are (1,2,3) , (4,5,6) , (1,2,6,4) , (2,3,5,6) , (3,1,4,5)
980  insert_surf_tri(n1, n2, n3, eidx, surf, sele, surfmap);
981  insert_surf_tri(n4, n5, n6, eidx, surf, sele, surfmap);
982  insert_surf_quad(n1, n2, n6, n4, eidx, buff, qsurf, qsele, qsurfmap);
983  insert_surf_quad(n2, n3, n5, n6, eidx, buff, qsurf, qsele, qsurfmap);
984  insert_surf_quad(n3, n1, n4, n5, eidx, buff, qsurf, qsele, qsurfmap);
985 }
986 
987 template<class T> inline
988 void insert_surf_hex(const T* nod, const size_t eidx, vector<T> & buff,
990 {
991  // surfaces are (1,2,3,4) , (3,2,8,7) , (4,3,7,6) , (1,4,6,5) , (2,1,5,8), (6,7,8,5)
992  struct quadruple<T> qsurf;
993  struct quad_sele<T> qsele;
994 
995  T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3],
996  n5 = nod[4], n6 = nod[5], n7 = nod[6], n8 = nod[7];
997 
998  insert_surf_quad(n1, n2, n3, n4, eidx, buff, qsurf, qsele, surfmap);
999  insert_surf_quad(n3, n2, n8, n7, eidx, buff, qsurf, qsele, surfmap);
1000  insert_surf_quad(n4, n3, n7, n6, eidx, buff, qsurf, qsele, surfmap);
1001  insert_surf_quad(n1, n4, n6, n5, eidx, buff, qsurf, qsele, surfmap);
1002  insert_surf_quad(n2, n1, n5, n8, eidx, buff, qsurf, qsele, surfmap);
1003  insert_surf_quad(n6, n7, n8, n5, eidx, buff, qsurf, qsele, surfmap);
1004 }
1005 
1006 template<class T, class S> inline
1008  const hashmap::unordered_map<triple<T>, tri_sele<T> > & search_tri,
1009  const hashmap::unordered_map<quadruple<T>, quad_sele<T> > & search_quad,
1012 {
1013  const T* con = mesh.con.data();
1014  const T* nbr = mesh.get_numbering(numbering).data();
1015 
1018 
1019  vector<T> nodvec(mesh.con.size()); T* nod = nodvec.data();
1020  for(size_t i=0; i<nodvec.size(); i++)
1021  nod[i] = nbr[con[i]];
1022 
1023  const vector<T> & ref_eidx = mesh.get_numbering(NBR_ELEM_REF);
1024  vector<T> qbuff(4); // buffer if quad nodes need to be sorted
1025 
1026  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++) {
1027  tri_surf.clear();
1028  quad_surf.clear();
1029 
1030  switch(mesh.type[eidx]) {
1031  case Tri: {
1032  triple<T> tri;
1033  tri_sele<T> trie;
1034  insert_surf_tri(nod[0], nod[1], nod[2], ref_eidx[eidx], tri, trie, tri_surf);
1035  break;
1036  }
1037 
1038  case Quad: {
1039  quadruple<T> qd;
1040  quad_sele<T> qde;
1041  insert_surf_quad(nod[0], nod[1], nod[2], nod[3],
1042  ref_eidx[eidx], qbuff, qd, qde, quad_surf);
1043  break;
1044  }
1045 
1046  case Tetra:
1047  insert_surf_tet(nod, ref_eidx[eidx], tri_surf);
1048  break;
1049 
1050  case Pyramid:
1051  insert_surf_pyr(nod, ref_eidx[eidx], qbuff, tri_surf, quad_surf);
1052  break;
1053 
1054  case Prism:
1055  insert_surf_pri(nod, ref_eidx[eidx], qbuff, tri_surf, quad_surf);
1056  break;
1057 
1058  case Hexa:
1059  insert_surf_hex(nod, ref_eidx[eidx], qbuff, quad_surf);
1060  break;
1061 
1062  default: break;
1063  }
1064 
1065  for(auto it = tri_surf.begin(); it != tri_surf.end(); ++it) {
1066  auto ft = search_tri.find(it->first);
1067  if(ft != search_tri.end() && !found_tri.count(ft->first)) {
1068  // now we also look for the same orientation
1069  T iv1 = mesh.pl.localize(it->second.v1);
1070  T iv2 = mesh.pl.localize(it->second.v2);
1071  T iv3 = mesh.pl.localize(it->second.v3);
1072  SF::Point ip1 = {mesh.xyz[iv1*3+0], mesh.xyz[iv1*3+1], mesh.xyz[iv1*3+2]};
1073  SF::Point ip2 = {mesh.xyz[iv2*3+0], mesh.xyz[iv2*3+1], mesh.xyz[iv2*3+2]};
1074  SF::Point ip3 = {mesh.xyz[iv3*3+0], mesh.xyz[iv3*3+1], mesh.xyz[iv3*3+2]};
1075  SF::Point e12 = normalize(ip2 - ip1);
1076  SF::Point e13 = normalize(ip3 - ip1);
1077  SF::Point in = normalize(cross(e12, e13));
1078 
1079  T fv1 = mesh.pl.localize(ft->second.v1);
1080  T fv2 = mesh.pl.localize(ft->second.v2);
1081  T fv3 = mesh.pl.localize(ft->second.v3);
1082  SF::Point fp1 = {mesh.xyz[fv1*3+0], mesh.xyz[fv1*3+1], mesh.xyz[fv1*3+2]};
1083  SF::Point fp2 = {mesh.xyz[fv2*3+0], mesh.xyz[fv2*3+1], mesh.xyz[fv2*3+2]};
1084  SF::Point fp3 = {mesh.xyz[fv3*3+0], mesh.xyz[fv3*3+1], mesh.xyz[fv3*3+2]};
1085  e12 = normalize(fp2 - fp1);
1086  e13 = normalize(fp3 - fp1);
1087  SF::Point fn = normalize(cross(e12, e13));
1088 
1089  if (inner_prod(in, fn) > 0.9) {
1090  tri_sele<T> found = ft->second; found.eidx = eidx;
1091  found_tri[ft->first] = found;
1092  }
1093  }
1094  }
1095 
1096  for(auto it = quad_surf.begin(); it != quad_surf.end(); ++it) {
1097  auto ft = search_quad.find(it->first);
1098  if(ft != search_quad.end() && !found_quad.count(ft->first)) {
1099  // now we also look for the same orientation
1100  T iv1 = mesh.pl.localize(it->second.v1);
1101  T iv2 = mesh.pl.localize(it->second.v2);
1102  T iv3 = mesh.pl.localize(it->second.v3);
1103  SF::Point ip1 = {mesh.xyz[iv1*3+0], mesh.xyz[iv1*3+1], mesh.xyz[iv1*3+2]};
1104  SF::Point ip2 = {mesh.xyz[iv2*3+0], mesh.xyz[iv2*3+1], mesh.xyz[iv2*3+2]};
1105  SF::Point ip3 = {mesh.xyz[iv3*3+0], mesh.xyz[iv3*3+1], mesh.xyz[iv3*3+2]};
1106  SF::Point e12 = normalize(ip2 - ip1);
1107  SF::Point e13 = normalize(ip3 - ip1);
1108  SF::Point in = normalize(cross(e12, e13));
1109 
1110  T fv1 = mesh.pl.localize(ft->second.v1);
1111  T fv2 = mesh.pl.localize(ft->second.v2);
1112  T fv3 = mesh.pl.localize(ft->second.v3);
1113  SF::Point fp1 = {mesh.xyz[fv1*3+0], mesh.xyz[fv1*3+1], mesh.xyz[fv1*3+2]};
1114  SF::Point fp2 = {mesh.xyz[fv2*3+0], mesh.xyz[fv2*3+1], mesh.xyz[fv2*3+2]};
1115  SF::Point fp3 = {mesh.xyz[fv3*3+0], mesh.xyz[fv3*3+1], mesh.xyz[fv3*3+2]};
1116  e12 = normalize(fp2 - fp1);
1117  e13 = normalize(fp3 - fp1);
1118  SF::Point fn = normalize(cross(e12, e13));
1119 
1120  if (inner_prod(in, fn) > 0.9) {
1121  quad_sele<T> found = ft->second; found.eidx = eidx;
1122  found_quad[ft->first] = found;
1123  }
1124  }
1125  }
1126 
1127  nod += mesh.dsp[eidx+1] - mesh.dsp[eidx];
1128  }
1129 }
1130 
1131 template<class T, class S> inline
1133  const hashmap::unordered_set<T> & tags,
1136 {
1137  const T* con = mesh.con.data();
1138  const T* nbr = mesh.get_numbering(numbering).data();
1139 
1140  bool have_tags = tags.size() > 0;
1141 
1142  vector<T> nodvec(mesh.con.size()); T* nod = nodvec.data();
1143  for(size_t i=0; i<nodvec.size(); i++)
1144  nod[i] = nbr[con[i]];
1145 
1146  vector<T> qbuff(4); // buffer if quad nodes need to be sorted
1147 
1148  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++) {
1149  if(have_tags == false || tags.count(mesh.tag[eidx])) {
1150  switch(mesh.type[eidx]) {
1151  case Tri: {
1152  triple<T> tri;
1153  tri_sele<T> trie;
1154  insert_surf_tri(nod[0], nod[1], nod[2], eidx, tri, trie, tri_surf);
1155  break;
1156  }
1157 
1158  case Quad: {
1159  quadruple<T> qd;
1160  quad_sele<T> qde;
1161  insert_surf_quad(nod[0], nod[1], nod[2], nod[3],
1162  eidx, qbuff, qd, qde, quad_surf);
1163  break;
1164  }
1165 
1166  case Tetra:
1167  insert_surf_tet(nod, eidx, tri_surf);
1168  break;
1169 
1170  case Pyramid:
1171  insert_surf_pyr(nod, eidx, qbuff, tri_surf, quad_surf);
1172  break;
1173 
1174  case Prism:
1175  insert_surf_pri(nod, eidx, qbuff, tri_surf, quad_surf);
1176  break;
1177 
1178  case Hexa:
1179  insert_surf_hex(nod, eidx, qbuff, quad_surf);
1180  break;
1181 
1182  default: break;
1183  }
1184  }
1185 
1186  nod += mesh.dsp[eidx+1] - mesh.dsp[eidx];
1187  }
1188 }
1189 
1190 template<class T, class S> inline
1194 {
1196  compute_surface(mesh, numbering, tags, tri_surf, quad_surf);
1197 }
1198 
1199 template<class V> inline
1200 void get_hashmap_duplicates(const vector<V> & data, const MPI_Comm comm,
1201  vector<bool> & is_dup)
1202 {
1203  int size, rank;
1204  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
1205 
1206  size_t dsize = data.size();
1207  is_dup.assign(dsize, false);
1208 
1209  vector<int> perm, dest(dsize);
1210 
1211  // set up destinations
1212  for(size_t i=0; i<dsize; i++)
1213  dest[i] = hashmap::hash_ops<V>::hash(data[i]) % size;
1214 
1215  commgraph<size_t> grph;
1216  grph.configure(dest, comm);
1217  size_t nrecv = sum(grph.rcnt);
1218 
1219  interval(perm, 0, dsize);
1220  binary_sort_copy(dest, perm);
1221 
1222  // fill send buffer and communicate
1223  vector<V> sbuff(dsize), rbuff(nrecv);
1224  for(size_t i=0; i<dsize; i++)
1225  sbuff[i] = data[perm[i]];
1226  MPI_Exchange(grph, sbuff, rbuff, comm);
1227 
1228  // now check duplicates. values occurring multiple times should go into dup, those occurring
1229  // only once go into not_dup
1230  hashmap::unordered_set<V> not_dup, dup;
1231  vector<bool> cdup(rbuff.size());
1232 
1233  for(const V & val : rbuff) {
1234  if(not_dup.count(val) == 0 && dup.count(val) == 0)
1235  not_dup.insert(val);
1236  else {
1237  not_dup.erase(val);
1238  dup.insert(val);
1239  }
1240  }
1241 
1242  for(size_t i=0; i<nrecv; i++) {
1243  V val = rbuff[i];
1244  bool d = dup.count(val);
1245 
1246  // just a debug check
1247  assert(d == (not_dup.count(val) == 0));
1248 
1249  cdup[i] = d;
1250  }
1251 
1252  // now we communicate cdup back
1253  grph.transpose();
1254  MPI_Exchange(grph, cdup, is_dup, comm);
1255 
1256  // permute is_dup back to input permutation
1257  binary_sort_copy(perm, is_dup);
1258 }
1259 
1261 template<class K, class V> inline
1262 void remove_duplicates(hashmap::unordered_map<K,V> & map, const MPI_Comm comm)
1263 {
1264  vector<K> check_vec (map.size());
1265  vector<bool> is_dup (map.size());
1266 
1267  size_t idx=0;
1268  for(const auto & v : map) check_vec[idx++] = v.first;
1269 
1270  get_hashmap_duplicates(check_vec, comm, is_dup);
1271 
1272  for(size_t i=0; i<is_dup.size(); i++)
1273  if(is_dup[i])
1274  map.erase(check_vec[i]);
1275 }
1277 template<class K> inline
1278 void remove_duplicates(hashmap::unordered_set<K> & set, const MPI_Comm comm)
1279 {
1280  vector<K> check_vec (set.size());
1281  vector<bool> is_dup (set.size());
1282 
1283  check_vec.assign(set.begin(), set.end());
1284  get_hashmap_duplicates(check_vec, comm, is_dup);
1285 
1286  for(size_t i=0; i<is_dup.size(); i++)
1287  if(is_dup[i])
1288  set.erase(check_vec[i]);
1289 }
1290 
1291 template<class T> inline
1294  MPI_Comm comm)
1295 {
1296  long int g_num_tri = tri_surf.size();
1297  long int g_num_quad = quad_surf.size();
1298  MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
1299  MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
1300 
1301  if(g_num_tri)
1302  remove_duplicates(tri_surf, comm);
1303  if(g_num_quad)
1304  remove_duplicates(quad_surf, comm);
1305 }
1306 
1307 
1308 template<class T, class S> inline
1311  meshdata<T,S> & surfmesh,
1312  vector<T> & elem_orig)
1313 {
1314  MPI_Comm comm = surfmesh.comm;
1315 
1316  long int g_num_tri = tri_surf.size();
1317  long int g_num_quad = quad_surf.size();
1318  MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
1319  MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
1320 
1321  surfmesh.g_numelem = g_num_tri + g_num_quad;
1322  surfmesh.l_numelem = tri_surf.size() + quad_surf.size();
1323 
1324  vector<T> cnt(surfmesh.l_numelem);
1325  surfmesh.type.resize(surfmesh.l_numelem);
1326  surfmesh.con.resize(tri_surf.size() * 3 + quad_surf.size() * 4);
1327  elem_orig.resize(surfmesh.l_numelem);
1328 
1329  size_t idx = 0, cidx = 0;
1330  for(const auto & v : tri_surf) {
1331  cnt[idx] = 3;
1332  surfmesh.type[idx] = Tri;
1333  surfmesh.con[cidx + 0] = v.second.v1;
1334  surfmesh.con[cidx + 1] = v.second.v2;
1335  surfmesh.con[cidx + 2] = v.second.v3;
1336 
1337  elem_orig[idx] = v.second.eidx;
1338 
1339  idx += 1;
1340  cidx += 3;
1341  }
1342 
1343  for(const auto & v : quad_surf) {
1344  cnt[idx] = 4;
1345  surfmesh.type[idx] = Quad;
1346  surfmesh.con[cidx + 0] = v.second.v1;
1347  surfmesh.con[cidx + 1] = v.second.v2;
1348  surfmesh.con[cidx + 2] = v.second.v3;
1349  surfmesh.con[cidx + 3] = v.second.v4;
1350 
1351  elem_orig[idx] = v.second.eidx;
1352 
1353  idx += 1;
1354  cidx += 4;
1355  }
1356 
1357  dsp_from_cnt(cnt, surfmesh.dsp);
1358 }
1359 
1364 template<class T, class S> inline
1365 void convert_mesh_surface(const meshdata<T,S> & surfmesh,
1368 {
1369  vector<T> qbuff(4); // buffer if quad nodes need to be sorted
1370  quadruple<T> qd; quad_sele<T> qde;
1371  triple<T> tri; tri_sele<T> trie;
1372 
1373  for(size_t eidx=0; eidx<surfmesh.l_numelem; eidx++) {
1374  int esize = surfmesh.dsp[eidx+1] - surfmesh.dsp[eidx];
1375 
1376  const T* nod = surfmesh.con.data() + surfmesh.dsp[eidx];
1377  if(esize == 4) {
1378  insert_surf_quad(nod[0], nod[1], nod[2], nod[3], eidx, qbuff, qd, qde, quad_surf);
1379  }
1380  else {
1381  insert_surf_tri(nod[0], nod[1], nod[2], eidx, tri, trie, tri_surf);
1382  }
1383  }
1384 }
1385 
1396 template<class T, class S> inline
1398  const hashmap::unordered_set<T> & tags,
1399  meshdata<T,S> & surfmesh)
1400 {
1403  vector<T> elem_orig; // local element indices of "mesh"
1404 
1405  compute_surface(mesh, numbering, tags, tri_surf, quad_surf);
1406 
1407  surfmesh.comm = mesh.comm;
1408  remove_parallel_duplicates(tri_surf, quad_surf, surfmesh.comm);
1409  convert_surface_mesh(tri_surf, quad_surf, surfmesh, elem_orig);
1410 
1411  surfmesh.tag.resize(size_t(surfmesh.l_numelem));
1412  surfmesh.fib.resize(size_t(surfmesh.l_numelem * 3));
1413 
1414  if(mesh.she.size())
1415  surfmesh.she.resize(surfmesh.l_numelem * 3);
1416 
1417  for(size_t eidx = 0; eidx < elem_orig.size(); eidx++) {
1418  T orig = elem_orig[eidx];
1419 
1420  surfmesh.tag[eidx] = mesh.tag[orig];
1421  surfmesh.fib[eidx*3+0] = mesh.fib[orig*3+0];
1422  surfmesh.fib[eidx*3+1] = mesh.fib[orig*3+1];
1423  surfmesh.fib[eidx*3+2] = mesh.fib[orig*3+2];
1424 
1425  if(mesh.she.size()) {
1426  surfmesh.she[eidx*3+0] = mesh.she[orig*3+0];
1427  surfmesh.she[eidx*3+1] = mesh.she[orig*3+1];
1428  surfmesh.she[eidx*3+2] = mesh.she[orig*3+2];
1429  }
1430  }
1431 }
1432 
1442 template<class T, class S> inline
1444  meshdata<T,S> & surfmesh)
1445 {
1447  compute_surface_mesh(mesh, numbering, tags, surfmesh);
1448 }
1449 
1450 template<class T, class S> inline
1452  meshdata<T,S> & surface,
1453  std::string filename)
1454 {
1455  int rank; MPI_Comm_rank(mesh.comm, &rank);
1456  FILE *fd = NULL;
1457 
1458  size_t numele = 0;
1459  size_t numcon = 0;
1460  int nFib = 0;
1461  bool read_bin = false;
1462 
1463  if(rank == 0)
1464  {
1465  fd = fopen(filename.c_str(), "r");
1466  if(fd)
1467  SF::read_headers(fd, NULL, read_bin, numele, nFib);
1468  }
1469  MPI_Bcast(&numele, sizeof(size_t), MPI_BYTE, 0, mesh.comm);
1470  surface.g_numelem = numele;
1471 
1472  // We read the whole surface at once, this could be rewritten into multiple reads to
1473  // save memory
1474  if(rank == 0) {
1475  SF::read_elem_block(fd, read_bin, 0, numele, surface);
1476 
1477  numcon = surface.con.size();
1478 
1479  if(numele != surface.l_numelem) {
1480  std::cerr << "Error: Incomplete surface file! Aborting!" << std::endl;
1481  fclose(fd);
1482  exit(1);
1483  }
1484  }
1485 
1486  surface.l_numelem = numele;
1487 
1488  surface.tag.resize(surface.l_numelem);
1489  MPI_Bcast(surface.tag.data(), surface.tag.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
1490 
1491  surface.dsp.resize(surface.l_numelem + 1);
1492  MPI_Bcast(surface.dsp.data(), surface.dsp.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
1493 
1494  surface.type.resize(surface.l_numelem);
1495  MPI_Bcast(surface.type.data(), surface.type.size()*sizeof(elem_t), MPI_BYTE, 0, mesh.comm);
1496 
1497  vector<T> & ref_eidx = surface.register_numbering(NBR_ELEM_REF);
1498  ref_eidx.resize(surface.l_numelem);
1499 
1500  MPI_Bcast(ref_eidx.data(), surface.l_numelem*sizeof(T), MPI_BYTE, 0, mesh.comm);
1501 
1502  MPI_Bcast(&numcon, sizeof(size_t), MPI_BYTE, 0, mesh.comm);
1503  surface.con.resize(numcon);
1504  MPI_Bcast(surface.con.data(), surface.con.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
1505 
1506  // now we have to restrict the surface elements to those in the local partition
1507  {
1508  size_t initial_gnumelem = surface.g_numelem;
1509 
1510  hashmap::unordered_map<triple<T>, tri_sele<T>> search_tri, found_tri;
1511  hashmap::unordered_map<quadruple<T>, quad_sele<T>> search_quad, found_quad;
1512 
1513  convert_mesh_surface(surface, search_tri, search_quad);
1514  search_for_surface (mesh, SF::NBR_REF, search_tri, search_quad, found_tri, found_quad);
1515 
1516  vector<T> elem_orig;
1517  convert_surface_mesh(found_tri, found_quad, surface, elem_orig);
1518  mesh.pl.localize(surface.con);
1519 
1520  surface.tag.resize(size_t(surface.l_numelem));
1521  surface.fib.resize(size_t(surface.l_numelem * 3));
1522 
1523  if(mesh.she.size())
1524  surface.she.resize(surface.l_numelem * 3);
1525 
1526  for(size_t eidx = 0; eidx < elem_orig.size(); eidx++) {
1527  T orig = elem_orig[eidx];
1528 
1529  surface.tag[eidx] = mesh.tag[orig];
1530  surface.fib[eidx*3+0] = mesh.fib[orig*3+0];
1531  surface.fib[eidx*3+1] = mesh.fib[orig*3+1];
1532  surface.fib[eidx*3+2] = mesh.fib[orig*3+2];
1533 
1534  if(mesh.she.size()) {
1535  surface.she[eidx*3+0] = mesh.she[orig*3+0];
1536  surface.she[eidx*3+1] = mesh.she[orig*3+1];
1537  surface.she[eidx*3+2] = mesh.she[orig*3+2];
1538  }
1539  }
1540 
1541  // we want to remember to original gnumelems, so that the next consistency check is
1542  // actually useful.
1543  surface.g_numelem = initial_gnumelem;
1544  }
1545 
1546  {
1547  long int numele_check = surface.l_numelem;
1548  MPI_Allreduce(MPI_IN_PLACE, &numele_check, 1, MPI_LONG, MPI_SUM, mesh.comm);
1549  if(numele_check != surface.g_numelem) {
1550  if(rank == 0)
1551  fprintf(stderr, "ERROR: Bad partitioning of surface %s!"
1552  " Global elem sum should be %ld, but is %ld!\n\n",
1553  filename.c_str(), surface.g_numelem, numele_check);
1554  }
1555  }
1556 
1557  // copy indexing from parent mesh
1558  SF::vector<T> & nod_ref = surface.register_numbering(SF::NBR_REF);
1559  SF::vector<T> & nod_petsc = surface.register_numbering(SF::NBR_PETSC);
1560  nod_ref = mesh.get_numbering(SF::NBR_REF);
1561  nod_petsc = mesh.get_numbering(SF::NBR_PETSC);
1562 
1563  // copy coords from parent mesh
1564  surface.l_numpts = mesh.l_numpts;
1565  surface.g_numpts = mesh.g_numpts;
1566  surface.xyz = mesh.xyz;
1567 }
1568 
1569 template<class T>
1571 {
1572  size_t widx = 0;
1573 
1574  for(size_t i=0; i<v.size(); i++)
1575  if(set.count(v[i]))
1576  v[widx++] = v[i];
1577 
1578  v.resize(widx);
1579 }
1580 template<class T>
1581 inline void restrict_to_set(vector<T> & v, const vector<T> & vset)
1582 {
1584  set.insert(vset.begin(), vset.end());
1585 
1586  restrict_to_set(v, set);
1587 }
1588 
1589 }
1590 
1591 #endif
Basic containers.
void sort_triple(const T in1, const T in2, const T in3, T &out1, T &out2, T &out3)
sort the "in" triple into the "out" triple
Definition: SF_container.h:893
Functions related to mesh IO.
Functions related to network communication.
Classes related to mesh node renumbering.
Classes and algorithms related to the layout of distributed meshes.
Various sorting algorithms.
The vector class and related algorithms.
Class used to plot functions on the terminal.
The class holds the communication graph for a MPI_Exchange() call.
Definition: SF_container.h:638
vector< T > rcnt
Number of elements received from each rank.
Definition: SF_container.h:642
vector< T > scnt
Number of elements sent to each rank.
Definition: SF_container.h:640
void resize(size_t size)
Resize all vectors to size.
Definition: SF_container.h:646
void configure(const vector< V > &dest, MPI_Comm comm)
Set up the communication graph.
Definition: SF_container.h:679
vector< T > sdsp
Displacements w.r.t. scnt.
Definition: SF_container.h:641
vector< T > rdsp
Displacements w.r.t. rcnt.
Definition: SF_container.h:643
void transpose()
transpose comm graph (receive becomes send, and vice versa)
Definition: SF_container.h:663
void scale(V fac)
scale comm graph layout data
Definition: SF_container.h:653
Index mapping class. This is a bijective mapping.
Definition: SF_container.h:220
void assign(const vector< T > &a, const vector< T > &b)
Set up the index mapping between a and b.
Definition: SF_container.h:247
The mesh storage class. It contains both element and vertex data.
Definition: SF_container.h:396
overlapping_layout< T > pl
nodal parallel layout
Definition: SF_container.h:429
size_t g_numpts
global number of points
Definition: SF_container.h:400
vector< T > dsp
connectivity starting index of each element
Definition: SF_container.h:416
vector< S > she
sheet direction
Definition: SF_container.h:421
vector< S > fib
fiber direction
Definition: SF_container.h:420
size_t l_numelem
local number of elements
Definition: SF_container.h:399
vector< elem_t > type
element type
Definition: SF_container.h:418
std::map< SF_nbr, vector< T > > nbr
container for different numberings
Definition: SF_container.h:426
std::string name
the mesh name
Definition: SF_container.h:407
vector< T > & register_numbering(SF_nbr nbr_type)
Register a new numbering to the mesh and return the associated index vector.
Definition: SF_container.h:444
vector< T > con
Definition: SF_container.h:412
vector< S > xyz
node coordinates
Definition: SF_container.h:427
size_t l_numpts
local number of points
Definition: SF_container.h:401
size_t g_numelem
global number of elements
Definition: SF_container.h:398
void localize(SF_nbr nbr_type)
Localize the connectivity data w.r.t. a given numbering.
Definition: SF_container.h:496
MPI_Comm comm
the parallel mesh is defined on a MPI world
Definition: SF_container.h:404
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
Definition: SF_container.h:464
vector< T > tag
element tag
Definition: SF_container.h:417
hashmap::unordered_set< int > extr_tag
the element tags based on which the mesh has been extracted
Definition: SF_container.h:424
The abstract numbering class.
Definition: SF_numbering.h:48
Functor class applying a submesh renumbering.
Definition: SF_numbering.h:68
A vector storing arbitrary data.
Definition: SF_vector.h:43
size_t size() const
The current size of the vector.
Definition: SF_vector.h:104
void resize(size_t n)
Resize a vector.
Definition: SF_vector.h:209
const T * end() const
Pointer to the vector's end.
Definition: SF_vector.h:128
void assign(InputIterator s, InputIterator e)
Assign a memory range.
Definition: SF_vector.h:161
const T * begin() const
Pointer to the vector's start.
Definition: SF_vector.h:116
T * data()
Pointer to the vector's start.
Definition: SF_vector.h:91
iterator find(const K &key)
Search for key. Return iterator.
Definition: hashmap.hpp:593
hm_int count(const K &key) const
Check if key exists.
Definition: hashmap.hpp:579
size_t size() const
Definition: hashmap.hpp:687
hm_int erase(const K &key)
Erase by key.
Definition: hashmap.hpp:565
Custom unordered_set implementation.
Definition: hashmap.hpp:706
size_t size() const
Definition: hashmap.hpp:1066
hm_int erase(const K &key)
Definition: hashmap.hpp:978
hm_int count(const K &key) const
Definition: hashmap.hpp:992
void insert(InputIterator first, InputIterator last)
Definition: hashmap.hpp:962
Ascii matrix graph plotter.
void print(const VEC &cnt, const VEC &col, char s)
Print a matrix graph to stdout.
Definition: dense_mat.hpp:34
void extract_tagbased(const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh based on element tags.
void cnt_from_dsp(const vector< T > &dsp, vector< T > &cnt)
Compute counts from displacements.
Definition: SF_vector.h:319
void dsp_from_cnt(const vector< T > &cnt, vector< T > &dsp)
Compute displacements from counts.
Definition: SF_vector.h:310
void compute_surface_mesh(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, meshdata< T, S > &surfmesh)
Compute the surface of a given mesh.
void print_DD_info(const meshdata< T, S > &mesh)
Print some basic information on the domain decomposition of a mesh.
void get_hashmap_duplicates(const vector< V > &data, const MPI_Comm comm, vector< bool > &is_dup)
void read_headers(FILE *ele_fd, FILE *fib_fd, bool read_binary, size_t &numelem, int &nFib)
Read the header from the element and fiber files.
Definition: SF_mesh_io.h:90
void write_pts_block(FILE *&fd, bool write_binary, const vector< S > &xyz)
Write a chunk of points to a file.
Definition: SF_mesh_io.h:797
void read_surface_mesh(const meshdata< T, S > &mesh, meshdata< T, S > &surface, std::string filename)
void interval(vector< T > &vec, size_t start, size_t end)
Create an integer interval between start and end.
Definition: SF_vector.h:350
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
Definition: SF_network.h:225
void unique_accumulate(vector< T > &_P, vector< S > &_A)
Definition: SF_sort.h:409
void convert_surface_mesh(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, meshdata< T, S > &surfmesh, vector< T > &elem_orig)
void rebalance_mesh(meshdata< T, S > &mesh)
Rebalance the parallel distribution of a mesh, if a local size is 0.
void extract_mesh(const vector< bool > &keep, const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh from a given mesh.
void search_for_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_map< triple< T >, tri_sele< T > > &search_tri, const hashmap::unordered_map< quadruple< T >, quad_sele< T > > &search_quad, hashmap::unordered_map< triple< T >, tri_sele< T > > &found_tri, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &found_quad)
void permute_mesh(const meshdata< T, S > &inmesh, meshdata< T, S > &outmesh, const vector< T > &perm)
Permute the element data of a mesh based on a given permutation.
Definition: SF_mesh_utils.h:56
double inner_prod(const Point &a, const Point &b)
Definition: SF_container.h:90
void sort_parallel(MPI_Comm comm, const vector< T > &idx, vector< T > &out_idx)
Sort index values parallel ascending across the ranks.
void write_elements(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write the element data (elements and fibers) of a CARP mesh.
Definition: SF_mesh_io.h:646
void binary_sort_copy(vector< T > &_V, vector< S > &_W)
Definition: SF_sort.h:296
T sum(const vector< T > &vec)
Compute sum of a vector's entries.
Definition: SF_vector.h:340
void insert_surf_pyr(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void unique_resize(vector< T > &_P)
Definition: SF_sort.h:348
void count(const vector< T > &data, vector< S > &cnt)
Count number of occurrences of indices.
Definition: SF_vector.h:332
void write_pts_header(FILE *&pts_fd, bool binary, size_t numpts)
Write the header of the points file.
Definition: SF_mesh_io.h:182
T global_min(const vector< T > &vec, MPI_Comm comm)
Compute the global minimum of a distributed vector.
Definition: SF_network.h:126
void read_elem_block(FILE *&fd, bool read_binary, size_t bstart, size_t bsize, meshdata< T, S > &mesh)
Read a block of size bsize from an CARP element file.
Definition: SF_mesh_io.h:209
void insert_surf_hex(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_elements(meshdata< T, S > &mesh, meshdata< T, S > &sendbuff, vector< T > &part)
Redistribute the element data of a parallel mesh among the ranks based on a partitioning.
Point normalize(const Point &vect)
Definition: SF_container.h:116
void gather_mesh(const meshdata< T, S > &locmesh, meshdata< T, S > &globmesh)
Gather a mesh on rank 0.
void restrict_to_set(vector< T > &v, const hashmap::unordered_set< T > &set)
void compute_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, hashmap::unordered_map< triple< T >, tri_sele< T > > &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf)
void global_to_local(const vector< T > &glob, vector< T > &data, bool sortedData, bool doWarn)
Definition: SF_sort.h:540
void insert_surf_pri(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void vec_assign(S *lhs, const V *rhs, size_t size)
Assign the values in rhs to lhs. The data-type of rhs is cast to the type of lhs.
Definition: SF_vector.h:371
void MPI_Exchange(commgraph< T > &grph, vector< S > &send, vector< S > &recv, MPI_Comm comm)
Exchange data in parallel over MPI.
Definition: SF_network.h:47
void insert_surf_tri(T n1, T n2, T n3, size_t eidx, triple< T > &surf, tri_sele< T > &sele, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)
void remove_parallel_duplicates(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, MPI_Comm comm)
void convert_mesh_surface(const meshdata< T, S > &surfmesh, hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T >> &quad_surf)
void binary_sort(vector< T > &_V)
Definition: SF_sort.h:284
T global_max(const vector< T > &vec, MPI_Comm comm)
Compute the global maximum of a distributed vector.
Definition: SF_network.h:156
void extract_myocardium(const meshdata< T, S > &mesh, meshdata< T, S > &submesh, bool require_fibers=true)
Extract the myocardium submesh.
Point cross(const Point &a, const Point &b)
cross product
Definition: SF_container.h:84
void write_mesh_parallel(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write a parallel mesh to harddisk without gathering it on one rank.
elem_t
element type enum
Definition: SF_container.h:53
@ Tri
Definition: SF_container.h:60
@ Prism
Definition: SF_container.h:58
@ Pyramid
Definition: SF_container.h:57
@ Tetra
Definition: SF_container.h:54
@ Quad
Definition: SF_container.h:59
@ Hexa
Definition: SF_container.h:55
void insert_surf_quad(T n1, T n2, T n3, T n4, size_t eidx, vector< T > &buff, quadruple< T > &surf, quad_sele< T > &sele, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_mesh(meshdata< T, S > &mesh, vector< T > &part)
Redistribute both element and vertex data of a mesh.
void print_mesh_graph(meshdata< T, S > &mesh)
One-by-one each process prints the graph of a given mesh.
void inter_domain_mapping(const meshdata< T, S > &mesh_a, const meshdata< T, S > &mesh_b, const SF_nbr snbr, index_mapping< T > &a_to_b)
Submesh index mapping between different domains/meshes.
void nodal_connectivity_graph(const meshdata< T, S > &mesh, vector< T > &n2n_cnt, vector< T > &n2n_con)
Compute the node-to-node connectivity.
Definition: SF_container.h:583
SF_nbr
Enumeration encoding the different supported numberings.
Definition: SF_container.h:200
@ NBR_PETSC
PETSc numbering of nodes.
Definition: SF_container.h:203
@ NBR_ELEM_REF
The element numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:204
@ NBR_REF
The nodal numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:201
@ NBR_SUBMESH
Submesh nodal numbering: The globally ascending sorted reference indices are reindexed.
Definition: SF_container.h:202
void remove_duplicates(hashmap::unordered_map< K, V > &map, const MPI_Comm comm)
remove parallel duplicates from a hashmap::unordered_map
void insert_surf_tet(const T *nod, const size_t eidx, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)
Point and vector struct.
Definition: SF_container.h:65
Base hashing class.
Definition: hashmap.hpp:88