#ifndef _SF_MESH_UTILS_H
#define _SF_MESH_UTILS_H
template<class T, class S>

size_t numelem = outmesh.l_numelem, numcon = inmesh.con.size();

outmesh.dsp.resize(numelem+1);
outmesh.tag.resize(numelem);
outmesh.con.resize(numcon);

T* elem = outmesh.con.data();

for(size_t i=0; i<numelem; i++) {
  outmesh.tag[i]  = inmesh.tag[perm[i]];
  ref_eidx_out[i] = ref_eidx_in[perm[i]];
  outmesh.type[i] = inmesh.type[perm[i]];

  outmesh.fib[i*3+0] = inmesh.fib[perm[i]*3+0];
  outmesh.fib[i*3+1] = inmesh.fib[perm[i]*3+1];
  outmesh.fib[i*3+2] = inmesh.fib[perm[i]*3+2];

  outmesh.she[i*3+0] = inmesh.she[perm[i]*3+0];
  outmesh.she[i*3+1] = inmesh.she[perm[i]*3+1];
  outmesh.she[i*3+2] = inmesh.she[perm[i]*3+2];

  int esize = inmesh.dsp[perm[i]+1] - inmesh.dsp[perm[i]];
  T estart = inmesh.dsp[perm[i]];
  for(int j=0; j<esize; j++) elem[j] = inmesh.con[estart+j];
template<class T, class S>

MPI_Comm comm = mesh.comm;
MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

con_grph.sdsp[0] = 0;
for(int i=0; i<size; i++) con_grph.sdsp[i+1] = sendbuff.dsp[elem_grph.sdsp[i+1]];

MPI_Alltoall(con_grph.scnt.data(), sizeof(size_t), MPI_BYTE, con_grph.rcnt.data(), sizeof(size_t), MPI_BYTE, comm);
MPI_Allreduce(MPI_IN_PLACE, &twoFib, 1, MPI_INT, MPI_MAX, comm);

size_t recv_size = sum(elem_grph.rcnt);

mesh.dsp.resize(recv_size+1);
mesh.tag.resize(recv_size);
ref_eidx.resize(recv_size);

MPI_Exchange(elem_grph, ref_eidx_sbuff, ref_eidx, comm);

for(size_t i=0; i<sendbuff.con.size(); i++) sendbuff.con[i] = rnod[sendbuff.con[i]];

recv_size = sum(con_grph.rcnt);
mesh.con.resize(recv_size);
template<class T, class S>

template<class T, class S>

MPI_Comm comm = mesh.comm;
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);

con_layout.scnt.zero();
for(int pid=0; pid<size; pid++)
  for(T i=elem_layout.sdsp[pid]; i<elem_layout.sdsp[pid+1]; i++)
    con_layout.scnt[pid] += sendmesh.dsp[i+1] - sendmesh.dsp[i];

nod_layout.sdsp[0] = 0;
for(int pid=0; pid<size; pid++)
  T con_start = con_layout.sdsp[pid], con_end = con_layout.sdsp[pid+1];
  nod_layout.sdsp[pid+1] = nod_layout.sdsp[pid] + nod_layout.scnt[pid];

MPI_Alltoall(nod_layout.scnt.data(), sizeof(T), MPI_BYTE, nod_layout.rcnt.data(), sizeof(T), MPI_BYTE, comm);

for(size_t i=0; i<nod_lidx.size(); i++)
  T lidx = nod_lidx[i];
  xyz_sbuff[i*3+0] = sendmesh.xyz[lidx*3+0];
  xyz_sbuff[i*3+1] = sendmesh.xyz[lidx*3+1];
  xyz_sbuff[i*3+2] = sendmesh.xyz[lidx*3+2];

size_t rsize = sum(nod_layout.rcnt);

acc_col(nod_rbuff.size());
acc_dsp.resize(acc_cnt.size()+1);

for(size_t i=0; i<acc_cnt.size(); i++)
  T pidx = acc_col[acc_dsp[i]];
  mesh.xyz[i*3+0] = xyz_rbuff[pidx*3+0];
  mesh.xyz[i*3+1] = xyz_rbuff[pidx*3+1];
  mesh.xyz[i*3+2] = xyz_rbuff[pidx*3+2];
template<class T, class S>

const MPI_Comm comm = mesh.comm;
MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

T elem_bsize = (elem_gmax - elem_gmin) / size + 1;

for(size_t i=0; i<dest.size(); i++)
  dest[i] = (ref_eidx[i] - elem_gmin) / elem_bsize;

for(size_t i=0; i<wmesh.con.size(); i++) wmesh.con[i] = nbr[wmesh.con[i]];

for(size_t i=0; i<wmesh.con.size(); i++) wmesh.con[i] = nbr[wmesh.con[i]];

const vector<T> & alg_nod = mesh.pl.algebraic_nodes();

for(size_t i=0; i<alg_nod.size(); i++) {
  xyz[i*3+0] = mesh.xyz[loc*3+0];
  xyz[i*3+1] = mesh.xyz[loc*3+1];
  xyz[i*3+2] = mesh.xyz[loc*3+2];
  xyz_idx[i] = nbr_orig[loc];

sort_parallel(comm, xyz_idx, xyz_cnt, xyz, srt_idx, srt_cnt, srt_xyz);

std::string pts_file = binary ? basename + ".bpts" : basename + ".pts";

pts_fd = fopen(pts_file.c_str(), "w");
fprintf(stderr, "Error: could not open file: %s. Aborting!", pts_file.c_str());

for(int pid=0; pid < size; pid++) {
  pts_fd = fopen(pts_file.c_str(), "a");
  fprintf(stderr, "Error: could not open file: %s. Aborting!", pts_file.c_str());
template<class T, class S>

MPI_Comm comm = locmesh.comm;
MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

template<class T, class S>

MPI_Comm comm = mesh.comm;
MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

vector<size_t> npoint(size), nelem(size), ninterf(size), nidx(size);

size_t intf_size = mesh.pl.interface().size(),
       idx_size  = mesh.pl.num_algebraic_idx();

MPI_Gather(&mesh.l_numpts, sizeof(size_t), MPI_BYTE, npoint.data(), sizeof(size_t), MPI_BYTE, 0, comm);
MPI_Gather(&mesh.l_numelem, sizeof(size_t), MPI_BYTE, nelem.data(), sizeof(size_t), MPI_BYTE, 0, comm);
MPI_Gather(&intf_size, sizeof(size_t), MPI_BYTE, ninterf.data(), sizeof(size_t), MPI_BYTE, 0, comm);
MPI_Gather(&idx_size, sizeof(size_t), MPI_BYTE, nidx.data(), sizeof(size_t), MPI_BYTE, 0, comm);

mesh.pl.reduce(mult, "sum");

vector<int> mult_hist(hist_size, 0), global_mult_hist(hist_size, 0);
for(auto m : mult) mult_hist[m]++;

MPI_Reduce(mult_hist.data(), global_mult_hist.data(), hist_size, MPI_INT, MPI_SUM, 0, comm);

printf("===== Parallel mesh statistics =====\n");
printf("#pid\t#nodes\t#elems\t#interf\t#alg\n");
for(int pid = 0; pid < size; pid++)
  printf("%d\t%ld\t%ld\t%ld\t%ld\n", pid, (long int)npoint[pid], (long int)nelem[pid],
         (long int)ninterf[pid], (long int)nidx[pid]);

std::cout << "Multiplicities :" << std::endl;
for(int i = 2; i < hist_size && global_mult_hist[i] > 0; i++)
  std::cout << i << ": " << global_mult_hist[i] << std::endl;
template<class T, class S>

size_t num_extr_elem = 0, num_extr_entr = 0;

num_extr_entr += mesh.dsp[i+1] - mesh.dsp[i];

submesh.dsp.resize(num_extr_elem+1);
submesh.tag.resize(num_extr_elem);
sub_ref_eidx.resize(num_extr_elem);
submesh.con.resize(num_extr_entr);

for(size_t ridx_ele=0, ridx_con=0, widx_ele=0, widx_con=0; ridx_ele<mesh.l_numelem; ridx_ele++)

  ridx_con = mesh.dsp[ridx_ele];

  cnt[widx_ele]          = mesh.dsp[ridx_ele + 1] - mesh.dsp[ridx_ele];
  submesh.tag [widx_ele] = mesh.tag [ridx_ele];
  sub_ref_eidx[widx_ele] = mesh_ref_eidx[ridx_ele];
  submesh.type[widx_ele] = mesh.type[ridx_ele];

  submesh.fib[widx_ele*3+0] = mesh.fib[ridx_ele*3+0];
  submesh.fib[widx_ele*3+1] = mesh.fib[ridx_ele*3+1];
  submesh.fib[widx_ele*3+2] = mesh.fib[ridx_ele*3+2];

  submesh.she[widx_ele*3+0] = mesh.she[ridx_ele*3+0];
  submesh.she[widx_ele*3+1] = mesh.she[ridx_ele*3+1];
  submesh.she[widx_ele*3+2] = mesh.she[ridx_ele*3+2];

  for(int j=0; j<cnt[widx_ele]; j++)
    submesh.con[widx_con++] = rnod[mesh.con[ridx_con++]];

unsigned long int gnumele = submesh.l_numelem;
MPI_Allreduce(MPI_IN_PLACE, &gnumele, 1, MPI_UNSIGNED_LONG, MPI_SUM, submesh.comm);
template<class T, class S>

MPI_Comm comm = mesh.comm;

MPI_Allreduce(MPI_IN_PLACE, &errflag, 1, MPI_SHORT, MPI_SUM, comm);

MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

MPI_Allgather(&mesh.l_numelem, sizeof(size_t), MPI_BYTE, elem_counts.data(),
              sizeof(size_t), MPI_BYTE, comm);

part[i] = (layout[rank] + i) % size;
template<class T, class S>

MPI_Comm comm = mesh.comm;

S l1 = mesh.fib[i*3+0];
S l2 = mesh.fib[i*3+1];
S l3 = mesh.fib[i*3+2];

if( l1*l1 + l2*l2 + l3*l3 )
template<class T, class S>

std::string oldname = submesh.name;

submesh.name = oldname;
template<class T, class S>

MPI_Comm_size(mesh.comm, &size); MPI_Comm_rank(mesh.comm, &rank);

for(int pid=0; pid<size; pid++)

  std::cout << "\n\n Rank " << rank << ": \n" << std::endl;
  plotter.print(n2n_cnt, n2n_con, '*');

  MPI_Barrier(mesh.comm);
template<class T, class S> inline

MPI_Comm comm = mesh_a.comm;
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);

vector<T> mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, mesh_a_snbr_sbuff, mesh_a_snbr_rbuff;
vector<T> mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff;

for(size_t i=0; i<dest.size(); i++) dest[i] = mesh_a_rnbr[i] % size;

mesh_a_rnbr_sbuff.resize(dest.size()); mesh_a_snbr_sbuff.resize(dest.size());
for(size_t i=0; i<dest.size(); i++) {
  mesh_a_rnbr_sbuff[i] = mesh_a_rnbr[perm_a[i]];

size_t rcv_size = sum(grph_a.rcnt);

mesh_a_rnbr_rbuff.resize(rcv_size);
mesh_a_snbr_rbuff.resize(rcv_size);

MPI_Exchange(grph_a, mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, comm);

dest.resize(mesh_b_rnbr.size());
for(size_t i=0; i<dest.size(); i++) dest[i] = mesh_b_rnbr[i] % size;

mesh_b_rnbr_sbuff.resize(dest.size()); mesh_b_snbr_sbuff.resize(dest.size());
for(size_t i=0; i<dest.size(); i++) {
  mesh_b_rnbr_sbuff[i] = mesh_b_rnbr[perm_b[i]];
  mesh_b_snbr_sbuff[i] = mesh_b_snbr[perm_b[i]];

mesh_b_rnbr_rbuff.resize(rcv_size);
mesh_b_snbr_rbuff.resize(rcv_size);

MPI_Exchange(grph_b, mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, comm);
MPI_Exchange(grph_b, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff, comm);

for(size_t i=0; i<mesh_b_rnbr_rbuff.size(); i++) {
  T ref_idx = mesh_b_rnbr_rbuff[i];
  T sub_idx = mesh_b_snbr_rbuff[i];

  if(ref_to_sub_b.count(ref_idx) && ref_to_sub_b[ref_idx] != sub_idx)
    fprintf(stderr, "inter_domain_mapping error: Mismatching multiple mappings: %d : %d \n",
            ref_to_sub_b[ref_idx], sub_idx);

  ref_to_sub_b[ref_idx] = sub_idx;

for(size_t i=0; i<mesh_a_rnbr_rbuff.size(); i++) {
  auto it = ref_to_sub_b.find(mesh_a_rnbr_rbuff[i]);
  if(it != ref_to_sub_b.end())
    mesh_a_snbr_rbuff[i] = it->second;
  else
    mesh_a_snbr_rbuff[i] = -1;

MPI_Exchange(grph_a, mesh_a_snbr_rbuff, mesh_a_snbr_sbuff, comm);

size_t num_mapped = 0;
for(size_t i=0; i<mesh_a_snbr_sbuff.size(); i++)
  if(mesh_a_snbr_sbuff[i] > -1) num_mapped++;

vector<T> snbr_a(num_mapped), snbr_b(num_mapped);

for(size_t i=0, idx=0; i<mesh_a_snbr_sbuff.size(); i++) {
  if(mesh_a_snbr_sbuff[i] > -1) {
    snbr_a[idx] = mesh_a_snbr[i];
    snbr_b[idx] = mesh_a_snbr_sbuff[i];

a_to_b.assign(snbr_a, snbr_b);
template<class T> inline

sele.v1 = n1, sele.v2 = n2, sele.v3 = n3; sele.eidx = eidx;

auto it = surfmap.find(surf);
if(it != surfmap.end()) surfmap.erase(it);
else surfmap[surf] = sele;
template<class T> inline

buff[0] = n1, buff[1] = n2, buff[2] = n3, buff[3] = n4;

sele.v1 = n1, sele.v2 = n2, sele.v3 = n3, sele.v4 = n4, sele.eidx = eidx;
surf.v1 = buff[0], surf.v2 = buff[1], surf.v3 = buff[2], surf.v4 = buff[3];

auto it = surfmap.find(surf);
if(it != surfmap.end()) surfmap.erase(it);
else surfmap[surf] = sele;
template<class T> inline

T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3];

template<class T> inline

T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4];

template<class T> inline

T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4], n6 = nod[5];

template<class T> inline

T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3],
  n5 = nod[4], n6 = nod[5], n7 = nod[6], n8 = nod[7];
template<class T, class S> inline

const T* con = mesh.con.data();

for(size_t i=0; i<nodvec.size(); i++)
  nod[i] = nbr[con[i]];

ref_eidx[eidx], qbuff, qd, qde, quad_surf);

for(auto it = tri_surf.begin(); it != tri_surf.end(); ++it) {
  auto ft = search_tri.find(it->first);
  if(ft != search_tri.end() && !found_tri.count(ft->first)) {

    T iv1 = mesh.pl.localize(it->second.v1);
    T iv2 = mesh.pl.localize(it->second.v2);
    T iv3 = mesh.pl.localize(it->second.v3);

    T fv1 = mesh.pl.localize(ft->second.v1);
    T fv2 = mesh.pl.localize(ft->second.v2);
    T fv3 = mesh.pl.localize(ft->second.v3);

    found_tri[ft->first] = found;

for(auto it = quad_surf.begin(); it != quad_surf.end(); ++it) {
  auto ft = search_quad.find(it->first);
  if(ft != search_quad.end() && !found_quad.count(ft->first)) {

    T iv1 = mesh.pl.localize(it->second.v1);
    T iv2 = mesh.pl.localize(it->second.v2);
    T iv3 = mesh.pl.localize(it->second.v3);

    T fv1 = mesh.pl.localize(ft->second.v1);
    T fv2 = mesh.pl.localize(ft->second.v2);
    T fv3 = mesh.pl.localize(ft->second.v3);

    found_quad[ft->first] = found;
template<class T, class S> inline

const T* con = mesh.con.data();

bool have_tags = tags.size() > 0;

for(size_t i=0; i<nodvec.size(); i++)
  nod[i] = nbr[con[i]];

eidx, qbuff, qd, qde, quad_surf);
template<class T, class S> inline

template<class V> inline

MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);

size_t dsize = data.size();
is_dup.assign(dsize, false);

for(size_t i=0; i<dsize; i++)

size_t nrecv = sum(grph.rcnt);

for(size_t i=0; i<dsize; i++)
  sbuff[i] = data[perm[i]];

for(const V & val : rbuff) {
  if(not_dup.count(val) == 0 && dup.count(val) == 0)

for(size_t i=0; i<nrecv; i++) {
  bool d = dup.count(val);
  assert(d == (not_dup.count(val) == 0));
template<class K, class V> inline

for(const auto & v : map) check_vec[idx++] = v.first;

for(size_t i=0; i<is_dup.size(); i++)
  map.erase(check_vec[i]);

template<class K> inline

for(size_t i=0; i<is_dup.size(); i++)
  set.erase(check_vec[i]);
template<class T> inline

long int g_num_tri  = tri_surf.size();
long int g_num_quad = quad_surf.size();
MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
template<class T, class S> inline

MPI_Comm comm = surfmesh.comm;

long int g_num_tri  = tri_surf.size();
long int g_num_quad = quad_surf.size();
MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);

surfmesh.g_numelem = g_num_tri + g_num_quad;
surfmesh.l_numelem = tri_surf.size() + quad_surf.size();

surfmesh.con.resize(tri_surf.size() * 3 + quad_surf.size() * 4);

size_t idx = 0, cidx = 0;
for(const auto & v : tri_surf) {
  surfmesh.con[cidx + 0] = v.second.v1;
  surfmesh.con[cidx + 1] = v.second.v2;
  surfmesh.con[cidx + 2] = v.second.v3;
  elem_orig[idx] = v.second.eidx;

for(const auto & v : quad_surf) {
  surfmesh.con[cidx + 0] = v.second.v1;
  surfmesh.con[cidx + 1] = v.second.v2;
  surfmesh.con[cidx + 2] = v.second.v3;
  surfmesh.con[cidx + 3] = v.second.v4;
  elem_orig[idx] = v.second.eidx;
template<class T, class S> inline

const T* nod = surfmesh.con.data() + surfmesh.dsp[eidx];

template<class T, class S> inline

T orig = elem_orig[eidx];
template<class T, class S> inline

template<class T, class S> inline

int rank; MPI_Comm_rank(mesh.comm, &rank);

bool twoFib   = false;
bool read_bin = false;

MPI_Bcast(&numele, sizeof(size_t), MPI_BYTE, 0, mesh.comm);

numcon = surface.con.size();

std::cerr << "Error: Incomplete surface file! Aborting!" << std::endl;

MPI_Bcast(surface.tag.data(), surface.tag.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
MPI_Bcast(surface.dsp.data(), surface.dsp.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);
MPI_Bcast(ref_eidx.data(), surface.l_numelem*sizeof(T), MPI_BYTE, 0, mesh.comm);

MPI_Bcast(&numcon, sizeof(size_t), MPI_BYTE, 0, mesh.comm);
surface.con.resize(numcon);
MPI_Bcast(surface.con.data(), surface.con.size()*sizeof(T), MPI_BYTE, 0, mesh.comm);

size_t initial_gnumelem = surface.g_numelem;

mesh.pl.localize(surface.con);

T orig = elem_orig[eidx];

long int numele_check = surface.l_numelem;
MPI_Allreduce(MPI_IN_PLACE, &numele_check, 1, MPI_LONG, MPI_SUM, mesh.comm);

fprintf(stderr, "ERROR: Bad partitioning of surface %s!"
                " Global elem sum should be %ld, but is %ld!\n\n",
for(size_t i=0; i<v.size(); i++)
void sort_triple(const T in1, const T in2, const T in3, T &out1, T &out2, T &out3)
Sort the "in" triple into the "out" triple.
Functions related to mesh IO.
Functions related to network communication.
Classes related to mesh node renumbering.
Classes and algorithms related to the layout of distributed meshes.
Various sorting algorithms.
The vector class and related algorithms.
Class used to plot functions on the terminal.
The class holds the communication graph for an MPI_Exchange() call.
vector< T > rcnt
Number of elements received from each rank.
vector< T > scnt
Number of elements sent to each rank.
void resize(size_t size)
Resize all vectors to size.
void configure(const vector< V > &dest, MPI_Comm comm)
Set up the communication graph.
vector< T > sdsp
Displacements w.r.t. scnt.
vector< T > rdsp
Displacements w.r.t. rcnt.
void transpose()
transpose comm graph (receive becomes send, and vice versa)
void scale(V fac)
scale comm graph layout data
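Taken together, these members suggest the usual exchange pattern. A minimal sketch, assuming only the entities documented here (commgraph, MPI_Exchange(), sum()) and send data already ordered by destination rank:

// Sketch: scatter per-entry data to the ranks named in dest.
template<class T>
void exchange_by_destination(const vector<int> &dest, vector<T> &send,
                             vector<T> &recv, MPI_Comm comm)
{
  commgraph<size_t> grph;
  grph.configure(dest, comm);    // derive scnt/sdsp from dest, exchange rcnt/rdsp
  recv.resize(sum(grph.rcnt));   // size the receive buffer from the graph
  MPI_Exchange(grph, send, recv, comm);
}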
Index mapping class. This is a bijective mapping.
void assign(const vector< T > &a, const vector< T > &b)
Set up the index mapping between a and b.
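A usage sketch under the assumption that assign() establishes the bijection entrywise (a[i] <-> b[i]); lookup methods are omitted since they are not listed here:

vector<int> a(3), b(3);
a[0] = 10; a[1] = 11; a[2] = 12;   // one index domain (values illustrative)
b[0] = 0;  b[1] = 1;  b[2] = 2;    // its counterpart
index_mapping<int> a_to_b;
a_to_b.assign(a, b);               // registers the bijection a[i] <-> b[i]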
The mesh storage class. It contains both element and vertex data.
overlapping_layout< T > pl
nodal parallel layout
size_t g_numpts
global number of points
vector< T > dsp
connectivity starting index of each element
vector< S > she
sheet direction
vector< S > fib
fiber direction
size_t l_numelem
local number of elements
vector< elem_t > type
element type
std::map< SF_nbr, vector< T > > nbr
container for different numberings
std::string name
the mesh name
vector< T > & register_numbering(SF_nbr nbr_type)
Register a new numbering to the mesh and return the associated index vector.
vector< S > xyz
node coordinates
size_t l_numpts
local number of points
size_t g_numelem
global number of elements
void localize(SF_nbr nbr_type)
Localize the connectivity data w.r.t. a given numbering.
MPI_Comm comm
the parallel mesh is defined on an MPI world
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
vector< T > tag
element tag
hashmap::unordered_set< int > extr_tag
the element tags based on which the mesh has been extracted
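The dsp/con pair is a standard compressed element list: element i owns the connectivity entries con[dsp[i]] .. con[dsp[i+1]-1]. A minimal traversal sketch over a populated meshdata<int,double> named mesh (variable names are illustrative):

for(size_t i = 0; i < mesh.l_numelem; i++) {
  for(int j = mesh.dsp[i]; j < mesh.dsp[i+1]; j++) {
    int node = mesh.con[j];          // local node index of element i
    double x = mesh.xyz[node*3+0],   // coordinates are stored interleaved,
           y = mesh.xyz[node*3+1],   // three entries per node
           z = mesh.xyz[node*3+2];
    // ... use x, y, z ...
  }
}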
The abstract numbering class.
Functor class applying a submesh renumbering.
A vector storing arbitrary data.
size_t size() const
The current size of the vector.
void resize(size_t n)
Resize a vector.
const T * end() const
Pointer to the vector's end.
void assign(InputIterator s, InputIterator e)
Assign a memory range.
const T * begin() const
Pointer to the vector's start.
T * data()
Pointer to the vector's start.
iterator find(const K &key)
Search for key. Return iterator.
hm_int count(const K &key) const
Check if key exists.
hm_int erase(const K &key)
Erase by key.
Custom unordered_set implementation.
hm_int erase(const K &key)
hm_int count(const K &key) const
void insert(InputIterator first, InputIterator last)
Ascii matrix graph plotter.
void print(const VEC &cnt, const VEC &col, char s)
Print a matrix graph to stdout.
void extract_tagbased(const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh based on element tags.
void cnt_from_dsp(const vector< T > &dsp, vector< T > &cnt)
Compute counts from displacements.
void dsp_from_cnt(const vector< T > &cnt, vector< T > &dsp)
Compute displacements from counts.
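The two helpers are inverses of each other via a prefix sum; a short sketch, assuming integer vectors:

vector<int> cnt(3), dsp;
cnt[0] = 3; cnt[1] = 4; cnt[2] = 4;
dsp_from_cnt(cnt, dsp);  // dsp = {0, 3, 7, 11}: dsp[0] = 0, dsp[i+1] = dsp[i] + cnt[i]
cnt_from_dsp(dsp, cnt);  // cnt[i] = dsp[i+1] - dsp[i] recovers {3, 4, 4}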
void compute_surface_mesh(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, meshdata< T, S > &surfmesh)
Compute the surface of a given mesh.
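A hedged usage sketch; the tag values and the choice of numbering are assumptions for illustration:

hashmap::unordered_set<int> tags;
int wanted[] = {1, 2};                 // hypothetical tag values of interest
tags.insert(wanted, wanted + 2);
meshdata<int, double> surfmesh;
compute_surface_mesh(mesh, NBR_SUBMESH, tags, surfmesh);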
void print_DD_info(const meshdata< T, S > &mesh)
Print some basic information on the domain decomposition of a mesh.
void get_hashmap_duplicates(const vector< V > &data, const MPI_Comm comm, vector< bool > &is_dup)
void write_pts_block(FILE *&fd, bool write_binary, const vector< S > &xyz)
Write a chunk of points to a file.
void read_surface_mesh(const meshdata< T, S > &mesh, meshdata< T, S > &surface, std::string filename)
void interval(vector< T > &vec, size_t start, size_t end)
Create an integer interval between start and end.
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
void unique_accumulate(vector< T > &_P, vector< S > &_A)
void convert_surface_mesh(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, meshdata< T, S > &surfmesh, vector< T > &elem_orig)
void rebalance_mesh(meshdata< T, S > &mesh)
Rebalance the parallel distribution of a mesh, if a local size is 0.
void read_headers(FILE *ele_fd, FILE *fib_fd, bool read_binary, size_t &numelem, bool &twoFib)
Read the header from the element and fiber files.
void extract_mesh(const vector< bool > &keep, const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh from a given mesh.
void search_for_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_map< triple< T >, tri_sele< T > > &search_tri, const hashmap::unordered_map< quadruple< T >, quad_sele< T > > &search_quad, hashmap::unordered_map< triple< T >, tri_sele< T > > &found_tri, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &found_quad)
void permute_mesh(const meshdata< T, S > &inmesh, meshdata< T, S > &outmesh, const vector< T > &perm)
Permute the element data of a mesh based on a given permutation.
double inner_prod(const Point &a, const Point &b)
void sort_parallel(MPI_Comm comm, const vector< T > &idx, vector< T > &out_idx)
Sort index values parallel ascending across the ranks.
void write_elements(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write the element data (elements and fibers) of a CARP mesh.
void binary_sort_copy(vector< T > &_V, vector< S > &_W)
T sum(const vector< T > &vec)
Compute sum of a vector's entries.
void insert_surf_pyr(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void unique_resize(vector< T > &_P)
void count(const vector< T > &data, vector< S > &cnt)
Count number of occurrences of indices.
void write_pts_header(FILE *&pts_fd, bool binary, size_t numpts)
Write the header of the points file.
T global_min(const vector< T > &vec, MPI_Comm comm)
Compute the global minimum of a distributed vector.
void read_elem_block(FILE *&fd, bool read_binary, size_t bstart, size_t bsize, meshdata< T, S > &mesh)
Read a block of size bsize from a CARP element file.
void insert_surf_hex(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_elements(meshdata< T, S > &mesh, meshdata< T, S > &sendbuff, vector< T > &part)
Redistribute the element data of a parallel mesh among the ranks based on a partitioning.
Point normalize(const Point &vect)
void gather_mesh(const meshdata< T, S > &locmesh, meshdata< T, S > &globmesh)
Gather a mesh on rank 0.
void restrict_to_set(vector< T > &v, const hashmap::unordered_set< T > &set)
void compute_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, hashmap::unordered_map< triple< T >, tri_sele< T > > &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf)
void global_to_local(const vector< T > &glob, vector< T > &data, bool sortedData, bool doWarn)
void insert_surf_pri(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void vec_assign(S *lhs, const V *rhs, size_t size)
Assign the values in rhs to lhs. The data-type of rhs is cast to the type of lhs.
void MPI_Exchange(commgraph< T > &grph, vector< S > &send, vector< S > &recv, MPI_Comm comm)
Exchange data in parallel over MPI.
void insert_surf_tri(T n1, T n2, T n3, size_t eidx, triple< T > &surf, tri_sele< T > &sele, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)
void remove_parallel_duplicates(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, MPI_Comm comm)
void convert_mesh_surface(const meshdata< T, S > &surfmesh, hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T >> &quad_surf)
void binary_sort(vector< T > &_V)
T global_max(const vector< T > &vec, MPI_Comm comm)
Compute the global maximum of a distributed vector.
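Both global reductions presumably reduce a local extremum over the communicator; a sketch for global_max() with long data (the MPI datatype is chosen for illustration, and vec is assumed non-empty):

long local_max = *std::max_element(vec.begin(), vec.end());  // requires <algorithm>
MPI_Allreduce(MPI_IN_PLACE, &local_max, 1, MPI_LONG, MPI_MAX, comm);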
Point cross(const Point &a, const Point &b)
cross product
void write_mesh_parallel(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write a parallel mesh to harddisk without gathering it on one rank.
void insert_surf_quad(T n1, T n2, T n3, T n4, size_t eidx, vector< T > &buff, quadruple< T > &surf, quad_sele< T > &sele, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_mesh(meshdata< T, S > &mesh, vector< T > &part)
Redistribute both element and vertex data of a mesh.
void print_mesh_graph(meshdata< T, S > &mesh)
One-by-one each process prints the graph of a given mesh.
void inter_domain_mapping(const meshdata< T, S > &mesh_a, const meshdata< T, S > &mesh_b, const SF_nbr snbr, index_mapping< T > &a_to_b)
Submesh index mapping between different domains/meshes.
void nodal_connectivity_graph(const meshdata< T, S > &mesh, vector< T > &n2n_cnt, vector< T > &n2n_con)
Compute the node-to-node connectivity.
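Combined with the ASCII plotter above, this mirrors the print_mesh_graph() loop from the listing; a sketch, assuming plotter is an instance of the ASCII matrix graph plotter class:

vector<int> n2n_cnt, n2n_con;
nodal_connectivity_graph(mesh, n2n_cnt, n2n_con);
plotter.print(n2n_cnt, n2n_con, '*');  // plot the node-to-node pattern to stdout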
void extract_myocardium(const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract the myocardium submesh.
SF_nbr
Enumeration encoding the different supported numberings.
@ NBR_PETSC
PETSc numbering of nodes.
@ NBR_ELEM_REF
The element numbering of the reference mesh (the one stored on HD).
@ NBR_REF
The nodal numbering of the reference mesh (the one stored on HD).
@ NBR_SUBMESH
Submesh nodal numbering: the reference indices, sorted globally ascending, are reindexed.
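A short sketch of how a numbering is typically applied, mirroring the wmesh.con remapping loop in the listing above (NBR_REF is chosen for illustration):

vector<int> &nbr = mesh.get_numbering(NBR_REF);  // local -> reference indices
for(size_t i = 0; i < wmesh.con.size(); i++)
  wmesh.con[i] = nbr[wmesh.con[i]];              // remap the connectivity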
void remove_duplicates(hashmap::unordered_map< K, V > &map, const MPI_Comm comm)
remove parallel duplicates from a hashmap::unordered_map
void insert_surf_tet(const T *nod, const size_t eidx, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)