27 #ifndef _SF_MESH_UTILS_H
28 #define _SF_MESH_UTILS_H
55 template<
class T,
class S>
71 size_t numelem = outmesh.
l_numelem, numcon = inmesh.
con.size();
79 outmesh.
dsp .resize(numelem+1);
80 outmesh.
tag .resize(numelem);
87 outmesh.
con.resize(numcon);
90 T* elem = outmesh.
con.data();
92 for(
size_t i=0; i<numelem; i++) {
93 outmesh.
tag[i] = inmesh.
tag[perm[i]];
94 ref_eidx_out[i] = ref_eidx_in[perm[i]];
95 outmesh.
type[i] = inmesh.
type[perm[i]];
98 outmesh.
fib[i*3+0] = inmesh.
fib[perm[i]*3+0];
99 outmesh.
fib[i*3+1] = inmesh.
fib[perm[i]*3+1];
100 outmesh.
fib[i*3+2] = inmesh.
fib[perm[i]*3+2];
104 outmesh.
she[i*3+0] = inmesh.
she[perm[i]*3+0];
105 outmesh.
she[i*3+1] = inmesh.
she[perm[i]*3+1];
106 outmesh.
she[i*3+2] = inmesh.
she[perm[i]*3+2];
109 int esize = inmesh.
dsp[perm[i]+1] - inmesh.
dsp[perm[i]];
112 T estart = inmesh.
dsp[perm[i]];
113 for(
int j=0; j<esize; j++) elem[j] = inmesh.
con[estart+j];
132 template<
class T,
class S>
135 MPI_Comm comm = mesh.
comm;
138 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
155 con_grph.
sdsp[0] = 0;
156 for(
int i=0; i<size; i++) con_grph.
sdsp[i+1] = sendbuff.
dsp[elem_grph.
sdsp[i+1]];
158 MPI_Alltoall(con_grph.
scnt.data(),
sizeof(
size_t), MPI_BYTE, con_grph.
rcnt.data(),
sizeof(
size_t), MPI_BYTE, comm);
166 MPI_Allreduce(MPI_IN_PLACE, &nFib, 1, MPI_INT, MPI_MAX, comm);
172 size_t recv_size =
sum(elem_grph.
rcnt);
174 mesh.
dsp .resize(recv_size+1);
175 mesh.
tag .resize(recv_size);
176 ref_eidx.
resize(recv_size);
191 MPI_Exchange(elem_grph, ref_eidx_sbuff, ref_eidx, comm);
204 for(
size_t i=0; i<sendbuff.
con.size(); i++) sendbuff.
con[i] = rnod[sendbuff.
con[i]];
206 recv_size =
sum(con_grph.
rcnt);
207 mesh.
con.resize(recv_size);
219 template<
class T,
class S>
235 template<
class T,
class S>
246 MPI_Comm comm = mesh.
comm;
248 MPI_Comm_size(comm, &size);
249 MPI_Comm_rank(comm, &rank);
262 con_layout.
scnt.zero();
263 for(
int pid=0; pid<size; pid++)
264 for(T i=elem_layout.
sdsp[pid]; i<elem_layout.
sdsp[pid+1]; i++)
265 con_layout.
scnt[pid] += sendmesh.
dsp[i+1] - sendmesh.
dsp[i];
270 nod_layout.
sdsp[0] = 0;
272 for(
int pid=0; pid<size; pid++)
274 T con_start = con_layout.
sdsp[pid], con_end = con_layout.
sdsp[pid+1];
283 nod_layout.
sdsp[pid+1] = nod_layout.
sdsp[pid] + nod_layout.
scnt[pid];
289 MPI_Alltoall(nod_layout.
scnt.data(),
sizeof(T), MPI_BYTE, nod_layout.
rcnt.data(),
sizeof(T), MPI_BYTE, comm);
297 for(
size_t i=0; i<nod_lidx.
size(); i++)
299 T lidx = nod_lidx[i];
300 xyz_sbuff[i*3+0] = sendmesh.
xyz[lidx*3+0];
301 xyz_sbuff[i*3+1] = sendmesh.
xyz[lidx*3+1];
302 xyz_sbuff[i*3+2] = sendmesh.
xyz[lidx*3+2];
305 size_t rsize =
sum(nod_layout.
rcnt);
318 acc_col(nod_rbuff.
size());
323 acc_dsp.
resize(acc_cnt.size()+1);
328 for(
size_t i=0; i<acc_cnt.size(); i++)
330 T pidx = acc_col[acc_dsp[i]];
331 mesh.
xyz[i*3+0] = xyz_rbuff[pidx*3+0];
332 mesh.
xyz[i*3+1] = xyz_rbuff[pidx*3+1];
333 mesh.
xyz[i*3+2] = xyz_rbuff[pidx*3+2];
347 template<
class T,
class S>
350 const MPI_Comm comm = mesh.
comm;
353 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
363 T elem_bsize = (elem_gmax - elem_gmin) / size + 1;
367 for(
size_t i=0; i<dest.
size(); i++)
368 dest[i] = (ref_eidx[i] - elem_gmin) / elem_bsize;
401 for(
size_t i=0; i<wmesh.
con.size(); i++) wmesh.
con[i] = nbr[wmesh.
con[i]];
410 for(
size_t i=0; i<wmesh.
con.size(); i++) wmesh.
con[i] = nbr[wmesh.
con[i]];
417 const vector<T> & alg_nod = mesh.
pl.algebraic_nodes();
421 for(
size_t i=0; i<alg_nod.
size(); i++) {
424 xyz[i*3+0] = mesh.
xyz[loc*3+0];
425 xyz[i*3+1] = mesh.
xyz[loc*3+1];
426 xyz[i*3+2] = mesh.
xyz[loc*3+2];
428 xyz_idx[i] = nbr_orig[loc];
431 sort_parallel(comm, xyz_idx, xyz_cnt, xyz, srt_idx, srt_cnt, srt_xyz);
435 std::string pts_file = binary ? basename +
".bpts" : basename +
".pts";
438 pts_fd = fopen(pts_file.c_str(),
"w");
440 fprintf(stderr,
"Error: could not open file: %s. Aborting!", pts_file.c_str());
448 for(
int pid=0; pid < size; pid++) {
450 pts_fd = fopen(pts_file.c_str(),
"a");
452 fprintf(stderr,
"Error: could not open file: %s. Aborting!", pts_file.c_str());
469 template<
class T,
class S>
472 MPI_Comm comm = locmesh.
comm;
475 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
486 template<
class T,
class S>
489 MPI_Comm comm = mesh.
comm;
492 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
494 vector<size_t> npoint(size), nelem(size), ninterf(size), nidx(size);
496 size_t intf_size = mesh.
pl.interface().size(),
497 idx_size = mesh.
pl.num_algebraic_idx();
499 MPI_Gather(&mesh.
l_numpts,
sizeof(
size_t), MPI_BYTE, npoint.data(),
sizeof(
size_t), MPI_BYTE, 0, comm);
500 MPI_Gather(&mesh.
l_numelem,
sizeof(
size_t), MPI_BYTE, nelem.data(),
sizeof(
size_t), MPI_BYTE, 0, comm);
501 MPI_Gather(&intf_size,
sizeof(
size_t), MPI_BYTE, ninterf.data(),
sizeof(
size_t), MPI_BYTE, 0, comm);
502 MPI_Gather(&idx_size,
sizeof(
size_t), MPI_BYTE, nidx.
data(),
sizeof(
size_t), MPI_BYTE, 0, comm);
505 mesh.
pl.reduce(mult,
"sum");
508 vector<int> mult_hist(hist_size, 0), global_mult_hist(hist_size, 0);
509 for(
auto m : mult) mult_hist[m]++;
511 MPI_Reduce(mult_hist.data(), global_mult_hist.
data(), hist_size, MPI_INT, MPI_SUM, 0, comm);
514 printf(
"===== Parallel mesh statistics =====\n");
516 printf(
"#pid\t#nodes\t#elems\t#interf\t#alg\n");
517 for(
int pid = 0; pid < size; pid++)
518 printf(
"%d\t%ld\t%ld\t%ld\t%ld\n", pid, (
long int)npoint[pid], (
long int)nelem[pid],
519 (
long int)ninterf[pid], (
long int)nidx[pid]);
522 std::cout <<
"Multiplicities :" << std::endl;
523 for(
int i = 2; i < hist_size && global_mult_hist[i] > 0; i++)
524 std::cout << i <<
": " << global_mult_hist[i] << std::endl;
537 template<
class T,
class S>
544 size_t num_extr_elem = 0, num_extr_entr = 0;
561 num_extr_entr += mesh.
dsp[i+1] - mesh.
dsp[i];
568 submesh.
dsp.resize(num_extr_elem+1);
569 submesh.
tag.resize(num_extr_elem);
570 sub_ref_eidx.
resize(num_extr_elem);
577 submesh.
con.resize(num_extr_entr);
581 for(
size_t ridx_ele=0, ridx_con=0, widx_ele=0, widx_con=0; ridx_ele<mesh.
l_numelem; ridx_ele++)
583 ridx_con = mesh.
dsp[ridx_ele];
587 cnt[widx_ele] = mesh.
dsp[ridx_ele + 1] - mesh.
dsp[ridx_ele];
588 submesh.
tag [widx_ele] = mesh.
tag [ridx_ele];
589 sub_ref_eidx[widx_ele] = mesh_ref_eidx[ridx_ele];
590 submesh.
type[widx_ele] = mesh.
type [ridx_ele];
594 submesh.
fib[widx_ele*3+0] = mesh.
fib[ridx_ele*3+0];
595 submesh.
fib[widx_ele*3+1] = mesh.
fib[ridx_ele*3+1];
596 submesh.
fib[widx_ele*3+2] = mesh.
fib[ridx_ele*3+2];
599 submesh.
she[widx_ele*3+0] = mesh.
she[ridx_ele*3+0];
600 submesh.
she[widx_ele*3+1] = mesh.
she[ridx_ele*3+1];
601 submesh.
she[widx_ele*3+2] = mesh.
she[ridx_ele*3+2];
605 for(
int j=0; j<cnt[widx_ele]; j++)
606 submesh.
con[widx_con++] = rnod[mesh.
con[ridx_con++]];
613 unsigned long int gnumele = submesh.
l_numelem;
614 MPI_Allreduce(MPI_IN_PLACE, &gnumele, 1, MPI_UNSIGNED_LONG, MPI_SUM, submesh.
comm);
626 template<
class T,
class S>
629 MPI_Comm comm = mesh.
comm;
634 MPI_Allreduce(MPI_IN_PLACE, &errflag, 1, MPI_SHORT, MPI_SUM, comm);
639 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
643 MPI_Allgather(&mesh.
l_numelem,
sizeof(
size_t), MPI_BYTE, elem_counts.data(),
644 sizeof(
size_t), MPI_BYTE, comm);
649 part[i] = (layout[rank] + i) % size;
664 template<
class T,
class S>
667 MPI_Comm comm = mesh.
comm;
674 l1 = mesh.
fib[i*3+0];
675 l2 = mesh.
fib[i*3+1];
676 l3 = mesh.
fib[i*3+2];
679 if( l1*l1 + l2*l2 + l3*l3 )
710 template<
class T,
class S>
728 std::string oldname = submesh.
name;
732 submesh.
name = oldname;
753 template<
class T,
class S>
757 MPI_Comm_size(mesh.
comm, &size); MPI_Comm_rank(mesh.
comm, &rank);
763 for(
int pid=0; pid<size; pid++)
766 std::cout <<
"\n\n Rank " << rank <<
": \n" << std::endl;
767 plotter.
print(n2n_cnt, n2n_con,
'*');
769 MPI_Barrier(mesh.
comm);
786 template<
class T,
class S>
inline
791 MPI_Comm comm = mesh_a.
comm;
794 MPI_Comm_size(comm, &size);
795 MPI_Comm_rank(comm, &rank);
803 vector<T> mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, mesh_a_snbr_sbuff, mesh_a_snbr_rbuff;
804 vector<T> mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff;
808 for(
size_t i=0; i<dest.size(); i++) dest[i] = mesh_a_rnbr[i] % size;
813 mesh_a_rnbr_sbuff.
resize(dest.size()); mesh_a_snbr_sbuff.
resize(dest.size());
814 for(
size_t i=0; i<dest.size(); i++) {
815 mesh_a_rnbr_sbuff[i] = mesh_a_rnbr[perm_a[i]];
821 size_t rcv_size =
sum(grph_a.
rcnt);
823 mesh_a_rnbr_rbuff.
resize(rcv_size);
824 mesh_a_snbr_rbuff.
resize(rcv_size);
826 MPI_Exchange(grph_a, mesh_a_rnbr_sbuff, mesh_a_rnbr_rbuff, comm);
830 dest.resize(mesh_b_rnbr.
size());
831 for(
size_t i=0; i<dest.size(); i++) dest[i] = mesh_b_rnbr[i] % size;
836 mesh_b_rnbr_sbuff.
resize(dest.size()); mesh_b_snbr_sbuff.
resize(dest.size());
837 for(
size_t i=0; i<dest.size(); i++) {
838 mesh_b_rnbr_sbuff[i] = mesh_b_rnbr[perm_b[i]];
839 mesh_b_snbr_sbuff[i] = mesh_b_snbr[perm_b[i]];
846 mesh_b_rnbr_rbuff.
resize(rcv_size);
847 mesh_b_snbr_rbuff.
resize(rcv_size);
849 MPI_Exchange(grph_b, mesh_b_rnbr_sbuff, mesh_b_rnbr_rbuff, comm);
850 MPI_Exchange(grph_b, mesh_b_snbr_sbuff, mesh_b_snbr_rbuff, comm);
856 for(
size_t i=0; i<mesh_b_rnbr_rbuff.
size(); i++) {
857 T ref_idx = mesh_b_rnbr_rbuff[i];
858 T sub_idx = mesh_b_snbr_rbuff[i];
860 if(ref_to_sub_b.
count(ref_idx) && ref_to_sub_b[ref_idx] != sub_idx)
861 fprintf(stderr,
"inter_domain_mapping error: Missmatching multiple mappings: %d : %d \n",
862 ref_to_sub_b[ref_idx], sub_idx);
863 ref_to_sub_b[ref_idx] = sub_idx;
868 for(
size_t i=0; i<mesh_a_rnbr_rbuff.
size(); i++) {
869 auto it = ref_to_sub_b.
find(mesh_a_rnbr_rbuff[i]);
870 if(it != ref_to_sub_b.
end())
871 mesh_a_snbr_rbuff[i] = it->second;
873 mesh_a_snbr_rbuff[i] = -1;
878 MPI_Exchange(grph_a, mesh_a_snbr_rbuff, mesh_a_snbr_sbuff, comm);
880 size_t num_mapped = 0;
881 for(
size_t i=0; i<mesh_a_snbr_sbuff.
size(); i++)
882 if(mesh_a_snbr_sbuff[i] > -1) num_mapped++;
885 vector<T> snbr_a(num_mapped), snbr_b(num_mapped);
888 for(
size_t i=0, idx=0; i<mesh_a_snbr_sbuff.
size(); i++) {
889 if(mesh_a_snbr_sbuff[i] > -1) {
890 snbr_a[idx] = mesh_a_snbr[i];
891 snbr_b[idx] = mesh_a_snbr_sbuff[i];
897 a_to_b.
assign(snbr_a, snbr_b);
900 template<
class T>
inline
906 sele.
v1 = n1, sele.
v2 = n2, sele.
v3 = n3; sele.
eidx = eidx;
909 auto it = surfmap.find(surf);
910 if(it != surfmap.end()) surfmap.erase(it);
911 else surfmap[surf] = sele;
914 template<
class T>
inline
921 buff[0] = n1, buff[1] = n2, buff[2] = n3, buff[3] = n4;
924 sele.
v1 = n1, sele.
v2 = n2, sele.
v3 = n3, sele.
v4 = n4, sele.
eidx = eidx;
925 surf.
v1 = buff[0], surf.
v2 = buff[1], surf.
v3 = buff[2], surf.
v4 = buff[3];
927 auto it = surfmap.find(surf);
928 if(it != surfmap.end()) surfmap.erase(it);
929 else surfmap[surf] = sele;
932 template<
class T>
inline
941 T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3];
949 template<
class T>
inline
958 T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4];
967 template<
class T>
inline
977 T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3], n5 = nod[4], n6 = nod[5];
987 template<
class T>
inline
995 T n1 = nod[0], n2 = nod[1], n3 = nod[2], n4 = nod[3],
996 n5 = nod[4], n6 = nod[5], n7 = nod[6], n8 = nod[7];
1006 template<
class T,
class S>
inline
1013 const T* con = mesh.
con.data();
1020 for(
size_t i=0; i<nodvec.
size(); i++)
1021 nod[i] = nbr[con[i]];
1042 ref_eidx[
eidx], qbuff, qd, qde, quad_surf);
1065 for(
auto it = tri_surf.
begin(); it != tri_surf.
end(); ++it) {
1066 auto ft = search_tri.find(it->first);
1067 if(ft != search_tri.end() && !found_tri.count(ft->first)) {
1069 T iv1 = mesh.
pl.localize(it->second.v1);
1070 T iv2 = mesh.
pl.localize(it->second.v2);
1071 T iv3 = mesh.
pl.localize(it->second.v3);
1079 T fv1 = mesh.
pl.localize(ft->second.v1);
1080 T fv2 = mesh.
pl.localize(ft->second.v2);
1081 T fv3 = mesh.
pl.localize(ft->second.v3);
1091 found_tri[ft->first] = found;
1096 for(
auto it = quad_surf.
begin(); it != quad_surf.
end(); ++it) {
1097 auto ft = search_quad.find(it->first);
1098 if(ft != search_quad.end() && !found_quad.count(ft->first)) {
1100 T iv1 = mesh.
pl.localize(it->second.v1);
1101 T iv2 = mesh.
pl.localize(it->second.v2);
1102 T iv3 = mesh.
pl.localize(it->second.v3);
1110 T fv1 = mesh.
pl.localize(ft->second.v1);
1111 T fv2 = mesh.
pl.localize(ft->second.v2);
1112 T fv3 = mesh.
pl.localize(ft->second.v3);
1122 found_quad[ft->first] = found;
1131 template<
class T,
class S>
inline
1137 const T* con = mesh.
con.data();
1140 bool have_tags = tags.
size() > 0;
1143 for(
size_t i=0; i<nodvec.
size(); i++)
1144 nod[i] = nbr[con[i]];
1162 eidx, qbuff, qd, qde, quad_surf);
1190 template<
class T,
class S>
inline
1199 template<
class V>
inline
1204 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
1206 size_t dsize = data.
size();
1207 is_dup.
assign(dsize,
false);
1212 for(
size_t i=0; i<dsize; i++)
1217 size_t nrecv =
sum(grph.
rcnt);
1224 for(
size_t i=0; i<dsize; i++)
1225 sbuff[i] = data[perm[i]];
1233 for(
const V & val : rbuff) {
1234 if(not_dup.
count(val) == 0 && dup.
count(val) == 0)
1242 for(
size_t i=0; i<nrecv; i++) {
1244 bool d = dup.
count(val);
1247 assert(d == (not_dup.
count(val) == 0));
1261 template<
class K,
class V>
inline
1268 for(
const auto & v : map) check_vec[idx++] = v.first;
1272 for(
size_t i=0; i<is_dup.
size(); i++)
1274 map.
erase(check_vec[i]);
1277 template<
class K>
inline
1286 for(
size_t i=0; i<is_dup.
size(); i++)
1288 set.
erase(check_vec[i]);
1291 template<
class T>
inline
1296 long int g_num_tri = tri_surf.size();
1297 long int g_num_quad = quad_surf.size();
1298 MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
1299 MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
1308 template<
class T,
class S>
inline
1314 MPI_Comm comm = surfmesh.
comm;
1316 long int g_num_tri = tri_surf.size();
1317 long int g_num_quad = quad_surf.size();
1318 MPI_Allreduce(MPI_IN_PLACE, &g_num_tri, 1, MPI_LONG, MPI_SUM, comm);
1319 MPI_Allreduce(MPI_IN_PLACE, &g_num_quad, 1, MPI_LONG, MPI_SUM, comm);
1321 surfmesh.
g_numelem = g_num_tri + g_num_quad;
1322 surfmesh.
l_numelem = tri_surf.size() + quad_surf.size();
1326 surfmesh.
con.resize(tri_surf.size() * 3 + quad_surf.size() * 4);
1329 size_t idx = 0, cidx = 0;
1330 for(
const auto & v : tri_surf) {
1333 surfmesh.
con[cidx + 0] = v.second.v1;
1334 surfmesh.
con[cidx + 1] = v.second.v2;
1335 surfmesh.
con[cidx + 2] = v.second.v3;
1337 elem_orig[idx] = v.second.eidx;
1343 for(
const auto & v : quad_surf) {
1346 surfmesh.
con[cidx + 0] = v.second.v1;
1347 surfmesh.
con[cidx + 1] = v.second.v2;
1348 surfmesh.
con[cidx + 2] = v.second.v3;
1349 surfmesh.
con[cidx + 3] = v.second.v4;
1351 elem_orig[idx] = v.second.eidx;
1364 template<
class T,
class S>
inline
1376 const T* nod = surfmesh.
con.data() + surfmesh.
dsp[
eidx];
1396 template<
class T,
class S>
inline
1418 T orig = elem_orig[
eidx];
1442 template<
class T,
class S>
inline
1450 template<
class T,
class S>
inline
1455 int rank; MPI_Comm_rank(mesh.
comm, &rank);
1461 bool read_bin =
false;
1469 MPI_Bcast(&numele,
sizeof(
size_t), MPI_BYTE, 0, mesh.
comm);
1477 numcon = surface.
con.size();
1480 std::cerr <<
"Error: Incomplete surface file! Aborting!" << std::endl;
1489 MPI_Bcast(surface.
tag.data(), surface.
tag.size()*
sizeof(T), MPI_BYTE, 0, mesh.
comm);
1492 MPI_Bcast(surface.
dsp.data(), surface.
dsp.size()*
sizeof(T), MPI_BYTE, 0, mesh.
comm);
1500 MPI_Bcast(ref_eidx.
data(), surface.
l_numelem*
sizeof(T), MPI_BYTE, 0, mesh.
comm);
1502 MPI_Bcast(&numcon,
sizeof(
size_t), MPI_BYTE, 0, mesh.
comm);
1503 surface.
con.resize(numcon);
1504 MPI_Bcast(surface.
con.data(), surface.
con.size()*
sizeof(T), MPI_BYTE, 0, mesh.
comm);
1508 size_t initial_gnumelem = surface.
g_numelem;
1518 mesh.
pl.localize(surface.
con);
1527 T orig = elem_orig[
eidx];
1547 long int numele_check = surface.
l_numelem;
1548 MPI_Allreduce(MPI_IN_PLACE, &numele_check, 1, MPI_LONG, MPI_SUM, mesh.
comm);
1551 fprintf(stderr,
"ERROR: Bad partitioning of surface %s!"
1552 " Global elem sum should be %ld, but is %ld!\n\n",
1574 for(
size_t i=0; i<v.
size(); i++)
void sort_triple(const T in1, const T in2, const T in3, T &out1, T &out2, T &out3)
sort the "in" triple into the "out" triple
Functions related to mesh IO.
Functions related to network communication.
Classes related to mesh node renumbering.
Classes and algorithms related to the layout of distributed meshes.
Various sorting algorithms.
The vector class and related algorithms.
Class used to plot functions on the terminal.
The class holds the communication graph for a MPI_Exchange() call.
vector< T > rcnt
Number of elements received from each rank.
vector< T > scnt
Number of elements sent to each rank.
void resize(size_t size)
Resize all vectors to size.
void configure(const vector< V > &dest, MPI_Comm comm)
Set up the communication graph.
vector< T > sdsp
Displacements w.r.t. scnt.
vector< T > rdsp
Displacements w.r.t. rcnt.
void transpose()
transpose comm graph (receive becomes send, and vice versa)
void scale(V fac)
scale comm graph layout data
Index mapping class. This is a bijective mapping.
void assign(const vector< T > &a, const vector< T > &b)
Set up the index mapping between a and b.
The mesh storage class. It contains both element and vertex data.
overlapping_layout< T > pl
nodal parallel layout
size_t g_numpts
global number of points
vector< T > dsp
connectivity starting index of each element
vector< S > she
sheet direction
vector< S > fib
fiber direction
size_t l_numelem
local number of elements
vector< elem_t > type
element type
std::map< SF_nbr, vector< T > > nbr
container for different numberings
std::string name
the mesh name
vector< T > & register_numbering(SF_nbr nbr_type)
Register a new numbering to the mesh and return the associated index vector.
vector< S > xyz
node coordinates
size_t l_numpts
local number of points
size_t g_numelem
global number of elements
void localize(SF_nbr nbr_type)
Localize the connectivity data w.r.t. a given numbering.
MPI_Comm comm
the parallel mesh is defined on an MPI world
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
vector< T > tag
element tag
hashmap::unordered_set< int > extr_tag
the element tags based on which the mesh has been extracted
The abstract numbering class.
Functor class applying a submesh renumbering.
A vector storing arbitrary data.
size_t size() const
The current size of the vector.
void resize(size_t n)
Resize a vector.
const T * end() const
Pointer to the vector's end.
void assign(InputIterator s, InputIterator e)
Assign a memory range.
const T * begin() const
Pointer to the vector's start.
T * data()
Pointer to the vector's start.
iterator find(const K &key)
Search for key. Return iterator.
hm_int count(const K &key) const
Check if key exists.
hm_int erase(const K &key)
Erase by key.
Custom unordered_set implementation.
hm_int erase(const K &key)
hm_int count(const K &key) const
void insert(InputIterator first, InputIterator last)
Ascii matrix graph plotter.
void print(const VEC &cnt, const VEC &col, char s)
Print a matrix graph to stdout.
void extract_tagbased(const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh based on element tags.
void cnt_from_dsp(const vector< T > &dsp, vector< T > &cnt)
Compute counts from displacements.
void dsp_from_cnt(const vector< T > &cnt, vector< T > &dsp)
Compute displacements from counts.
void compute_surface_mesh(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, meshdata< T, S > &surfmesh)
Compute the surface of a given mesh.
void print_DD_info(const meshdata< T, S > &mesh)
Print some basic information on the domain decomposition of a mesh.
void get_hashmap_duplicates(const vector< V > &data, const MPI_Comm comm, vector< bool > &is_dup)
void read_headers(FILE *ele_fd, FILE *fib_fd, bool read_binary, size_t &numelem, int &nFib)
Read the header from the element and fiber files.
void write_pts_block(FILE *&fd, bool write_binary, const vector< S > &xyz)
Write a chunk of points to a file.
void read_surface_mesh(const meshdata< T, S > &mesh, meshdata< T, S > &surface, std::string filename)
void interval(vector< T > &vec, size_t start, size_t end)
Create an integer interval between start and end.
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
void unique_accumulate(vector< T > &_P, vector< S > &_A)
void convert_surface_mesh(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, meshdata< T, S > &surfmesh, vector< T > &elem_orig)
void rebalance_mesh(meshdata< T, S > &mesh)
Rebalance the parallel distribution of a mesh, if a local size is 0.
void extract_mesh(const vector< bool > &keep, const meshdata< T, S > &mesh, meshdata< T, S > &submesh)
Extract a submesh from a given mesh.
void search_for_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_map< triple< T >, tri_sele< T > > &search_tri, const hashmap::unordered_map< quadruple< T >, quad_sele< T > > &search_quad, hashmap::unordered_map< triple< T >, tri_sele< T > > &found_tri, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &found_quad)
void permute_mesh(const meshdata< T, S > &inmesh, meshdata< T, S > &outmesh, const vector< T > &perm)
Permute the element data of a mesh based on a given permutation.
double inner_prod(const Point &a, const Point &b)
void sort_parallel(MPI_Comm comm, const vector< T > &idx, vector< T > &out_idx)
Sort index values parallel ascending across the ranks.
void write_elements(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write the element data (elements and fibers) of a CARP mesh.
void binary_sort_copy(vector< T > &_V, vector< S > &_W)
T sum(const vector< T > &vec)
Compute sum of a vector's entries.
void insert_surf_pyr(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void unique_resize(vector< T > &_P)
void count(const vector< T > &data, vector< S > &cnt)
Count number of occurrences of indices.
void write_pts_header(FILE *&pts_fd, bool binary, size_t numpts)
Write the header of the points file.
T global_min(const vector< T > &vec, MPI_Comm comm)
Compute the global minimum of a distributed vector.
void read_elem_block(FILE *&fd, bool read_binary, size_t bstart, size_t bsize, meshdata< T, S > &mesh)
Read a block of size bsize from an CARP element file.
void insert_surf_hex(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_elements(meshdata< T, S > &mesh, meshdata< T, S > &sendbuff, vector< T > &part)
Redistribute the element data of a parallel mesh among the ranks based on a partitioning.
Point normalize(const Point &vect)
void gather_mesh(const meshdata< T, S > &locmesh, meshdata< T, S > &globmesh)
Gather a mesh on rank 0.
void restrict_to_set(vector< T > &v, const hashmap::unordered_set< T > &set)
void compute_surface(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, hashmap::unordered_map< triple< T >, tri_sele< T > > &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf)
void global_to_local(const vector< T > &glob, vector< T > &data, bool sortedData, bool doWarn)
void insert_surf_pri(const T *nod, const size_t eidx, vector< T > &buff, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &qsurfmap)
void vec_assign(S *lhs, const V *rhs, size_t size)
Assign the values in rhs to lhs. The data-type of rhs is cast to the type of lhs.
void MPI_Exchange(commgraph< T > &grph, vector< S > &send, vector< S > &recv, MPI_Comm comm)
Exchange data in parallel over MPI.
void insert_surf_tri(T n1, T n2, T n3, size_t eidx, triple< T > &surf, tri_sele< T > &sele, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)
void remove_parallel_duplicates(hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &quad_surf, MPI_Comm comm)
void convert_mesh_surface(const meshdata< T, S > &surfmesh, hashmap::unordered_map< triple< T >, tri_sele< T >> &tri_surf, hashmap::unordered_map< quadruple< T >, quad_sele< T >> &quad_surf)
void binary_sort(vector< T > &_V)
T global_max(const vector< T > &vec, MPI_Comm comm)
Compute the global maximum of a distributed vector.
void extract_myocardium(const meshdata< T, S > &mesh, meshdata< T, S > &submesh, bool require_fibers=true)
Extract the myocardium submesh.
Point cross(const Point &a, const Point &b)
cross product
void write_mesh_parallel(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write a parallel mesh to harddisk without gathering it on one rank.
void insert_surf_quad(T n1, T n2, T n3, T n4, size_t eidx, vector< T > &buff, quadruple< T > &surf, quad_sele< T > &sele, hashmap::unordered_map< quadruple< T >, quad_sele< T > > &surfmap)
void redistribute_mesh(meshdata< T, S > &mesh, vector< T > &part)
Redistribute both element and vertex data of a mesh.
void print_mesh_graph(meshdata< T, S > &mesh)
One-by-one each process prints the graph of a given mesh.
void inter_domain_mapping(const meshdata< T, S > &mesh_a, const meshdata< T, S > &mesh_b, const SF_nbr snbr, index_mapping< T > &a_to_b)
Submesh index mapping between different domains/meshes.
void nodal_connectivity_graph(const meshdata< T, S > &mesh, vector< T > &n2n_cnt, vector< T > &n2n_con)
Compute the node-to-node connectivity.
SF_nbr
Enumeration encoding the different supported numberings.
@ NBR_PETSC
PETSc numbering of nodes.
@ NBR_ELEM_REF
The element numbering of the reference mesh (the one stored on HD).
@ NBR_REF
The nodal numbering of the reference mesh (the one stored on HD).
@ NBR_SUBMESH
Submesh nodal numbering: The globally ascending sorted reference indices are reindexed.
void remove_duplicates(hashmap::unordered_map< K, V > &map, const MPI_Comm comm)
remove parallel duplicates from a hashmap::unordered_map
void insert_surf_tet(const T *nod, const size_t eidx, hashmap::unordered_map< triple< T >, tri_sele< T > > &surfmap)