#ifndef _SF_NUMBERING_H
#define _SF_NUMBERING_H

// ...

// The abstract numbering class. Derived functors implement
// virtual void operator()(meshdata<T,S> & mesh) = 0 to add a
// numbering to a mesh.
template<class T, class S>
// ... (class body elided)
// Renumber the global indexing of in_nbr globally ascending into out_nbr.
template<class T, class S>
void renumber_sorted_ascending(meshdata<T,S> & submesh, SF_nbr in_nbr, SF_nbr out_nbr)
{
  // ...
  MPI_Comm comm = submesh.comm;
  // ...
  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
  // ...
  // bucket size: the global index range [gmin, gmax] (obtained from
  // global_min / global_max) is split into `size` contiguous blocks
  T bsize = (gmax - gmin) / size + 1;
  // ...
  // the destination rank of each reference node index is the block it falls into
  for(size_t i=0; i<dest.size(); i++)
    dest[i] = (rnod[i] - gmin) / bsize;
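The bucketing arithmetic is easiest to see with concrete numbers. A minimal serial sketch; the values of gmin, gmax and size are made up, where the library obtains them from global_min, global_max and MPI_Comm_size:

    #include <cstdio>
    #include <vector>

    int main()
    {
      // assumed sample values: global indices range over [0, 99] on 4 ranks
      long gmin = 0, gmax = 99;
      int  size = 4;
      long bsize = (gmax - gmin) / size + 1;   // = 25 indices per block

      std::vector<long> rnod = {3, 24, 25, 70, 99};
      for(long n : rnod)
        std::printf("node %ld -> rank %ld\n", n, (n - gmin) / bsize);
      // prints ranks 0, 0, 1, 2, 3: each index lands in exactly one block
      return 0;
    }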
  // ...
  // one destination rank per entry; per-entry multiplicities start at one
  vector<T> proc(rsize), acc_cnt(rsize, T(1));
  // ...
  rsize = rbuff.size();
  // ...
  // gather the receive counts of all ranks into the communication graph
  MPI_Allgather(&rsize, sizeof(size_t), MPI_BYTE,
                grph.rcnt.data(), sizeof(size_t), MPI_BYTE, comm);
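After the gather, grph.rcnt holds one receive count per rank; displacements follow from it via dsp_from_cnt (see the glossary below). A minimal MPI sketch of the same pattern, using std::vector in place of SF::vector and an explicit prefix sum where the library would call dsp_from_cnt:

    #include <mpi.h>
    #include <vector>
    #include <cstddef>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int size; MPI_Comm_size(MPI_COMM_WORLD, &size);

      size_t rsize = 4;   // stand-in for the local receive count
      std::vector<size_t> rcnt(size), rdsp(size + 1, 0);

      // every rank contributes its count; sizes travel as raw bytes,
      // matching the MPI_BYTE transfer in the listing above
      MPI_Allgather(&rsize, (int)sizeof(size_t), MPI_BYTE,
                    rcnt.data(), (int)sizeof(size_t), MPI_BYTE, MPI_COMM_WORLD);

      // displacements from counts: rdsp[r] is the offset of rank r's data
      for(int i = 0; i < size; i++) rdsp[i+1] = rdsp[i] + rcnt[i];

      MPI_Finalize();
      return 0;
    }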
  // ...
  sbuff.resize(proc.size());
  // ...
  // expand each unique entry acc_cnt[i] times into the send buffer
  for(size_t i=0, idx=0; i<acc_cnt.size(); i++)
    for(T j=0; j<acc_cnt[i]; j++, idx++) sbuff[idx] = rbuff[i];
  dest.assign(proc.begin(), proc.end());
  // ...
  // same expansion, now scattering the newly assigned numbering
  for(size_t i=0, idx=0; i<acc_cnt.size(); i++)
    for(T j=0; j<acc_cnt[i]; j++, idx++) sbuff[idx] = new_nbr[i];
  dest.assign(proc.begin(), proc.end());
  // ...
  // map the received reference indices into the new numbering
  for(size_t i=0; i < rnod.size(); i++) snod[i] = map.forward_map(rnod[i]);
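Per the glossary, index_mapping is a bijective mapping and forward_map takes an index from set a to set b. The class internals are not part of this listing; a hypothetical stand-in built on std::unordered_map illustrates the contract:

    #include <unordered_map>
    #include <cassert>

    // hypothetical stand-in for the bijective index_mapping class
    template<class T>
    class toy_index_mapping {
      std::unordered_map<T,T> a2b, b2a;
    public:
      void insert(T a, T b) { a2b[a] = b; b2a[b] = a; }
      T forward_map(T idx)  const { return a2b.at(idx); }  // a -> b
      T backward_map(T idx) const { return b2a.at(idx); }  // b -> a
    };

    int main()
    {
      toy_index_mapping<int> map;
      map.insert(42, 0);   // reference index 42 becomes new index 0
      map.insert(17, 1);
      assert(map.backward_map(map.forward_map(42)) == 42);  // bijectivity
      return 0;
    }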
// Compute a Cuthill-McKee (optionally reverse) reindexing old2new from a
// node-to-node connectivity graph in counts / displacements / connections form.
template<class T>
inline void reindex_cuthill_mckee(const vector<T> & n2n_cnt,
                                  const vector<T> & n2n_dsp,
                                  const vector<T> & n2n_con,
                                  const bool reverse,
                                  hashmap::unordered_map<T,T> & old2new)
{
  // ...
  size_t nnod = n2n_cnt.size();
  // ...
  // seed the permutation with node 0
  perm.resize(1); perm[0] = 0; inR[0] = true;
  // ...
  // grow the permutation breadth-first until it covers all nodes
  while(perm.size() < nnod)
  {
    // ...
    // take the next node from the permutation, if one is left ...
    if (pidx < T(perm.size())) nidx = perm[pidx++];
    // ...
    // ... else fall back to the first node not yet included
    // (handles disconnected graph components)
    while(inR[i] == true) i++;
    // ...
    // visit the neighbours of the current node
    T start = n2n_dsp[nidx], stop = start + n2n_cnt[nidx];
    // ...
    for(T j = start; j<stop; j++) {
      // ... (unvisited neighbours are appended to perm)
    }
  }
  // ...
  if(reverse == false)
    for(size_t i=0; i<perm.size(); i++) old2new[perm[i]] = i;
  else
    for(size_t i=0, j=perm.size()-1; i<perm.size(); i++, j--) old2new[perm[j]] = i;
}
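Taken together, the fragments compute a breadth-first ordering of the connectivity graph, reversed when reverse is set. A runnable serial sketch on a small path graph in the same counts/displacements/connections storage; it assumes a connected graph (omitting the fallback above) and does not order neighbours by degree, which a full Cuthill-McKee implementation would:

    #include <cstdio>
    #include <vector>
    #include <unordered_map>

    int main()
    {
      // 5-node path graph 0-1-2-3-4 in cnt / dsp / con form
      std::vector<int> n2n_cnt = {1, 2, 2, 2, 1};
      std::vector<int> n2n_dsp = {0, 1, 3, 5, 7};
      std::vector<int> n2n_con = {1, 0, 2, 1, 3, 2, 4, 3};

      size_t nnod = n2n_cnt.size();
      std::vector<int>  perm(1, 0);        // seed the ordering with node 0
      std::vector<bool> inR(nnod, false);  inR[0] = true;

      // breadth-first growth, as in the listing above
      for(size_t pidx = 0; pidx < perm.size(); pidx++) {
        int nidx = perm[pidx];
        int start = n2n_dsp[nidx], stop = start + n2n_cnt[nidx];
        for(int j = start; j < stop; j++)
          if(!inR[n2n_con[j]]) { perm.push_back(n2n_con[j]); inR[n2n_con[j]] = true; }
      }

      // reverse Cuthill-McKee: map old indices to reversed positions
      std::unordered_map<int,int> old2new;
      for(size_t i = 0, j = perm.size()-1; i < perm.size(); i++, j--)
        old2new[perm[j]] = int(i);

      for(size_t i = 0; i < nnod; i++)
        std::printf("old %zu -> new %d\n", i, old2new[int(i)]);
      return 0;
    }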
// Functor class generating a numbering optimized for PETSc.
template<class T, class S>
// ...
  MPI_Comm comm = mesh.comm;
  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
  // ...
  // this rank owns the contiguous global index block [alg_start, alg_end)
  const T alg_start = alg_dsp[rank], alg_end = alg_dsp[rank+1];
  // ...
  // first pass: local algebraic nodes receive consecutive global indices
  for(size_t i=0; i<alg_nod.size(); i++)
    petsc_idx[alg_nod[i]] = alg_start + i;
  // ...
  // flag the local algebraic nodes
  for(const T & n : alg_nod) is_alg[n] = true;
  // ...
  // assign indices in the order nodes are first encountered when traversing
  // the element connectivity (petsc_idx holds -1 for unassigned nodes)
  size_t loc_petsc_idx = 0;
  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++) {
    for(T j = mesh.dsp[eidx]; j < mesh.dsp[eidx+1]; j++) {
      const T c = mesh.con[j];
      if(is_alg[c] && petsc_idx[c] == -1) {
        petsc_idx[c] = alg_start + loc_petsc_idx;
        loc_petsc_idx++;
      }
    }
  }
  // ...
  // every local algebraic node must have received an index
  assert(loc_petsc_idx == alg_nod.size());
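The effect of this pass is a first-touch ordering: indices follow the first appearance of each node in the element connectivity, clustering the indices of nodes that share elements. A serial toy version with two triangles, treating every node as algebraic:

    #include <cstdio>
    #include <vector>

    int main()
    {
      // two triangles sharing an edge: (2,0,3) and (3,0,1)
      std::vector<int> dsp = {0, 3, 6};
      std::vector<int> con = {2, 0, 3, 3, 0, 1};
      std::vector<int> petsc_idx(4, -1);

      int alg_start = 0;          // this rank's block offset
      int loc_petsc_idx = 0;
      for(size_t eidx = 0; eidx < 2; eidx++)
        for(int j = dsp[eidx]; j < dsp[eidx+1]; j++)
          if(petsc_idx[con[j]] == -1)          // first touch wins
            petsc_idx[con[j]] = alg_start + loc_petsc_idx++;

      for(int n = 0; n < 4; n++)
        std::printf("node %d -> index %d\n", n, petsc_idx[n]);
      // node 2 -> 0, node 0 -> 1, node 3 -> 2, node 1 -> 3
      return 0;
    }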
  // ...
  // map each algebraic node to its position in the restricted node set
  for(size_t i=0; i<alg_nod.size(); i++) {
    nodal2restr[alg_nod[i]] = i;
    // ...
  }
  // ...
  // build the restricted connectivity used for the bandwidth optimization
  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++)
  {
    T start = mesh.dsp[eidx], stop = mesh.dsp[eidx+1];
    for(T i = start; i<stop; i++) {
      if(nodal2restr.count(mesh.con[i])) {
        // ...
        restr_mesh.con.push_back(nodal2restr[mesh.con[i]]);
        // ...
      }
    }
  }
  // ...
  // apply the Cuthill-McKee permutation within this rank's index block
  for(size_t i=0; i<alg_nod.size(); i++)
    petsc_idx[alg_nod[i]] = old2new[i] + alg_start;
  // ...
  // make the numbering consistent on overlapping (duplicated) nodes:
  // a max-reduction propagates each assigned index across ranks
  _pl.reduce(petsc_idx, "max");
  // ...
  // optional rank-by-rank debug output of the element-wise PETSc indices
  bool print_indexing = false;
  // ...
  for(int pid = 0; pid < size; pid++) {
    // ...
    for(size_t eidx = 0; eidx < 100; eidx++) {
      printf("rank %d elem %d: ", rank, int(eidx));
      for(T j = mesh.dsp[eidx]; j < mesh.dsp[eidx+1] - 1; j++) {
        const T c = mesh.con[j];
        printf("%d ", petsc_idx[c]);
      }
      // print the element's last node with a trailing newline
      printf("%d\n", petsc_idx[mesh.con[mesh.dsp[eidx+1] - 1]]);
    }
    // ...
    MPI_Barrier(PETSC_COMM_WORLD);
  }

// ...

#endif // _SF_NUMBERING_H
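A hedged usage sketch of the functor: mesh and pl stand for a fully set-up meshdata instance and its overlapping node layout, and the SF namespace qualifier plus the enum value name NBR_PETSC are assumptions based on the glossary entry "PETSc numbering of nodes":

    // sketch only: mesh and pl must come from the surrounding SlimFem setup
    SF::petsc_numbering<int, double> pnum(pl);   // pl: overlapping_layout<int>
    pnum(mesh);                                  // adds the PETSc numbering to mesh
    SF::vector<int> & pidx = mesh.get_numbering(SF::NBR_PETSC);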
Referenced symbols:

const vector<T> & algebraic_layout() const
    Getter function for the global algebraic node layout.
void reindex_cuthill_mckee(const vector<T> & n2n_cnt, const vector<T> & n2n_dsp, const vector<T> & n2n_con, const bool reverse, hashmap::unordered_map<T,T> & old2new)
    Compute a (reverse) Cuthill-McKee reindexing from a node-to-node connectivity graph.
meshdata
    The mesh storage class. It contains both element and vertex data.
T global_min(const vector<T> & vec, MPI_Comm comm)
    Compute the global minimum of a distributed vector.
SF_vector.h
    The vector class and related algorithms.
vector<T> & get_numbering(SF_nbr nbr_type)
    Get the vector defining a certain numbering.
virtual void operator()(meshdata<T,S> & mesh) = 0
    Add a numbering to a mesh.
void renumber_sorted_ascending(meshdata<T,S> & submesh, SF_nbr in_nbr, SF_nbr out_nbr)
    Renumber the global indexing of in_nbr globally ascending into out_nbr.
size_t num_local_idx() const
    Retrieve the local number of indices.
MPI_Comm comm
    The MPI world the parallel mesh is defined on.
void MPI_Exchange(commgraph<T> & grph, vector<S> & send, vector<S> & recv, MPI_Comm comm)
    Exchange data in parallel over MPI.
NBR_PETSC
    PETSc numbering of nodes.
vector<T> dsp
    Connectivity starting index of each element.
void interval(vector<T> & vec, size_t start, size_t end)
    Create an integer interval between start and end.
NBR_REF
    The nodal numbering of the reference mesh (the one stored on HD).
vector<T> rdsp
    Displacements w.r.t. rcnt.
T * data()
    Pointer to the vector's start.
SF_nbr
    Enumeration encoding the different supported numberings.
const T * end() const
    Pointer to the vector's end.
NBR_ELEM_REF
    The element numbering of the reference mesh (the one stored on HD).
NBR_SUBMESH
    Submesh nodal numbering: the globally ascending sorted reference indices are reindexed.
numbering
    The abstract numbering class.
NBR_ELEM_SUBMESH
    Submesh element numbering: the globally ascending sorted reference indices are reindexed.
void configure(const vector<V> & dest, MPI_Comm comm)
    Set up the communication graph.
T sum(const vector<T> & vec)
    Compute the sum of a vector's entries.
petsc_numbering
    Functor class generating a numbering optimized for PETSc.
void binary_sort_copy(vector<T> & _V, vector<S> & _W)
    Sort _V, applying the same permutation to _W.
index_mapping
    Index mapping class. This is a bijective mapping.
T global_max(const vector<T> & vec, MPI_Comm comm)
    Compute the global maximum of a distributed vector.
size_t size() const
    The current size of the vector.
void nodal_connectivity_graph(const meshdata<T,S> & mesh, vector<T> & n2n_cnt, vector<T> & n2n_con)
    Compute the node-to-node connectivity.
void reduce(vector<V> & ndat, const char * op) const
    Compute a reduction on overlapping data.
void unique_accumulate(vector<T> & _P, vector<S> & _A)
size_t l_numpts
    Local number of points.
vector
    A vector storing arbitrary data.
size_t l_numelem
    Local number of elements.
const T * begin() const
    Pointer to the vector's start.
petsc_numbering(const overlapping_layout<T> & pl)
void assign(InputIterator s, InputIterator e)
    Assign a memory range.
void source_ranks(vector<V> & source)
    For every received data element, get the rank index it was received from.
T forward_map(T idx) const
    Map one index from a to b.
const vector<T> & algebraic_nodes() const
    Getter function for the local indices forming the local algebraic node set.
submesh_numbering
    Functor class applying a submesh renumbering.
vector<T> rcnt
    Number of elements received from each rank.
void binary_sort(vector<T> & _V)
    Sort the vector _V.
vector<T> & register_numbering(SF_nbr nbr_type)
    Register a new numbering to the mesh and return the associated index vector.
void resize(size_t n)
    Resize a vector.
void append(InputIterator s, InputIterator e)
    Append data to the current data chunk.
hashmap
    Classes similar to unordered_set and unordered_map, but with better performance.
overlapping_layout
    The overlapping_layout class contains the algorithms related to managing overlapping parallel index sets.
void dsp_from_cnt(const vector<T> & cnt, vector<T> & dsp)
    Compute displacements from counts.
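For illustration, displacements-from-counts is an exclusive prefix sum. A minimal sketch matching the documented signature, with std::vector standing in for SF::vector (the library implementation is not shown here):

    #include <vector>
    #include <cstddef>

    // minimal sketch of the documented helper: dsp[i] = sum of cnt[0..i-1]
    template<class T>
    void dsp_from_cnt(const std::vector<T> & cnt, std::vector<T> & dsp)
    {
      dsp.resize(cnt.size() + 1);
      dsp[0] = T(0);
      for(size_t i = 0; i < cnt.size(); i++)
        dsp[i+1] = dsp[i] + cnt[i];
    }
    // e.g. cnt = {3, 1, 2}  ->  dsp = {0, 3, 4, 6}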