openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
emi.cc
Go to the documentation of this file.
1 //
2 // Copyright (C) 2020 openCARP project
3 //
4 // This program is licensed under the openCARP Academic Public License (APL)
5 // v1.0: You can use and redistribute it and/or modify it in non-commercial
6 // academic environments under the terms of APL as published by the openCARP
7 // project v1.0, or (at your option) any later version. Commercial use requires
8 // a commercial license (info@opencarp.org).
9 //
10 // This program is distributed without any warranty; see the openCARP APL for
11 // more details.
12 //
13 // You should have received a copy of the openCARP APL along with this program
14 // and can find it online: http://www.opencarp.org/license
15 // ----------------------------------------------------------------------------
16 
24 #if WITH_EMI_MODEL
25 
26 #include "petsc_utils.h"
27 #include "physics_types.h"
28 #include "timers.h"
29 #include "stimulate.h"
30 #include "electric_integrators.h"
31 #include "SF_init.h" // for SF::init_xxx()
32 #include "emi.h"
33 #include <array>
34 
35 #ifdef WITH_CALIPER
36 #include "caliper/cali.h"
37 #else
38 #include "caliper_hooks.h"
39 #endif
40 
41 namespace opencarp {
42 
/**
 * @brief Populate electrical material properties for the volumetric EMI grid.
 *
 * Fills one RegionSpecs entry per configured gregion. By convention here,
 * gregion[0] is the default extracellular region, gregion[1] the default
 * intracellular region, and gregion[i>1] are user-defined regions whose tags
 * are removed from the two default tag sets. Conductivities are isotropic
 * (g_bath copied into all three directions) and converted from S/m to mS/um.
 * Optionally reads a per-element conductivity scaling vector from
 * param_globals::gi_scale_vec into mtype->el_scale.
 *
 * @param mtype      Material description to fill (regions are resized/overwritten).
 * @param extra_tags Full set of extracellular element tags (copied, not modified).
 * @param intra_tags Full set of intracellular element tags (copied, not modified).
 * @param logger     Log file handle for progress messages.
 */
void set_elec_tissue_properties_emi_volume(MaterialType* mtype, hashmap::unordered_set<int> &extra_tags, hashmap::unordered_set<int> &intra_tags, FILE_SPEC logger)
{
  MaterialType *m = mtype;

  // initialize random conductivity fluctuation structure with PrM values
  m->regions.resize(param_globals::num_gregions);

  const char* grid_name = "emi_grid_domain";
  // NOTE(review): "poperties" typo in the log message below — runtime string,
  // left untouched here; fix as a separate behavioral change.
  log_msg(logger, 0, 0, "Setting up %s tissue poperties for %d regions ..", grid_name,
          param_globals::num_gregions);

  char buf[64];
  RegionSpecs* reg = m->regions.data();

  // default tags for extra and intra domains (working copies; the caller's
  // sets stay intact)
  hashmap::unordered_set<int> extra_tags_default = extra_tags;
  hashmap::unordered_set<int> intra_tags_default = intra_tags;

  // First pass: any tag explicitly claimed by a user-defined gregion is
  // removed from the two default tag sets, so the defaults only cover the
  // remainder.
  for (size_t i=0; i<m->regions.size(); i++)
  {
    for (int j=0;j<param_globals::gregion[i].num_IDs;j++)
    {
      int tag = param_globals::gregion[i].ID[j];

      // removed tags explicitly defined by user in param_globals::gregion[i>1]
      if(extra_tags_default.find(tag) != extra_tags_default.end())
        extra_tags_default.erase(tag);

      if(intra_tags_default.find(tag) != intra_tags_default.end())
        intra_tags_default.erase(tag);
    }
  }

  // Second pass: fill one RegionSpecs per gregion.
  for (size_t i=0; i<m->regions.size(); i++, reg++)
  {
    // Auto-generate a name for unnamed regions.
    // NOTE(review): the generated name starts with ", " — looks like a
    // leftover from a comma-joined format string; confirm intent.
    if(!strcmp(param_globals::gregion[i].name, "")) {
      snprintf(buf, sizeof buf, ", gregion_%d", int(i));
      param_globals::gregion[i].name = dupstr(buf);
    }

    // copy metadata into RegionSpecs (strdup'd name — presumably freed by the
    // RegionSpecs owner; confirm against MaterialType cleanup)
    reg->regname = strdup(param_globals::gregion[i].name);
    reg->regID = i;

    if(i==0) // default Extracellular region
      reg->nsubregs = extra_tags_default.size();
    if(i==1) // default intracellular region
      reg->nsubregs = intra_tags_default.size();
    if(i>1) // optional: rest of other param_globals::gregion[i>1] defined by user
      reg->nsubregs = param_globals::gregion[i].num_IDs;

    if(!reg->nsubregs)
      reg->subregtags = NULL;
    else
    {
      reg->subregtags = new int[reg->nsubregs];

      if(i==0){
        // remaining extracellular tags become the subregions of region 0
        int j = 0;
        for (int tag : extra_tags_default) {
          reg->subregtags[j] = tag;
          j++;
        }
      }
      else if(i==1){
        // remaining intracellular tags become the subregions of region 1
        int j = 0;
        for (int tag : intra_tags_default) {
          reg->subregtags[j] = tag;
          j++;
        }
      }
      else{
        for (int j=0;j<reg->nsubregs;j++)
          reg->subregtags[j] = param_globals::gregion[i].ID[j]; // explicit tags defined by user
      }
    }

    // describe material in given region
    elecMaterial *emat = new elecMaterial();
    emat->material_type = ElecMat;

    // Isotropic conductivity is considered in EMI model.
    emat->InVal[0] = param_globals::gregion[i].g_bath;
    emat->InVal[1] = param_globals::gregion[i].g_bath;
    emat->InVal[2] = param_globals::gregion[i].g_bath;

    emat->ExVal[0] = param_globals::gregion[i].g_bath;
    emat->ExVal[1] = param_globals::gregion[i].g_bath;
    emat->ExVal[2] = param_globals::gregion[i].g_bath;

    emat->BathVal[0] = param_globals::gregion[i].g_bath;
    emat->BathVal[1] = param_globals::gregion[i].g_bath;
    emat->BathVal[2] = param_globals::gregion[i].g_bath;

    // convert units from S/m -> mS/um (and apply the per-region multiplier)
    for (int j=0; j<3; j++) {
      emat->InVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
      emat->ExVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
      emat->BathVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
    }
    reg->material = emat;
  }

  // Optional per-element conductivity scaling read from an ASCII file.
  if(strlen(param_globals::gi_scale_vec))
  {
    mesh_t mt = emi_msh;
    sf_mesh & mesh = get_mesh(mt);
    const char* file = param_globals::gi_scale_vec;

    size_t num_file_entries = SF::root_count_ascii_lines(file, mesh.comm);

    // warn (but continue) on a size mismatch between file and mesh
    if(num_file_entries != mesh.g_numelem)
      log_msg(0,4,0, "%s warning: number of %s conductivity scaling entries does not match number of elements!",
              __func__, get_mesh_type_name(mt));

    // set up parallel element vector and read data
    sf_vec *escale;
    SF::init_vector(&escale, get_mesh(mt), 1, sf_vec::elemwise);
    escale->read_ascii(param_globals::gi_scale_vec);

    if(get_size() > 1) {
      // set up element vector permutation and permute
      // NOTE(review): this listing appears truncated here — the body of the
      // NULL-check and the declaration of `sc` (presumably the element
      // permutation functor returned by get_permutation) are not visible.
      // Verify against the repository source before modifying.
      if(get_permutation(mt, ELEM_PETSC_TO_CANONICAL, 1) == NULL) {
      }
      sc(*escale, false);
    }

    // copy data into SF::vector
    SF_real* p = escale->ptr();
    m->el_scale.assign(p, p + escale->lsize());
    escale->release_ptr(p);
  }
}
179 
/**
 * @brief One-time setup of the EMI parabolic solver.
 *
 * Initializes the linear solver object, all solution/RHS vectors, the system
 * matrices (LHS, stiffness, volumetric and surface mass), the coupling
 * operators B/Bi/BsM/Vm_myocyte_emi between the volumetric EMI mesh and the
 * membrane surface mesh, and the unique-face <-> both-faces mapping operators.
 * Finally it wires up the shared Vb/Ib data vectors provided by the ionics.
 *
 * Collective over the mesh communicators (contains MPI_Allgather/Allreduce);
 * must be called on all ranks.
 */
void parabolic_solver_emi::init()
{
  double t0, t1, dur;
  get_time(t0);
  stats.init_logger("par_stats.dat");

  // Create/initialise linear solver object: PETSc gets constructed with default settings
  SF::init_solver(&lin_solver);

  // EMI mesh: After decoupling the interfaces, we obtain a volumetric mesh with new dofs for all vertices.
  sf_mesh & emi_mesh = get_mesh(emi_msh);
  // Surface mesh called emi_surface_counter_msh:
  // This mesh contains only the interface faces, where the indices correspond
  // to the vertices of the original input mesh.
  // Each face has a corresponding counter-face, and they are identified by different tag numbers in each rank.
  sf_mesh & emi_surfmesh_w_counter_face = get_mesh(emi_surface_counter_msh);
  sf_mesh & emi_surfmesh_one_side = get_mesh(emi_surface_msh);
  sf_mesh & emi_surfmesh_unique_face = get_mesh(emi_surface_unique_face_msh);

  // upper bounds for matrix row pre-allocation
  int max_row_entries_emi = max_nodal_edgecount(emi_mesh);
  int max_row_entries_emi_surfmesh = max_nodal_edgecount(emi_surfmesh_w_counter_face);

  // NOTE(review): stray second semicolon below (empty statement) — harmless.
  int rank;;
  MPI_Comm_rank(emi_surfmesh_w_counter_face.comm, &rank);

  sf_vec::ltype alg_type = sf_vec::algebraic;
  sf_vec::ltype alg_surface_type = sf_vec::elemwise;

  // one degree of freedom per node
  int dpn = 1;

  //-----------------------------------------------------------------
  // setup vectors
  //-----------------------------------------------------------------
  SF::init_vector(&ui, emi_mesh, dpn, alg_type);
  SF::init_vector(&dui, emi_mesh, dpn, alg_type);
  SF::init_vector(&ui_pre, emi_mesh, dpn, alg_type);
  SF::init_vector(&Irhs, emi_mesh, dpn, alg_type);
  SF::init_vector(&Iij_stim, emi_mesh, dpn, alg_type);
  SF::init_vector(&Iij_temp, emi_mesh, dpn, alg_type);
  SF::init_vector(&vb_both_face, emi_surfmesh_w_counter_face, dpn, alg_surface_type);
  SF::init_vector(&vb_unique_face, emi_surfmesh_unique_face, dpn, alg_surface_type);
  SF::init_vector(&Ib_both_face, emi_surfmesh_w_counter_face, dpn, alg_surface_type);
  SF::init_vector(&Ib_unique_face, emi_surfmesh_unique_face, dpn, alg_surface_type);

  //-----------------------------------------------------------------
  // initialize matrices (LHS, K, M_{emi mesh}, M_{surface mesh})
  //-----------------------------------------------------------------
  SF::init_matrix(&lhs_emi);
  SF::init_matrix(&stiffness_emi);
  SF::init_matrix(&mass_emi);
  SF::init_matrix(&mass_surf_emi);

  lhs_emi->init(emi_mesh, dpn, dpn, max_row_entries_emi*3);
  stiffness_emi->init(emi_mesh, dpn, dpn, max_row_entries_emi*2);
  mass_emi->init(emi_mesh, dpn, dpn, max_row_entries_emi*3);
  mass_surf_emi->init(emi_mesh, dpn, dpn, max_row_entries_emi*3);
  //---------------------------------------------------------------------
  // initialize operator matrices B, Bs, Vm_myocyte_emi
  //---------------------------------------------------------------------
  // Naming: capital letters = global sizes, lowercase = local sizes.
  mesh_int_t M = emi_surfmesh_w_counter_face.g_numelem;
  mesh_int_t N = emi_mesh.g_numpts;
  mesh_int_t m = emi_surfmesh_w_counter_face.l_numelem;
  mesh_int_t m_one_side = emi_surfmesh_one_side.l_numelem;
  mesh_int_t M_one_side = emi_surfmesh_one_side.g_numelem;
  mesh_int_t m_unique_face = emi_surfmesh_unique_face.l_numelem;
  mesh_int_t M_unique_face = emi_surfmesh_unique_face.g_numelem;
  mesh_int_t n = ui->lsize();

  // NOTE(review): max_row_entries_emi is an int printed with %zu below —
  // format/argument mismatch on platforms where size_t != int; same concern
  // applies if g_numelem/g_numpts are not size_t-sized. Verify types.
  log_msg(NULL, 0, 0, "\n**********************************");
  log_msg(NULL, 0, 0, "#elements of emi surfmesh unique face: %zu", emi_surfmesh_unique_face.g_numelem);
  log_msg(NULL, 0, 0, "#elements of emi surfmesh one side: %zu", emi_surfmesh_one_side.g_numelem);
  log_msg(NULL, 0, 0, "#elements of emi surfmesh: %zu", emi_surfmesh_w_counter_face.g_numelem);
  log_msg(NULL, 0, 0, "#elements of emi mesh: %zu", emi_mesh.g_numelem);
  log_msg(NULL, 0, 0, "#dofs for emi_mesh: %zu", emi_mesh.g_numpts);
  log_msg(NULL, 0, 0, "#max_row_entries_emi: %zu", max_row_entries_emi);
  log_msg(NULL, 0, 0, "**********************************\n");

  // per-rank element layouts (prefix sums of local element counts)
  SF::vector<long int> layout;
  SF::layout_from_count<long int>(emi_surfmesh_w_counter_face.l_numelem, layout, emi_surfmesh_w_counter_face.comm);
  mesh_int_t m_l = layout[rank];
  mesh_int_t n_l = emi_mesh.pl.algebraic_layout()[rank];
  const SF::vector<mesh_int_t> & alg_nod_surface = emi_surfmesh_w_counter_face.pl.algebraic_nodes();

  SF::vector<long int> layout_one_side;
  SF::layout_from_count<long int>(emi_surfmesh_one_side.l_numelem, layout_one_side, emi_surfmesh_one_side.comm);
  mesh_int_t m_one_side_l = layout_one_side[rank];

  SF::vector<long int> layout_unique_face;
  SF::layout_from_count<long int>(emi_surfmesh_unique_face.l_numelem, layout_unique_face, emi_surfmesh_unique_face.comm);
  mesh_int_t m_unique_face_l = layout_unique_face[rank];

  // To support a higher number of possible intersections between faces (e.g., multiple contacts on the membrane),
  // such as when one myocyte is surrounded by more than one extracellular region,
  // we slightly overallocate the maximum number of row entries (max_row_entries)
  // for the matrices B, Bi, BsM, and Vm_myocyte_emi to ensure sufficient storage for all coupling cases.
  SF::init_matrix(&B);
  B->init(M, N, m, n, m_l, max_row_entries_emi*2);
  B->zero();
  SF::init_matrix(&Bi);
  Bi->init(M, N, m, n, m_l, max_row_entries_emi*2);
  Bi->zero();
  SF::init_matrix(&BsM);
  BsM->init(N, M, n, m, n_l, max_row_entries_emi*2);
  BsM->zero();
  SF::init_matrix(&Vm_myocyte_emi);
  Vm_myocyte_emi->init(N, N, n, n, n_l, max_row_entries_emi*2);
  Vm_myocyte_emi->zero();



  // ============================================================================
  // Direct unique <-> both operators (skip intermediate one-face mesh)
  // ============================================================================
  {
    // Step 1: Create direct mapping unique -> both by converting one-face indices to both-face indices
    map_elem_uniqueFace_to_elem_bothface.clear();

    // Build per-rank one->both from the local both->one map, then allgather
    int comm_size = 0;
    MPI_Comm_size(emi_surfmesh_w_counter_face.comm, &comm_size);

    // Gather sizes of vec_both_to_one_face from all ranks
    std::vector<int> both_counts(comm_size, 0), both_displs(comm_size, 0);
    int local_both_count = (int)vec_both_to_one_face.size();
    MPI_Allgather(&local_both_count, 1, MPI_INT, both_counts.data(), 1, MPI_INT,
                  emi_surfmesh_w_counter_face.comm);

    int total_both_count = 0;
    for (int i = 0; i < comm_size; i++) {
      both_displs[i] = total_both_count;
      total_both_count += both_counts[i];
    }

    // NOTE(review): mesh_int_t data is exchanged with MPI_INT here — this
    // assumes mesh_int_t is a 32-bit int; confirm the typedef before enabling
    // 64-bit mesh indices.
    std::vector<mesh_int_t> all_both_to_one(total_both_count);
    MPI_Allgatherv(vec_both_to_one_face.data(), local_both_count, MPI_INT,
                   all_both_to_one.data(), both_counts.data(), both_displs.data(),
                   MPI_INT, emi_surfmesh_w_counter_face.comm);

    // Build per-rank one->both (first/second) from gathered both->one
    std::vector<std::vector<mesh_int_t>> one_to_both_first(comm_size);
    std::vector<std::vector<mesh_int_t>> one_to_both_second(comm_size);
    for (int r = 0; r < comm_size; r++) {
      // Determine max one-face index on rank r
      mesh_int_t max_one = -1;
      for (int i = 0; i < both_counts[r]; i++) {
        mesh_int_t one_idx = all_both_to_one[both_displs[r] + i];
        if (one_idx > max_one) max_one = one_idx;
      }
      if (max_one < 0) continue;
      one_to_both_first[r].assign(max_one + 1, -1);
      one_to_both_second[r].assign(max_one + 1, -1);
      for (int i = 0; i < both_counts[r]; i++) {
        mesh_int_t one_idx = all_both_to_one[both_displs[r] + i];
        if (one_idx < 0) continue;
        // first occurrence -> face, second occurrence -> counter-face
        if (one_to_both_first[r][one_idx] < 0) {
          one_to_both_first[r][one_idx] = i;
        } else {
          one_to_both_second[r][one_idx] = i;
        }
      }
    }

    // Convert unique->one to unique->both using per-rank mappings
    for (const auto& [unique_idx, one_face_pair] : map_elem_uniqueFace_to_elem_oneface) {
      const auto& first_oneface = one_face_pair.first;
      const auto& second_oneface = one_face_pair.second;

      SF::emi_index_rank<mesh_int_t> first_both, second_both;
      first_both.index = -1; first_both.rank = -1;
      second_both.index = -1; second_both.rank = -1;

      // Translate a (rank, one-face index) pair into the both-faces numbering.
      // use_second selects the counter-face entry when both map entries exist.
      auto map_one = [&](const SF::emi_index_rank<mesh_int_t>& one, bool use_second) {
        SF::emi_index_rank<mesh_int_t> out;
        out.index = -1; out.rank = -1;
        if (one.rank >= 0 && one.rank < comm_size && one.index >= 0) {
          if (one.index < (int)one_to_both_first[one.rank].size()) {
            mesh_int_t idx = one_to_both_first[one.rank][one.index];
            if (use_second && one.index < (int)one_to_both_second[one.rank].size() &&
                one_to_both_second[one.rank][one.index] >= 0) {
              idx = one_to_both_second[one.rank][one.index];
            }
            if (idx >= 0) {
              out.index = idx;
              out.rank = one.rank;
            }
          }
        }
        return out;
      };

      // If both one-face references point at the same entry, the second
      // lookup must take the counter-face instead of repeating the face.
      const bool same_oneface =
        (first_oneface.index >= 0 && second_oneface.index >= 0 &&
         first_oneface.index == second_oneface.index &&
         first_oneface.rank == second_oneface.rank);

      first_both = map_one(first_oneface, false);
      second_both = map_one(second_oneface, same_oneface);

      map_elem_uniqueFace_to_elem_bothface[unique_idx] = std::make_pair(first_both, second_both);
    }

    // Step 2: Initialize and assemble direct operators

    // operator_unique_to_both_faces (unique -> both, expansion operator)
    // NOTE: map_elem_uniqueFace_to_elem_bothface lives only on owner ranks.
    // We need to propagate both-face indices to all ranks so each rank can
    // insert its owned rows.
    SF::init_matrix(&operator_unique_to_both_faces);
    operator_unique_to_both_faces->init(M, M_unique_face, m, m_unique_face, m_l, max_row_entries_emi*10);
    operator_unique_to_both_faces->zero();

    {
      // Build global arrays for both-face indices per unique face
      const int M_unique_global = (int)emi_surfmesh_unique_face.g_numelem;
      std::vector<int> first_rank(M_unique_global, -1), first_idx(M_unique_global, -1);
      std::vector<int> second_rank(M_unique_global, -1), second_idx(M_unique_global, -1);

      for (const auto& [local_unique_idx, both_pair] : map_elem_uniqueFace_to_elem_bothface) {
        int global_unique = (int)(layout_unique_face[rank] + local_unique_idx);
        if (global_unique < 0 || global_unique >= M_unique_global) continue;
        first_rank[global_unique] = (int)both_pair.first.rank;
        first_idx[global_unique] = (int)both_pair.first.index;
        second_rank[global_unique] = (int)both_pair.second.rank;
        second_idx[global_unique] = (int)both_pair.second.index;
      }

      // Propagate owner-provided entries to all ranks
      // (MAX works because non-owners hold -1 sentinels)
      MPI_Allreduce(MPI_IN_PLACE, first_rank.data(), M_unique_global, MPI_INT, MPI_MAX, emi_surfmesh_w_counter_face.comm);
      MPI_Allreduce(MPI_IN_PLACE, first_idx.data(), M_unique_global, MPI_INT, MPI_MAX, emi_surfmesh_w_counter_face.comm);
      MPI_Allreduce(MPI_IN_PLACE, second_rank.data(), M_unique_global, MPI_INT, MPI_MAX, emi_surfmesh_w_counter_face.comm);
      MPI_Allreduce(MPI_IN_PLACE, second_idx.data(), M_unique_global, MPI_INT, MPI_MAX, emi_surfmesh_w_counter_face.comm);

      // Assemble rows owned by this rank
      SF::vector<SF_int> row_idx(1), col_idx(1);
      SF::dmat<SF_real> ebuff(1, 1);
      ebuff.assign(1, 1, 1.0);

      for (int gid = 0; gid < M_unique_global; gid++) {
        col_idx[0] = gid;
        if (first_rank[gid] == rank && first_idx[gid] >= 0) {
          row_idx[0] = (SF_int)(layout[rank] + first_idx[gid]);
          operator_unique_to_both_faces->set_values(row_idx, col_idx, ebuff.data(), false);
        }
        if (second_rank[gid] == rank && second_idx[gid] >= 0) {
          row_idx[0] = (SF_int)(layout[rank] + second_idx[gid]);
          operator_unique_to_both_faces->set_values(row_idx, col_idx, ebuff.data(), false);
        }
      }
      operator_unique_to_both_faces->finish_assembly();
    }

    // operator_both_to_unique_face (both -> unique, restriction/averaging operator)
    SF::init_matrix(&operator_both_to_unique_face);
    operator_both_to_unique_face->init(M_unique_face, M, m_unique_face, m, m_unique_face_l, max_row_entries_emi*10);
    operator_both_to_unique_face->zero();
    SF::assemble_map_both_to_unique(*operator_both_to_unique_face,
                                    map_elem_uniqueFace_to_elem_bothface,
                                    emi_surfmesh_unique_face,
                                    emi_surfmesh_w_counter_face);

    // Debug: validate mapping coverage and operator consistency
    #ifdef EMI_DEBUG_MESH
    {
      // Sanity: check mapping coverage and mismatches between one-face and both-face indices
      mesh_int_t local_valid_first = 0, local_valid_second = 0;
      mesh_int_t local_same_column = 0;
      SF::vector<long int> layout_both_dbg;
      SF::layout_from_count<long int>(emi_surfmesh_w_counter_face.l_numelem, layout_both_dbg, emi_surfmesh_w_counter_face.comm);
      SF::vector<long int> layout_unique_dbg;
      SF::layout_from_count<long int>(emi_surfmesh_unique_face.l_numelem, layout_unique_dbg, emi_surfmesh_unique_face.comm);

      for (const auto& [uidx, both_pair] : map_elem_uniqueFace_to_elem_bothface) {
        if (both_pair.first.index >= 0) local_valid_first++;
        if (both_pair.second.index >= 0) local_valid_second++;

        if (both_pair.first.index >= 0 && both_pair.second.index >= 0 &&
            both_pair.first.rank >= 0 && both_pair.second.rank >= 0) {
          mesh_int_t global_first = layout_both_dbg[both_pair.first.rank] + both_pair.first.index;
          mesh_int_t global_second = layout_both_dbg[both_pair.second.rank] + both_pair.second.index;
          if (global_first == global_second) local_same_column++;
        }
      }
      // NOTE(review): mesh_int_t counters reduced as MPI_INT — same 32-bit
      // assumption as above.
      mesh_int_t global_valid_first = 0, global_valid_second = 0;
      mesh_int_t global_same_column = 0;
      MPI_Allreduce(&local_valid_first, &global_valid_first, 1, MPI_INT, MPI_SUM, emi_surfmesh_w_counter_face.comm);
      MPI_Allreduce(&local_valid_second, &global_valid_second, 1, MPI_INT, MPI_SUM, emi_surfmesh_w_counter_face.comm);
      MPI_Allreduce(&local_same_column, &global_same_column, 1, MPI_INT, MPI_SUM, emi_surfmesh_w_counter_face.comm);

      int dbg_rank = -1;
      MPI_Comm_rank(emi_surfmesh_w_counter_face.comm, &dbg_rank);
      if (dbg_rank == 0) {
        log_msg(NULL, 0, 0, "DEBUG map_unique_to_both: valid_first=%d valid_second=%d (global M=%zu, M_unique=%zu)",
                (int)global_valid_first, (int)global_valid_second,
                (size_t)emi_surfmesh_w_counter_face.g_numelem,
                (size_t)emi_surfmesh_unique_face.g_numelem);
        log_msg(NULL, 0, 0, "DEBUG map_unique_to_both: same_column=%d", (int)global_same_column);
      }

      // Debug: identify missing local unique indices per rank
      {
        std::vector<char> present(emi_surfmesh_unique_face.l_numelem, 0);
        for (const auto& [uidx, both_pair] : map_elem_uniqueFace_to_elem_bothface) {
          if (uidx >= 0 && uidx < (mesh_int_t)present.size()) present[uidx] = 1;
        }
        std::vector<int> missing;
        for (mesh_int_t i = 0; i < (mesh_int_t)present.size(); i++) {
          if (!present[i]) missing.push_back((int)i);
        }
        int local_missing = (int)missing.size();
        int comm_size_dbg = 0;
        MPI_Comm_size(emi_surfmesh_w_counter_face.comm, &comm_size_dbg);
        std::vector<int> counts(comm_size_dbg, 0), displs(comm_size_dbg, 0);
        MPI_Gather(&local_missing, 1, MPI_INT,
                   dbg_rank == 0 ? counts.data() : nullptr, 1, MPI_INT,
                   0, emi_surfmesh_w_counter_face.comm);
        if (dbg_rank == 0) {
          int total = 0;
          for (int r = 0; r < comm_size_dbg; r++) {
            displs[r] = total;
            total += counts[r];
          }
          std::vector<int> all_missing(total, -1);
          MPI_Gatherv(missing.data(), local_missing, MPI_INT,
                      all_missing.data(), counts.data(), displs.data(), MPI_INT,
                      0, emi_surfmesh_w_counter_face.comm);
          int offset = 0;
          bool any = false;
          for (int r = 0; r < comm_size_dbg; r++) {
            if (counts[r] == 0) { offset += counts[r]; continue; }
            any = true;
            log_msg(NULL, 0, 0, "DEBUG unique missing: rank=%d count=%d", r, counts[r]);
            int to_print = counts[r] < 10 ? counts[r] : 10;
            for (int i = 0; i < to_print; i++) {
              log_msg(NULL, 0, 0, "DEBUG unique missing: rank=%d local_unique=%d", r, all_missing[offset + i]);
            }
            offset += counts[r];
          }
          if (!any) log_msg(NULL, 0, 0, "DEBUG unique missing: none");
        } else {
          // non-root ranks still participate in the collective Gatherv
          MPI_Gatherv(missing.data(), local_missing, MPI_INT,
                      nullptr, nullptr, nullptr, MPI_INT,
                      0, emi_surfmesh_w_counter_face.comm);
        }
      }

      // Operator consistency: unique -> both -> unique on ones vector
      // (a round trip should reproduce 1.0 everywhere; 0.5 entries indicate
      // a unique face for which only one of the two both-faces was found)
      sf_vec* dbg_unique = nullptr;
      sf_vec* dbg_both = nullptr;
      sf_vec* dbg_back = nullptr;
      SF::init_vector(&dbg_unique, emi_surfmesh_unique_face, dpn, alg_surface_type);
      SF::init_vector(&dbg_both, emi_surfmesh_w_counter_face, dpn, alg_surface_type);
      SF::init_vector(&dbg_back, emi_surfmesh_unique_face, dpn, alg_surface_type);
      dbg_unique->set(1.0);
      operator_unique_to_both_faces->mult(*dbg_unique, *dbg_both);
      operator_both_to_unique_face->mult(*dbg_both, *dbg_back);

      // Compute max abs error from 1.0 and count 0.5 entries
      SF_real local_max_err = 0.0;
      mesh_int_t local_half_count = 0;
      SF_real* p = dbg_back->ptr();
      for (mesh_int_t i = 0; i < dbg_back->lsize(); i++) {
        SF_real err = std::abs(p[i] - 1.0);
        if (err > local_max_err) local_max_err = err;
        if (std::abs(p[i] - 0.5) < 1e-12) local_half_count++;
      }
      dbg_back->release_ptr(p);
      SF_real global_max_err = 0.0;
      mesh_int_t global_half_count = 0;
      // NOTE(review): assumes SF_real == double (MPI_DOUBLE) — confirm.
      MPI_Allreduce(&local_max_err, &global_max_err, 1, MPI_DOUBLE, MPI_MAX, emi_surfmesh_w_counter_face.comm);
      MPI_Allreduce(&local_half_count, &global_half_count, 1, MPI_INT, MPI_SUM, emi_surfmesh_w_counter_face.comm);
      if (dbg_rank == 0) {
        log_msg(NULL, 0, 0, "DEBUG map_unique_to_both consistency: max_err=%.6e half_count=%d",
                (double)global_max_err, (int)global_half_count);
      }

      // Emit a few examples where only one side is present (count==1),
      // which causes 0.5 after averaging in MPI runs.
      {
        const int max_print = 5;
        const int fields = 9;
        std::vector<int> local_buf(max_print * fields, -1);
        int filled = 0;

        for (const auto& [local_unique_idx, both_pair] : map_elem_uniqueFace_to_elem_bothface) {
          if (filled >= max_print) break;
          const auto& first_both = both_pair.first;
          const auto& second_both = both_pair.second;

          const bool first_valid = (first_both.index >= 0 && first_both.rank >= 0);
          const bool second_valid = (second_both.index >= 0 && second_both.rank >= 0);
          int count = (first_valid ? 1 : 0) + (second_valid ? 1 : 0);
          if (count != 1) continue;

          mesh_int_t global_unique = layout_unique_dbg[dbg_rank] + local_unique_idx;
          mesh_int_t global_first = first_valid ? (layout_both_dbg[first_both.rank] + first_both.index) : -1;
          mesh_int_t global_second = second_valid ? (layout_both_dbg[second_both.rank] + second_both.index) : -1;

          int base = filled * fields;
          local_buf[base + 0] = dbg_rank;
          local_buf[base + 1] = (int)local_unique_idx;
          local_buf[base + 2] = (int)global_unique;
          local_buf[base + 3] = (int)first_both.rank;
          local_buf[base + 4] = (int)first_both.index;
          local_buf[base + 5] = (int)global_first;
          local_buf[base + 6] = (int)second_both.rank;
          local_buf[base + 7] = (int)second_both.index;
          local_buf[base + 8] = (int)global_second;
          filled++;
        }

        int comm_size = 0;
        MPI_Comm_size(emi_surfmesh_w_counter_face.comm, &comm_size);
        std::vector<int> all_buf;
        if (dbg_rank == 0) {
          all_buf.resize(comm_size * max_print * fields, -1);
        }

        MPI_Gather(local_buf.data(), max_print * fields, MPI_INT,
                   dbg_rank == 0 ? all_buf.data() : nullptr, max_print * fields, MPI_INT,
                   0, emi_surfmesh_w_counter_face.comm);

        if (dbg_rank == 0) {
          for (int r = 0; r < comm_size; r++) {
            for (int i = 0; i < max_print; i++) {
              int base = (r * max_print + i) * fields;
              if (all_buf[base + 1] < 0) continue;
              log_msg(NULL, 0, 0,
                      "DEBUG map_unique_to_both bad: rank=%d local_unique=%d global_unique=%d "
                      "first(rank=%d idx=%d glob=%d) second(rank=%d idx=%d glob=%d)",
                      all_buf[base + 0], all_buf[base + 1], all_buf[base + 2],
                      all_buf[base + 3], all_buf[base + 4], all_buf[base + 5],
                      all_buf[base + 6], all_buf[base + 7], all_buf[base + 8]);
            }
          }
        }
      }

      delete dbg_unique;
      delete dbg_both;
      delete dbg_back;
    }
    #endif
  }

  // initialize Interpolation operator by B and Scatter operator by Bs
  SF::assemble_restrict_operator(*B, *Bi, *BsM, elemTag_surface_w_counter_mesh, map_vertex_tag_to_dof_petsc, line_face, tri_face, quad_face, emi_surfmesh_w_counter_face, emi_mesh, UM2_to_CM2);

  // DEBUG: Check mesh sizes and mappings
  #ifdef EMI_DEBUG_MESH
  {
    int local_rank;
    MPI_Comm_rank(emi_mesh.comm, &local_rank);

    fprintf(stderr, "RANK %d MESH SIZES: one_side=%zu, counter=%zu, unique=%zu\n",
            local_rank, emi_surfmesh_one_side.l_numelem,
            emi_surfmesh_w_counter_face.l_numelem, emi_surfmesh_unique_face.l_numelem);

    fprintf(stderr, "RANK %d MAP SIZES: uniqueFace_to_oneface=%zu\n",
            local_rank, map_elem_uniqueFace_to_elem_oneface.size());
    fflush(stderr);
  }
  #endif

  //-----------------------------------------------------------------
  // setup the ionic current
  //-----------------------------------------------------------------
  // get initial value of the vm and Iion on the face from the ionicOnFace class
  sf_vec* vb_ptr = get_data(vm_emi_itf);
  sf_vec* Ib_ptr = get_data(iion_emi_itf);

  if(!(vb_ptr != NULL && Ib_ptr != NULL)) {
    log_msg(0,5,0, "%s error: global Vb and Ib vectors not properly set up! Ionics seem invalid! Aborting!",
            __func__);
    EXIT(1);
  }

  // vb/Ib alias the ionics-owned vectors (shallow copies, no data ownership)
  SF::init_vector(&vb, vb_ptr);
  vb->shallow_copy(*vb_ptr);
  SF::init_vector(&Ib, Ib_ptr);
  Ib->shallow_copy(*Ib_ptr);

  // select the time integration scheme from the input parameters
  parab_tech = static_cast<parabolic_solver_emi::parabolic_t>(param_globals::parab_solve_emi);

  dur = timing(t1, t0);
}
666 
/**
 * @brief (Re)assemble the EMI system matrices and set up the linear solver.
 *
 * Assembles the stiffness matrix K, the volumetric mass matrix M, and the
 * combined LHS (Cm*M_surf/dt + K) for the current material state, then either
 * enforces Dirichlet boundary conditions on the LHS (if DBC stimuli exist) or
 * flags the system as singular (pure Neumann) via phie_mat_has_nullspace.
 * Finally (re)configures the linear solver on the assembled LHS.
 *
 * @param mtype   Material definitions; only mtype[0] is used.
 * @param miif    Ionic model interface. NOTE(review): currently unused here.
 * @param stimuli Stimulus list, scanned for Dirichlet-type stimuli.
 * @param logger  Log file handle for timing messages.
 */
void parabolic_solver_emi::rebuild_matrices(MaterialType* mtype, limpet::MULTI_IF & miif, SF::vector<stimulus> & stimuli, FILE_SPEC logger)
{
  double start, end, period;
  get_time(start);
  double t0, t1, dur;
  // NOTE(review): this outer mass_integ is shadowed by a second declaration
  // inside the assembly block below and is otherwise unused.
  mass_integrator mass_integ;
  mass_integrator mass_integ_emi;
  int dpn = 1;

  // echo timing info only at higher output verbosity
  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
  MaterialType & mt = mtype[0];
  const bool have_dbc = have_dbc_stims(stimuli);

  double Dt = user_globals::tm_manager->time_step;

  cond_t condType = intra_cond;
  sf_mesh & mesh = get_mesh(emi_msh);
  sf_mesh & emi_surfmesh_w_counter_face = get_mesh(emi_surface_counter_msh);

  // set material and conductivity type
  set_cond_type(mt, condType);

  // assemble EMI matrices
  // fill the EMI system
  {
    // emi-> stiffness
    log_msg(NULL, 0, 0, "assemble stiffness matrix");
    get_time(t0);
    elec_stiffness_integrator stfn_integ_emi(mt);
    stiffness_emi->zero();
    SF::assemble_matrix(*stiffness_emi, mesh, stfn_integ_emi);
    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed parabolic stiffness matrix in %.5f seconds.", float(dur));

    log_msg(NULL, 0, 0, "assemble mass matrix on the volumetric mesh");
    get_time(t0);
    mass_integrator mass_integ;  // shadows the outer mass_integ (see NOTE above)
    mass_emi->zero();
    SF::assemble_matrix(*mass_emi, mesh, mass_integ);
    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed volumetric mass matrix in %.5f seconds.", float(dur));

    log_msg(NULL, 0, 0, "assemble LHS matrix and mass matrix on the surface mesh");
    get_time(t0);
    lhs_emi->zero();
    // lhs_emi = (CmM/dt + K)
    SF::assemble_lhs_emi(*lhs_emi, *mass_surf_emi, mesh, emi_surfmesh_w_counter_face, map_vertex_tag_to_dof_petsc, line_face, tri_face, quad_face, stfn_integ_emi, mass_integ_emi, -1., UM2_to_CM2 / Dt);
    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed parabolic mass matrix in %.5f seconds.", float(dur));
  }


  // NOTE(review): same_nonzero is never read in this function.
  bool same_nonzero = false;

  // set boundary conditions
  if(have_dbc) {
    log_msg(logger,0,log_flag, "lhs matrix enforcing Dirichlet boundaries.");
    get_time(t0);

    // lazily create the DBC manager on first call, reuse afterwards
    if(dbc == nullptr)
      dbc = new dbc_manager(*lhs_emi, stimuli);
    else
      dbc->recompute_dbcs();

    dbc->enforce_dbc_lhs();

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "lhs matrix Dirichlet enforcing done in %.5f seconds.", float(dur));
  }
  else {
    log_msg(logger,0,ECHO, "without enforcing Dirichlet boundaries on the lhs matrix!");
    // we are dealing with a singular system
    phie_mat_has_nullspace = true;
  }

  set_dir(INPUT);
  get_time(t0);
  setup_linear_solver(logger);
  dur = timing(t1,t0);
  log_msg(logger,0,log_flag, "Initializing parabolic solver in %.5f seconds.", float(dur));
  set_dir(OUTPUT);

  period = timing(end, start);
}
752 
/**
 * @brief Configure the iterative linear solver for the parabolic EMI system.
 *
 * Tolerance and iteration limit are taken from the global parameters
 * (cg_tol_parab / cg_maxit_parab). Default solver options depend on the
 * selected linear-algebra flavor; a user-supplied options file
 * (parab_options_file) can override them inside setup_solver().
 *
 * @param logger  log file handle used by the solver setup for reporting
 */
void parabolic_solver_emi::setup_linear_solver(FILE_SPEC logger)
{
  tol = param_globals::cg_tol_parab;
  max_it = param_globals::cg_maxit_parab;

  std::string default_opts;
  std::string solver_file;
  solver_file = param_globals::parab_options_file;
  if (param_globals::flavor == std::string("ginkgo")) {
    // Ginkgo flavor: CG preconditioned with a multigrid (PGM coarsening,
    // Jacobi-based Schwarz solve on the coarsest level), given as a Ginkgo
    // JSON configuration string.
    default_opts = std::string(
R"(
{
  "type": "solver::Cg",
  "preconditioner": {
    "type": "solver::Multigrid",
    "min_coarse_rows": 8,
    "max_levels": 16,
    "default_initial_guess": "zero",
    "mg_level": [
      {
        "type": "multigrid::Pgm",
        "deterministic": false
      }
    ],
    "coarsest_solver": {
      "type": "preconditioner::Schwarz",
      "local_solver": {
        "type": "preconditioner::Jacobi"
      }
    },
    "criteria": [
      {
        "type": "Iteration",
        "max_iters": 1
      }
    ]
  },
  "criteria": [
    {
      "type": "Iteration",
      "max_iters": 100
    },
    {
      "type": "ResidualNorm",
      "reduction_factor": 1e-4
    }
  ]
}
)");
  } else if (param_globals::flavor == std::string("petsc")) {
    // PETSc flavor: CG with GAMG (algebraic multigrid) preconditioning.
    default_opts = std::string("-ksp_type cg -pc_type gamg -options_left");
  }
  // NOTE(review): the iteration cap is passed as max_it * 100 — presumably
  // deliberate headroom over cg_maxit_parab; confirm against solver code.
  lin_solver->setup_solver(*lhs_emi, tol, max_it * 100, param_globals::cg_norm_parab,
                           "parabolic PDE", phie_mat_has_nullspace, logger, solver_file.c_str(),
                           default_opts.c_str());
}
809 
810 void parabolic_solver_emi::solve()
811 {
812  switch (parab_tech) {
813  case SEMI_IMPLICIT: solve_semiImplicit(); break;
814  }
815 }
816 
817 void parabolic_solver_emi::solve_semiImplicit()
818 {
819  double t0,t1;
820  get_time(t0);
821 
822  if(dbc != nullptr){
823  CALI_MARK_BEGIN("apply_dbc_rhs");
824  dbc->enforce_dbc_rhs(*ui);
825  CALI_MARK_END("apply_dbc_rhs");
826  }
827 
828  *ui_pre = *ui;
829 
830  // -K*u (K is assembled as -K)
831  CALI_MARK_BEGIN("stiff_mat");
832  stiffness_emi->mult(*ui, *Iij_temp);
833  // rhs = Iij + K*u
834  *Irhs -= *Iij_temp;
835  CALI_MARK_END("stiff_mat");
836 
837  // add volumetric stimulus currents (I_ex, I_in)
838  // rhs = Iij + K*u + M*Iij_stim
839  if (Iij_stim->mag() > 0.0) {
840  CALI_MARK_BEGIN("stim_application");
841  mass_emi->mult(*Iij_stim, *Iij_temp);
842  *Irhs -= *Iij_temp;
843  CALI_MARK_BEGIN("stim_application");
844  }
845 
846  // rhs = -(Iij + K*u + M*Iij_stim)
847  CALI_MARK_BEGIN("rhs_update");
848  (*Irhs) *= (-1.0);
849  CALI_MARK_END("rhs_update");
850 
851  // compute step
852  CALI_MARK_BEGIN("linear_solve");
853  (*lin_solver)(*dui, *Irhs);
854  CALI_MARK_END("linear_solve");
855 
856  // logfile for solver
857  if(lin_solver->reason < 0) {
858  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
859  petsc_get_converged_reason_str(lin_solver->reason));
860  EXIT(1);
861  }
862 
863  // update solution:: ui_pre = ui_pre + dui
864  CALI_MARK_BEGIN("sol_update");
865  ui_pre->add_scaled(*dui, 1.0);
866  *ui *=0;
867  ui->add_scaled(*ui_pre, 1.0);
868  CALI_MARK_END("sol_update");
869 
870  // We need to enforce DBCs again after the solution vector was updated.
871  // Otherwise, matrix/solver tolerances allow small nonzero values in dui to accumulate in ui.
872  if(dbc != nullptr){
873  CALI_MARK_BEGIN("apply_dbc_rhs");
874  dbc->enforce_dbc_rhs(*ui);
875  CALI_MARK_END("apply_dbc_rhs");
876  }
877 
878  // treat solver statistics
879  stats.slvtime += timing(t1, t0);
880  stats.update_iter(lin_solver->niter);
881 }
882 
884  std::pair<SF::emi_face<mesh_int_t,SF::tuple<mesh_int_t>>,
885  SF::emi_face<mesh_int_t,SF::tuple<mesh_int_t>>>> & line_face,
887  std::pair<SF::emi_face<mesh_int_t,SF::triple<mesh_int_t>>,
888  SF::emi_face<mesh_int_t,SF::triple<mesh_int_t>>>> & tri_face,
890  std::pair<SF::emi_face<mesh_int_t,SF::quadruple<mesh_int_t>>,
891  SF::emi_face<mesh_int_t,SF::quadruple<mesh_int_t>>>> & quad_face,
892  hashmap::unordered_map<std::pair<mesh_int_t,mesh_int_t>, mesh_int_t> & map_vertex_tag_to_dof,
893  hashmap::unordered_map<std::pair<mesh_int_t,mesh_int_t>, std::pair<mesh_int_t,mesh_int_t>> & map_vertex_tag_to_dof_petsc,
894  std::vector<std::string> & tags_data)
895 {
896  sf_mesh & emi_surfmesh_w_counter_face = get_mesh(emi_surface_counter_msh);
897 
898  const SF::vector<mesh_int_t> & tags = emi_surfmesh_w_counter_face.tag;
899 
900  const SF::vector<mesh_int_t> & rnod = emi_surfmesh_w_counter_face.get_numbering(SF::NBR_REF);
902 
903  for(size_t i=0; i<rnod.size(); i++){
904  l2g[i] = rnod[i];
905  }
906 
907  for(size_t eidx=0; eidx<emi_surfmesh_w_counter_face.l_numelem; eidx++)
908  {
909  std::vector<int> elem_nodes;
910  mesh_int_t tag = emi_surfmesh_w_counter_face.tag[eidx];
911  for (int n = emi_surfmesh_w_counter_face.dsp[eidx]; n < emi_surfmesh_w_counter_face.dsp[eidx+1];n++)
912  {
913  mesh_int_t l_idx = emi_surfmesh_w_counter_face.con[n];
914 
915  std::pair <mesh_int_t,mesh_int_t> Index_tag_old;
916  Index_tag_old = std::make_pair(l2g[l_idx],tags[eidx]);
917  mesh_int_t dof = map_vertex_tag_to_dof[Index_tag_old];
918  elem_nodes.push_back(dof);
919  }
920 
921  mesh_int_t tag_first = 0;
922  mesh_int_t tag_second = 0;
923  std::string result_first;
924  std::string result_second;
925  std::sort(elem_nodes.begin(),elem_nodes.end()); // make the node tuple order-independent
926 
927  // Extract all surface face pairs separating regions with different material or boundary tags (ionicFaces)
928  if(elem_nodes.size()==2){
930 
931  key.v1 = elem_nodes[0];
932  key.v2 = elem_nodes[1];
933  std::pair<SF::emi_face<mesh_int_t,SF::tuple<mesh_int_t>>,
934  SF::emi_face<mesh_int_t,SF::tuple<mesh_int_t>>> value = line_face[key];
935 
936  tag_first = value.first.tag;
937  tag_second = value.second.tag;
938  result_first = std::to_string(tag_first) + ":" + std::to_string(tag_second);
939  result_second = std::to_string(tag_second) + ":" + std::to_string(tag_first);
940  }
941  else if(elem_nodes.size()==3){
943  key.v1 = elem_nodes[0];
944  key.v2 = elem_nodes[1];
945  key.v3 = elem_nodes[2];
946  std::pair<SF::emi_face<mesh_int_t,SF::triple<mesh_int_t>>,
947  SF::emi_face<mesh_int_t,SF::triple<mesh_int_t>>> value = tri_face[key];
948 
949  tag_first = value.first.tag;
950  tag_second = value.second.tag;
951  result_first = std::to_string(tag_first) + ":" + std::to_string(tag_second);
952  result_second = std::to_string(tag_second) + ":" + std::to_string(tag_first);
953  }
954  else if(elem_nodes.size()==4){
956  key.v1 = elem_nodes[0];
957  key.v2 = elem_nodes[1];
958  key.v3 = elem_nodes[2];
959  key.v4 = elem_nodes[3];
960  std::pair<SF::emi_face<mesh_int_t,SF::quadruple<mesh_int_t>>,
961  SF::emi_face<mesh_int_t,SF::quadruple<mesh_int_t>>> value = quad_face[key];
962 
963  tag_first = value.first.tag;
964  tag_second = value.second.tag;
965  result_first = std::to_string(tag_first) + ":" + std::to_string(tag_second);
966  result_second = std::to_string(tag_second) + ":" + std::to_string(tag_first);
967  }
968  // check if current element tag has a custom region ID
969  tags_data.push_back(result_first);
970  tags_data.push_back(result_second);
971  }
972 }
973 
974 void EMI::initialize()
975 {
976  double t1, t2;
977  get_time(t1);
978 
979  set_dir(OUTPUT);
980 
981  // open logger
982  logger = f_open("emi.log", param_globals::experiment != 4 ? "w" : "r");
983  const int verb = param_globals::output_level;
984  // Mesh processing step: convert the input mesh into
985  // - an EMI mesh (with discontinuities at gap junctions and membranes)
986  // - and generate the corresponding surface mesh
987  CALI_MARK_BEGIN("mesh_setup");
988  setup_EMI_mesh();
989 
990  // setup mappings between extra and intra grids, algebraic and nodal,
991  // and between PETSc and canonical orderings
992  setup_mappings();
993 
994  // the ionicOnFace physics is currently triggered from inside the emi to have tighter
995  // control over it
996  ion.logger = logger;
997 
998  ion.set_surface_mesh_data(parab_solver.line_face,
999  parab_solver.tri_face,
1000  parab_solver.quad_face,
1001  parab_solver.map_vertex_tag_to_dof);
1002 
1003  // builds per-face adjacency tags
1004  std::vector<std::string> tags_data;
1005  tags_onFace(parab_solver.line_face,
1006  parab_solver.tri_face,
1007  parab_solver.quad_face,
1008  parab_solver.map_vertex_tag_to_dof,
1009  parab_solver.map_vertex_tag_to_dof_petsc,
1010  tags_data);
1011  CALI_MARK_END("mesh_setup");
1012 
1013  ion.set_tags_onFace(tags_data);
1014 
1015  ion.set_default_interface_pairs(parab_solver.membraneFace_default, parab_solver.gapjunctionFace_default, parab_solver.map_elem_uniqueFace_to_tags);
1016  ion.initialize();
1017 
1018  // set up tissue properties on the extra and intracellular domains
1019  set_elec_tissue_properties_emi_volume(mtype_vol, parab_solver.extra_tags, parab_solver.intra_tags, logger);
1020  region_mask(emi_msh, mtype_vol[0].regions, mtype_vol[0].regionIDs, true, "gregion_vol");
1021 
1022  // add electrics timer for time stepping, add to time stepper tool (TS)
1023  double global_time = user_globals::tm_manager->time;
1024  timer_idx = user_globals::tm_manager->add_eq_timer(global_time, param_globals::tend, 0,
1025  param_globals::dt, 0, "elec::ref_dt", "TS");
1026 
1027  // EMI stimuli setup
1028  CALI_MARK_BEGIN("stimulus_setup");
1029  param_globals::operator_splitting = 0; // as long as we don't use operator splitting, we default to 0 so we can keep the stimuli scaling mint
1030  setup_stimuli();
1031  CALI_MARK_END("stimulus_setup");
1032 
1033  // set up the linear equation systems. this needs to happen after the stimuli have been
1034  // set up, since we need boundary condition info
1035  CALI_MARK_BEGIN("solver_setup");
1036  setup_solvers();
1037  CALI_MARK_END("solver_setup");
1038 
1039  // balance electrodes, we may need the extracellular mass matrix
1040  balance_electrodes();
1041  // total current scaling
1042  scale_total_stimulus_current(stimuli, *parab_solver.mass_emi, *parab_solver.mass_surf_emi, logger);
1043 
1044  sf_mesh & emi_mesh = get_mesh(emi_msh);
1045  sf_mesh & emi_surfmesh_w_counter_face = get_mesh(emi_surface_counter_msh);
1046 
1047  // - Extract the resting potential from v_b, which is defined only on the membrane faces.
1048  // (we do not care about gap junction in this case)
1049  // - Project this resting potential from the surface mesh onto the EMI mesh.
1050  // - assign the corresponding resting potential on the memebrane to all DOFs
1051  // myocyte that is associated with those membrane faces.
1052  // (dofs on the interfaces of myocytes and all inner dofs located inside of myocytes).
1053  // - any DOF with tag number belongs to the extra_tags set, is assigned to zero.
1054  // - We provide a mapping between the vertices of the surface mesh(vertices of the original input mesh)
1055  // and those of the EMI mesh (with updated indices).
1056  // Based on the face type (line_face, tri_face, or quad_face) and the tag numbers that
1057  // identify paired faces (face and counter-face),
1058  // we can determine whether a given face belongs to a membrane or a gap-junction region.
1059  // - Direct unique -> both mapping
1060  parab_solver.operator_unique_to_both_faces->mult(*parab_solver.vb, *parab_solver.vb_both_face);
1061  SF::assign_resting_potential_from_ionic_models_on_myocyte(*parab_solver.Vm_myocyte_emi,
1062  parab_solver.vb_both_face,
1063  parab_solver.elemTag_emi_mesh,
1064  parab_solver.map_vertex_tag_to_dof_petsc,
1065  parab_solver.line_face, parab_solver.tri_face, parab_solver.quad_face,
1066  emi_surfmesh_w_counter_face, emi_mesh);
1067 
1068  parab_solver.Vm_myocyte_emi->get_diagonal(*parab_solver.ui); // initialize all myocytes to the resting potential based on ionic models
1069  *parab_solver.vb_unique_face = *parab_solver.vb;
1070 
1071  CALI_MARK_BEGIN("output_setup");
1072  // prepare the electrics output. we skip it if we do post-processing
1073  if(param_globals::experiment != EXP_POSTPROCESS)
1074  setup_output();
1075  CALI_MARK_END("output_setup");
1076 
1077  this->initialize_time += timing(t2, t1);
1078 }
1079 
/**
 * @brief Ensure the index mappings required by the EMI grid are registered:
 *        algebraic-to-nodal scattering and PETSc-to-canonical permutation.
 */
void EMI::setup_mappings()
{
  bool emi_exits = mesh_is_registered(emi_msh);
  assert(emi_exits);
  const int dpn = 1;  // one degree of freedom per node

  if(get_scattering(emi_msh, ALG_TO_NODAL, dpn) == NULL)
  {
    log_msg(logger, 0, 0, "%s: Setting up intracellular algebraic-to-nodal scattering.", __func__);
    // NOTE(review): the scattering registration call on this branch was lost
    // in extraction -- restore from the repository version.
  }
  if(get_permutation(emi_msh, PETSC_TO_CANONICAL, dpn) == NULL)
  {
    log_msg(logger, 0, 0, "%s: Setting up intracellular PETSc to canonical permutation.", __func__);
    // NOTE(review): the permutation registration call on this branch was lost
    // in extraction -- restore from the repository version.
  }
}
1097 
/**
 * @brief Advance the EMI physics by one time step: apply stimuli, update the
 *        ionics, assemble the rhs and solve the parabolic system.
 */
void EMI::compute_step()
{
  double t1, t2;
  get_time(t1);

  // activation checking
  const double time = user_globals::tm_manager->time,
               time_step = user_globals::tm_manager->time_step;

  // NOTE(review): `time`, `time_step` and `verb` are not referenced in the
  // visible body -- possibly leftovers; confirm before removing.
  const int verb = param_globals::output_level;
  // We are treating stimuli by type:
  // - Potential stimuli (Phi_ex, GND_ex, Phi_ex_ol) are managed by a dbc_manager and are applied
  //   to the left and right hand side of the equation.
  // - Current stimuli (I_ex, I_in) are applied to the right hand side,
  //   while (I_tm) is applied to the vector Ib directly.
  CALI_MARK_BEGIN("apply_dbc_lhs");
  apply_dbc_stimulus();
  CALI_MARK_END("apply_dbc_lhs");

  // compute ionics update
  CALI_MARK_BEGIN("ion_compute");
  ion.compute_step();
  CALI_MARK_END("ion_compute");

  CALI_MARK_BEGIN("apply_stim");
  apply_current_stimulus();
  CALI_MARK_END("apply_stim");

  // convert Ib -> Irhs
  parab_solver.operator_unique_to_both_faces->mult(*parab_solver.Ib, *parab_solver.Ib_both_face);
  parab_solver.BsM->mult(*parab_solver.Ib_both_face, *parab_solver.Irhs);

  // solver parabolic system
  CALI_MARK_BEGIN("parab_solve");
  parab_solver.solve();
  {
    // v_b = B_i * u
    parab_solver.B->mult(*parab_solver.ui, *parab_solver.vb_both_face);
    // Direct both -> unique mapping
    parab_solver.operator_both_to_unique_face->mult(*parab_solver.vb_both_face, *parab_solver.vb_unique_face);
    *parab_solver.vb = *parab_solver.vb_unique_face;
  }
  CALI_MARK_END("parab_solve");

  if(user_globals::tm_manager->trigger(iotm_console)) {
    // output lin solver stats
    parab_solver.stats.log_stats(user_globals::tm_manager->time, false);
  }
  this->compute_time += timing(t2, t1);

  // since the traces have their own timing, we check for trace dumps in the compute step loop
  if(user_globals::tm_manager->trigger(iotm_trace))
    // NOTE(review): the trace-dump statement (original line 1150) was lost in
    // extraction -- restore from the repository version.
}
1152 
/**
 * @brief Write the registered output data for the current time step and
 *        accumulate I/O timing statistics.
 */
void EMI::output_step()
{
  double t1, t2;
  get_time(t1);

  output_manager.write_data();

  double curtime = timing(t2, t1);
  this->output_time += curtime;

  IO_stats.calls++;
  IO_stats.tot_time += curtime;

  // NOTE(review): a guard (original lines 1165-1166, presumably a console-timer
  // trigger check) preceding this call was lost in extraction.
  IO_stats.log_stats(user_globals::tm_manager->time, false);
}
1169 
1173 void EMI::destroy()
1174 {
1176  // close logger
1177  f_close(logger);
1178 
1179  // close output files
1180  output_manager.close_files_and_cleanup();
1181 
1182  // destroy ionics
1183  ion.destroy();
1184 }
1185 
/**
 * @brief Construct and configure all stimuli defined in the parameter set.
 *
 * Each stimulus is associated to the EMI mesh, checked for unsupported types
 * (Illum aborts), and its electrode vertices are restricted to the mesh
 * region matching the stimulus type (extra / intra / membrane).
 */
void EMI::setup_stimuli()
{
  // initialize basic stim info data (used units, supported types, etc)
  init_stim_info();

  stimuli.resize(param_globals::num_stim);
  for (int i = 0; i < param_globals::num_stim; i++) {
    // construct new stimulus
    stimulus & s = stimuli[i];

    s.translate(i);

    // we associate to the EMI mesh. this is needed for the stim_phys and stim_electrode setups.
    s.associated_intra_mesh = emi_msh, s.associated_extra_mesh = emi_msh;

    s.setup(i);

    // optogenetic illumination stimuli are not supported by the EMI model
    if (s.phys.type == Illum) {
      log_msg(0, MAX_LOG_LEVEL, ECHO, "Stimulus of type Illum (=6) is not implemented in EMI. Abort.");
      EXIT(EXIT_FAILURE);
    }

    // Depending on the stimulus type, we make sure to only stimulate the correct regions of the mesh:
    // Extracellular stimuli restrict to all DOFs of the extracellular region (including the extracellular side of the membrane).
    // Equivalent for intracellular stimuli.
    // Stimuli that act directly on the membrane restrict to DOFs with ptsData > 0, i.e. membrane, gap junctions, and complex junctions.
    if (is_extra(s.phys.type)) {
      SF::vector<mesh_int_t> extra_vertices;
      const sf_mesh& mesh = get_mesh(s.associated_extra_mesh);

      // Gather vertices from emi_msh using extra_tags
      indices_from_region_tags(extra_vertices, mesh, parab_solver.extra_tags);

      // Restrict electrode vertices to extra region
      restrict_to_set(s.electrode.vertices, extra_vertices);
    } else if (s.phys.type == I_tm) {
      const sf_mesh& mesh = get_mesh(emi_msh);
      SF::restrict_to_membrane(s.electrode.vertices, dof2ptsData, mesh);
    } else {
      SF::vector<mesh_int_t> intra_vertices;
      const sf_mesh& mesh = get_mesh(s.associated_intra_mesh);

      // Gather vertices from emi_msh using intra_tags
      indices_from_region_tags(intra_vertices, mesh, parab_solver.intra_tags);

      // Restrict electrode vertices to intra region
      restrict_to_set(s.electrode.vertices, intra_vertices);
    }

    // optionally dump the electrode vertices for visual inspection
    if (s.electrode.dump_vtx) {
      set_dir(OUTPUT);
      s.dump_vtx_file(i);
    }

    // optionally write the pulse waveform trace (rank 0 only)
    if(param_globals::stim[i].pulse.dumpTrace && get_rank() == 0) {
      set_dir(OUTPUT);
      s.pulse.wave.write_trace(s.name+".trc");
    }

  }
}
1248 
/**
 * @brief React to changes in the Dirichlet boundary condition layout.
 *
 * If the set of active potential stimuli changed, the system matrices and
 * the dbc_manager must be rebuilt so the new DBCs are enforced on the lhs.
 */
void EMI::apply_dbc_stimulus()
{
  parabolic_solver_emi& ps = parab_solver;

  // we check if the DBC layout changed, if so we recompute the matrix and the dbc_manager
  bool dbcs_have_updated = ps.dbc != nullptr && ps.dbc->dbc_update();
  // NOTE(review): the declaration of `time_not_final` (original line 1255) was
  // lost in extraction -- presumably it skips a rebuild at the final time step.

  if (dbcs_have_updated && time_not_final) {
    parab_solver.rebuild_matrices(mtype_vol, *ion.miif, stimuli, logger);
  }
}
1261 
1262 void EMI::apply_current_stimulus()
1263 {
1264  parabolic_solver_emi& ps = parab_solver;
1265  ps.Iij_stim->set(0.0);
1266 
1267  // iterate over stimuli
1268  for(stimulus & s : stimuli) {
1269  if(s.is_active()) {
1270  switch (s.phys.type) {
1271  case I_tm: {
1272  apply_stim_to_vector(s, *ps.Iij_temp, true);
1273  ps.Bi->mult(*ps.Iij_temp, *ps.Ib_both_face);
1274  ps.operator_both_to_unique_face->mult(*ps.Ib_both_face, *ps.Ib_unique_face);
1275  ps.Ib->add_scaled(*ps.Ib_unique_face, -0.5); // scale by 0.5 to revert 2x scaling in Bi*Iij
1276  } break;
1277 
1278  case I_ex:
1279  case I_in: {
1280  apply_stim_to_vector(s, *ps.Iij_stim, true);
1281  } break;
1282 
1283  default: break;
1284  }
1285  }
1286  }
1287 }
1288 
1289 void EMI::balance_electrodes()
1290 {
1291  for (int i = 0; i < param_globals::num_stim; i++) {
1292  if (param_globals::stim[i].crct.balance != -1) {
1293  int from = param_globals::stim[i].crct.balance;
1294  int to = i;
1295 
1296  log_msg(NULL, 0, 0, "Balancing stimulus %d with %d %s-wise.", from, to,
1297  is_current(stimuli[from].phys.type) ? "current" : "voltage");
1298 
1299  stimulus& s_from = stimuli[from];
1300  stimulus& s_to = stimuli[to];
1301 
1302  s_to.pulse = s_from.pulse;
1303  s_to.ptcl = s_from.ptcl;
1304  s_to.phys = s_from.phys;
1305  s_to.pulse.strength *= -1.0;
1306 
1307  if (s_from.phys.type == I_ex || s_from.phys.type == I_in) {
1308  // if from is total current, skip volume based adjustment of strength
1309  // otherwise, calling constant_total_stimulus_current() will undo the balanced scaling of to.pulse.strength
1310  // constant_total_stimulus_current() will do the scaling based on the volume
1311  if (!s_from.phys.total_current) {
1312  sf_mat& mass = *parab_solver.mass_emi;
1313  SF_real vol0 = get_volume_from_nodes(mass, s_from.electrode.vertices);
1314  SF_real vol1 = get_volume_from_nodes(mass, s_to.electrode.vertices);
1315 
1316  s_to.pulse.strength *= fabs(vol0 / vol1);
1317  }
1318  }
1319  }
1320  }
1321 }
1322 
1323 void EMI::scale_total_stimulus_current(SF::vector<stimulus>& stimuli,
1324  sf_mat& mass_vol,
1325  sf_mat& mass_surf,
1326  FILE_SPEC logger)
1327 {
1328  for (stimulus & s : stimuli){
1329  if(is_current(s.phys.type) && s.phys.total_current){
1330  switch (s.phys.type) {
1331  case I_in:
1332  case I_ex: {
1333  // compute affected volume in um^3
1334  SF_real vol = get_volume_from_nodes(mass_vol, s.electrode.vertices);
1335  // s->strength holds the total current in uA, compute current density in uA/cm^3
1336  // Theoretically, we don't need to scale the volume to cm^3 here since we later
1337  // multiply with the mass matrix and we get um^3 * uA/um^3 = uA.
1338  // However, for I_ex/I_in there is an additional um^3 to cm^3 scaling in phys.scale,
1339  // since I_ex/I_in is expected to be in uA/cm^3. Therefore, we need to compensate for that to arrive at uA later.
1340  assert(vol > 0);
1341  float scale = 1.e12 / vol;
1342 
1343  s.pulse.strength *= scale;
1344 
1345  log_msg(logger, 0, ECHO,
1346  "%s [Stimulus %d]: current density scaled to %.4g uA/cm^3\n",
1347  s.name.c_str(), s.idx, s.pulse.strength);
1348  } break;
1349 
1350  case I_tm: {
1351  // In the EMI model, I_tm only affects the membrane. Therefore, we compute
1352  // the affected membrane surface in um^2 using the membrane mass matrix.
1353  // The electrode vertices are resticted to the membrane during setup, hence this function returns a surface already.
1354  SF_real surf = get_volume_from_nodes(mass_surf, s.electrode.vertices);
1355 
1356  // convert to cm^2
1357  assert(surf > 0);
1358  surf /= 1.e8;
1359 
1360  // scale surface density now to result in correct total current
1361  s.pulse.strength /= surf;
1362  log_msg(logger, 0, ECHO,
1363  "%s [Stimulus %d]: current density scaled to %.4g uA/cm^2\n",
1364  s.name.c_str(), s.idx, s.pulse.strength);
1365  } break;
1366 
1367  default: break;
1368  }
1369  }
1370  }
1371 }
1372 
// Assign a deterministic global element numbering for a surface mesh based on
// element type, tag, and sorted node ids. This only affects output ordering.
//
// The algorithm: every rank builds a (type, tag, sorted-nodes) key per local
// element; all keys are gathered and sorted on rank 0, broadcast back, and
// each element's global id becomes the position of its key in the sorted
// list. All ranks must call this collectively on mesh.comm.
static void assign_deterministic_elem_numbering(sf_mesh & mesh)
{
  const int KEY_SIZE = 6; // type, tag, n1, n2, n3, n4
  int rank = 0, size = 0;
  MPI_Comm_rank(mesh.comm, &rank);
  MPI_Comm_size(mesh.comm, &size);

  // Build the sort key of local element i; unused node slots stay -1 so all
  // keys compare consistently regardless of element type.
  auto make_key = [&](size_t i) {
    std::array<long long, KEY_SIZE> k;
    k.fill(-1);
    k[0] = (long long)mesh.type[i];
    k[1] = (long long)mesh.tag[i];

    int nn = 0;
    if (mesh.type[i] == SF::Line) nn = 2;
    else if (mesh.type[i] == SF::Tri) nn = 3;
    else if (mesh.type[i] == SF::Quad) nn = 4;
    else nn = 0;

    std::vector<long long> nodes;
    nodes.reserve(nn);
    size_t off = mesh.dsp[i];
    for (int j = 0; j < nn; j++) {
      nodes.push_back((long long)mesh.con[off + j]);
    }
    // sorting the node ids makes the key independent of node ordering
    std::sort(nodes.begin(), nodes.end());
    for (int j = 0; j < (int)nodes.size(); j++) {
      k[2 + j] = nodes[j];
    }
    return k;
  };

  // Pack local keys
  std::vector<long long> local_keys(mesh.l_numelem * KEY_SIZE, -1);
  for (size_t i = 0; i < mesh.l_numelem; i++) {
    auto k = make_key(i);
    for (int j = 0; j < KEY_SIZE; j++) local_keys[i * KEY_SIZE + j] = k[j];
  }

  // Gather sizes (counts are in units of long long entries, not keys)
  std::vector<int> counts(size, 0), displs(size, 0);
  int local_count = (int)local_keys.size();
  MPI_Allgather(&local_count, 1, MPI_INT, counts.data(), 1, MPI_INT, mesh.comm);
  int total = 0;
  for (int r = 0; r < size; r++) {
    displs[r] = total;
    total += counts[r];
  }

  // Collect all keys on rank 0 (other ranks pass a null recv buffer)
  std::vector<long long> all_keys;
  if (rank == 0) all_keys.resize(total, -1);
  MPI_Gatherv(local_keys.data(), local_count, MPI_LONG_LONG,
              rank == 0 ? all_keys.data() : nullptr, counts.data(), displs.data(), MPI_LONG_LONG,
              0, mesh.comm);

  // Build sorted global keys on rank 0
  std::vector<std::array<long long, KEY_SIZE>> sorted_keys;
  if (rank == 0) {
    const int nkeys = total / KEY_SIZE;
    sorted_keys.resize(nkeys);
    for (int i = 0; i < nkeys; i++) {
      std::array<long long, KEY_SIZE> k;
      for (int j = 0; j < KEY_SIZE; j++) k[j] = all_keys[i * KEY_SIZE + j];
      sorted_keys[i] = k;
    }
    std::sort(sorted_keys.begin(), sorted_keys.end());
  }

  // Broadcast sorted keys (flattened) so every rank can binary-search them
  int nkeys = 0;
  if (rank == 0) nkeys = (int)sorted_keys.size();
  MPI_Bcast(&nkeys, 1, MPI_INT, 0, mesh.comm);
  std::vector<long long> flat_sorted(nkeys * KEY_SIZE, -1);
  if (rank == 0) {
    for (int i = 0; i < nkeys; i++) {
      for (int j = 0; j < KEY_SIZE; j++) flat_sorted[i * KEY_SIZE + j] = sorted_keys[i][j];
    }
  }
  MPI_Bcast(flat_sorted.data(), (int)flat_sorted.size(), MPI_LONG_LONG, 0, mesh.comm);

  // Reconstruct sorted_keys on all ranks
  if (rank != 0) {
    sorted_keys.resize(nkeys);
    for (int i = 0; i < nkeys; i++) {
      std::array<long long, KEY_SIZE> k;
      for (int j = 0; j < KEY_SIZE; j++) k[j] = flat_sorted[i * KEY_SIZE + j];
      sorted_keys[i] = k;
    }
  }

  // Assign deterministic element numbering (both REF and SUBMESH):
  // the global id of an element is the index of its key in the sorted list.
  SF::vector<mesh_int_t> & nbr_ref = mesh.register_numbering(SF::NBR_ELEM_REF);
  SF::vector<mesh_int_t> & nbr_sub = mesh.register_numbering(SF::NBR_ELEM_SUBMESH);
  nbr_ref.resize(mesh.l_numelem);
  nbr_sub.resize(mesh.l_numelem);
  for (size_t i = 0; i < mesh.l_numelem; i++) {
    auto k = make_key(i);
    auto it = std::lower_bound(sorted_keys.begin(), sorted_keys.end(), k);
    if (it == sorted_keys.end() || *it != k) {
      log_msg(0, 5, 0, "deterministic numbering failed to find key (rank %d, elem %zu)", rank, i);
      EXIT(1);
    }
    mesh_int_t gid = (mesh_int_t)(it - sorted_keys.begin());
    nbr_ref[i] = gid;
    nbr_sub[i] = gid;
  }
}
1482 
/**
 * @brief Write the EMI and membrane meshes and register the output vectors
 *        (overall potential on the EMI mesh, Vm on the membrane interface).
 */
void EMI::setup_output()
{
  bool write_binary = false;
  std::string output_base = get_basename(param_globals::meshname);
  set_dir(OUTPUT);

  // write entire mesh
  sf_mesh & mesh = get_mesh(emi_msh);
  std::string output_file = output_base + "_e";
  log_msg(0, 0, 0, "Writing \"%s\" mesh: %s", mesh.name.c_str(), output_file.c_str());
  write_mesh_parallel(mesh, write_binary, output_file.c_str());
  // register output for overall phi on the entire mesh
  output_manager.register_output(parab_solver.ui, emi_msh, 1, param_globals::phiefile, "mV", NULL);

  // write membrane mesh
  // NOTE(review): the declaration of `mesh_m` (original line 1498, presumably
  // a get_mesh() of the unique-face surface mesh) was lost in extraction.
  mesh_m.name = "Membrane";
  output_file = output_base + "_m";
  log_msg(0, 0, 0, "Writing \"%s\" mesh: %s", mesh_m.name.c_str(), output_file.c_str());
  write_mesh_parallel(mesh_m, write_binary, output_file.c_str());
  // register output for Vm on membrane interface
  // ensure deterministic element ordering in output
  // NOTE(review): an if-block (original lines 1505-1506, presumably calling
  // assign_deterministic_elem_numbering(mesh_m)) was lost in extraction;
  // the closing brace below belongs to it.
  }
  output_manager.register_output(parab_solver.vb_unique_face, emi_surface_unique_face_msh, 1, param_globals::vofile, "mV", NULL, true);

  // optionally open per-node trace output on the EMI mesh
  if(param_globals::num_trace) {
    sf_mesh & imesh = get_mesh(emi_msh);
    open_trace(ion.miif, param_globals::num_trace, param_globals::trace_node, NULL, &imesh);
  }

  // initialize generic logger for IO timings per time_dt
  IO_stats.init_logger("IO_stats.dat");
}
1518 
1519 void EMI::dump_matrices()
1520 {
1521  std::string bsname = param_globals::dump_basename;
1522  std::string fn;
1523 
1524  set_dir(OUTPUT);
1525 
1526  fn = bsname + "_lhs.bin";
1527  parab_solver.lhs_emi->write(fn.c_str());
1528 
1529  fn = bsname + "_K.bin";
1530  parab_solver.stiffness_emi->write(fn.c_str());
1531 
1532  fn = bsname + "_B.bin";
1533  parab_solver.B->write(fn.c_str());
1534 
1535  fn = bsname + "_Bi.bin";
1536  parab_solver.Bi->write(fn.c_str());
1537 
1538  fn = bsname + "_BsM.bin";
1539  parab_solver.BsM->write(fn.c_str());
1540 
1541  fn = bsname + "_M.bin";
1542  parab_solver.mass_emi->write(fn.c_str());
1543 
1544  fn = bsname + "_Ms.bin";
1545  parab_solver.mass_surf_emi->write(fn.c_str());
1546 
1547 }
1548 
1551 double EMI::timer_val(const int timer_id)
1552 {
1553  // determine
1554  int sidx = stimidx_from_timeridx(stimuli, timer_id);
1555  double val = 0.0;
1556  if(sidx != -1) {
1557  stimuli[sidx].value(val);
1558  }
1559  else
1560  val = std::nan("NaN");
1561 
1562  return val;
1563 }
1564 
1567 std::string EMI::timer_unit(const int timer_id)
1568 {
1569  int sidx = stimidx_from_timeridx(stimuli, timer_id);
1570  std::string s_unit;
1571 
1572  if(sidx != -1)
1573  // found a timer-linked stimulus
1574  s_unit = stimuli[sidx].pulse.wave.f_unit;
1575 
1576  return s_unit;
1577 }
1578 
1579 void EMI::setup_solvers()
1580 {
1581  set_dir(OUTPUT);
1582  parab_solver.init();
1583  parab_solver.rebuild_matrices(mtype_vol, *ion.miif, stimuli, logger);
1584 
1585  if(param_globals::dump2MatLab)
1586  dump_matrices();
1587 }
1588 
1589 void extract_unique_tag(SF::vector<mesh_int_t>& unique_tags)
1590 {
1591  MPI_Comm comm = SF_COMM;
1592  int size, rank;
1593  MPI_Comm_size(comm, &size);
1594  MPI_Comm_rank(comm, &rank);
1595 
1596  binary_sort(unique_tags);
1597  unique_resize(unique_tags); // unique_resize is done locally(for each rank)
1598  make_global(unique_tags, comm);
1599  binary_sort(unique_tags);
1600  unique_resize(unique_tags);
1601 }
1602 
1603 void make_global_pairs(std::set<std::pair<mesh_int_t,mesh_int_t>>&set_pair, int size)
1604 {
1605  std::vector<std::pair<mesh_int_t,mesh_int_t>> vector_pair(set_pair.begin(), set_pair.end());
1606 
1607  // Flatten the vector of pairs into a single array of ints
1608  std::vector<int> local_flat_data;
1609  for (const auto& p : vector_pair) {
1610  local_flat_data.push_back(p.first);
1611  local_flat_data.push_back(p.second);
1612  }
1613 
1614  // Determine the size of each processor's local data
1615  int local_size = local_flat_data.size();
1616  std::vector<int> all_sizes(size);
1617 
1618  // Gather sizes of local data from all processors
1619  MPI_Allgather(&local_size, 1, MPI_INT, all_sizes.data(), 1, MPI_INT, MPI_COMM_WORLD);
1620 
1621  // Compute displacement for each processor in the global array
1622  std::vector<int> displacements(size, 0);
1623  int total_size = 0;
1624  for (int i = 0; i < size; ++i) {
1625  displacements[i] = total_size;
1626  total_size += all_sizes[i];
1627  }
1628 
1629  // Global array to gather all data
1630  std::vector<int> global_flat_data(total_size);
1631 
1632  // Use MPI_Allgatherv to gather all data
1633  MPI_Allgatherv(
1634  local_flat_data.data(), local_size, MPI_INT,
1635  global_flat_data.data(), all_sizes.data(), displacements.data(), MPI_INT,
1636  MPI_COMM_WORLD
1637  );
1638 
1639  // Convert the global_flat_data back into a vector of pairs
1640  std::vector<std::pair<int, int>> global_data;
1641  for (size_t i = 0; i < global_flat_data.size(); i += 2) {
1642  global_data.emplace_back(global_flat_data[i], global_flat_data[i + 1]);
1643  }
1644 
1645  std::set<std::pair<mesh_int_t,mesh_int_t>> membraneFace_default_set(global_data.begin(), global_data.end());
1646 
1647  set_pair = membraneFace_default_set;
1648 }
1649 
1650 void compute_tags_per_rank(int num_tags, SF::vector<mesh_int_t>& num_tags_per_rank)
1651 {
1652  MPI_Comm comm = SF_COMM;
1653  int size, rank;
1654  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
1655  divide(num_tags, size, num_tags_per_rank);
1656 }
1657 
1658 void EMI::setup_EMI_mesh()
1659 {
1660  log_msg(0,0,0, "\n *** Processing EMI mesh ***\n");
1661 
1662  const std::string basename = param_globals::meshname;
1663  const int verb = param_globals::output_level;
1664  std::map<mesh_t, sf_mesh> & mesh_registry = user_globals::mesh_reg;
1665  assert(mesh_registry.count(emi_msh) == 1);
1666 
1667  set_dir(INPUT);
1668 
1669  sf_mesh & emi_mesh = mesh_registry[emi_msh];
1670  sf_mesh & emi_surfmesh_one_side = mesh_registry[emi_surface_msh];
1671  sf_mesh & emi_surfmesh_w_counter_face = mesh_registry[emi_surface_counter_msh];
1672  sf_mesh & emi_surfmesh_unique_face = mesh_registry[emi_surface_unique_face_msh];
1673 
1674  MPI_Comm comm = emi_mesh.comm;
1675 
1676  int size, rank;
1677  double t1, t2, s1, s2;
1678  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
1679 
1680  //-----------------------------------------------------------------
1681  // Validate mesh dimension: EMI only supports 3D volumetric meshes
1682  //-----------------------------------------------------------------
1683  if (emi_mesh.l_numelem > 0) {
1684  // Check first local element type (all elements should be same type in CARP)
1685  SF::elem_t first_elem = emi_mesh.type[0];
1686 
1687  if (first_elem == SF::Line || first_elem == SF::Tri || first_elem == SF::Quad) {
1688  const char* type_name = (first_elem == SF::Line) ? "1D (Line)" :
1689  (first_elem == SF::Tri) ? "2D (Tri)" :
1690  "2D (Quad)";
1691  if (rank == 0) {
1692  log_msg(0, 5, 0, "\n*** ERROR: EMI model requires a 3D volumetric mesh!");
1693  log_msg(0, 5, 0, "*** Current mesh element type: %s", type_name);
1694  log_msg(0, 5, 0, "*** EMI only supports 3D element types: Tetra, Pyramid, Prism, Hexa");
1695  log_msg(0, 5, 0, "*** Please provide a 3D mesh with volume elements.\n");
1696  }
1697  EXIT(EXIT_FAILURE);
1698  }
1699  }
1700 
1701  //-----------------------------------------------------------------
1702  // Step 1: READ *.intra and *.extra
1703  //-----------------------------------------------------------------
1704  if(verb) log_msg(NULL, 0, 0,"\nReading tags for extra and intra regions from input files");
1705  t1 = MPI_Wtime();
1706  {
1707  SF::vector<mesh_int_t> unique_extra_tags;
1708  SF::vector<mesh_int_t> unique_intra_tags;
1709 
1710  if(verb) log_msg(NULL, 0, 0,"Read extracellular tags");
1711  read_indices_global(unique_extra_tags,basename+".extra", comm);
1712  for(mesh_int_t tag:unique_extra_tags){
1713  parab_solver.extra_tags.insert(tag);
1714  }
1715 
1716  if(verb) log_msg(NULL, 0, 0,"Read intracellular tags");
1717  read_indices_global(unique_intra_tags,basename+".intra", comm);
1718  for(mesh_int_t tag:unique_intra_tags){
1719  parab_solver.intra_tags.insert(tag);
1720  }
1721 
1722  int total_num_tags = parab_solver.extra_tags.size() + parab_solver.intra_tags.size();
1723  if(total_num_tags < size){
1724  log_msg(0,5,0, "\nThe number of unique tags on EMI mesh is smaller than number of processors!");
1725  EXIT(EXIT_FAILURE);
1726  }
1727  if(verb) log_msg(NULL, 0, 0,"\nextra_tags=%lu, intra_tags=%lu", parab_solver.extra_tags.size(), parab_solver.intra_tags.size());
1728  }
1729  t2 = MPI_Wtime();
1730  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1731 
1732  //-----------------------------------------------------------------
1733  // Step 2: READ points from *.pts
1734  //-----------------------------------------------------------------
1736  SF::vector<mesh_int_t> ptsidx;
1737  SF::vector<mesh_int_t> ptsData;
1738  if(verb) log_msg(NULL, 0, 0,"\nReading points with data on each vertex");
1739  t1 = MPI_Wtime();
1740  SF::read_points(basename, comm, pts, ptsidx);
1741  ptsData.resize(ptsidx.size());
1742  assert(ptsidx.size()==ptsData.size());
1743  t2 = MPI_Wtime();
1744  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1745 
1746  std::list< sf_mesh* > meshlist;
1747  meshlist.push_back(&emi_mesh);
1748 
1749  //-----------------------------------------------------------------
1750  // Step 3: Distrubute mesh based on tag or *.part
1751  //-----------------------------------------------------------------
1752  if(verb) log_msg(NULL, 0, 0,"\nDistribute mesh based on tags");
1753  // should be replaced by scotch/pt-scotch for efficiency
1754  SF::vector<mesh_int_t> unique_tags;
1755  t1 = MPI_Wtime();
1756  distribute_elements_based_tags(emi_mesh, unique_tags);
1757  t2 = MPI_Wtime();
1758  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1759 
1760  //-----------------------------------------------------------------
1761  // Step 4: insert coordinates to the mesh
1762  //-----------------------------------------------------------------
1763  // insert points
1764  if(verb) log_msg(NULL, 0, 0, "\nInserting points");
1765  t1 = MPI_Wtime();
1766  SF::insert_points(pts, ptsidx, meshlist);
1767  t2 = MPI_Wtime();
1768  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1769 
1770  //-----------------------------------------------------------------
1771  // Step 5: extract ptsdata based on the location of each vertex
1772  //-----------------------------------------------------------------
1773  if(verb) log_msg(NULL, 0, 0, "\nCompute location of all dofs on emi mesh(inner dofs, memebarne, gap junction) saved into ptsData from original mesh");
1774  t1 = MPI_Wtime();
1775  compute_ptsdata_from_original_mesh( emi_mesh,
1776  SF::NBR_REF,
1777  vertex2ptsdata,
1778  parab_solver.extra_tags,
1779  parab_solver.intra_tags);
1780  t2 = MPI_Wtime();
1781  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1782 
1783  //-----------------------------------------------------------------
1784  // Step 6: extract faces and counterfaces on the interfaces (mem/gap)
1785  // and mark the unique faces on surface mesh
1786  // generate required mapping between surface mesh and unique faces
1787  //-----------------------------------------------------------------
1788  t1 = MPI_Wtime();
1789  if(verb) log_msg(NULL, 0, 0, "\nExtract EMI surface mesh");
1790  hashmap::unordered_map<mesh_int_t, SF::emi_index_rank<mesh_int_t>> unused_map_elem_oneface_to_elem_uniqueFace;
1791  extract_face_based_tags(emi_mesh, SF::NBR_REF, vertex2ptsdata,
1792  parab_solver.line_face,
1793  parab_solver.tri_face,
1794  parab_solver.quad_face,
1795  parab_solver.extra_tags,
1796  parab_solver.intra_tags,
1797  parab_solver.membraneFace_default,
1798  parab_solver.gapjunctionFace_default,
1799  emi_surfmesh_one_side, emi_surfmesh_w_counter_face, emi_surfmesh_unique_face,
1800  parab_solver.map_elem_uniqueFace_to_elem_oneface,
1801  unused_map_elem_oneface_to_elem_uniqueFace);
1802  meshlist.push_back(&emi_surfmesh_one_side);
1803  t2 = MPI_Wtime();
1804  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1805  //-----------------------------------------------------------------
1806  // Step 7: set of pairs(t1:t2) for membrane and gap-juntion faces
1807  //-----------------------------------------------------------------
1808  if(verb) log_msg(NULL, 0, 0, "\nset membraneFace_default:");
1809  make_global_pairs(parab_solver.membraneFace_default, size);
1810  if(verb) log_msg(NULL, 0, 0, "\nset gapjunctionFace_default:");
1811  make_global_pairs(parab_solver.gapjunctionFace_default, size);
1812 
1813  #ifdef EMI_DEBUG_MESH
1814  {
1815  for (const auto& p : parab_solver.membraneFace_default) {
1816  log_msg(NULL, 0, 0, "[%d:%d]", p.first, p.second);
1817  }
1818  }
1819 
1820  if(verb) log_msg(NULL, 0, 0, "\nset gapjunctionFace_default:");
1821  make_global_pairs(parab_solver.gapjunctionFace_default, size);
1822  if(verb==10){
1823  for (const auto& p : parab_solver.gapjunctionFace_default) {
1824  log_msg(NULL, 0, 0, "[%d:%d]", p.first, p.second);
1825  }
1826  }
1827  #endif
1828  //-----------------------------------------------------------------
1829  // Step 8: register surface meshes based on the {typeof}_face with counter face
1830  //-----------------------------------------------------------------
1831  compute_surface_mesh_with_counter_face(emi_surfmesh_w_counter_face, SF::NBR_REF,
1832  parab_solver.line_face,
1833  parab_solver.tri_face,
1834  parab_solver.quad_face);
1835 
1836  compute_surface_mesh_with_unique_face(emi_surfmesh_unique_face, SF::NBR_REF,
1837  parab_solver.line_face,
1838  parab_solver.tri_face,
1839  parab_solver.quad_face,
1840  parab_solver.map_elem_uniqueFace_to_tags);
1841 
1842  //-----------------------------------------------------------------
1843  // Step 9: create a map between emi_surfmesh_w_counter_face and emi_surfmesh (both -> one)
1844  //-----------------------------------------------------------------
1845  SF::create_reverse_elem_mapping_between_surface_meshes(parab_solver.line_face,
1846  parab_solver.tri_face,
1847  parab_solver.quad_face,
1848  parab_solver.vec_both_to_one_face,
1849  comm);
1850  t2 = MPI_Wtime();
1851  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1852 
1853  //-----------------------------------------------------------------
1854  // Step 10: global number of interfaces only on emi_surfmesh without counter face
1855  //-----------------------------------------------------------------
1856  if(verb) log_msg(NULL, 0, 0, "\ncompute global number of interface");
1857  size_t global_count_surf = 0;
1858  size_t numelem_surface = emi_surfmesh_one_side.l_numelem;
1859  MPI_Reduce(&numelem_surface, &global_count_surf, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
1860  if(verb && rank==0) fprintf(stdout, "global number of interfaces = %zu\n", global_count_surf);
1861 
1862  //-----------------------------------------------------------------
1863  // Step 11: submesh_numbering on the emi_mesh and generate parallel layout
1864  //-----------------------------------------------------------------
1865  {
1867  sub_numbering(emi_mesh);
1868  emi_mesh.generate_par_layout();
1869  }
1870 
1871  SF::meshdata<mesh_int_t, mesh_real_t> tmesh_backup_old = emi_mesh;
1872 
1873  //-----------------------------------------------------------------
1874  // Step 12: make a map bwteen key(vertex,tag)-> value(dof) after decoupling interfaces defined on emi mesh
1875  //-----------------------------------------------------------------
1876  // During interface decoupling and the introduction of new degrees of freedom (DoFs),
1877  // we assume that the mesh partitioning is performed at least on a per-tag basis.
1878  // This means that all elements sharing the same tag number belong to the same rank.
1879  t1 = MPI_Wtime();
1880  if(verb) log_msg(NULL, 0, 0, "\ndecouple emi interfaces");
1881  if(verb) log_msg(NULL, 0, 0, "\tcompute map oldIdx to dof");
1882  compute_map_vertex_to_dof(emi_mesh, SF::NBR_REF, vertex2ptsdata, parab_solver.extra_tags, parab_solver.map_vertex_tag_to_dof);
1883 
1884  //-----------------------------------------------------------------
1885  // Step 13: complete the map bwteen key(vertex,tag)-> value(dof) for counter faces defined on emi mesh
1886  //-----------------------------------------------------------------
1887  if(verb) log_msg(NULL, 0, 0, "\tcomplete map oldIdx to dof with counter interface");
1888  // add to the map the counter part of the interface
1889  complete_map_vertex_to_dof_with_counter_face(parab_solver.line_face, parab_solver.tri_face, parab_solver.quad_face, parab_solver.map_vertex_tag_to_dof);
1890 
1891  //-----------------------------------------------------------------
1892  // Step 14: update emi mesh with new dofs, so decoupling the interfaces applied on emi mesh
1893  //-----------------------------------------------------------------
1894  if(verb) log_msg(NULL, 0, 0, "\tupdate mesh with dof");
1895  update_emi_mesh_with_dofs(emi_mesh, SF::NBR_REF, parab_solver.map_vertex_tag_to_dof, parab_solver.dof2vertex);
1896  t2 = MPI_Wtime();
1897  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1898 
1899  //-----------------------------------------------------------------
1900  // Step 15: set map_vertex_tag_to_dof_petsc where the key(vertex,tag) and value(dof,petsc)
1901  //-----------------------------------------------------------------
1902  t1 = MPI_Wtime();
1903  if(verb) log_msg(NULL, 0, 0, "\nInitialize petsc =0 for map<oldIdx,tag> -><dof, petsc>");
1904  // Iterate over map to assign the (oldIndx, tag) -> (dof, petsc) atm petsc = 0
1905  for(const auto & key_value : parab_solver.map_vertex_tag_to_dof)
1906  {
1907  mesh_int_t gIndex_old = key_value.first.first;
1908  mesh_int_t tag_old = key_value.first.second;
1909  mesh_int_t dof = key_value.second;
1910 
1911  std::pair <mesh_int_t,mesh_int_t> dof_petsc = std::make_pair(dof,-1); // petsc numbering ...
1912  parab_solver.map_vertex_tag_to_dof_petsc.insert({key_value.first,dof_petsc});
1913  }
1914  t2 = MPI_Wtime();
1915  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1916 
1917  //-----------------------------------------------------------------
1918  // Step 16: insert the coordinates on new dofs to the emi mesh
1919  //-----------------------------------------------------------------
1920  t1 = MPI_Wtime();
1921  if(verb) log_msg(NULL, 0, 0, "Inserting points and ptsData of dofs to emi_mesh");
1922  insert_points_ptsData_to_dof(tmesh_backup_old, emi_mesh, SF::NBR_REF, parab_solver.dof2vertex, vertex2ptsdata, dof2ptsData);
1923  t2 = MPI_Wtime();
1924  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1925 
1926  //-----------------------------------------------------------------
1927  // Step 17: submesh_numbering on the emi_mesh and generate parallel layout after decoupling the interfaces on the emi mesh
1928  //-----------------------------------------------------------------
1929  t1 = MPI_Wtime();
1930  if(verb) log_msg(NULL, 0, 0, "Generating unique PETSc numberings");
1931  {
1933  sub_numbering(emi_mesh);
1934  emi_mesh.generate_par_layout();
1935  }
1936  t2 = MPI_Wtime();
1937  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1938 
1939  //-----------------------------------------------------------------
1940  // Step 18: register petsc_numbering on emi mesh
1941  //-----------------------------------------------------------------
1942  t1 = MPI_Wtime();
1943  if(verb) log_msg(NULL, 0, 0, "Generating unique PETSc numberings");
1944  {
1945  SF::petsc_numbering<mesh_int_t,mesh_real_t> petsc_numbering(emi_mesh.pl);
1946  petsc_numbering(emi_mesh);
1947  }
1948  t2 = MPI_Wtime();
1949  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1950 
1951  //-----------------------------------------------------------------
1952  // Step 19: complete map_vertex_tag_to_dof_petsc where the key(vertex,tag) and value(dof,petsc) for counter faces on emi mesh
1953  //-----------------------------------------------------------------
1954  if(verb) log_msg(NULL, 0, 0, "Updating the map between indices to PETSc numberings");
1955  t1 = MPI_Wtime();
1956  update_map_indices_to_petsc(emi_mesh, SF::NBR_REF, SF::NBR_PETSC, parab_solver.extra_tags, parab_solver.map_vertex_tag_to_dof_petsc, parab_solver.dof2vertex, parab_solver.elemTag_emi_mesh);
1957  t2 = MPI_Wtime();
1958  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1959 
1960  //-----------------------------------------------------------------
1961  // Step 20: update value of map {type_of}_face with new dofs after decoupling
1962  //-----------------------------------------------------------------
1963  t1 = MPI_Wtime();
1964  if(verb) log_msg(NULL, 0, 0, "Updating surface mesh with dof");
1965  update_faces_on_surface_mesh_after_decoupling_with_dofs(emi_surfmesh_one_side, parab_solver.map_vertex_tag_to_dof, parab_solver.line_face, parab_solver.tri_face, parab_solver.quad_face);
1966  t2 = MPI_Wtime();
1967  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1968 
1969  //-----------------------------------------------------------------
1970  // Step 21: register emi_surfmesh for SF::NBR_ELEM_REF, NBR_REF, SF::NBR_SUBMESH
1971  //-----------------------------------------------------------------
1972  t1 = MPI_Wtime();
1973  if(verb) log_msg(NULL, 0, 0, "Layout for element of EMI surfmesh");
1974  SF::vector<mesh_int_t> & emi_surfmesh_elem = emi_surfmesh_one_side.register_numbering(SF::NBR_ELEM_REF);
1975  SF::vector<long int> layout;
1976  SF::layout_from_count<long int>(emi_surfmesh_one_side.l_numelem, layout, emi_surfmesh_one_side.comm);
1977  size_t count = layout[rank+1] - layout[rank];
1978  emi_surfmesh_elem.resize(count);
1979  for (int i = 0; i < count; ++i){
1980  emi_surfmesh_elem[i] = layout[rank]+i;
1981  }
1982 
1983  emi_surfmesh_one_side.localize(SF::NBR_REF);
1984  emi_surfmesh_one_side.register_numbering(SF::NBR_SUBMESH);
1985  t2 = MPI_Wtime();
1986  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1987 
1988  //-----------------------------------------------------------------
1989  // Step 22: register emi_surfmesh_w_counter_face for SF::NBR_ELEM_REF, NBR_REF, SF::NBR_SUBMESH
1990  //-----------------------------------------------------------------
1991  t1 = MPI_Wtime();
1992  if(verb) log_msg(NULL, 0, 0, "Layout for element of EMI surfmesh w counter face");
1993  SF::vector<mesh_int_t> & emi_surfmesh_counter_elem = emi_surfmesh_w_counter_face.register_numbering(SF::NBR_ELEM_REF);
1994  SF::vector<long int> layout_counter;
1995  SF::layout_from_count<long int>(emi_surfmesh_w_counter_face.l_numelem, layout_counter, emi_surfmesh_w_counter_face.comm);
1996  size_t count_counter = layout_counter[rank+1] - layout_counter[rank];
1997  emi_surfmesh_counter_elem.resize(count_counter);
1998  for (int i = 0; i < count_counter; ++i){
1999  emi_surfmesh_counter_elem[i] = layout_counter[rank]+i;
2000  }
2001  emi_surfmesh_w_counter_face.localize(SF::NBR_REF);
2002  emi_surfmesh_w_counter_face.register_numbering(SF::NBR_SUBMESH);
2003  t2 = MPI_Wtime();
2004  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2005 
2006 
2007  //-----------------------------------------------------------------
2008  // Step 23: register emi_surfmesh_w_counter_face for SF::NBR_ELEM_REF, NBR_REF, SF::NBR_SUBMESH
2009  //-----------------------------------------------------------------
2010  t1 = MPI_Wtime();
2011  if(verb) log_msg(NULL, 0, 0, "Layout for element of EMI surfmesh w counter face");
2012  SF::vector<mesh_int_t> & emi_surfmesh_unique_elem = emi_surfmesh_unique_face.register_numbering(SF::NBR_ELEM_REF);
2013  SF::vector<long int> layout_unique;
2014  SF::layout_from_count<long int>(emi_surfmesh_unique_face.l_numelem, layout_unique, emi_surfmesh_unique_face.comm);
2015  size_t count_unique = layout_unique[rank+1] - layout_unique[rank];
2016  emi_surfmesh_unique_elem.resize(count_unique);
2017  for (int i = 0; i < count_unique; ++i){
2018  emi_surfmesh_unique_elem[i] = layout_unique[rank]+i;
2019  }
2020  emi_surfmesh_unique_face.localize(SF::NBR_REF);
2021  emi_surfmesh_unique_face.register_numbering(SF::NBR_SUBMESH);
2022  t2 = MPI_Wtime();
2023  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2024 
2025  //-----------------------------------------------------------------
2026  // Step 24: insert coorinates to emi_surfmesh_one_side and emi_surfmesh_w_counter_face
2027  //-----------------------------------------------------------------
2028  t1 = MPI_Wtime();
2029  if(verb) log_msg(NULL, 0, 0, "Inserting points to EMI surfmesh");
2030  insert_points_to_surface_mesh(tmesh_backup_old, emi_surfmesh_one_side, SF::NBR_REF, parab_solver.dof2vertex, parab_solver.extra_tags, parab_solver.elemTag_surface_mesh);
2031  insert_points_to_surface_mesh(tmesh_backup_old, emi_surfmesh_w_counter_face, SF::NBR_REF, parab_solver.dof2vertex, parab_solver.extra_tags, parab_solver.elemTag_surface_w_counter_mesh);
2032  insert_points_to_surface_mesh(tmesh_backup_old, emi_surfmesh_unique_face, SF::NBR_REF, parab_solver.dof2vertex);
2033  t2 = MPI_Wtime();
2034  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2035 
2036  //-----------------------------------------------------------------
2037  // Step 25: submesh_numbering & petc numebring for both emi_surfmesh_one_side and emi_surfmesh_w_counter_face
2038  //-----------------------------------------------------------------
2039  t1 = MPI_Wtime();
2040  if(verb) log_msg(NULL, 0, 0, "Generating submesh_numbering and PETSc numberings for surface mesh");
2041  {
2043  sub_numbering(emi_surfmesh_one_side);
2044  emi_surfmesh_one_side.generate_par_layout();
2045 
2046  SF::petsc_numbering<mesh_int_t,mesh_real_t> petsc_numbering(emi_surfmesh_one_side.pl);
2047  petsc_numbering(emi_surfmesh_one_side);
2048  }
2049 
2050  {
2052  sub_numbering(emi_surfmesh_w_counter_face);
2053  emi_surfmesh_w_counter_face.generate_par_layout();
2054 
2055  SF::petsc_numbering<mesh_int_t,mesh_real_t> petsc_numbering(emi_surfmesh_w_counter_face.pl);
2056  petsc_numbering(emi_surfmesh_w_counter_face);
2057  }
2058 
2059  {
2061  sub_numbering(emi_surfmesh_unique_face);
2062  emi_surfmesh_unique_face.generate_par_layout();
2063 
2064  SF::petsc_numbering<mesh_int_t,mesh_real_t> petsc_numbering(emi_surfmesh_unique_face.pl);
2065  petsc_numbering(emi_surfmesh_unique_face);
2066  // Ensure deterministic element ordering across MPI for output
2067  assign_deterministic_elem_numbering(emi_surfmesh_unique_face);
2068  }
2069  t2 = MPI_Wtime();
2070  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2071 
2072  //-----------------------------------------------------------------
2073  // Step 26: assign petsc numbering to map_vertex_tag_to_dof_petsc where the key(vertex,tag) and value(dof,petsc)
2074  //-----------------------------------------------------------------
2075  t1 = MPI_Wtime();
2076  if(verb) log_msg(NULL, 0, 0, "assign PETSc numbering for new faces");
2077  SF::assign_petsc_on_counter_face(parab_solver.map_vertex_tag_to_dof_petsc,comm);
2078  {
2079  hashmap::unordered_map<std::pair<mesh_int_t,mesh_int_t>, std::pair<mesh_int_t,mesh_int_t>>::iterator it;
2080  for (it = parab_solver.map_vertex_tag_to_dof_petsc.begin(); it != parab_solver.map_vertex_tag_to_dof_petsc.end(); it++)
2081  {
2082  std::pair <mesh_int_t,mesh_int_t> Index_tag_old = it->first;
2083  std::pair <mesh_int_t,mesh_int_t> dof_petsc = it->second;
2084  parab_solver.dof2petsc[dof_petsc.first] = dof_petsc.second;
2085  parab_solver.petsc2dof[dof_petsc.second] = dof_petsc.first;
2086  }
2087  }
2088 
2089  // DEBUG: Check for invalid PETSc indices after exchange
2090  #ifdef EMI_DEBUG_MESH
2091  {
2092  int invalid_count = 0;
2093  for (const auto& [key, val] : parab_solver.map_vertex_tag_to_dof_petsc) {
2094  if (val.second < 0) {
2095  invalid_count++;
2096  if (invalid_count <= 3) {
2097  fprintf(stderr, "RANK %d INVALID: vertex=%ld tag=%ld dof=%ld petsc=%ld\n",
2098  rank, (long)key.first, (long)key.second, (long)val.first, (long)val.second);
2099  }
2100  }
2101  }
2102  fprintf(stderr, "RANK %d: After Step 26: %d invalid PETSc indices out of %zu total\n",
2103  rank, invalid_count, parab_solver.map_vertex_tag_to_dof_petsc.size());
2104  fflush(stderr);
2105  }
2106  #endif
2107 
2108  t2 = MPI_Wtime();
2109  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2110 
2111  //-----------------------------------------------------------------
2112  // Step 27: update counter faces to the map{type_of}_face
2113  //-----------------------------------------------------------------
2114  added_counter_faces_to_map(parab_solver.line_face, parab_solver.tri_face, parab_solver.quad_face);
2115 
2116  log_msg(0,0,0, "\n *** EMI mesh processing Done ***\n");
2117 }
2118 
2119 void distribute_elements_based_tags(SF::meshdata<mesh_int_t, mesh_real_t>& mesh, SF::vector<mesh_int_t> & unique_tags)
2120 {
2121  MPI_Comm comm = mesh.comm;
2122  int size, rank;
2123  double t1, t2, s1, s2;
2124  MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
2125  const int verb = param_globals::output_level;
2126 
2127  if(verb==10) log_msg(NULL, 0, 0,"\ncompute the number of unique tags");
2128  // compute the number of unique tag
2129  unique_tags = mesh.tag;
2130  t1 = MPI_Wtime();
2131  extract_unique_tag(unique_tags);
2132  t2 = MPI_Wtime();
2133  size_t ntags = unique_tags.size();
2134  if(verb==10) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2135 
2136  if(ntags<size)
2137  {
2138  PetscPrintf(PETSC_COMM_WORLD,"\nThe number of processors should be less than the number of tags, size = %d & ntags = %zu !!!\n", size, ntags);
2139  cleanup_and_exit();
2140  }
2141 
2142  if(verb==10) log_msg(NULL, 0, 0,"\ncompute the number of tags which belongs to one rank");
2143  // compute the number of tags per rank
2144  SF::vector<mesh_int_t> ntags_per_rank;
2145  t1 = MPI_Wtime();
2146  compute_tags_per_rank(ntags, ntags_per_rank);
2147  t2 = MPI_Wtime();
2148  if(verb==10) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2149 
2150  // generate global destination w.r.t. tags
2151  // redistribute elements based on new partition
2152  t1 = MPI_Wtime();
2153  SF::vector<mesh_int_t> part_based_Tags(mesh.l_numelem);
2154  partition_based_tags(ntags, mesh.tag, unique_tags, ntags_per_rank, part_based_Tags);
2155  SF::redistribute_elements(mesh,part_based_Tags);
2156  // permute elements locally first based on the tag then element index
2157  permute_mesh_locally_based_on_tag_elemIdx(mesh);
2158  t2 = MPI_Wtime();
2159  if(verb==10) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
2160 }
2161 
2162 // In the EMI model we cannot have partition boundaries crossing cells.
2163 // Therefore this function assigns a partition to each tag. All elements with this tag will go to the same partition.
2164 void partition_based_tags(int num_tags, SF::vector<mesh_int_t> tag, SF::vector<mesh_int_t> unique_tags, SF::vector<mesh_int_t> num_tags_per_rank, SF::vector<mesh_int_t>& part)
2165 {
2166  const int verb = param_globals::output_level;
2167  MPI_Comm comm = SF_COMM;
2168  int size, rank;
2169  MPI_Comm_size(comm, &size);
2170  MPI_Comm_rank(comm, &rank);
2171 
2172  // we need to have tags_to_rank as a map globally.
2174  // Try to load the mapping from a .part file and fall back on the internal mapping if there is no such file.
2175  // If the mapping is read from file, the num_tags_per_rank argument is not used.
2176  if (load_partitions_from_file(tags_to_rank_map, comm)) {
2177  if (tags_to_rank_map.size() != unique_tags.size()) {
2178  log_msg(0,5,0, "\nerror: the number of tags in the .part file does not match the number of unique tags in the EMI mesh");
2179  EXIT(1);
2180  }
2181  }
2182  else{
2183  map_tags_to_rank(size, unique_tags, num_tags_per_rank, tags_to_rank_map);
2184  }
2185 
2186  for (size_t i = 0; i < part.size(); ++i) {
2187  if(tags_to_rank_map.count(tag[i]))
2188  part[i] = tags_to_rank_map[tag[i]];
2189  }
2190 }
2191 
2192 void map_tags_to_rank(int size, const SF::vector<mesh_int_t> & unique_tags, const SF::vector<mesh_int_t> & num_tags_per_rank, hashmap::unordered_map<mesh_int_t, mesh_int_t> &tags_to_rank_map)
2193 {
2194  // the goal is to map the tag as a key -> rank, should be global one
2195  mesh_int_t t = 0; // tag index
2196  for (size_t r = 0; r < size; ++r) {
2197  for (size_t count = 0; count < num_tags_per_rank[r];) {
2198  mesh_int_t tag = unique_tags[t];
2199 
2200  // // here we consider each tag belongs to only one rank.
2201  tags_to_rank_map.insert({tag, r});
2202  count +=1;
2203  t+=1;
2204  }
2205  }
2206 }
2207 
2208 // This function creates a tag-to-rank mapping based only on the numerical order of the tags.
2209 bool load_partitions_from_file(hashmap::unordered_map<mesh_int_t, mesh_int_t>& tags_to_rank_map, MPI_Comm comm)
2210 {
2211  FILE_SPEC tags_to_part = nullptr;
2212 
2213  SF::vector<int> parts;
2214  const std::string basename = param_globals::meshname;
2215  FILE* fd = fopen((basename + ".part").c_str(), "r");
2216  if (fd != NULL) { // check if the file exists otherwise read_indices_global() crash
2217  read_indices_global(parts, basename + ".part", comm);
2218  fclose(fd);
2219  }
2220 
2221  if (parts.size() == 0) return false; // file does not exist or is empty
2222 
2223  if (parts.size() % 2 != 0) {
2224  log_msg(0,5,0, "\nThe part file should contain 2 lines per tag, one for the tag number and the next for its associated partition number.!");
2225  EXIT(1);
2226  }
2227 
2228  for(int i = 0; i < parts.size(); i+=2) {
2229  tags_to_rank_map.insert({parts[i], parts[i + 1]});
2230  }
2231 
2232  return true;
2233 }
2234 
// Locally (per rank) permute the mesh elements so that they are ordered first
// by element tag and, within equal tags, by element index.
// NOTE(review): the declarations of 'tmesh', 'perm' and 'elemIdx' are not
// visible in this extracted view (cross-reference lines were stripped);
// presumably 'tmesh' is a working copy of 'mesh', 'perm' an index vector and
// 'elemIdx' an element-index numbering -- confirm against the full source.
void permute_mesh_locally_based_on_tag_elemIdx(SF::meshdata<mesh_int_t, mesh_real_t>& mesh)
{
  // switch connectivity to the global (reference) numbering while permuting
  mesh.globalize(SF::NBR_REF);
  // initialize the permutation with the identity 0 .. num_local_elements-1
  interval(perm, 0, mesh.tag.size());

  // binary_sort_sort_copy sorts 'tags' (primary key) and 'elemIdx' (secondary
  // key), carrying 'perm' along; 'perm' then encodes the desired element order
  SF::vector<mesh_int_t> tags = tmesh.tag;
  binary_sort_sort_copy(tags, elemIdx, perm);
  permute_mesh(tmesh, mesh, perm);

  // restore the local numbering
  mesh.localize(SF::NBR_REF);
}
2249 
2250 } // namespace opencarp
2251 
2252 #endif
int mesh_int_t
Definition: SF_container.h:46
#define SF_COMM
the default SlimFem MPI communicator
Definition: SF_globals.h:27
double SF_real
Use the general double as real type.
Definition: SF_globals.h:38
std::int32_t SF_int
Use the general std::int32_t as int type.
Definition: SF_globals.h:37
#define MAX_LOG_LEVEL
Definition: basics.h:315
#define ECHO
Definition: basics.h:308
#define CALI_CXX_MARK_FUNCTION
Definition: caliper_hooks.h:6
#define CALI_MARK_BEGIN(_str)
Definition: caliper_hooks.h:4
#define CALI_MARK_END(_str)
Definition: caliper_hooks.h:5
Dense matrix class.
Definition: dense_mat.hpp:43
void globalize(SF_nbr nbr_type)
Localize the connectivity data w.r.t. a given numbering.
Definition: SF_container.h:525
size_t l_numelem
local number of elements
Definition: SF_container.h:399
void localize(SF_nbr nbr_type)
Localize the connectivity data w.r.t. a given numbering.
Definition: SF_container.h:496
MPI_Comm comm
the parallel mesh is defined on a MPI world
Definition: SF_container.h:404
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
Definition: SF_container.h:464
vector< T > tag
element tag
Definition: SF_container.h:417
Functor class generating a numbering optimized for PETSc.
Definition: SF_numbering.h:231
Container for a PETSc VecScatter.
Functor class applying a submesh renumbering.
Definition: SF_numbering.h:68
A vector storing arbitrary data.
Definition: SF_vector.h:43
size_t size() const
The current size of the vector.
Definition: SF_vector.h:104
void resize(size_t n)
Resize a vector.
Definition: SF_vector.h:209
hm_int count(const K &key) const
Check if key exists.
Definition: hashmap.hpp:579
void insert(InputIterator first, InputIterator last)
Insert Iterator range.
Definition: hashmap.hpp:539
size_t size() const
Definition: hashmap.hpp:687
size_t size() const
Definition: hashmap.hpp:1066
iterator find(const K &key)
Definition: hashmap.hpp:1006
hm_int erase(const K &key)
Definition: hashmap.hpp:978
long d_time
current time instance index
Definition: timer_utils.h:77
double time_step
global reference time step
Definition: timer_utils.h:78
int add_eq_timer(double istart, double iend, int ntrig, double iintv, double idur, const char *iname, const char *poolname=nullptr)
Add a equidistant step timer to the array of timers.
Definition: timer_utils.cc:78
long d_end
final index in multiples of dt
Definition: timer_utils.h:82
double time
current time
Definition: timer_utils.h:76
EMI model based on computed current on the faces, main EMI physics class.
void init_solver(SF::abstract_linear_solver< T, S > **sol)
Definition: SF_init.h:220
void read_points(const std::string basename, const MPI_Comm comm, vector< S > &pts, vector< T > &ptsidx)
Read the points and insert them into a list of meshes.
Definition: SF_mesh_io.h:824
void interval(vector< T > &vec, size_t start, size_t end)
Create an integer interval between start and end.
Definition: SF_vector.h:350
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
Definition: SF_network.h:225
void permute_mesh(const meshdata< T, S > &inmesh, meshdata< T, S > &outmesh, const vector< T > &perm)
Permute the element data of a mesh based on a given permutation.
Definition: SF_mesh_utils.h:56
void unique_resize(vector< T > &_P)
Definition: SF_sort.h:348
void divide(const size_t gsize, const size_t num_parts, vector< T > &loc_sizes)
divide gsize into num_parts local parts with even distribution of the remainder
Definition: SF_vector.h:358
void count(const vector< T > &data, vector< S > &cnt)
Count number of occurrences of indices.
Definition: SF_vector.h:332
void insert_points(const vector< S > &pts, const vector< T > &ptsidx, std::list< meshdata< T, S > * > &meshlist)
Insert the points from the read-in buffers into a list of distributed meshes.
Definition: SF_mesh_io.h:894
void assemble_matrix(abstract_matrix< T, S > &mat, meshdata< mesh_int_t, mesh_real_t > &domain, matrix_integrator< mesh_int_t, mesh_real_t > &integrator)
Generalized matrix assembly.
int max_nodal_edgecount(const meshdata< T, S > &mesh)
Compute the maximum number of node-to-node edges for a mesh.
Definition: SF_container.h:608
void redistribute_elements(meshdata< T, S > &mesh, meshdata< T, S > &sendbuff, vector< T > &part)
Redistribute the element data of a parallel mesh among the ranks based on a partitioning.
void restrict_to_set(vector< T > &v, const hashmap::unordered_set< T > &set)
size_t root_count_ascii_lines(std::string file, MPI_Comm comm)
count the lines in an ASCII file
void init_vector(SF::abstract_vector< T, S > **vec)
Definition: SF_init.h:99
void binary_sort(vector< T > &_V)
Definition: SF_sort.h:284
void init_matrix(SF::abstract_matrix< T, S > **mat)
Definition: SF_init.h:199
void write_mesh_parallel(const meshdata< T, S > &mesh, bool binary, std::string basename)
Write a parallel mesh to hard disk without gathering it on one rank.
elem_t
element type enum
Definition: SF_container.h:53
@ Line
Definition: SF_container.h:61
@ Tri
Definition: SF_container.h:60
@ Quad
Definition: SF_container.h:59
void binary_sort_sort_copy(vector< T > &_V, vector< T > &_W, vector< S > &_A)
Definition: SF_sort.h:335
@ NBR_PETSC
PETSc numbering of nodes.
Definition: SF_container.h:203
@ NBR_ELEM_REF
The element numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:204
@ NBR_REF
The nodal numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:201
@ NBR_SUBMESH
Submesh nodal numbering: The globally ascending sorted reference indices are reindexed.
Definition: SF_container.h:202
@ NBR_ELEM_SUBMESH
Submesh element numbering: The globally ascending sorted reference indices are reindexed.
Definition: SF_container.h:205
void dump_trace(MULTI_IF *MIIF, limpet::Real time)
void open_trace(MULTI_IF *MIIF, int n_traceNodes, int *traceNodes, int *label, opencarp::sf_mesh *imesh)
Set up ionic model traces at some global node numbers.
timer_manager * tm_manager
a manager for the various physics timers
Definition: main.cc:58
bool using_legacy_stimuli
flag storing whether legacy stimuli are used
Definition: main.cc:64
std::map< mesh_t, sf_mesh > mesh_reg
Registry for the different meshes used in a multi-physics simulation.
Definition: main.cc:52
int stimidx_from_timeridx(const SF::vector< stimulus > &stimuli, const int timer_id)
determine link between timer and stimulus
Definition: electrics.cc:800
@ iotm_console
Definition: timer_utils.h:44
@ iotm_trace
Definition: timer_utils.h:44
sf_vec * get_data(datavec_t d)
Retrieve a petsc data vector from the data registry.
Definition: sim_utils.cc:887
SF::scattering * get_scattering(const int from, const int to, const SF::SF_nbr nbr, const int dpn)
Get a scattering from the global scatter registry.
void set_cond_type(MaterialType &m, cond_t type)
Definition: electrics.cc:852
SF_real get_volume_from_nodes(sf_mat &mass, SF::vector< mesh_int_t > &local_idx)
Definition: fem_utils.cc:217
sf_mesh & get_mesh(const mesh_t gt)
Get a mesh by specifying the gridID.
Definition: sf_interface.cc:33
SF::scattering * register_scattering(const int from, const int to, const SF::SF_nbr nbr, const int dpn)
Register a scattering between two grids, or between algebraic and nodal representation of data on the ...
Definition: sf_interface.cc:69
cond_t
description of electrical tissue properties
Definition: fem_types.h:42
@ intra_cond
Definition: fem_types.h:43
SF::scattering * get_permutation(const int mesh_id, const int perm_id, const int dpn)
Get the PETSC to canonical permutation scattering for a given mesh and number of dpn.
SF::meshdata< mesh_int_t, mesh_real_t > sf_mesh
Definition: sf_interface.h:48
void apply_stim_to_vector(const stimulus &s, sf_vec &vec, bool add)
Definition: electrics.cc:471
int set_dir(IO_t dest)
Definition: sim_utils.cc:490
void cleanup_and_exit()
Definition: sim_utils.cc:1274
void read_indices_global(SF::vector< T > &idx, const std::string filename, MPI_Comm comm)
Definition: fem_utils.h:53
int get_rank(MPI_Comm comm=PETSC_COMM_WORLD)
Definition: basics.h:276
void indices_from_region_tags(SF::vector< mesh_int_t > &idx, const sf_mesh &mesh, const hashmap::unordered_set< int > &tags)
Populate vertex data with the vertices of multiple tag regions.
Definition: fem_utils.cc:169
const char * get_mesh_type_name(mesh_t t)
get a char* to the name of a mesh type
Definition: sf_interface.cc:46
void region_mask(mesh_t meshspec, SF::vector< RegionSpecs > &regspec, SF::vector< int > &regionIDs, bool mask_elem, const char *reglist)
classify elements/points as belonging to a region
Definition: ionics.cc:363
void init_stim_info(void)
uses potential for stimulation
Definition: stimulate.cc:49
bool is_extra(stim_t type)
whether stimulus is on extra grid (or on intra)
Definition: stimulate.cc:82
FILE_SPEC f_open(const char *fname, const char *mode)
Open a FILE_SPEC.
Definition: basics.cc:135
bool have_dbc_stims(const SF::vector< stimulus > &stimuli)
return whether any stimuli require Dirichlet boundary conditions
Definition: electrics.cc:875
SF::scattering * register_permutation(const int mesh_id, const int perm_id, const int dpn)
Register a permutation between two orderings for a mesh.
bool is_current(stim_t type)
uses current as stimulation
Definition: stimulate.cc:72
@ OUTPUT
Definition: sim_utils.h:53
char * dupstr(const char *old_str)
Definition: basics.cc:44
void log_msg(FILE_SPEC out, int level, unsigned char flag, const char *fmt,...)
Definition: basics.cc:72
mesh_t
The enum identifying the different meshes we might want to load.
Definition: sf_interface.h:59
@ emi_surface_unique_face_msh
Definition: sf_interface.h:68
@ emi_surface_msh
Definition: sf_interface.h:66
@ emi_surface_counter_msh
Definition: sf_interface.h:67
void get_time(double &tm)
Definition: basics.h:436
bool mesh_is_registered(const mesh_t gt)
check whether an SF mesh is set
Definition: sf_interface.cc:63
SF::abstract_vector< SF_int, SF_real > sf_vec
Definition: sf_interface.h:50
int get_size(MPI_Comm comm=PETSC_COMM_WORLD)
Definition: basics.h:290
SF::abstract_matrix< SF_int, SF_real > sf_mat
Definition: sf_interface.h:52
V timing(V &t2, const V &t1)
Definition: basics.h:448
std::string get_basename(const std::string &path)
Definition: basics.cc:61
void f_close(FILE_SPEC &f)
Close a FILE_SPEC.
Definition: basics.cc:162
@ ElecMat
Definition: fem_types.h:39
file_desc * FILE_SPEC
Definition: basics.h:138
Basic physics types.
#define UM2_to_CM2
convert um^2 to cm^2
Definition: physics_types.h:35
#define PETSC_TO_CANONICAL
Permute algebraic data from PETSC to canonical ordering.
Definition: sf_interface.h:79
#define ALG_TO_NODAL
Scatter algebraic to nodal.
Definition: sf_interface.h:77
#define ELEM_PETSC_TO_CANONICAL
Permute algebraic element data from PETSC to canonical ordering.
Definition: sf_interface.h:81
#define EXP_POSTPROCESS
Definition: sim_utils.h:162
Electrical stimulation functions.