openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
electrics.cc
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // openCARP is an open cardiac electrophysiology simulator.
3 //
4 // Copyright (C) 2020 openCARP project
5 //
6 // This program is licensed under the openCARP Academic Public License (APL)
7 // v1.0: You can use and redistribute it and/or modify it in non-commercial
8 // academic environments under the terms of APL as published by the openCARP
9 // project v1.0, or (at your option) any later version. Commercial use requires
10 // a commercial license (info@opencarp.org).
11 //
12 // This program is distributed without any warranty; see the openCARP APL for
13 // more details.
14 //
15 // You should have received a copy of the openCARP APL along with this program
16 // and can find it online: http://www.opencarp.org/license
17 // ----------------------------------------------------------------------------
18 
#include <vector>

#include "electrics.h"
#include "petsc_utils.h"
#include "timers.h"
#include "stimulate.h"
#include "electric_integrators.h"

#include "SF_init.h" // for SF::init_xxx()
33 
34 namespace opencarp {
35 
37 {
38  double t1, t2;
39  get_time(t1);
40 
41  set_dir(OUTPUT);
42 
43  // open logger
44  logger = f_open("electrics.log", param_globals::experiment != 4 ? "w" : "r");
45 
46  // setup mappings between extra and intra grids, algebraic and nodal,
47  // and between PETSc and canonical orderings
48  setup_mappings();
49 
50  // the ionic physics is currently triggered from inside the Electrics to have tighter
51  // control over it
52  ion.logger = logger;
53  ion.initialize();
54 
55  // set up Intracellular tissue
57  region_mask(intra_elec_msh, mtype[intra_grid].regions, mtype[intra_grid].regionIDs, true, "gregion_i");
58 
59  if (param_globals::bidomain || param_globals::extracell_monodomain_stim) {
60  // set up Extracellular tissue
62  region_mask(extra_elec_msh, mtype[extra_grid].regions, mtype[extra_grid].regionIDs, true, "gregion_e");
63  }
64 
65  // add electrics timer for time stepping, add to time stepper tool (TS)
66  double global_time = user_globals::tm_manager->time;
67  timer_idx = user_globals::tm_manager->add_eq_timer(global_time, param_globals::tend, 0,
68  param_globals::dt, 0, "elec::ref_dt", "TS");
69 
70  // electrics stimuli setup
71  setup_stimuli();
72 
73  // set up the linear equation systems. this needs to happen after the stimuli have been
74  // set up, since we need boundary condition info
75  setup_solvers();
76 
77  // the next setup steps require the solvers to be set up, since they use the matrices
78  // generated by those
79 
80  // balance electrodes, we may need the extracellular mass matrix
81  balance_electrodes();
82  // total current scaling
84  // initialize the LATs detector
86 
87  // initialize phie recovery data
88  if(strlen(param_globals::phie_rec_ptf) > 0)
90 
91  // prepare the electrics output. we skip it if we do post-processing
92  if(param_globals::experiment != EXP_POSTPROCESS)
93  setup_output();
94 
95  if (param_globals::prepacing_bcl > 0)
96  prepace();
97  this->initialize_time += timing(t2, t1);
98 }
99 
101 {
102  MaterialType *m = mtype+g;
103 
104  // initialize random conductivity fluctuation structure with PrM values
105  m->regions.resize(param_globals::num_gregions);
106 
107  const char* grid_name = g == Electrics::intra_grid ? "intracellular" : "extracellular";
108  log_msg(logger, 0, 0, "Setting up %s tissue poperties for %d regions ..", grid_name,
109  param_globals::num_gregions);
110 
111  char buf[64];
112  RegionSpecs* reg = m->regions.data();
113 
114  for (size_t i=0; i<m->regions.size(); i++, reg++) {
115  if(!strcmp(param_globals::gregion[i].name, "")) {
116  snprintf(buf, sizeof buf, ", gregion_%d", int(i));
117  param_globals::gregion[i].name = dupstr(buf);
118  }
119 
120  reg->regname = strdup(param_globals::gregion[i].name);
121  reg->regID = i;
122  reg->nsubregs = param_globals::gregion[i].num_IDs;
123  if(!reg->nsubregs)
124  reg->subregtags = NULL;
125  else {
126  reg->subregtags = new int[reg->nsubregs];
127 
128  for (int j=0;j<reg->nsubregs;j++)
129  reg->subregtags[j] = param_globals::gregion[i].ID[j];
130  }
131 
132  // describe material in given region
133  elecMaterial *emat = new elecMaterial();
134  emat->material_type = ElecMat;
135 
136  emat->InVal[0] = param_globals::gregion[i].g_il;
137  emat->InVal[1] = param_globals::gregion[i].g_it;
138  emat->InVal[2] = param_globals::gregion[i].g_in;
139 
140  emat->ExVal[0] = param_globals::gregion[i].g_el;
141  emat->ExVal[1] = param_globals::gregion[i].g_et;
142  emat->ExVal[2] = param_globals::gregion[i].g_en;
143 
144  emat->BathVal[0] = param_globals::gregion[i].g_bath;
145  emat->BathVal[1] = param_globals::gregion[i].g_bath;
146  emat->BathVal[2] = param_globals::gregion[i].g_bath;
147 
148  // convert units from S/m -> mS/um
149  for (int j=0; j<3; j++) {
150  emat->InVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
151  emat->ExVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
152  emat->BathVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
153  }
154  reg->material = emat;
155  }
156 
157  if((g == Electrics::intra_grid && strlen(param_globals::gi_scale_vec)) ||
158  (g == Electrics::extra_grid && strlen(param_globals::ge_scale_vec)) )
159  {
161  sf_mesh & mesh = get_mesh(mt);
162  const char* file = g == Electrics::intra_grid ? param_globals::gi_scale_vec : param_globals::ge_scale_vec;
163 
164  size_t num_file_entries = SF::root_count_ascii_lines(file, mesh.comm);
165 
166  if(num_file_entries != mesh.g_numelem)
167  log_msg(0,4,0, "%s warning: number of %s conductivity scaling entries does not match number of elements!",
168  __func__, get_mesh_type_name(mt));
169 
170  // set up parallel element vector and read data
171  sf_vec *escale;
172  SF::init_vector(&escale, get_mesh(mt), 1, sf_vec::elemwise);
173  escale->read_ascii(g == Electrics::intra_grid ? param_globals::gi_scale_vec : param_globals::ge_scale_vec);
174 
175  if(get_size() > 1) {
176  // set up element vector permutation and permute
177  if(get_permutation(mt, ELEM_PETSC_TO_CANONICAL, 1) == NULL) {
179  }
181  sc(*escale, false);
182  }
183 
184  // copy data into SF::vector
185  SF_real* p = escale->ptr();
186  m->el_scale.assign(p, p + escale->lsize());
187  escale->release_ptr(p);
188  }
189 }
190 
191 void Electrics::setup_mappings()
192 {
193  bool intra_exits = mesh_is_registered(intra_elec_msh), extra_exists = mesh_is_registered(extra_elec_msh);
194  assert(intra_exits);
195  const int dpn = 1;
196 
197  // It may be that another physic (e.g. ionic models) has already computed the intracellular mappings,
198  // thus we first test their existence
199  if(get_scattering(intra_elec_msh, ALG_TO_NODAL, dpn) == NULL) {
200  log_msg(logger, 0, 0, "%s: Setting up intracellular algebraic-to-nodal scattering.", __func__);
202  }
204  log_msg(logger, 0, 0, "%s: Setting up intracellular PETSc to canonical permutation.", __func__);
206  }
207 
208  // extracellular mappings
209  if(extra_exists) {
210  log_msg(logger, 0, 0, "%s: Setting up extracellular algebraic-to-nodal scattering.", __func__);
212  log_msg(logger, 0, 0, "%s: Setting up extracellular PETSc to canonical permutation.", __func__);
214  log_msg(logger, 0, 0, "%s: Setting up intra-to-extra scattering.", __func__);
216  }
217 
218  bool check_i2e = false;
219  if(check_i2e && extra_exists) {
220  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
221  sf_mesh & extra_mesh = get_mesh(extra_elec_msh);
222  int rank = get_rank();
223 
225 
226  const SF::vector<mesh_int_t> & intra_alg_nod = intra_mesh.pl.algebraic_nodes();
227  const SF::vector<mesh_int_t> & extra_alg_nod = extra_mesh.pl.algebraic_nodes();
228  const SF::vector<mesh_int_t> & extra_petsc_nbr = extra_mesh.get_numbering(SF::NBR_PETSC);
229  const SF::vector<mesh_int_t> & intra_ref_nbr = intra_mesh.get_numbering(SF::NBR_REF);
230  const SF::vector<mesh_int_t> & extra_ref_nbr = extra_mesh.get_numbering(SF::NBR_REF);
231 
232  // TODO(init) : delete these three at the end of this section?
233  sf_vec *intra_testvec; SF::init_vector(&intra_testvec, intra_mesh, 1, sf_vec::algebraic);
234  sf_vec *extra_testvec; SF::init_vector(&extra_testvec, extra_mesh, 1, sf_vec::algebraic);
235  sf_vec *i2e_testvec; SF::init_vector(&i2e_testvec, extra_mesh, 1, sf_vec::algebraic);
236 
237  SF_real* id = intra_testvec->ptr();
238  for(size_t i=0; i<intra_alg_nod.size(); i++) {
239  int lpidx = local_nodal_to_local_petsc(intra_mesh, rank, intra_alg_nod[i]);
240  id[lpidx] = intra_ref_nbr[intra_alg_nod[i]];
241  }
242  intra_testvec->release_ptr(id);
243 
244  SF_real* ed = extra_testvec->ptr();
245  for(size_t i=0; i<extra_alg_nod.size(); i++) {
246  int lpidx = local_nodal_to_local_petsc(extra_mesh, rank, extra_alg_nod[i]);
247  ed[lpidx] = extra_ref_nbr[extra_alg_nod[i]];
248  }
249  extra_testvec->release_ptr(ed);
250 
251  i2e_testvec->set(-1.0);
252  i2e.forward(*intra_testvec, *i2e_testvec);
253 
254  int err = 0;
255  for(size_t i=0; i<extra_alg_nod.size(); i++) {
256  auto id = i2e_testvec->get(i);
257  auto ed = extra_testvec->get(i);
258  if(id > -1 && id != ed)
259  err++;
260  }
261 
262  if(get_global(err, MPI_SUM))
263  log_msg(0,5,0, "Electrics mapping test failed!");
264  else
265  log_msg(0,5,0, "Electrics mapping test succeeded!");
266  }
267 }
268 
270 {
271  double t1, t2;
272  get_time(t1);
273 
274  // if requested, we checkpoint the current state
275  checkpointing();
276 
277  // activation checking
278  const double time = user_globals::tm_manager->time,
279  time_step = user_globals::tm_manager->time_step;
280  lat.check_acts(time);
281  lat.check_quiescence(time, time_step);
282 
283  // I believe that we need to treat the stimuli in two ways:
284  // - Extracellular potential stimuli (this includes ground) affect the
285  // elliptic solver in a more delicate way, as such, there is a dbc_manager
286  // to take care of that.
287  // - Extracellular and Intracellular current stimuli are applied to the rhs vectors
288  // and can be managed by the stimulate() code directly.
289  stimulate_extracellular();
290 
291  if(param_globals::bidomain == BIDOMAIN)
293 
294  clamp_Vm();
295 
296  // compute ionics update
297  ion.compute_step();
298 
299  stimulate_intracellular();
300 
301  // solver parabolic system
303 
304  clamp_Vm();
305 
306  if(user_globals::tm_manager->trigger(iotm_console)) {
307  // output lin solver stats
309  if(param_globals::bidomain == BIDOMAIN)
311  }
312  this->compute_time += timing(t2, t1);
313 
314  // since the traces have their own timing, we check for trace dumps in the compute step loop
317 }
318 
320 {
321  double t1, t2;
322  get_time(t1);
323  // for pseudo-bidomain we compute extracellular potential only for output
324  if(param_globals::bidomain == PSEUDO_BIDM) {
328  }
329 
330  // recover phie
331  if(phie_rcv.pts.size()) {
333  }
334 
337 
338  double curtime = timing(t2, t1);
339  this->output_time += curtime;
340 
341  IO_stats.calls++;
342  IO_stats.tot_time += curtime;
343 
346 }
347 
352 {
353  // output LAT data
355 
356  // close logger
357  f_close(logger);
358 
359  // close output files
361 
362  // destroy ionics
363  ion.destroy();
364 }
365 
366 void balance_electrode(SF::vector<stimulus> & stimuli, int balance_from, int balance_to)
367 {
368  log_msg( NULL, 0, 0, "Balancing stimulus %d with %d %s-wise.",balance_from, balance_to,
369  is_current(stimuli[balance_from].phys.type) ? "current" : "voltage" );
370 
371  stimulus & from = stimuli[balance_from];
372  stimulus & to = stimuli[balance_to];
373 
374  to.pulse = from.pulse;
375  to.ptcl = from.ptcl;
376  to.phys = from.phys;
377  to.pulse.strength *= -1.0;
378 
379  if (from.phys.type == I_ex)
380  {
381  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys)); assert(elec);
382 
383  sf_mat & mass = *elec->ellip_solver.mass_e;
384  SF_real vol0 = get_volume_from_nodes(mass, from.electrode.vertices);
386 
387  to.pulse.strength *= fabs(vol0/vol1);
388  }
389 }
390 
391 void Electrics::balance_electrodes()
392 {
393  for(int i=0; i<param_globals::num_stim; i++) {
394  if(param_globals::stim[i].crct.balance != -1) {
395  int from = param_globals::stim[i].crct.balance;
396  int to = i;
397 
398  balance_electrode(stimuli, from, to);
399  }
400  }
401 }
402 
403 void Electrics::setup_stimuli()
404 {
405  // initialize basic stim info data (used units, supported types, etc)
406  init_stim_info();
407  bool dumpTrace = true;
408 
409  if(dumpTrace) set_dir(OUTPUT);
410 
411  stimuli.resize(param_globals::num_stim);
412  for(int i=0; i<param_globals::num_stim; i++)
413  {
414  // construct new stimulus
415  stimulus & s = stimuli[i];
416 
418  s.translate(i);
419 
420  s.setup(i);
421 
422  if(dumpTrace && get_rank() == 0)
423  s.pulse.wave.write_trace(s.name+".trc");
424  }
425 }
426 
427 void apply_stim_to_vector(const stimulus & s, sf_vec & vec, bool add)
428 {
429  double val; s.value(val);
430  const SF::vector<mesh_int_t> & idx = s.electrode.vertices;
431  const int rank = get_rank();
432 
433  SF_real* v = vec.ptr();
434  for(size_t i=0; i<idx.size(); i++) {
435  int vidx = local_nodal_to_local_petsc(*vec.mesh, rank, idx[i]);
436  SF_real b = v[vidx];
437  v[vidx] = add ? b + val : val;
438  }
439  vec.release_ptr(v);
440 }
441 
442 void Electrics::stimulate_intracellular()
443 {
445 
446  // iterate over stimuli
447  for(stimulus & s : stimuli) {
448  if(s.is_active()) {
449  // for active stimuli, deal with the stimuli-type specific stimulus application
450  switch(s.phys.type)
451  {
452  case I_tm: {
453  if(param_globals::operator_splitting) {
454  apply_stim_to_vector(s, *ps.Vmv, true);
455  }
456  else {
457  SF_real Cm = 1.0;
459  SF_real sc = tm.time_step / Cm;
460 
461  ps.Irhs->set(0.0);
462  apply_stim_to_vector(s, *ps.Irhs, true);
463 
464  *ps.tmp_i1 = *ps.IIon;
465  *ps.tmp_i1 -= *ps.Irhs;
466  *ps.tmp_i1 *= sc; // tmp_i1 = sc * (IIon - Irhs)
467 
468  // add ionic, transmembrane and intracellular currents to rhs
469  if(param_globals::parab_solve != parabolic_solver::EXPLICIT)
470  ps.mass_i->mult(*ps.tmp_i1, *ps.Irhs);
471  else
472  *ps.Irhs = *ps.tmp_i1;
473  }
474  break;
475  }
476 
477  case Illum: {
478  sf_vec* illum_vec = ion.miif->gdata[limpet::illum];
479 
480  if(illum_vec == NULL) {
481  log_msg(0,5,0, "Cannot apply illumination stim: global vector not present!");
482  EXIT(EXIT_FAILURE);
483  } else {
484  apply_stim_to_vector(s, *illum_vec, false);
485  }
486 
487  break;
488  }
489 
490  default: break;
491  }
492  }
493  }
494 }
495 
496 void Electrics::clamp_Vm() {
497  for(stimulus & s : stimuli) {
498  if(s.phys.type == Vm_clmp && s.is_active())
500  }
501 }
502 
503 void Electrics::stimulate_extracellular()
504 {
505  if(param_globals::bidomain) {
506  // we check if the DBC layout changed, if so we recompute the matrix and the dbc_manager
507  bool dbcs_have_updated = ellip_solver.dbc != nullptr && ellip_solver.dbc->dbc_update();
509 
510  if(dbcs_have_updated && time_not_final)
512 
513  ellip_solver.phiesrc->set(0.0);
514 
515  for(const stimulus & s : stimuli) {
516  if(s.is_active() && s.phys.type == I_ex)
518  }
519  }
520 }
521 
523  SF::vector<mesh_int_t> & inp_idx,
525 {
526  int mpi_rank = get_rank(), mpi_size = get_size();
527  const SF::vector<mesh_int_t> & layout = mesh.pl.algebraic_layout();
528 
529  SF::vector<mesh_int_t> sndbuff;
530 
531  size_t buffsize = 0;
532  idx.resize(0);
533 
534  for(int pid=0; pid < mpi_size; pid++) {
535  if(mpi_rank == pid) {
536  sndbuff = inp_idx;
537  buffsize = sndbuff.size();
538  }
539 
540  MPI_Bcast(&buffsize, sizeof(size_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
541  sndbuff.resize(buffsize);
542  MPI_Bcast(sndbuff.data(), buffsize*sizeof(mesh_int_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
543 
544  mesh_int_t start = layout[mpi_rank], stop = layout[mpi_rank+1];
545 
546  for(mesh_int_t i : sndbuff) {
547  if(i >= start && i < stop)
548  idx.push_back(i - start);
549  }
550  }
551 
552  binary_sort(idx); unique_resize(idx);
553 }
554 
556  SF::vector<mesh_int_t> & inp_idx,
558 {
559  int mpi_rank = get_rank(), mpi_size = get_size();
560  const SF::vector<mesh_int_t> & alg_nod = mesh.pl.algebraic_nodes();
562 
564  for(mesh_int_t ii : alg_nod)
565  amap[nbr[ii]] = ii;
566 
567  SF::vector<mesh_int_t> sndbuff;
568  size_t buffsize = 0;
569  idx.resize(0);
570 
571  for(int pid=0; pid < mpi_size; pid++) {
572  if(mpi_rank == pid) {
573  sndbuff = inp_idx;
574  buffsize = sndbuff.size();
575  }
576 
577  MPI_Bcast(&buffsize, sizeof(size_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
578  sndbuff.resize(buffsize);
579  MPI_Bcast(sndbuff.data(), buffsize*sizeof(mesh_int_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
580 
581  for(mesh_int_t i : sndbuff) {
582  if(amap.count(i))
583  idx.push_back(amap[i]);
584  }
585  }
586 
587  binary_sort(idx); unique_resize(idx);
588 }
589 
592 void setup_dataout(const int dataout, std::string dataout_vtx, mesh_t grid,
593  SF::vector<mesh_int_t>* & restr, bool async = false)
594 {
595  sf_mesh & mesh = get_mesh(grid);
596 
597  switch(dataout) {
598 
599  case DATAOUT_SURF: {
600  sf_mesh surfmesh;
601  compute_surface_mesh(mesh, SF::NBR_SUBMESH, surfmesh);
602 
603  SF::vector<mesh_int_t> idxbuff(surfmesh.con);
604  binary_sort(idxbuff); unique_resize(idxbuff);
605 
606  restr = new SF::vector<mesh_int_t>();
607 
608  // for sync output, we need restr to hold the local indices in the petsc vectors
609  // that have been permuted to canonical numbering. For async, we need the
610  // non-overlapping decomposition of indices in NBR_SUBMESH numbering. The petsc indices will be
611  // computed at a later stage. The only reason we need to call compute_restr_idx_async,
612  // is that surface nodes in NBR_SUBMESH, may
613  // reside on partitions where they are not part of the algebraic nodes. thus we need to
614  // recommunicate to make sure the data layout is correct. We do not have this problem for
615  // DATAOUT_VTX.
616  if(!async)
617  compute_restr_idx(mesh, idxbuff, *restr);
618  else
619  compute_restr_idx_async(mesh, idxbuff, *restr);
620 
621  break;
622  }
623 
624  case DATAOUT_VTX: {
625  SF::vector<mesh_int_t> idxbuff;
626 
627  update_cwd();
628 
629  set_dir(INPUT);
630  read_indices(idxbuff, dataout_vtx, mesh, SF::NBR_REF, true, PETSC_COMM_WORLD);
631  set_dir(CURDIR);
632 
633  restr = new SF::vector<mesh_int_t>();
634 
635  if(!async) {
637  for(mesh_int_t & i : idxbuff) i = nbr[i];
638 
639  compute_restr_idx(mesh, idxbuff, *restr);
640  } else {
641  *restr = idxbuff;
642  }
643 
644  break;
645  }
646 
647  case DATAOUT_NONE:
648  case DATAOUT_VOL:
649  default: break;
650  }
651 }
652 
653 void Electrics::setup_output()
654 {
655  int rank = get_rank();
656  SF::vector<mesh_int_t>* restr_i = NULL;
657  SF::vector<mesh_int_t>* restr_e = NULL;
658  set_dir(OUTPUT);
659 
660  setup_dataout(param_globals::dataout_i, param_globals::dataout_i_vtx, intra_elec_msh,
661  restr_i, param_globals::num_io_nodes > 0);
662 
663  if(param_globals::dataout_i)
664  output_manager.register_output(parab_solver.Vmv, intra_elec_msh, 1, param_globals::vofile, "mV", restr_i);
665 
666  if(param_globals::bidomain) {
667  setup_dataout(param_globals::dataout_e, param_globals::dataout_e_vtx, extra_elec_msh,
668  restr_e, param_globals::num_io_nodes > 0);
669 
670  if(param_globals::dataout_i)
671  output_manager.register_output(ellip_solver.phie_i, intra_elec_msh, 1, param_globals::phieifile, "mV", restr_i);
672  if(param_globals::dataout_e)
673  output_manager.register_output(ellip_solver.phie, extra_elec_msh, 1, param_globals::phiefile, "mV", restr_e);
674  }
675 
676  if(phie_rcv.pts.size())
677  output_manager.register_output_sync(phie_rcv.phie_rec, phie_recv_msh, 1, param_globals::phie_recovery_file, "mV");
678 
680 
681  if(param_globals::num_trace) {
682  sf_mesh & imesh = get_mesh(intra_elec_msh);
683  open_trace(ion.miif, param_globals::num_trace, param_globals::trace_node, NULL, &imesh);
684  }
685 
686  // initialize generic logger for IO timings per time_dt
687  IO_stats.init_logger("IO_stats.dat");
688 }
689 
690 void Electrics::dump_matrices()
691 {
692  std::string bsname = param_globals::dump_basename;
693  std::string fn;
694 
695  set_dir(OUTPUT);
696 
697  // dump monodomain matrices
698  if ( param_globals::parab_solve==1 ) {
699  // using Crank-Nicolson
700  fn = bsname + "_Ki_CN.bin";
701  parab_solver.lhs_parab->write(fn.c_str());
702  }
703  fn = bsname + "_Ki.bin";
704  parab_solver.rhs_parab->write(fn.c_str());
705 
706  fn = bsname + "_Mi.bin";
707  parab_solver.mass_i->write(fn.c_str());
708 
709  if ( param_globals::bidomain ) {
710  fn = bsname + "_Kie.bin";
711  ellip_solver.phie_mat->write(fn.c_str());
712 
713  fn = bsname + "_Me.bin";
714  ellip_solver.mass_e->write(fn.c_str());
715  }
716 }
717 
718 
721 double Electrics::timer_val(const int timer_id)
722 {
723  // determine
724  int sidx = stimidx_from_timeridx(stimuli, timer_id);
725  double val = 0.0;
726  if(sidx != -1) {
727  stimuli[sidx].value(val);
728  }
729  else
730  val = std::nan("NaN");
731 
732  return val;
733 }
734 
737 std::string Electrics::timer_unit(const int timer_id)
738 {
739  int sidx = stimidx_from_timeridx(stimuli, timer_id);
740  std::string s_unit;
741 
742  if(sidx != -1)
743  // found a timer-linked stimulus
744  s_unit = stimuli[sidx].pulse.wave.f_unit;
745 
746  return s_unit;
747 }
748 
751 int stimidx_from_timeridx(const SF::vector<stimulus> & stimuli, const int timer_id)
752 {
753  // the only electrical quantities linked to a timer are stimuli
754  // thus we search for timer links only among stimuli for now
755 
756  // iterate over stimuli
757  for(size_t i = 0; i<stimuli.size(); i++)
758  {
759  const stimulus & s = stimuli[i];
760 
761  if(s.ptcl.timer_id == timer_id)
762  return s.idx;
763  }
764 
765  // invalid timer index not linked to any stimulus
766  return -1;
767 }
768 
779 void get_kappa(sf_vec & kappa, IMPregion *ir, limpet::MULTI_IF & miif, double k)
780 {
781  double* reg_kappa = new double[miif.N_IIF];
782 
783  for(int i=0; i<miif.N_IIF; i++)
784  reg_kappa[i] = k * miif.IIF[i]->cgeom().SVratio * ir[i].volFrac;
785 
786  double *kd = kappa.ptr();
787 
788  for(int i = 0; i < miif.numNode; i++)
789  kd[i] = reg_kappa[(int) miif.IIFmask[i]];
790 
791  kappa.release_ptr(kd);
792  delete [] reg_kappa;
793 }
794 
795 
804 {
805  for(size_t i=0; i < m.regions.size(); i++) {
806  elecMaterial *emat = static_cast<elecMaterial*>(m.regions[i].material);
807  emat->g = type;
808  }
809 }
810 
811 void Electrics::setup_solvers()
812 {
813  set_dir(OUTPUT);
814  parab_solver.init();
816 
817  if (param_globals::bidomain) {
818  ellip_solver.init();
820  }
821 
822  if(param_globals::dump2MatLab)
823  dump_matrices();
824 }
825 
827 {
828  for(const stimulus & s : stimuli) {
829  if(is_dbc(s.phys.type))
830  return true;
831  }
832  return false;
833 }
834 
837 const char* get_tsav_ext(double time)
838 {
839  int min_idx = -1;
840  double min_diff = 1e100;
841 
842  for(int i=0; i<param_globals::num_tsav; i++)
843  {
844  double diff = fabs(param_globals::tsav[i] - time);
845  if(min_diff > diff) {
846  min_diff = diff;
847  min_idx = i;
848  }
849  }
850 
851  if(min_idx == -1)
852  min_idx = 0;
853 
854  return param_globals::tsav_ext[min_idx];
855 }
856 
857 void Electrics::checkpointing()
858 {
860 
861  // regular user selected state save
862  if (tm.trigger(iotm_chkpt_list)) {
863  char save_fnm[1024];
864  const char* tsav_ext = get_tsav_ext(tm.time);
865 
866  snprintf(save_fnm, sizeof save_fnm, "%s.%s.roe", param_globals::write_statef, tsav_ext);
867 
868  ion.miif->dump_state(save_fnm, tm.time, intra_elec_msh, false, GIT_COMMIT_COUNT);
869  }
870 
871  // checkpointing based on interval
872  if (tm.trigger(iotm_chkpt_intv)) {
873  char save_fnm[1024];
874  snprintf(save_fnm, sizeof save_fnm, "checkpoint.%.1f.roe", tm.time);
875  ion.miif->dump_state(save_fnm, tm.time, intra_elec_msh, false, GIT_COMMIT_COUNT);
876  }
877 }
878 
880 {
881  double t0, t1, dur;
882  get_time(t0);
883  stats.init_logger("ell_stats.dat");
884 
885  // here we can differentiate the solvers
886  SF::init_solver(&lin_solver);
887  sf_mesh & extra_mesh = get_mesh(extra_elec_msh);
888  sf_vec::ltype alg_type = sf_vec::algebraic;
889  const int dpn = 1;
890 
891  SF::init_vector(&phie, extra_mesh, dpn, alg_type);
892  SF::init_vector(&phiesrc, extra_mesh, dpn, alg_type);
893  SF::init_vector(&currtmp, extra_mesh, dpn, alg_type);
894 
896  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
897  SF::init_vector(&phie_i, intra_mesh, dpn, alg_type);
898  }
899 
900  int max_row_entries = max_nodal_edgecount(extra_mesh);
901 
902  SF::init_matrix(&phie_mat);
903  SF::init_matrix(&mass_e);
904 
905  // alloc stiffness matrix
906  phie_mat->init(extra_mesh, dpn, dpn, max_row_entries);
907  // alloc mass matrix
908  mass_e ->init(extra_mesh, dpn, dpn, param_globals::mass_lumping ? 1 : max_row_entries);
909  dur = timing(t1, t0);
910 }
911 
915 {
916  double t0, t1, dur;
917  get_time(t0);
918  rebuild_stiffness(mtype, stimuli, logger);
919  rebuild_mass(logger);
920  dur = timing(t1, t0);
921 }
922 
926 {
927  double t0, t1, dur;
928  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
929 
930  MaterialType & mt = mtype[Electrics::extra_grid];
931  const bool have_dbc = have_dbc_stims(stimuli);
932 
933  cond_t condType = sum_cond;
934  set_cond_type(mt, condType);
935 
936  // get mesh reference
937  sf_mesh & mesh = get_mesh(extra_elec_msh);
938 
939  get_time(t0);
940 
941  // fill the system
942  elec_stiffness_integrator stfn_integ(mt);
943 
944  phie_mat->zero();
945  SF::assemble_matrix(*phie_mat, mesh, stfn_integ);
946  phie_mat->scale(-1.0);
947 
948  dur = timing(t1,t0);
949  log_msg(logger,0,log_flag, "Computed ellipitc stiffness matrix in %.3f seconds.", dur);
950 
951  // set boundary conditions
952  if(have_dbc) {
953  log_msg(logger,0,log_flag, "Elliptic lhs matrix enforcing Dirichlet boundaries.");
954  get_time(t0);
955 
956  if(dbc == nullptr)
957  dbc = new dbc_manager(*phie_mat, stimuli);
958  else
959  dbc->recompute_dbcs();
960 
961  dbc->enforce_dbc_lhs();
962 
963  dur = timing(t1,t0);
964  log_msg(logger,0,log_flag, "Elliptic lhs matrix Dirichlet enforcing done in %.3f seconds.", dur);
965  }
966  else {
967  log_msg(logger,1,ECHO, "Elliptic lhs matrix is singular!");
968  // we are dealing with a singular system
969  phie_mat_has_nullspace = true;
970  }
971 
972  // solver has not been initialized yet
973  set_dir(INPUT);
974  get_time(t0);
975 
976  setup_linear_solver(logger);
977 
978  dur = timing(t1,t0);
979  log_msg(logger,0,log_flag, "Initializing elliptic solver in %.5f seconds.", dur);
980  set_dir(OUTPUT);
981 }
982 
984 {
985  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
986  double t0, t1, dur;
987  mass_integrator mass_integ;
988 
989  // get mesh reference
990  sf_mesh & mesh = get_mesh(extra_elec_msh);
991  get_time(t0);
992  mass_e->zero();
993 
994  if(param_globals::mass_lumping) {
995  SF::assemble_lumped_matrix(*mass_e, mesh, mass_integ);
996  } else {
997  SF::assemble_matrix(*mass_e, mesh, mass_integ);
998  }
999 
1000  dur = timing(t1,t0);
1001  log_msg(logger,0,log_flag, "Computed elliptic mass matrix in %.3f seconds.", dur);
1002 }
1003 
1004 void elliptic_solver::setup_linear_solver(FILE_SPEC logger)
1005 {
1006 
1007  tol = param_globals::cg_tol_ellip;
1008  max_it = param_globals::cg_maxit_ellip;
1009 
1010  std::string solver_file;
1011  solver_file = param_globals::ellip_options_file;
1012  lin_solver->setup_solver(*phie_mat, tol, max_it, param_globals::cg_norm_ellip,
1013  "elliptic PDE", phie_mat_has_nullspace,
1014  logger, solver_file.c_str(),
1015  "-ksp_type cg -pc_type hypre -pc_hypre_type boomeramg "
1016  "-pc_hypre_boomeramg_max_iter 1 "
1017  "-pc_hypre_boomeramg_strong_threshold 0.0 -options_left");
1018 }
1019 
1020 void elliptic_solver::solve(sf_mat & Ki, sf_vec & Vmv, sf_vec & tmp_i)
1021 {
1022  double t0,t1;
1024  // assembly of rhs for FE
1025  if (phiesrc->mag() > 0.0) {
1026  mass_e->mult(*phiesrc, *currtmp);
1027  phiesrc->deep_copy(*currtmp);
1028  }
1029 
1030  Ki.mult(Vmv, tmp_i);
1031 
1032  bool add = true;
1033  i2e->forward(tmp_i, *phiesrc, add);
1034 
1035  if(dbc != nullptr)
1036  dbc->enforce_dbc_rhs(*phiesrc);
1037 
1038  get_time(t0);
1039  (*lin_solver)(*phie, *phiesrc);
1040 
1041  // treat solver statistics
1042  auto dur = timing(t1, t0);
1043  lin_solver->time += dur;
1044  stats.slvtime += dur;
1045  stats.update_iter(lin_solver->niter);
1046  if(lin_solver->reason < 0) {
1047  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
1048  petsc_get_converged_reason_str(lin_solver->reason));
1049  EXIT(1);
1050  }
1051 
1052  add = false;
1053  i2e->backward(*phie, *phie_i, add);
1054 }
1055 
1057 {
1058  double t0,t1;
1059 
1060  if(dbc != nullptr)
1061  dbc->enforce_dbc_rhs(*phiesrc);
1062 
1063  get_time(t0);
1064  (*lin_solver)(*phie, *phiesrc);
1065 
1066  // treat solver statistics
1067  auto dur = timing(t1, t0);
1068  lin_solver->time += dur;
1069  stats.slvtime += dur;
1070  stats.update_iter(lin_solver->niter);
1071 
1072  if(lin_solver->reason < 0) {
1073  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
1074  petsc_get_converged_reason_str(lin_solver->reason));
1075  EXIT(1);
1076  }
1077 
1078  // phie_i is only set up when we have an IntraMesh registered
1079  if(is_init(phie_i)) {
1080  bool add = false;
1082  i2e->backward(*phie, *phie_i, add);
1083  }
1084 }
1085 
1087 {
1088  double t0, t1, dur;
1089  get_time(t0);
1090  stats.init_logger("par_stats.dat");
1091 
1092  // here we can differentiate the solvers
1093  SF::init_solver(&lin_solver);
1094 
1095  sf_vec* vm_ptr = get_data(vm_vec);
1096  sf_vec* iion_ptr = get_data(iion_vec);
1097 
1098  if(!(vm_ptr != NULL && iion_ptr != NULL)) {
1099  log_msg(0,5,0, "%s error: global Vm and Iion vectors not properly set up! Ionics seem invalid! Aborting!",
1100  __func__);
1101  EXIT(1);
1102  }
1103 
1104  SF::init_vector(&Vmv, vm_ptr);
1105  Vmv->shallow_copy(*vm_ptr);
1106  SF::init_vector(&IIon, iion_ptr);
1107  IIon->shallow_copy(*iion_ptr);
1108 
1109  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
1110  sf_vec::ltype alg_type = sf_vec::algebraic;
1111 
1112  int dpn = 1;
1113  SF::init_vector(&kappa_i, intra_mesh, dpn, alg_type);
1114  SF::init_vector(&tmp_i1, intra_mesh, dpn, alg_type);
1115  SF::init_vector(&tmp_i2, intra_mesh, dpn, alg_type);
1116  SF::init_vector(&old_vm, intra_mesh, dpn, alg_type);
1117 
1118  if(!param_globals::operator_splitting)
1119  SF::init_vector(&Irhs, intra_mesh, dpn, alg_type);
1120 
1121  // alloc matrices
1122  int max_row_entries = max_nodal_edgecount(intra_mesh);
1123 
1124  SF::init_matrix(&lhs_parab);
1125  SF::init_matrix(&rhs_parab);
1126  SF::init_matrix(&mass_i);
1127 
1128  rhs_parab->init(intra_mesh, dpn, dpn, max_row_entries);
1129  mass_i ->init(intra_mesh, dpn, dpn, param_globals::mass_lumping ? 1 : max_row_entries);
1130 
1131  parab_tech = static_cast<parabolic_solver::parabolic_t>(param_globals::parab_solve);
1132  dur = timing(t1, t0);
1133 }
1134 
// (review note: the signature line of this function was lost in this listing,
//  presumably a parabolic_solver setup/rebuild method. The body assembles the
//  parabolic system: kappa scaling, stiffness K_i, mass M_i, and the lhs
//  operator matching the selected time-integration scheme.)
{
  double start, end, period;
  get_time(start);
  double t0, t1, dur;
  mass_integrator mass_integ;
  int dpn = 1;  // degrees of freedom per node (note: not used in this body)

  // be verbose only when the user requested a higher output level
  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
  MaterialType & mt = mtype[Electrics::intra_grid];

  // kappa_i is scaled by UM2_to_CM2 / dt (unit conversion over the time step)
  double Dt = user_globals::tm_manager->time_step;
  get_kappa(*kappa_i, param_globals::imp_region, miif, UM2_to_CM2 / Dt);

  cond_t condType = intra_cond;
  sf_mesh & mesh = get_mesh(intra_elec_msh);

  // bidomain-equivalent monodomain and pseudo-bidomain runs use the
  // para_cond conductivity set instead of the plain intracellular one
  if( (param_globals::bidomain == MONODOMAIN && param_globals::bidm_eqv_mono) ||
      (param_globals::bidomain == PSEUDO_BIDM) )
    condType = para_cond;

  // set material and conductivity type
  set_cond_type(mt, condType);

  // fill the system
  {
    get_time(t0);

    // assemble the parabolic stiffness matrix K_i
    elec_stiffness_integrator stfn_integ(mt);
    SF::assemble_matrix(*rhs_parab, mesh, stfn_integ);

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed parabolic stiffness matrix in %.3f seconds.", dur);
    get_time(t0);

    // assemble the mass matrix M_i, lumped (diagonal) if requested
    if(param_globals::mass_lumping)
      SF::assemble_lumped_matrix(*mass_i, mesh, mass_integ);
    else
      SF::assemble_matrix(*mass_i, mesh, mass_integ);

    // scale M_i rows by kappa_i (right factor is an empty placeholder vector)
    sf_vec* empty; SF::init_vector(&empty);
    mass_i->mult_LR(*kappa_i, *empty);

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed parabolic mass matrix in %.3f seconds.", dur);
    delete empty;
  }

  // initialize parab lhs
  if(parab_tech != EXPLICIT) {
    lhs_parab->duplicate(*rhs_parab);
    // if we have mass lumping, then the nonzero pattern between Mi and Ki is different
    bool same_nonzero = param_globals::mass_lumping == false;

    if (parab_tech==CN) {
      // lhs = M_i - theta * K_i
      lhs_parab->scale(-param_globals::theta);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
    }
    else if (parab_tech==O2dT) {
      // lhs = 3*(M_i/2) - K_i/2; note mass_i itself is rescaled to M_i/2 here
      // and the halved mass is then used by solve_O2dT's rhs as well
      lhs_parab->scale(-0.5);
      mass_i->scale(0.5);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
    }
  }
  else {
    // explicit scheme: no lhs system, only the inverted (lumped) mass diagonal
    SF::init_vector(&inv_mass_diag, Vmv);
    mass_i->get_diagonal(*inv_mass_diag);

    SF_real* p = inv_mass_diag->ptr();

    for(int i=0; i<inv_mass_diag->lsize(); i++)
      p[i] = 1.0 / p[i];

    inv_mass_diag->release_ptr(p);
  }

  // implicit schemes need a linear solver for the lhs system
  if(parab_tech == CN || parab_tech == O2dT) {
    set_dir(INPUT);
    get_time(t0);

    setup_linear_solver(logger);

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Initializing parabolic solver in %.5f seconds.", dur);
    set_dir(OUTPUT);
  }
  period = timing(end, start);  // NOTE(review): total setup time is measured but unused
}
1225 
1226 void parabolic_solver::setup_linear_solver(FILE_SPEC logger)
1227 {
1228  tol = param_globals::cg_tol_parab;
1229  max_it = param_globals::cg_maxit_parab;
1230 
1231  std::string solver_file;
1232  solver_file = param_globals::parab_options_file;
1233  lin_solver->setup_solver(*lhs_parab, tol, max_it, param_globals::cg_norm_parab,
1234  "parabolic PDE", false, logger, solver_file.c_str(),
1235  "-pc_type bjacobi -sub_pc_type ilu -ksp_type cg");
1236 }
1237 
// Dispatch one parabolic solve step to the time-integration scheme selected
// via param_globals::parab_solve (stored in parab_tech).
{
  switch (parab_tech) {
  case CN: solve_CN(phie_i); break;      // Crank-Nicolson (implicit)
  case O2dT: solve_O2dT(phie_i); break;  // second-order-in-dt scheme (implicit)
  default: solve_EF(phie_i); break;      // explicit (forward Euler) update
  }
}
1246 
1247 void parabolic_solver::solve_CN(sf_vec & phie_i)
1248 {
1249  double t0,t1;
1250  // assembly of rhs for CN
1251  if (param_globals::bidomain == BIDOMAIN) {
1252  tmp_i1->deep_copy(phie_i);
1253  tmp_i1->add_scaled(*Vmv, 1.0 - param_globals::theta);
1254  rhs_parab->mult(*tmp_i1, *tmp_i2);
1255  }
1256  else {
1257  rhs_parab->mult(*Vmv, *tmp_i2);
1258  *tmp_i2 *= 1.0 - param_globals::theta;
1259  }
1260 
1261  mass_i->mult(*Vmv, *tmp_i1);
1262  *tmp_i1 += *tmp_i2;
1263 
1264  // add current contributions to rhs
1265  if(!param_globals::operator_splitting)
1266  tmp_i1->add_scaled(*Irhs, -1.0);
1267 
1268  get_time(t0);
1269 
1270  (*lin_solver)(*Vmv, *tmp_i1);
1271 
1272  if(lin_solver->reason < 0) {
1273  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
1274  petsc_get_converged_reason_str(lin_solver->reason));
1275  EXIT(1);
1276  }
1277 
1278  // treat solver statistics
1279  auto dur = timing(t1, t0);
1280  lin_solver->time += dur;
1281  stats.slvtime += dur;
1282  stats.update_iter(lin_solver->niter);
1283 }
1284 
1285 void parabolic_solver::solve_O2dT(sf_vec & phie_i)
1286 {
1287  double t0,t1;
1288  // assembly of rhs for FE
1289  if (param_globals::bidomain == BIDOMAIN) {
1290  tmp_i2->deep_copy(phie_i);
1291  tmp_i2->add_scaled(*Vmv, 0.5);
1292  rhs_parab->mult(*tmp_i2, *tmp_i1); // tmp_i1 = K_i(Vm^t * 0.5 + phi_e)
1293  }
1294  else {
1295  rhs_parab->mult(*Vmv, *tmp_i1);
1296  *tmp_i1 *= 0.5; // tmp_i1 = 0.5 * K_i Vm^t
1297  }
1298 
1299  mass_i->mult(*Vmv, *tmp_i2); // tmp_i2 = M/2 Vm^t
1300  tmp_i1->add_scaled(*tmp_i2, 4.0); // tmp_i1 = (2M+K_i/2)Vm^t
1301  mass_i->mult(*old_vm, *tmp_i2); // tmp_i2 = M/2 Vm^{t-1}
1302 
1303  tmp_i1->add_scaled(*tmp_i2, -1.0); // tmp_i1 = (2M+K_i/2)Vm^t-M/2 Vm^{t-1}
1304  *old_vm = *Vmv;
1305 
1306  get_time(t0);
1307 
1308  // solve
1309  (*lin_solver)(*Vmv, *tmp_i1);
1310 
1311  // treat solver statistics
1312  stats.slvtime += timing(t1, t0);
1313  stats.update_iter(lin_solver->niter);
1314 }
1315 
1316 void parabolic_solver::solve_EF(sf_vec & phie_i)
1317 {
1318  double t0,t1,t2;
1319  get_time(t0);
1320 
1321  // assembly of rhs for FE
1322  if (param_globals::bidomain == BIDOMAIN) {
1323  tmp_i2->deep_copy(phie_i);
1324  *tmp_i2 += *Vmv;
1325  rhs_parab->mult(*tmp_i2, *tmp_i1);
1326  }
1327  else {
1328  rhs_parab->mult(*Vmv, *tmp_i1);
1329  }
1330 
1331  *tmp_i1 *= *inv_mass_diag;
1332  Vmv->add_scaled(*tmp_i1, 1.0);
1333 
1334  if(param_globals::operator_splitting == false)
1335  Vmv->add_scaled(*Irhs, -1.0);
1336 
1337  // record rhs timing
1338  stats.slvtime += timing(t1, t0);
1339 }
1340 
// (review note: the signature line was lost in this listing; judging by the
//  body this is the LAT_detector constructor/setup. It translates the user's
//  lats[] definitions into Activation entries, appends the two detectors
//  needed for APD computation, and configures the quiescence sentinel.)
{
  // directory of a previous simulation when restarting, else NULL
  char* prvSimDir = strlen(param_globals::start_statef) ?
                    get_file_dir(param_globals::start_statef) : NULL;

  const char* extn = ".dat";

  // if compute_APD we need an extra 2 acts
  int addLATs = param_globals::compute_APD ? 2 : 0;

  bool have_sentinel = param_globals::t_sentinel > 0.0;
  bool need_to_add_sentinel = have_sentinel && (param_globals::sentinel_ID < 0);

  addLATs += need_to_add_sentinel ? 1 : 0;
  acts.resize(param_globals::num_LATs + addLATs);

  // j counts the detectors actually accepted (invalid lats[] entries are skipped)
  int j=0;
  for (int i = 0; i < param_globals::num_LATs; i++ )
  {
    // using Ph only with bidomain runs
    // NOTE(review): this guard also fires for method <= 0, but the message
    // only mentions the Phie/bidomain case — possibly misleading; verify.
    if (param_globals::lats[i].method <= 0 || (param_globals::lats[i].measurand == PHIE && !param_globals::bidomain)) {
      log_msg(NULL, 3, 0, "Phie-based LAT measurement requires bidomain >=1 Ignoring lats[%d].", i);
      continue;
    }

    acts[j].method = (ActMethod)param_globals::lats[i].method;
    acts[j].threshold = param_globals::lats[i].threshold;
    acts[j].mode = param_globals::lats[i].mode;
    acts[j].all = param_globals::lats[i].all;
    acts[j].measurand = (PotType)param_globals::lats[i].measurand;
    acts[j].ID = param_globals::lats[i].ID;
    acts[j].fout = NULL;

    // output file name: "<ID>.dat" when all activations are recorded,
    // "init_acts_<ID>.dat" when only the first activation is kept
    if(param_globals::lats[i].all) {
      acts[j].fname = (char*) malloc((strlen(param_globals::lats[i].ID)+strlen(extn)+1)*sizeof(char));
      snprintf(acts[j].fname, strlen(param_globals::lats[i].ID)+strlen(extn)+1, "%s%s", param_globals::lats[i].ID, extn);
    }
    else {
      char prfx[] = "init_acts_";
      int max_len = strlen(prfx) + strlen(param_globals::lats[i].ID) + strlen(extn) + 1;

      acts[j].fname = (char*) malloc(max_len*sizeof(char));
      snprintf(acts[j].fname, max_len, "%s%s%s", prfx, param_globals::lats[i].ID, extn);
    }

    // restarting: remember the path of the previous run's activation file
    if(prvSimDir != NULL) {
      int len_fname = strlen(prvSimDir)+strlen(acts[j].fname)+2;
      acts[j].prv_fname = (char*) malloc(len_fname*sizeof(char));
      snprintf(acts[j].prv_fname, len_fname, "%s/%s", prvSimDir, acts[j].fname);
    }

    j++;
  }

  // APD computation needs an upstroke detector and a repolarisation detector
  if(param_globals::compute_APD) {
    acts[j].method = ACT_THRESH; // threshold crossing
    acts[j].threshold = param_globals::actthresh;
    acts[j].mode = 0; // upstroke
    acts[j].all = true;
    acts[j].measurand = VM; // Vm
    //acts[j].ID = dupstr("Vm_Activation");
    acts[j].fout = NULL;
    acts[j].fname = dupstr("vm_activation.dat");

    j++;
    acts[j].method = ACT_THRESH; // threshold crossing
    acts[j].threshold = param_globals::recovery_thresh;
    acts[j].mode = 1; // repol
    acts[j].all = true;
    acts[j].measurand = VM; // Vm
    //(*acts)[j+1].ID = param_globals::lats[i].ID;
    acts[j].fout = NULL;
    acts[j].fname = dupstr("vm_repolarisation.dat");

    j++;
  }

  // set up sentinel for activity checking
  sntl.activated = have_sentinel;
  sntl.t_start = param_globals::t_sentinel_start;
  sntl.t_window = param_globals::t_sentinel;
  sntl.t_quiesc =-1.;   // negative marks "not yet initialized", see check_quiescence()
  sntl.ID = param_globals::sentinel_ID;

  if(need_to_add_sentinel) {
    // add a default LAT detector as sentinel
    acts[j].method = ACT_THRESH; // threshold crossing
    acts[j].threshold = param_globals::actthresh;
    acts[j].mode = 0; // upstroke
    acts[j].all = true;
    acts[j].measurand = VM; // Vm
    //(*acts)[j].ID = dupstr("Vm_Activation");
    acts[j].fout = NULL;
    acts[j].fname = dupstr("vm_sentinel.dat");
    // set sentinel index
    sntl.ID = j;
    j++;
  }

  if(prvSimDir) free(prvSimDir);
}
1443 
1444 void print_act_log(FILE_SPEC logger, const SF::vector<Activation> & acts, int idx)
1445 {
1446  const Activation & act = acts[idx];
1447 
1448  log_msg(logger, 0, 0, "\n");
1449  log_msg(logger, 0, 0, "LAT detector [%2d]", idx);
1450  log_msg(logger, 0, 0, "-----------------\n");
1451 
1452  log_msg(logger, 0, 0, "Measurand: %s", act.measurand ? "Phie" : "Vm");
1453  log_msg(logger, 0, 0, "All: %s", act.all ? "All" : "Only first");
1454  log_msg(logger, 0, 0, "Method: %s", act.method==ACT_DT ? "Derivative" : "Threshold crossing");
1455 
1456  char buf[64], gt[2], sgn[2];
1457  snprintf(sgn, sizeof sgn, "%s", act.mode?"-":"+");
1458  snprintf(gt, sizeof gt, "%s", act.mode?"<":">");
1459 
1460  if(act.method==ACT_DT)
1461  snprintf(buf, sizeof buf, "Maximum %sdf/dt %s %.2f mV", sgn, gt, act.threshold);
1462  else
1463  snprintf(buf, sizeof buf, "Intersection %sdf/dt with %.2f", sgn, act.threshold);
1464 
1465  log_msg(logger, 0, 0, "Mode: %s", buf);
1466  log_msg(logger, 0, 0, "Threshold: %.2f mV\n", act.threshold);
1467 }
1468 
/// Initialize all LAT detectors after the electric solver setup: attach each
/// detector to its measurand vector (Vm or phie), allocate work buffers, and
/// open/restore output files.
/// @param vm     global Vm vector (petsc ordering)
/// @param phie   global extracellular potential vector
/// @param offset global node-number offset applied to reported indices
void LAT_detector::init(sf_vec & vm, sf_vec & phie, int offset)
{
  // we use the electrics logger for output
  // (listing note: the statement acquiring `logger` sat on a line that is not
  //  visible in this extraction)

  // TODO(init): except for the shallow copies, shouldn't these be deleted?
  // When to delete them?
  for(size_t i = 0; i < acts.size(); ++i) {
    acts[i].init = 1;
    // phi aliases the measurand vector (no copy): Vm unless measurand says phie
    SF::init_vector(&(acts[i].phi));
    acts[i].phi->shallow_copy(!acts[i].measurand ? vm : phie);
    acts[i].offset = offset;

    // phip keeps the previous time step's values for crossing detection
    SF::init_vector(&(acts[i].phip), acts[i].phi);
    *acts[i].phip = *acts[i].phi;

    // derivative based detector
    if (acts[i].method == ACT_DT) {
      SF::init_vector(&(acts[i].dvp0), acts[i].phi);
      SF::init_vector(&(acts[i].dvp1), acts[i].phi);
      if(acts[i].mode)
        log_msg(NULL,2,0, "Detection of -df/dt|max not implemented, +df/dt|max will be detected.");
    }

    // allocate additional local buffers
    acts[i].ibuf = (int *)malloc(acts[i].phi->lsize()*sizeof(int));
    acts[i].actbuf = (double *)malloc(acts[i].phi->lsize()*sizeof(double));

    if (!acts[i].all) {
      // "first activation only": keep a per-node activation-time vector,
      // initialized to -1 (= not yet activated)
      SF::init_vector(&acts[i].tm, acts[i].phi->gsize(), acts[i].phi->lsize());
      acts[i].tm->set(-1.);

      // initialize with previous initial activations
      if(acts[i].prv_fname != NULL) {
        set_dir(INPUT);
        size_t nread = acts[i].tm->read_ascii(acts[i].prv_fname);

        if(nread == 0)
          // NOTE(review): `i` is size_t but the format is %2d — verify on LP64
          log_msg(NULL,2,ECHO,"Warning: Initialization of LAT[%2d] failed.", i);

        set_dir(OUTPUT);
      }
    }
    else {
      // "all activations": rank 0 owns the output file
      if ( !get_rank() ) {
        // here we should copy over previous file and open in append mode
        if(acts[i].prv_fname!=NULL) {
          set_dir(INPUT);
          FILE_SPEC in = f_open( acts[i].prv_fname, "r" );
          if(in) {
            log_msg(NULL,2,0, "Copying over of previous activation file not implemented.\n"); f_close(in);
          }
          else
            log_msg(NULL,3,0,"Warning: Initialization in %s - \n"
                    "Failed to read activation file %s.\n", __func__, acts[i].prv_fname);

          set_dir(OUTPUT);
        }
        acts[i].fout = f_open( acts[i].fname, acts[i].prv_fname==NULL?"w":"a" );
      }
    }
    print_act_log(logger, acts, i);
  }

  // mapping used later to translate petsc indices to nodal indices
  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
  SF::local_petsc_to_nodal_mapping(intra_mesh, this->petsc_to_nodal);
}
1536 
1537 
1538 int output_all_activations(FILE_SPEC fp, int *ibuf, double *act_tbuf, int nlacts)
1539 {
1540  int rank = get_rank(), gacts = 0, numProc = get_size();
1541 
1542  if (rank == 0) {
1543  // rank 0 writes directly to the table
1544  for (int i=0; i<nlacts; i++)
1545  fprintf(fp->fd, "%d\t%.6f\n", ibuf[i], act_tbuf[i]);
1546 
1547  gacts += nlacts;
1548 
1549  SF::vector<int> buf_inds;
1550  SF::vector<double> buf_acts;
1551 
1552  for (int j=1; j<numProc; j++) {
1553  int acts = 0;
1554  MPI_Status status;
1555  MPI_Recv(&acts, 1, MPI_INT, j, 110, PETSC_COMM_WORLD, &status);
1556 
1557  if (acts) {
1558  buf_inds.resize(acts);
1559  buf_acts.resize(acts);
1560 
1561  MPI_Recv(buf_inds.data(), acts, MPI_INT, j, 110, PETSC_COMM_WORLD, &status);
1562  MPI_Recv(buf_acts.data(), acts, MPI_DOUBLE, j, 110, PETSC_COMM_WORLD, &status);
1563 
1564  for(int ii=0; ii<acts; ii++)
1565  fprintf(fp->fd, "%d\t%.6f\n", buf_inds[ii], buf_acts[ii]);
1566 
1567  gacts += acts;
1568  }
1569  }
1570  fflush(fp->fd);
1571  }
1572  else {
1573  MPI_Send(&nlacts, 1, MPI_INT, 0, 110, PETSC_COMM_WORLD);
1574  if (nlacts) {
1575  MPI_Send(ibuf, nlacts, MPI_INT, 0, 110, PETSC_COMM_WORLD);
1576  MPI_Send(act_tbuf, nlacts, MPI_DOUBLE, 0, 110, PETSC_COMM_WORLD);
1577  }
1578  }
1579 
1580  MPI_Bcast(&gacts, 1, MPI_INT, 0, PETSC_COMM_WORLD);
1581  return gacts;
1582 }
1583 
// (review note: the signature line was lost in this listing; judging by the
//  body this is LAT_detector's per-step activation check at time `tm`.
//  Returns whether any detector registered an activation anywhere.)
{
  int nacts = 0;
  double *a;   // raw pointer into acts[i].tm, only used in the "first only" mode

  for(Activation* aptr = acts.data(); aptr != acts.end(); aptr++)
  {
    int lacts = 0;   // local (this rank) activation count of this detector
    switch (aptr->method) {
      case ACT_THRESH:
        lacts = check_cross_threshold(*aptr->phi, *aptr->phip, tm,
                    aptr->ibuf, aptr->actbuf, aptr->threshold, aptr->mode);
        break;

      case ACT_DT:
        lacts = check_mx_derivative (*aptr->phi, *aptr->phip, tm,
                    aptr->ibuf, aptr->actbuf, *aptr->dvp0, *aptr->dvp1,
                    aptr->threshold, aptr->mode);
        break;

      default:
        break;
    }

    if (!aptr->all)
      a = aptr->tm->ptr();

    // (listing note: the declaration of `canon_nbr` — presumably the mesh's
    //  canonical numbering — sat on a line not visible in this extraction)

    for(int j=0; j<lacts; j++) {
      if(aptr->all) {
        // translate local petsc index -> nodal index -> global canonical number
        int nodal_idx = this->petsc_to_nodal.forward_map(aptr->ibuf[j]);
        aptr->ibuf[j] = canon_nbr[nodal_idx] + aptr->offset;
      }
      else {
        // record only the first activation time of each node (-1 = untouched)
        if(a[aptr->ibuf[j]] == -1)
          a[aptr->ibuf[j]] = aptr->actbuf[j];
      }
    }

    if(aptr->all)
      output_all_activations(aptr->fout, aptr->ibuf, aptr->actbuf, lacts);
    else
      aptr->tm->release_ptr(a);

    // global activation count of this detector across all ranks
    MPI_Allreduce(MPI_IN_PLACE, &lacts, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);
    nacts += lacts;

    // NOTE(review): this stores the running total over all detectors so far,
    // not this detector's own count — verify that this is intended
    aptr->nacts = nacts;
  }

  return nacts > 0;
}
1637 
1638 
1639 int LAT_detector::check_quiescence(double tm, double dt)
1640 {
1641  static int savequitFlag = 0;
1642  int numNodesActivated = -1;
1643 
1644  if(sntl.activated) {
1645  // initialization
1646  if(sntl.t_quiesc < 0. && sntl.t_window >= 0.0 ) {
1647  log_msg(0,0,ECHO | NONL, "================================================================================================\n");
1648  log_msg(0,0,ECHO | NONL, "%s() WARNING: simulation is configured to savequit() after %.2f ms of quiescence\n", __func__, sntl.t_window);
1649  log_msg(0,0,ECHO | NONL, "================================================================================================\n");
1650  sntl.t_quiesc = 0.0;
1651  }
1652 
1653  if(tm >= sntl.t_start && !savequitFlag)
1654  {
1655  numNodesActivated = acts[sntl.ID].nacts;
1656 
1657  if(numNodesActivated) sntl.t_quiesc = 0.0;
1658  else sntl.t_quiesc += dt;
1659 
1660  if(sntl.t_window >= 0.0 && sntl.t_quiesc > sntl.t_window && !savequitFlag) {
1661  savequitFlag++;
1662  savequit();
1663  }
1664  }
1665  }
1666 
1667  return numNodesActivated;
1668 }
1669 
1670 
1671 
1672 
1673 int LAT_detector::check_cross_threshold(sf_vec & vm, sf_vec & vmp, double tm,
1674  int *ibuf, double *actbuf, float threshold, int mode)
1675 {
1676  SF_real *c = vm.ptr();
1677  SF_real *p = vmp.ptr();
1678  int lsize = vm.lsize();
1679  int nacts = 0, gnacts = 0;
1680 
1681  for (int i=0; i<lsize; i++) {
1682  int sgn = 1;
1683  bool triggered = false;
1684  if(mode==0) {// detect +slope crossing
1685  triggered = p[i] <= threshold && c[i] > threshold; }
1686  else { // detect -slope crossing
1687  triggered = p[i] >= threshold && c[i] < threshold;
1688  sgn = -1;
1689  }
1690 
1691  if (triggered) {
1692  double tact = tm - param_globals::dt + (threshold-p[i])/(c[i]-p[i])*sgn*param_globals::dt;
1693  ibuf [nacts] = i;
1694  actbuf[nacts] = tact;
1695  nacts++;
1696  }
1697  p[i] = c[i];
1698  }
1699 
1700  vm.release_ptr(c);
1701  vmp.release_ptr(p);
1702  return nacts;
1703 }
1704 
1705 int LAT_detector::check_mx_derivative(sf_vec & vm, sf_vec & vmp, double tm,
1706  int *ibuf, double *actbuf, sf_vec & dvp0, sf_vec & dvp1,
1707  float threshold, int mode)
1708 {
1709  int nacts = 0, gnacts = 0;
1710  double tact, dt2 = 2 * param_globals::dt;
1711  int lsize = vm.lsize();
1712  SF_real ddv0, ddv1, dv, dvdt;
1713  SF_real *c, *p, *pd0, *pd1;
1714 
1715  c = vm.ptr();
1716  p = vmp.ptr();
1717  pd0 = dvp0.ptr();
1718  pd1 = dvp1.ptr();
1719 
1720  for (int i=0; i<lsize; i++ ) {
1721  dv = (c[i]-p[i]);
1722  dvdt = dv/param_globals::dt;
1723  ddv0 = pd1[i]-pd0[i];
1724  ddv1 = dv -pd1[i];
1725 
1726  if (dvdt>=threshold && ddv0>0 && ddv1<0) {
1727  tact = tm-dt2+(ddv0/(ddv0-ddv1))*param_globals::dt;
1728  ibuf [nacts] = i;
1729  actbuf[nacts] = tact;
1730  nacts++;
1731  }
1732  p [i] = c[i];
1733  pd0[i] = pd1[i];
1734  pd1[i] = dv;
1735  }
1736 
1737  vm .release_ptr(c);
1738  vmp .release_ptr(p);
1739  dvp0.release_ptr(pd0);
1740  dvp1.release_ptr(pd1);
1741 
1742  return nacts;
1743 }
1744 
// (review note: the signature line was lost in this listing; judging by the
//  body this writes out the per-node activation-time vectors of all "first
//  activation only" detectors. The declaration of `sc` — presumably a
//  petsc<->canonical scattering obtained via get_permutation() — also sat on
//  a line not visible here.)
{
  assert(sc != NULL);

  // permute each vector to canonical ordering before writing
  bool forward = true;

  for (size_t i = 0; i < acts.size(); i++) {
    if (is_init(acts[i].tm)) {
      (*sc)(*acts[i].tm, forward);
      acts[i].tm->write_ascii(acts[i].fname, false);
    }
  }
}
1762 
/// Distribute single-cell prepacing states across the tissue using activation
/// times read from param_globals::prepacing_lats: for each IMP region the
/// node with the latest LAT is paced for prepacing_beats cycles, and its
/// state is copied to every other node at that node's relative save time.
void Electrics::prepace() {
  // Default stimulation parameters for the single cells
  // To be eventually be replaced by user input or info from bench
  float stimdur = 1.;
  float stimstr = 60.;

  log_msg(NULL, 0, 0, "Using activation times from file %s to distribute prepacing states\n",
          param_globals::prepacing_lats);
  log_msg(NULL, 1, 0, "Assuming stimulus strength %f uA/uF with duration %f ms for prepacing\n",
          stimstr, stimdur);

  // (listing note: the statement acquiring `elec` sat on a line that is not
  //  visible in this extraction)
  limpet::MULTI_IF* miif = elec->ion.miif;

  const sf_mesh & mesh = get_mesh(intra_elec_msh);
  sf_vec* read_lats; SF::init_vector(&read_lats, mesh, 1, sf_vec::algebraic);

  // read in the global distributed vector of all activation times
  set_dir(INPUT);
  size_t numread = read_lats->read_ascii(param_globals::prepacing_lats);
  if (numread == 0) {
    log_msg(NULL, 5, 0, "Failed reading required LATs! Skipping prepacing!");
    return;
  }
  set_dir(OUTPUT);

  // (listing note: the statement acquiring the scattering `sc` is not visible here)
  assert(sc != NULL);

  // permute in-place to petsc permutation
  bool forward = false;
  (*sc)(*read_lats, forward);

  // take care of negative LAT values: push them past the simulation end
  {
    PetscReal* lp = read_lats->ptr();
    for(int i=0; i<read_lats->lsize(); i++)
      if(lp[i] < 0.0) lp[i] = param_globals::tend + 10.0;

    read_lats->release_ptr(lp);
  }

  // make LATs relative and figure out the first LAT
  // so we know when to save state of each point
  SF_real LATmin = read_lats->min();

  if(LATmin < 0.0) {
    log_msg(0,3,0, "LAT data is not complete. Skipping prepacing.");
    return;
  }

  // align the earliest LAT to a pacing-cycle boundary
  SF_real offset = floor(LATmin / param_globals::prepacing_bcl) * param_globals::prepacing_bcl;
  SF_real last_tm = param_globals::prepacing_bcl * param_globals::prepacing_beats;

  // compute read_lats[i] = last_tm - (read_lats[i] - offset)
  *read_lats += -offset;
  *read_lats *= -1.;
  *read_lats += last_tm;

  miif->getRealData();
  SF_real *save_tm = read_lats->ptr();
  SF_real *vm = miif->gdata[limpet::Vm]->ptr();

  for (int ii = 0; ii < miif->N_IIF; ii++) {
    if (!miif->N_Nodes[ii]) continue;

    // create sorted array of save times.
    SF::vector<SF::mixed_tuple<double,int>> sorted_save(miif->N_Nodes[ii]); // v1 = time, v2 = index
    for (int kk = 0; kk < miif->N_Nodes[ii]; kk++) {
      sorted_save[kk].v1 = save_tm[miif->NodeLists[ii][kk]];
      sorted_save[kk].v2 = kk;
    }
    std::sort(sorted_save.begin(), sorted_save.end());

    size_t lastidx = sorted_save.size() - 1;
    int paced = sorted_save[lastidx].v2; // IMP index of latest node
    int csav = 0;

    // pace the single "latest" cell and snapshot its state at each save time
    for (double t = 0; t < sorted_save[lastidx].v1; t += param_globals::dt) {
      // apply the stimulus at the start of each cycle (except past the last beat)
      if (fmod(t, param_globals::prepacing_bcl) < stimdur &&
          t < param_globals::prepacing_bcl * param_globals::prepacing_beats - 1)
        miif->ldata[ii][limpet::Vm][paced] += stimstr * param_globals::dt;

      compute_IIF(*miif->IIF[ii], miif->ldata[ii], paced);

      // Vm update always happens now outside of the imp
      miif->ldata[ii][limpet::Vm][paced] -= miif->ldata[ii][limpet::Iion][paced] * param_globals::dt;
      vm[miif->NodeLists[ii][paced]] = miif->ldata[ii][limpet::Vm][paced];

      // copy the paced cell's state to every node whose save time has passed
      while (csav < miif->N_Nodes[ii] - 1 && t >= sorted_save[csav].v1)
        dup_IMP_node_state(*miif->IIF[ii], paced, sorted_save[csav++].v2, miif->ldata[ii]);
    }

    // get nodes which may be tied for last
    while (csav < miif->N_Nodes[ii] - 1)
      dup_IMP_node_state(*miif->IIF[ii], paced, sorted_save[csav++].v2, miif->ldata[ii]);
    // update global Vm vector
    for (int k = 0; k < miif->N_Nodes[ii]; k++) vm[miif->NodeLists[ii][k]] = miif->ldata[ii][limpet::Vm][k];
  }

  read_lats->release_ptr(save_tm);
  miif->gdata[limpet::Vm]->release_ptr(vm);
  miif->releaseRealData();
}
1867 
1868 
// (review note: the signature line was lost in this listing; judging by the
//  body and the call site below, this recovers extracellular potentials at
//  the points in `rcv` from a Vm snapshot: phie(x) ~ sum_i Im_i / r_i with
//  Im = K_i Vm, scaled by 1/(4 pi gBath).)
{
  if (!rcv.pts.size())
    return;

  int rank = get_rank();
  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys)); assert(elec);
  // intracellular stiffness matrix, used as the Im source operator
  sf_mat & Ki = *elec->parab_solver.rhs_parab;

  const sf_mesh & imesh = get_mesh(intra_elec_msh);
  const SF::vector<mesh_int_t> & alg_nod = imesh.pl.algebraic_nodes();

  SF_int start, end;
  vm.get_ownership_range(start, end);  // NOTE(review): start/end appear unused here

  // lazily allocate work vectors with the same layout as vm
  if(!rcv.Im) {
    SF::init_vector(&rcv.Im, &vm);
    SF::init_vector(&rcv.dphi, &vm);
  }

  SF_int r_start, r_end;
  rcv.phie_rec->get_ownership_range(r_start, r_end);

  SF_real *ph_r = rcv.phie_rec->ptr();

  // use minimum distance to ensure r>0
  // consistent with the line source approximation, the "cable radius"
  // is used as a lower limit for the source-field point distance
  float minDist = 2. / param_globals::imp_region[0].cellSurfVolRatio; // radius in um

  Ki.mult(vm, *rcv.Im);  // Im = K_i Vm
  int numpts = rcv.pts.size() / 3;
  Point fpt, cpt;

  for (int j=0; j<numpts; j++) {
    fpt = rcv.pts.data() + j*3;  // field point (x,y,z)

    *rcv.dphi = *rcv.Im;
    SF_real* dp = rcv.dphi->ptr();

    // divide each local source by its distance to the field point
    for (size_t i = 0; i<alg_nod.size(); i++)
    {
      mesh_int_t loc_nodal_idx = alg_nod[i];
      mesh_int_t loc_petsc_idx = local_nodal_to_local_petsc(imesh, rank, loc_nodal_idx);
      cpt = imesh.xyz.data()+loc_nodal_idx*3;

      double r = dist(fpt, cpt) + minDist;
      dp[loc_petsc_idx] /= r;
    }

    rcv.dphi->release_ptr(dp);

    // global sum over all sources; the owning rank stores the result
    SF_real phi = rcv.dphi->sum() / 4. / M_PI / rcv.gBath;
    if ( (j>=r_start) && (j<r_end) )
      ph_r[j-r_start] = phi;
  }

  rcv.phie_rec->release_ptr(ph_r);
}
1928 
// (review note: the signature line was lost in this listing; judging by the
//  body this is the post-processing driver that replays the Vm output file
//  slice by slice and recovers phie at the configured points. Several
//  declaration lines — e.g. for `petsc_to_canonical` and the uses of
//  `imesh`/`elec` — are also missing from this extraction.)
{
  int err = 0, rank = get_rank();

  sf_mesh & imesh = get_mesh(intra_elec_msh);
  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys)); assert(elec);

  // we close the files of the default electrics if there are any open

  // register output
  set_dir(POSTPROC);
  igb_output_manager phie_rec_out;
  phie_rec_out.register_output(phie_rcv.phie_rec, phie_recv_msh, 1,
                               param_globals::phie_recovery_file, "mV");

  // Buffer for Vm data
  sf_vec* vm = get_data(vm_vec); assert(vm);

  // set up igb header and point fd to start of Vm file
  set_dir(OUTPUT);
  IGBheader vm_igb;
  if(rank == 0) {
    FILE_SPEC file = f_open(param_globals::vofile, "r");
    if(file != NULL) {
      vm_igb.fileptr(file->fd);
      vm_igb.read();

      // the igb x dimension must match the global Vm size
      if(vm_igb.x() != vm->gsize()) {
        log_msg(0,4,0, "%s error: Vm dimension does not fit to %s file. Aborting recovery! \n",
                __func__, param_globals::vofile);
        err++;
      }

      delete file;
    }
    else err++;
  }

  // propagate rank 0's error state to all ranks
  err = get_global(err, MPI_MAX);

  if(err == 0) {
    FILE* fd = static_cast<FILE*>(vm_igb.fileptr());

    // number of data slices
    const int num_io = user_globals::tm_manager->timers[iotm_spacedt]->numIOs;

    // scatterers
    assert(petsc_to_canonical != NULL);

    // loop over vm slices and recover phie
    for(int i=0; i<num_io; i++) {
      log_msg(0,0,0, "Step %d / %d", i+1, num_io);
      size_t nread = vm->read_binary<float>(fd);

      if(nread != size_t(vm->gsize())) {
        log_msg(0,3,0, "%s warning: read incomplete data slice! Aborting!", __func__);
        err++;
        break;
      }

      // permute vm_buff
      bool forward = false;
      (*petsc_to_canonical)(*vm, forward);

      // do phie computation
      recover_phie_std(*vm, phie_rcv);

      phie_rec_out.write_data();
    }

    phie_rec_out.close_files_and_cleanup();
  }
  return err;
}
2007 
// (review note: the signature line was lost in this listing; per the trailing
//  declaration index this is setup_phie_recovery_data(phie_recovery_data &data):
//  read the recovery points, distribute them, and allocate the result vector.)
{
  int rank = get_rank(), size = get_size();
  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys)); assert(elec);

  sf_mesh & imesh = get_mesh(intra_elec_msh);
  const std::string basename = param_globals::phie_rec_ptf;
  SF::vector<mesh_int_t> ptsidx;

  set_dir(INPUT);
  SF::read_points(basename, imesh.comm, data.pts, ptsidx);
  make_global(data.pts, imesh.comm); // we want all ranks to have all points

  // set up parallel layout of recovery points
  SF::vector<mesh_int_t> layout;
  layout_from_count(mesh_int_t(ptsidx.size()), layout, imesh.comm);

  // set up petsc_vector for recovered potentials
  SF::init_vector(&data.phie_rec, layout[size], layout[rank+1]-layout[rank], 1, sf_vec::algebraic);

  // get conductivity
  // (listing note: the declaration of `intra_regions` sat on a line that is
  //  not visible in this extraction)
  data.gBath = static_cast<elecMaterial*>(intra_regions[0].material)->BathVal[0];
}
2032 
// (review note: the signature line and SEVERAL statement lines of this block
//  were lost in this listing — the region_mask()/mapping calls appear only as
//  trailing argument fragments and some braces are visibly unbalanced as a
//  result. Comments below annotate only the lines that are fully visible.
//  Judging by the body this is the Laplace problem initialization.)
{
  int rank = get_rank();

  // the Laplace solve only makes sense for full bidomain setups
  assert(param_globals::bidomain == BIDOMAIN);
  double t1, t2;
  get_time(t1);

  // set up Extracellular tissue
  mtype[Electrics::extra_grid].regionIDs, true, "gregion_e");

  // set up a subset of the complete electrical mappings
  int dpn = 1;

  // set up Intracellular tissue
  mtype[Electrics::intra_grid].regionIDs, true, "gregion_i");

  }

  // set up stimuli
  init_stim_info();
  stimuli.resize(param_globals::num_stim);

  for(int i=0; i<param_globals::num_stim; i++) {
    // construct new stimulus
    stimulus & s = stimuli[i];

    s.translate(i);

    s.setup(i);

    // extracellular voltage stimuli get a constant wave form
    if(s.phys.type == V_ex) {
      s.pulse.wform = constPulse;
      sample_wave_form(s.pulse, i);
    }
  }

  set_dir(OUTPUT);

  ellip_solver.init();

  // optionally dump the elliptic system matrix for external inspection
  if(param_globals::dump2MatLab) {
    std::string bsname = param_globals::dump_basename;
    std::string fn;

    set_dir(OUTPUT);
    fn = bsname + "_Kie.bin";
    ellip_solver.phie_mat->write(fn.c_str());
  }

  // the laplace solver executes only once, thus we need a singlestep timer
  timer_idx = user_globals::tm_manager->add_singlestep_timer(0.0, 0.0, "laplace trigger", nullptr);

  SF::vector<mesh_int_t>* restr_i = NULL;
  SF::vector<mesh_int_t>* restr_e = NULL;

  // register extracellular (and, in the lost conditional, intracellular)
  // potential output, possibly restricted to a subset of nodes
  setup_dataout(param_globals::dataout_e, param_globals::dataout_e_vtx, extra_elec_msh,
                restr_e, param_globals::num_io_nodes > 0);
  if(param_globals::dataout_e)
    output_manager.register_output(ellip_solver.phie, extra_elec_msh, 1, param_globals::phiefile, "mV", restr_e);

  setup_dataout(param_globals::dataout_i, param_globals::dataout_i_vtx, intra_elec_msh,
                restr_i, param_globals::num_io_nodes > 0);
  if(param_globals::dataout_i)
    output_manager.register_output(ellip_solver.phie_i, intra_elec_msh, 1, param_globals::phieifile, "mV", restr_i);
  }

  this->initialize_time += timing(t2, t1);

  // the Laplace problem is solved immediately upon initialization
  this->compute_step();
}
2115 
2117 {}
2118 
// (review note: the signature line was lost in this listing, as is the line
//  performing the actual elliptic solve between get_time(t0) and the timing
//  call. Judging by the body this computes the one-shot Laplace solution and
//  then frees the elliptic system to save memory.)
{
  double t0, t1, dur;
  log_msg(0,0,0, "Solving Laplace problem ..");

  get_time(t0);
  dur = timing(t1,t0);

  log_msg(0,0,0, "Done in %.5f seconds.", dur);

  this->compute_time += timing(t1, t0);
  set_dir(OUTPUT);

  // we clear the elliptic matrices and solver to save some memory when computing
  // the laplace solution on-the-fly
  delete ellip_solver.mass_e; ellip_solver.mass_e = NULL;
  delete ellip_solver.phie_mat; ellip_solver.phie_mat = NULL;
}
2142 
2144 {}
2145 
2146 double Laplace::timer_val(const int timer_id)
2147 {
2148  int sidx = stimidx_from_timeridx(stimuli, timer_id);
2149  double val = 0.0;
2150 
2151  if(sidx != -1) stimuli[sidx].value(val);
2152  else val = std::nan("NaN");
2153  return val;
2154 }
2155 
2156 std::string Laplace::timer_unit(const int timer_id)
2157 {
2158  int sidx = stimidx_from_timeridx(stimuli, timer_id);
2159  std::string s_unit;
2160  if(sidx != -1) s_unit = stimuli[sidx].pulse.wave.f_unit;
2161  return s_unit;
2162 }
2163 
                     // (review note: the first line of this signature was lost
                     //  in this listing; judging by the body it converts each
                     //  total-current stimulus into the matching current
                     //  density for its electrode.)
                     sf_mat & mass_i,
                     sf_mat & mass_e,
                     limpet::MULTI_IF *miif,
                     FILE_SPEC logger)
{

  for(stimulus & s : stimuli) {
    if(is_current(s.phys.type) && s.phys.total_current) {
      // extracellular current injection
      if (s.phys.type == I_ex) {
        // compute affected volume in um^3
        SF_real vol = get_volume_from_nodes(mass_e, s.electrode.vertices);

        // s->strength holds the total current in uA, compute current density
        // in uA/cm^3
        float scale = 1.e12/vol;

        s.pulse.strength *= scale;

        // NOTE(review): the logged value multiplies the already-scaled
        // strength by 1.e12 again — verify whether the log is meant to show
        // s.pulse.strength itself (as the I_tm branch below does)
        log_msg(logger,0,ECHO,
                "%s [Stimulus %d]: current density scaled to %.4g uA/cm^3\n",
                s.name.c_str(), s.idx, s.pulse.strength*1.e12);
      }
      else if (s.phys.type == I_tm) {
        // compute affected volume in um^3
        SF_real vol = get_volume_from_nodes(mass_i, s.electrode.vertices);
        const sf_mesh & imesh = get_mesh(intra_elec_msh);
        const SF::vector<mesh_int_t> & alg_nod = imesh.pl.algebraic_nodes();

        // lazily build the global-node -> local-algebraic index map
        if(alg_idx_map.size() == 0) {
          mesh_int_t lidx = 0;
          for(mesh_int_t n : alg_nod) {
            alg_idx_map[n] = lidx;
            lidx++;
          }
        }

        // determine the membrane surface from the first electrode vertex
        // this rank owns (surface-to-volume ratio of that vertex's IMP region)
        SF_real surf = 0.0;
        for(mesh_int_t n : s.electrode.vertices) {
          if(alg_idx_map.count(n)) {
            mesh_int_t lidx = alg_idx_map[n];
            int r = miif->IIFmask[lidx];
            // surf = vol*beta [1/um], surf is in [um^2]
            surf = vol * miif->IIF[r]->cgeom().SVratio * param_globals::imp_region[r].volFrac;
            //convert to cm^2
            surf /= 1.e8;
            break;
          }
        }
        // ranks without electrode vertices contribute 0; take the max
        surf = get_global(surf, MPI_MAX, PETSC_COMM_WORLD);

        // scale surface density now to result in correct total current
        s.pulse.strength /= surf;
        log_msg(logger, 0, ECHO,
                "%s [Stimulus %d]: current density scaled to %.4g uA/cm^2\n",
                s.name.c_str(), s.idx, s.pulse.strength);
      }
    }
  }
}
2226 
2227 
2228 
2229 } // namespace opencarp
SF::scattering * get_permutation(const int mesh_id, const int perm_id, const int dpn)
Get the PETSC to canonical permutation scattering for a given mesh and number of dpn.
SF_real gBath
Bath conductivity.
Definition: electrics.h:234
bool have_dbc_stims(const SF::vector< stimulus > &stimuli)
return whether any stimuli require Dirichlet boundary conditions
Definition: electrics.cc:826
#define DATAOUT_NONE
Definition: sim_utils.h:56
int ** NodeLists
local partitioned node lists for each IMP stored
Definition: MULTI_ION_IF.h:201
stim_electrode electrode
electrode geometry
Definition: stimulate.h:174
dbc_manager * dbc
dbcs require a dbc manager
Definition: electrics.h:58
void init_solver(SF::abstract_linear_solver< T, S > **sol)
Definition: SF_init.h:220
igb_output_manager output_manager
class handling the igb output
Definition: electrics.h:270
gvec_data gvec
datastruct holding global IMP state variable output
Definition: electrics.h:267
int * subregtags
FEM tags forming this region.
Definition: fem_types.h:97
char * regname
name of region
Definition: fem_types.h:94
The mesh storage class. It contains both element and vertex data.
Definition: SF_container.h:383
void log_stats(double tm, bool cflg)
Definition: timers.cc:91
virtual void release_ptr(S *&p)=0
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
Definition: SF_container.h:452
cond_t g
rule to build conductivity tensor
Definition: fem_types.h:64
stim_pulse pulse
stimulus wave form
Definition: stimulate.h:171
char * dupstr(const char *old_str)
Definition: basics.cc:44
description of material properties in a mesh
Definition: fem_types.h:110
void init(sf_vec &vm, sf_vec &phie, int offset)
initializes all datastructs after electric solver setup
Definition: electrics.cc:1469
physMaterial * material
material parameter description
Definition: fem_types.h:98
sf_vec * Irhs
weighted transmembrane currents
Definition: electrics.h:105
void read_indices(SF::vector< T > &idx, const std::string filename, const hashmap::unordered_map< mesh_int_t, mesh_int_t > &dd_map, MPI_Comm comm)
Read indices from a file.
Definition: fem_utils.h:120
const char * get_tsav_ext(double time)
Definition: electrics.cc:837
void setup_phie_recovery_data(phie_recovery_data &data)
Definition: electrics.cc:2008
void local_petsc_to_nodal_mapping(const meshdata< T, S > &mesh, index_mapping< T > &petsc_to_nodal)
MPI_Comm comm
the parallel mesh is defined on a MPI world
Definition: SF_container.h:392
void recover_phie_std(sf_vec &vm, phie_recovery_data &rcv)
Definition: electrics.cc:1869
SF::vector< mesh_real_t > pts
The phie recovery locations.
Definition: electrics.h:230
void layout_from_count(const T count, vector< T > &layout, MPI_Comm comm)
Definition: SF_network.h:201
void compute_surface_mesh(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, meshdata< T, S > &surfmesh)
Compute the surface of a given mesh.
PETSc numbering of nodes.
Definition: SF_container.h:191
cond_t
description of electrical tissue properties
Definition: fem_types.h:42
#define PETSC_TO_CANONICAL
Permute algebraic data from PETSC to canonical ordering.
Definition: sf_interface.h:74
physMat_t material_type
ID of physics material.
Definition: fem_types.h:53
void solve(sf_mat &Ki, sf_vec &Vmv, sf_vec &tmp_i)
Definition: electrics.cc:1020
#define BIDOMAIN
Definition: sim_utils.h:143
overlapping_layout< T > pl
nodal parallel layout
Definition: SF_container.h:417
int calls
# calls for this interval, this is incremented externally
Definition: timers.h:35
IIF_Mask_t * IIFmask
region for each node
Definition: MULTI_ION_IF.h:214
elliptic_solver ellip_solver
Solver for the elliptic bidomain equation.
Definition: electrics.h:259
lin_solver_stats stats
Definition: electrics.h:55
double timer_val(const int timer_id)
figure out current value of a signal linked to a given timer
Definition: electrics.cc:721
void solve(sf_vec &phie_i)
Definition: electrics.cc:1238
void get_kappa(sf_vec &kappa, IMPregion *ir, limpet::MULTI_IF &miif, double k)
compute the vector
Definition: electrics.cc:779
void initialize()
Definition: ionics.cc:60
virtual T lsize() const =0
FILE_SPEC logger
The logger of the physic, each physic should have one.
Definition: physics_types.h:64
timer_manager * tm_manager
a manager for the various physics timers
Definition: main.cc:52
double ExVal[3]
extracellular conductivity eigenvalues
Definition: fem_types.h:62
std::string timer_unit(const int timer_id)
figure out units of a signal linked to a given timer
Definition: electrics.cc:2156
The nodal numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:189
void unique_resize(vector< T > &_P)
Definition: SF_sort.h:348
bool is_current(stim_t type)
uses current as stimulation
Definition: stimulate.cc:71
void setup_dataout(const int dataout, std::string dataout_vtx, mesh_t grid, SF::vector< mesh_int_t > *&restr, bool async=false)
Definition: electrics.cc:592
const char * name
The name of the physic, each physic should have one.
Definition: physics_types.h:62
sf_mat * lhs_parab
lhs matrix (CN) to solve parabolic
Definition: electrics.h:111
int all
determine all or first instants of activation
Definition: electrics.h:168
int add_eq_timer(double istart, double iend, int ntrig, double iintv, double idur, const char *iname, const char *poolname=nullptr)
Add a equidistant step timer to the array of timers.
Definition: timer_utils.cc:78
SF::vector< mesh_int_t > vertices
Definition: stimulate.h:158
size_t read_ascii(FILE *fd)
double time_step
global reference time step
Definition: timer_utils.h:78
void savequit()
save state and quit simulator
Definition: sim_utils.cc:1602
void constant_total_stimulus_current(SF::vector< stimulus > &stimuli, sf_mat &mass_i, sf_mat &mass_e, limpet::MULTI_IF *miif, FILE_SPEC logger)
Definition: electrics.cc:2164
const char * get_mesh_type_name(mesh_t t)
get a char* to the name of a mesh type
Definition: sf_interface.cc:46
#define ECHO
Definition: basics.h:308
vector< S > xyz
node coordinates
Definition: SF_container.h:415
T * data()
Pointer to the vector&#39;s start.
Definition: SF_vector.h:91
vector< T > con
Definition: SF_container.h:400
sf_vec * tmp_i1
scratch vector for i-grid
Definition: electrics.h:103
mesh_t
The enum identifying the different meshes we might want to load.
Definition: sf_interface.h:58
T & push_back(T val)
Definition: SF_vector.h:283
bool trigger(int ID) const
Definition: timer_utils.h:166
SF_real get_volume_from_nodes(sf_mat &mass, SF::vector< mesh_int_t > &local_idx)
Definition: fem_utils.cc:202
double InVal[3]
intracellular conductivity eigenvalues
Definition: fem_types.h:61
int N_IIF
how many different IIF's
Definition: MULTI_ION_IF.h:211
void init_stim_info(void)
Definition: stimulate.cc:49
void assemble_lumped_matrix(abstract_matrix< T, S > &mat, meshdata< mesh_int_t, mesh_real_t > &domain, matrix_integrator< mesh_int_t, mesh_real_t > &integrator)
void close_files_and_cleanup()
close file descriptors
Definition: sim_utils.cc:1483
PotType measurand
quantity being monitored
Definition: electrics.h:181
generic_timing_stats IO_stats
Definition: electrics.h:275
#define DATAOUT_SURF
Definition: sim_utils.h:57
void init_logger(const char *filename)
Definition: timers.cc:75
virtual S sum() const =0
void init_vector(SF::abstract_vector< T, S > **vec)
Definition: SF_init.h:99
void rebuild_mass(FILE_SPEC logger)
Definition: electrics.cc:983
void region_mask(mesh_t meshspec, SF::vector< RegionSpecs > &regspec, SF::vector< int > &regionIDs, bool mask_elem, const char *reglist)
classify elements/points as belonging to a region
Definition: ionics.cc:356
std::vector< IonIfBase * > IIF
array of IIF&#39;s
Definition: MULTI_ION_IF.h:202
#define ELEM_PETSC_TO_CANONICAL
Permute algebraic element data from PETSC to canonical ordering.
Definition: sf_interface.h:76
int mode
toggle mode from standard to reverse
Definition: electrics.h:167
bool value(double &v) const
Get the current value if the stimulus is active.
Definition: stimulate.cc:442
char * get_file_dir(const char *file)
Definition: sim_utils.cc:1231
bool is_init(const abstract_vector< T, S > *v)
sf_mat * phie_mat
lhs matrix to solve elliptic
Definition: electrics.h:49
const T * end() const
Pointer to the vector&#39;s end.
Definition: SF_vector.h:128
waveform_t wform
wave form of stimulus
Definition: stimulate.h:100
parabolic_solver parab_solver
Solver for the parabolic bidomain equation.
Definition: electrics.h:261
void compute_IIF(limpet::IonIfBase &pIF, limpet::GlobalData_t **impdata, int n)
Definition: ionics.cc:458
Tissue level electrics, main Electrics physics class.
SF::scattering * get_scattering(const int from, const int to, const SF::SF_nbr nbr, const int dpn)
Get a scattering from the global scatter registry.
virtual S * ptr()=0
bool using_legacy_stimuli
flag storing whether legacy stimuli are used
Definition: main.cc:58
virtual void mult(const abstract_vector< T, S > &x, abstract_vector< T, S > &b) const =0
event detection data structures
Definition: electrics.h:164
void dump_state(char *, float, opencarp::mesh_t gid, bool, unsigned int)
sf_vec * Vmv
global Vm vector
Definition: electrics.h:99
double tot_time
total time, this is incremented externally
Definition: timers.h:37
int mesh_int_t
Definition: SF_container.h:46
virtual void write(const char *filename) const =0
void fileptr(gzFile f)
Definition: IGBheader.cc:336
Submesh nodal numbering: The globally ascending sorted reference indices are reindexed.
Definition: SF_container.h:190
void assemble_sv_gvec(gvec_data &gvecs, limpet::MULTI_IF *miif)
Definition: ionics.cc:631
double time
current time
Definition: timer_utils.h:76
int max_nodal_edgecount(const meshdata< T, S > &mesh)
Compute the maximum number of node-to-node edges for a mesh.
Definition: SF_container.h:596
region based variations of arbitrary material parameters
Definition: fem_types.h:93
GlobalData_t *** ldata
data local to each IMP
Definition: MULTI_ION_IF.h:205
void forward(abstract_vector< T, S > &in, abstract_vector< T, S > &out, bool add=false)
Forward scattering.
int check_acts(double tm)
check activations at sim time tm
Definition: electrics.cc:1584
double BathVal[3]
bath conductivity eigenvalues
Definition: fem_types.h:63
virtual void set(const vector< T > &idx, const vector< S > &vals, const bool additive=false)=0
std::string name
label stimulus
Definition: stimulate.h:170
ActMethod method
method to check whether activation occured
Definition: electrics.h:165
int timer_idx
the timer index received from the timer manager
Definition: physics_types.h:66
void write_data()
write registered data to disk
Definition: sim_utils.cc:1436
long d_time
current time instance index
Definition: timer_utils.h:77
sf_sol * lin_solver
petsc or ginkgo lin_solver
Definition: electrics.h:52
sf_mesh & get_mesh(const mesh_t gt)
Get a mesh by specifying the gridID.
Definition: sf_interface.cc:33
void register_output_sync(sf_vec *inp_data, const mesh_t inp_meshid, const int dpn, const char *name, const char *units, const SF::vector< mesh_int_t > *idx=NULL, bool elem_data=false)
Definition: sim_utils.cc:1272
float threshold
threshold for detection of activation
Definition: electrics.h:166
SF::scattering * register_permutation(const int mesh_id, const int perm_id, const int dpn)
Register a permutation between two orderings for a mesh.
size_t read_binary(FILE *fd)
int idx
index in global input stimulus array
Definition: stimulate.h:169
void init_matrix(SF::abstract_matrix< T, S > **mat)
Definition: SF_init.h:199
manager for dirichlet boundary conditions
Definition: stimulate.h:198
#define ALG_TO_NODAL
Scatter algebraic to nodal.
Definition: sf_interface.h:72
size_t size() const
Definition: hashmap.hpp:687
double timer_val(const int timer_id)
figure out current value of a signal linked to a given timer
Definition: electrics.cc:2146
void destroy()
Currently we only need to close the file logger.
Definition: electrics.cc:351
sf_vec * phie
phi_e
Definition: electrics.h:43
#define PSEUDO_BIDM
Definition: sim_utils.h:144
void set_elec_tissue_properties(MaterialType *mtype, Electrics::grid_t g, FILE_SPEC logger)
Fill the RegionSpec of an electrics grid with the associated inputs from the param parameters...
Definition: electrics.cc:100
void set_cond_type(MaterialType &m, cond_t type)
Definition: electrics.cc:803
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
Definition: SF_network.h:225
SF::vector< double > el_scale
optionally provided per-element params scale
Definition: fem_types.h:116
void get_time(double &tm)
Definition: basics.h:436
Basic_physic * get_physics(physic_t p, bool error_if_missing)
Convenience function to get a physics.
Definition: sim_utils.cc:818
#define EXP_POSTPROCESS
Definition: sim_utils.h:160
int write_trace()
write traces to file
Definition: signals.h:701
bool dbc_update()
check if dbcs have updated
Definition: stimulate.cc:632
void initialize()
Initialize the Electrics.
Definition: electrics.cc:36
int timer_id
timer for stimulus
Definition: stimulate.h:127
void open_trace(MULTI_IF *MIIF, int n_traceNodes, int *traceNodes, int *label, opencarp::sf_mesh *imesh)
Set up ionic model traces at some global node numbers.
T get_global(T in, MPI_Op OP, MPI_Comm comm=PETSC_COMM_WORLD)
Do a global reduction on a variable.
Definition: basics.h:233
int * N_Nodes
#nodes for each IMP
Definition: MULTI_ION_IF.h:200
void destroy()
Definition: ionics.cc:52
size_t root_count_ascii_lines(std::string file, MPI_Comm comm)
count the lines in an ASCII file
void print_act_log(FILE_SPEC logger, const SF::vector< Activation > &acts, int idx)
Definition: electrics.cc:1444
int check_quiescence(double tm, double dt)
check for quiescence
Definition: electrics.cc:1639
V timing(V &t2, const V &t1)
Definition: basics.h:448
void compute_restr_idx(sf_mesh &mesh, SF::vector< mesh_int_t > &inp_idx, SF::vector< mesh_int_t > &idx)
Definition: electrics.cc:522
sig::time_trace wave
wave form of stimulus pulse
Definition: stimulate.h:102
void setup(int idx)
Setup from a param stimulus index.
Definition: stimulate.cc:163
SF::vector< stimulus > stimuli
the electrical stimuli
Definition: electrics.h:252
sf_vec * dphi
Auxiliary vectors.
Definition: electrics.h:233
void rebuild_matrices(MaterialType *mtype, limpet::MULTI_IF &miif, FILE_SPEC logger)
Definition: electrics.cc:1135
MaterialType mtype[2]
the material types of intra_grid and extra_grid grids.
Definition: electrics.h:250
int regID
region ID
Definition: fem_types.h:95
void update_cwd()
save the current working directory to curdir so that we can switch back to it if needed.
Definition: sim_utils.cc:441
void assemble_matrix(abstract_matrix< T, S > &mat, meshdata< mesh_int_t, mesh_real_t > &domain, matrix_integrator< mesh_int_t, mesh_real_t > &integrator)
Generalized matrix assembly.
File descriptor struct.
Definition: basics.h:133
opencarp::sf_vec * gdata[NUM_IMP_DATA_TYPES]
data used by all IMPs
Definition: MULTI_ION_IF.h:216
int set_dir(IO_t dest)
Definition: sim_utils.cc:446
void dup_IMP_node_state(IonIfBase &IF, int from, int to, GlobalData_t **localdata)
hm_int count(const K &key) const
Check if key exists.
Definition: hashmap.hpp:579
sf_mat * mass_e
mass matrix for RHS elliptic calc
Definition: electrics.h:48
int read(bool quiet=false)
Definition: IGBheader.cc:761
sf_vec * phie_rec
The phie recovery output vector buffer.
Definition: electrics.h:231
size_t size() const
The current size of the vector.
Definition: SF_vector.h:104
void log_stats(double tm, bool cflg)
Definition: timers.cc:25
sf_vec * phiesrc
I_e.
Definition: electrics.h:45
void rebuild_stiffness(MaterialType *mtype, SF::vector< stimulus > &stimuli, FILE_SPEC logger)
Definition: electrics.cc:923
int nsubregs
#subregions forming this region
Definition: fem_types.h:96
Electrical stimulation functions.
void log_msg(FILE_SPEC out, int level, unsigned char flag, const char *fmt,...)
Definition: basics.cc:72
#define NONL
Definition: basics.h:312
void init_sv_gvec(gvec_data &GVs, limpet::MULTI_IF *miif, sf_vec &tmpl, igb_output_manager &output_manager)
Definition: ionics.cc:560
#define MONODOMAIN
Definition: sim_utils.h:142
bool mesh_is_registered(const mesh_t gt)
check whether an SF mesh is set
Definition: sf_interface.cc:59
stim_t type
type of stimulus
Definition: stimulate.h:142
#define M_PI
Definition: ION_IF.h:51
void sample_wave_form(stim_pulse &sp, int idx)
sample a signal given in analytic form
Definition: stimulate.cc:365
lin_solver_stats stats
Definition: electrics.h:118
#define DATAOUT_VTX
Definition: sim_utils.h:59
virtual void get_ownership_range(T &start, T &stop) const =0
void output_initial_activations()
output one nodal vector of initial activation time
Definition: electrics.cc:1748
void dump_trace(MULTI_IF *MIIF, limpet::Real time)
#define DATAOUT_VOL
Definition: sim_utils.h:58
grid_t
An electrics grid identifier to distinguish between intra and extra grids.
Definition: electrics.h:246
int numNode
local number of nodes
Definition: MULTI_ION_IF.h:210
const T * begin() const
Pointer to the vector's start.
Definition: SF_vector.h:116
void f_close(FILE_SPEC &f)
Close a FILE_SPEC.
Definition: basics.cc:162
std::vector< base_timer * > timers
vector containing individual timers
Definition: timer_utils.h:84
void compute_step()
Definition: ionics.cc:35
limpet::MULTI_IF * miif
Definition: ionics.h:66
const meshdata< mesh_int_t, mesh_real_t > * mesh
the connected mesh
std::string timer_unit(const int timer_id)
figure out units of a signal linked to a given timer
Definition: electrics.cc:737
void balance_electrode(SF::vector< stimulus > &stimuli, int balance_from, int balance_to)
Definition: electrics.cc:366
sf_mat * mass_i
lumped for parabolic problem
Definition: electrics.h:109
sf_vec * phie_i
phi_e on intracellular grid
Definition: electrics.h:44
sf_vec * IIon
ionic currents
Definition: electrics.h:98
V dist(const vec3< V > &p1, const vec3< V > &p2)
Definition: vect.h:114
void register_output(sf_vec *inp_data, const mesh_t inp_meshid, const int dpn, const char *name, const char *units, const SF::vector< mesh_int_t > *idx=NULL, bool elem_data=false)
Register a data vector for output.
Definition: sim_utils.cc:1403
LAT_detector lat
the activation time detector
Definition: electrics.h:264
void assign(InputIterator s, InputIterator e)
Assign a memory range.
Definition: SF_vector.h:161
int output_all_activations(FILE_SPEC fp, int *ibuf, double *act_tbuf, int nlacts)
Definition: electrics.cc:1538
std::int32_t SF_int
Use the general std::int32_t as int type.
Definition: SF_globals.h:37
void rebuild_matrices(MaterialType *mtype, SF::vector< stimulus > &stimuli, FILE_SPEC logger)
Definition: electrics.cc:912
double strength
strength of stimulus
Definition: stimulate.h:98
size_t g_numelem
global number of elements
Definition: SF_container.h:386
stim_protocol ptcl
applied stimulation protocol used
Definition: stimulate.h:172
double SF_real
Use the general double as real type.
Definition: SF_globals.h:38
void binary_sort(vector< T > &_V)
Definition: SF_sort.h:284
void translate(int id)
convert legacy definitions to new format
Definition: stimulate.cc:100
stim_physics phys
physics of stimulus
Definition: stimulate.h:173
void apply_stim_to_vector(const stimulus &s, sf_vec &vec, bool add)
Definition: electrics.cc:427
virtual void get(const vector< T > &idx, S *out)=0
SF::vector< RegionSpecs > regions
array with region params
Definition: fem_types.h:115
void resize(size_t n)
Resize a vector.
Definition: SF_vector.h:209
sf_vec * get_data(datavec_t d)
Retrieve a petsc data vector from the data registry.
Definition: sim_utils.cc:834
int get_size(MPI_Comm comm=PETSC_COMM_WORLD)
Definition: basics.h:290
SF::scattering * register_scattering(const int from, const int to, const SF::SF_nbr nbr, const int dpn)
Register a scattering between two grids, or between algebraic and nodal representation of data on the ...
Definition: sf_interface.cc:65
int stimidx_from_timeridx(const SF::vector< stimulus > &stimuli, const int timer_id)
determine link between timer and stimulus
Definition: electrics.cc:751
sf_mat * rhs_parab
rhs matrix to solve parabolic
Definition: electrics.h:110
bool is_dbc(stim_t type)
whether stimulus is a dirichlet type. implies boundary conditions on matrix
Definition: stimulate.cc:76
void backward(abstract_vector< T, S > &in, abstract_vector< T, S > &out, bool add=false)
Backward scattering.
phie_recovery_data phie_rcv
struct holding helper data for phie recovery
Definition: electrics.h:273
centralize time managment and output triggering
Definition: timer_utils.h:73
int get_rank(MPI_Comm comm=PETSC_COMM_WORLD)
Definition: basics.h:276
void compute_restr_idx_async(sf_mesh &mesh, SF::vector< mesh_int_t > &inp_idx, SF::vector< mesh_int_t > &idx)
Definition: electrics.cc:555
int postproc_recover_phie()
Definition: electrics.cc:1929
T local_nodal_to_local_petsc(const meshdata< T, S > &mesh, int rank, T local_nodal)
virtual T gsize() const =0
LAT_detector()
constructor, sets up basic datastructs from global_params
Definition: electrics.cc:1341
#define UM2_to_CM2
convert um^2 to cm^2
Definition: physics_types.h:35
long d_end
final index in multiples of dt
Definition: timer_utils.h:82
FILE_SPEC f_open(const char *fname, const char *mode)
Open a FILE_SPEC.
Definition: basics.cc:135
void read_points(const std::string basename, const MPI_Comm comm, vector< S > &pts, vector< T > &ptsidx)
Read the points and insert them into a list of meshes.
Definition: SF_mesh_io.h:807
Container for a PETSc VecScatter.
int add_singlestep_timer(double tg, double idur, const char *iname, const char *poolname=nullptr)
Definition: timer_utils.h:143