openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
electrics.cc
// ----------------------------------------------------------------------------
// openCARP is an open cardiac electrophysiology simulator.
//
// Copyright (C) 2020 openCARP project
//
// This program is licensed under the openCARP Academic Public License (APL)
// v1.0: You can use and redistribute it and/or modify it in non-commercial
// academic environments under the terms of APL as published by the openCARP
// project v1.0, or (at your option) any later version. Commercial use requires
// a commercial license (info@opencarp.org).
//
// This program is distributed without any warranty; see the openCARP APL for
// more details.
//
// You should have received a copy of the openCARP APL along with this program
// and can find it online: http://www.opencarp.org/license
// ----------------------------------------------------------------------------

#include "electrics.h"
#include "petsc_utils.h"
#include "timers.h"
#include "stimulate.h"
#include "electric_integrators.h"

#include "SF_init.h" // for SF::init_xxx()

#ifdef WITH_CALIPER
#include "caliper/cali.h"
#else
#include "caliper_hooks.h"
#endif

namespace opencarp {

void Electrics::initialize()
{
  double t1, t2;
  get_time(t1);

  set_dir(OUTPUT);

  // open logger
  logger = f_open("electrics.log", param_globals::experiment != 4 ? "w" : "r");

  // set up mappings between extra and intra grids, algebraic and nodal,
  // and between PETSc and canonical orderings
  setup_mappings();

  // the ionic physics is currently triggered from inside the Electrics to have tighter
  // control over it
  ion.logger = logger;
  ion.initialize();

  // set up Intracellular tissue
  setup_tissue(intra_grid);  // (call reconstructed; elided in the listing)
  region_mask(intra_elec_msh, mtype[intra_grid].regions, mtype[intra_grid].regionIDs, true, "gregion_i");

  if (param_globals::bidomain || param_globals::extracell_monodomain_stim) {
    // set up Extracellular tissue
    setup_tissue(extra_grid);  // (call reconstructed; elided in the listing)
    region_mask(extra_elec_msh, mtype[extra_grid].regions, mtype[extra_grid].regionIDs, true, "gregion_e");
  }

  // add electrics timer for time stepping, add to the time stepper tool (TS)
  double global_time = user_globals::tm_manager->time;
  timer_idx = user_globals::tm_manager->add_eq_timer(global_time, param_globals::tend, 0,
                                                     param_globals::dt, 0, "elec::ref_dt", "TS");

  // electrics stimuli setup
  setup_stimuli();

  // set up the linear equation systems. this needs to happen after the stimuli have been
  // set up, since we need the boundary condition info
  setup_solvers();

  // the next setup steps require the solvers to be set up, since they use the matrices
  // generated by those

  // balance electrodes, we may need the extracellular mass matrix
  balance_electrodes();
  // total current scaling (scaling call elided in this listing)
  // initialize the LATs detector (call elided in this listing)

  // initialize phie recovery data
  if(strlen(param_globals::phie_rec_ptf) > 0) {
    // (phie recovery setup call elided in this listing)
  }

  // prepare the electrics output. we skip it if we do post-processing
  if(param_globals::experiment != EXP_POSTPROCESS)
    setup_output();

  if (param_globals::prepacing_bcl > 0)
    prepace();

  this->initialize_time += timing(t2, t1);
}

void Electrics::setup_tissue(grid_t g)  // (signature reconstructed; elided in the listing)
{
  MaterialType *m = mtype+g;

  // initialize random conductivity fluctuation structure with PrM values
  m->regions.resize(param_globals::num_gregions);

  const char* grid_name = g == Electrics::intra_grid ? "intracellular" : "extracellular";
  log_msg(logger, 0, 0, "Setting up %s tissue properties for %d regions ..", grid_name,
          param_globals::num_gregions);

  char buf[64];
  RegionSpecs* reg = m->regions.data();

  for (size_t i=0; i<m->regions.size(); i++, reg++) {
    if(!strcmp(param_globals::gregion[i].name, "")) {
      snprintf(buf, sizeof buf, ", gregion_%d", int(i));
      param_globals::gregion[i].name = dupstr(buf);
    }

    reg->regname  = strdup(param_globals::gregion[i].name);
    reg->regID    = i;
    reg->nsubregs = param_globals::gregion[i].num_IDs;

    if(!reg->nsubregs)
      reg->subregtags = NULL;
    else {
      reg->subregtags = new int[reg->nsubregs];
      for (int j=0; j<reg->nsubregs; j++) {
        reg->subregtags[j] = param_globals::gregion[i].ID[j];
        if(reg->subregtags[j] == -1)
          log_msg(NULL,3,ECHO, "Warning: not all %u IDs provided for gregion[%u]!\n", reg->nsubregs, i);
      }
    }

    // describe material in given region
    elecMaterial *emat = new elecMaterial();
    emat->material_type = ElecMat;

    emat->InVal[0] = param_globals::gregion[i].g_il;
    emat->InVal[1] = param_globals::gregion[i].g_it;
    emat->InVal[2] = param_globals::gregion[i].g_in;

    emat->ExVal[0] = param_globals::gregion[i].g_el;
    emat->ExVal[1] = param_globals::gregion[i].g_et;
    emat->ExVal[2] = param_globals::gregion[i].g_en;

    emat->BathVal[0] = param_globals::gregion[i].g_bath;
    emat->BathVal[1] = param_globals::gregion[i].g_bath;
    emat->BathVal[2] = param_globals::gregion[i].g_bath;

    // convert units from S/m -> mS/um
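    // (1 S/m = 1e3 mS / 1e6 um = 1e-3 mS/um, hence the 1e-3 factor below;
    //  e.g. g_il = 0.174 S/m becomes 1.74e-4 mS/um. g_mult is an additional
    //  dimensionless per-region scaling.)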
    for (int j=0; j<3; j++) {
      emat->InVal[j]   *= 1e-3 * param_globals::gregion[i].g_mult;
      emat->ExVal[j]   *= 1e-3 * param_globals::gregion[i].g_mult;
      emat->BathVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
    }
    reg->material = emat;
  }

  if((g == Electrics::intra_grid && strlen(param_globals::gi_scale_vec)) ||
     (g == Electrics::extra_grid && strlen(param_globals::ge_scale_vec)) )
  {
    mesh_t mt = g == Electrics::intra_grid ? intra_elec_msh : extra_elec_msh;  // (reconstructed; elided in the listing)
    sf_mesh & mesh = get_mesh(mt);
    const char* file = g == Electrics::intra_grid ? param_globals::gi_scale_vec : param_globals::ge_scale_vec;

    size_t num_file_entries = SF::root_count_ascii_lines(file, mesh.comm);

    if(num_file_entries != mesh.g_numelem)
      log_msg(0,4,0, "%s warning: number of %s conductivity scaling entries does not match number of elements!",
              __func__, get_mesh_type_name(mt));

    // set up parallel element vector and read data
    sf_vec *escale;
    SF::init_vector(&escale, get_mesh(mt), 1, sf_vec::elemwise);
    escale->read_ascii(g == Electrics::intra_grid ? param_globals::gi_scale_vec : param_globals::ge_scale_vec);

    if(get_size() > 1) {
      // set up element vector permutation and permute
      if(get_permutation(mt, ELEM_PETSC_TO_CANONICAL, 1) == NULL) {
        // (permutation registration call elided in this listing)
      }
      SF::scattering & sc = *get_permutation(mt, ELEM_PETSC_TO_CANONICAL, 1);  // (reconstructed; elided in the listing)
      sc(*escale, false);
    }

    // copy data into SF::vector
    SF_real* p = escale->ptr();
    m->el_scale.assign(p, p + escale->lsize());
    escale->release_ptr(p);
  }
}

void Electrics::setup_mappings()
{
  bool intra_exists = mesh_is_registered(intra_elec_msh), extra_exists = mesh_is_registered(extra_elec_msh);
  assert(intra_exists);
  const int dpn = 1;

  // It may be that another physic (e.g. ionic models) has already computed the intracellular
  // mappings, thus we first test for their existence
  if(get_scattering(intra_elec_msh, ALG_TO_NODAL, dpn) == NULL) {
    log_msg(logger, 0, 0, "%s: Setting up intracellular algebraic-to-nodal scattering.", __func__);
    // (scattering registration call elided in this listing)
  }
  if(get_permutation(intra_elec_msh, PETSC_TO_CANONICAL, dpn) == NULL) {  // (condition reconstructed; elided in the listing)
    log_msg(logger, 0, 0, "%s: Setting up intracellular PETSc to canonical permutation.", __func__);
    // (permutation registration call elided in this listing)
  }

  // extracellular mappings
  if(extra_exists) {
    log_msg(logger, 0, 0, "%s: Setting up extracellular algebraic-to-nodal scattering.", __func__);
    // (registration call elided in this listing)
    log_msg(logger, 0, 0, "%s: Setting up extracellular PETSc to canonical permutation.", __func__);
    // (registration call elided in this listing)
    log_msg(logger, 0, 0, "%s: Setting up intra-to-extra scattering.", __func__);
    // (registration call elided in this listing)
  }

  bool check_i2e = false;
  if(check_i2e && extra_exists) {
    sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
    sf_mesh & extra_mesh = get_mesh(extra_elec_msh);
    int rank = get_rank();

    // (retrieval of the intra-to-extra scattering `i2e` elided in this listing)

    const SF::vector<mesh_int_t> & intra_alg_nod   = intra_mesh.pl.algebraic_nodes();
    const SF::vector<mesh_int_t> & extra_alg_nod   = extra_mesh.pl.algebraic_nodes();
    const SF::vector<mesh_int_t> & extra_petsc_nbr = extra_mesh.get_numbering(SF::NBR_PETSC);
    const SF::vector<mesh_int_t> & intra_ref_nbr   = intra_mesh.get_numbering(SF::NBR_REF);
    const SF::vector<mesh_int_t> & extra_ref_nbr   = extra_mesh.get_numbering(SF::NBR_REF);

    // TODO(init) : delete these three at the end of this section?
    sf_vec *intra_testvec; SF::init_vector(&intra_testvec, intra_mesh, 1, sf_vec::algebraic);
    sf_vec *extra_testvec; SF::init_vector(&extra_testvec, extra_mesh, 1, sf_vec::algebraic);
    sf_vec *i2e_testvec;   SF::init_vector(&i2e_testvec,   extra_mesh, 1, sf_vec::algebraic);

    SF_real* id = intra_testvec->ptr();
    for(size_t i=0; i<intra_alg_nod.size(); i++) {
      int lpidx = local_nodal_to_local_petsc(intra_mesh, rank, intra_alg_nod[i]);
      id[lpidx] = intra_ref_nbr[intra_alg_nod[i]];
    }
    intra_testvec->release_ptr(id);

    SF_real* ed = extra_testvec->ptr();
    for(size_t i=0; i<extra_alg_nod.size(); i++) {
      int lpidx = local_nodal_to_local_petsc(extra_mesh, rank, extra_alg_nod[i]);
      ed[lpidx] = extra_ref_nbr[extra_alg_nod[i]];
    }
    extra_testvec->release_ptr(ed);

    i2e_testvec->set(-1.0);
    i2e.forward(*intra_testvec, *i2e_testvec);

    int err = 0;
    for(size_t i=0; i<extra_alg_nod.size(); i++) {
      auto id = i2e_testvec->get(i);
      auto ed = extra_testvec->get(i);
      if(id > -1 && id != ed)
        err++;
    }

    if(get_global(err, MPI_SUM))
      log_msg(0,5,0, "Electrics mapping test failed!");
    else
      log_msg(0,5,0, "Electrics mapping test succeeded!");
  }
}

void Electrics::compute_step()
{
  double t1, t2;
  get_time(t1);

  // if requested, we checkpoint the current state
  checkpointing();

  // activation checking
  const double time      = user_globals::tm_manager->time,
               time_step = user_globals::tm_manager->time_step;
  lat.check_acts(time);
  lat.check_quiescence(time, time_step);

  // I believe that we need to treat the stimuli in two ways:
  // - Extracellular potential stimuli (this includes ground) affect the
  //   elliptic solver in a more delicate way, as such, there is a dbc_manager
  //   to take care of that.
  // - Extracellular and Intracellular current stimuli are applied to the rhs vectors
  //   and can be managed by the stimulate() code directly.
  stimulate_extracellular();

  if(param_globals::bidomain == BIDOMAIN) {
    // (elliptic solve call elided in this listing)
  }

  clamp_Vm();

  // compute ionics update
  ion.compute_step();

  stimulate_intracellular();

  // store Vm before the parabolic step; the full Ic we compute in the output step
  if(param_globals::dump_data & DUMP_IC) {
    // (copy of Vmv into Ic elided in this listing)
  }

  // solve parabolic system
  parab_solver.solve(*ellip_solver.phie_i);  // (call reconstructed; elided in the listing)

  clamp_Vm();

  if(user_globals::tm_manager->trigger(iotm_console)) {
    // output lin solver stats (logging calls elided in this listing)
    if(param_globals::bidomain == BIDOMAIN) {
      // (elliptic stats logging elided in this listing)
    }
  }

  this->compute_time += timing(t2, t1);

  // since the traces have their own timing, we check for trace dumps in the compute
  // step loop (trace dump call elided in this listing)
}

void Electrics::output_step()
{
  double t1, t2;
  get_time(t1);

  const double time      = user_globals::tm_manager->time,
               time_step = user_globals::tm_manager->time_step;

  // for pseudo-bidomain we compute the extracellular potential only for output
  if(param_globals::bidomain == PSEUDO_BIDM) {
    // (elliptic solve call elided in this listing)
    ellip_solver.stats.log_stats(time, false);
  }

  if(param_globals::dump_data & DUMP_IVOL) {
    // (Ivol computation elided in this listing)
  }

  if(param_globals::bidomain && (param_globals::dump_data & DUMP_IACT)) {
    // (Iact computation elided in this listing)
  }

  if(param_globals::dump_data & DUMP_IC) {
    PetscReal *Ic = parab_solver.Ic->ptr(), *Vmv = parab_solver.Vmv->ptr();

    for(PetscInt i=0; i < parab_solver.Ic->lsize(); i++)
      Ic[i] = (Ic[i] - Vmv[i]) / (-time_step);

    // (pointer release calls elided in this listing)
  }

  // recover phie
  if(phie_rcv.pts.size()) {
    recover_phie_std(*parab_solver.Vmv, phie_rcv);  // (call reconstructed; elided in the listing)
  }

  // (the actual output write call is elided in this listing)

  double curtime = timing(t2, t1);
  this->output_time += curtime;

  IO_stats.calls++;
  IO_stats.tot_time += curtime;

  IO_stats.log_stats(time, false);
}

void Electrics::destroy()
{
  // output LAT data (final LAT output call elided in this listing)

  // close logger
  f_close(logger);

  // close output files (close call elided in this listing)

  // destroy ionics
  ion.destroy();
}

void balance_electrode(elliptic_solver & ellip, SF::vector<stimulus> & stimuli, int balance_from, int balance_to)
{
  log_msg( NULL, 0, 0, "Balancing stimulus %d with %d %s-wise.", balance_from, balance_to,
           is_current(stimuli[balance_from].phys.type) ? "current" : "voltage" );

  stimulus & from = stimuli[balance_from];
  stimulus & to   = stimuli[balance_to];

  to.pulse = from.pulse;
  to.ptcl  = from.ptcl;
  to.phys  = from.phys;
  to.pulse.strength *= -1.0;

  if (from.phys.type == I_ex)
  {
    // if `from` is a total current, skip the volume based adjustment of the strength;
    // otherwise, calling constant_total_stimulus_current() would undo the balanced
    // scaling of to.pulse.strength, since it does the scaling based on the volume
    if (!from.phys.total_current) {
      sf_mat& mass = *ellip.mass_e;
      SF_real vol0 = get_volume_from_nodes(mass, from.electrode.vertices);
      SF_real vol1 = get_volume_from_nodes(mass, to.electrode.vertices);  // (reconstructed; elided in the listing)
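      // scale the balancing strength by the electrode volume ratio so that both
      // electrodes inject the same total current with opposite sign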
      to.pulse.strength *= fabs(vol0 / vol1);
    }
  }
}

void Electrics::balance_electrodes()
{
  for(int i=0; i<param_globals::num_stim; i++) {
    if(param_globals::stim[i].crct.balance != -1) {
      int from = param_globals::stim[i].crct.balance;
      int to   = i;

      balance_electrode(this->ellip_solver, stimuli, from, to);
    }
  }
}

void Electrics::setup_stimuli()
{
  // initialize basic stim info data (used units, supported types, etc)
  init_stim_info();
  bool dumpTrace = true;

  if(dumpTrace) set_dir(OUTPUT);

  stimuli.resize(param_globals::num_stim);
  for(int i=0; i<param_globals::num_stim; i++)
  {
    // construct new stimulus
    stimulus & s = stimuli[i];

    s.translate(i);

    s.setup(i);

    if(dumpTrace && get_rank() == 0)
      s.pulse.wave.write_trace(s.name+".trc");
  }
}

void apply_stim_to_vector(const stimulus & s, sf_vec & vec, bool add)
{
  double val; s.value(val);
  const SF::vector<mesh_int_t> & idx = s.electrode.vertices;
  const int rank = get_rank();
  SF::vector<mesh_int_t> local_idx = idx;
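  // electrode vertices are stored in local nodal indexing; translate each entry
  // into the local PETSc index the vector layout expects before setting values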
  for (size_t i = 0; i < idx.size(); i++) {
    local_idx[i] = local_nodal_to_local_petsc(*vec.mesh, rank, idx[i]);
  }
  vec.set(local_idx, val, add, true);
}

void Electrics::stimulate_intracellular()
{
  parabolic_solver & ps = parab_solver;

  // iterate over stimuli
  for(stimulus & s : stimuli) {
    if(s.is_active()) {
      // for active stimuli, deal with the type-specific stimulus application
      switch(s.phys.type)
      {
        case I_tm: {
          if(param_globals::operator_splitting) {
            apply_stim_to_vector(s, *ps.Vmv, true);
          }
          else {
            SF_real Cm = 1.0;
            timer_manager & tm = *user_globals::tm_manager;
            SF_real sc = tm.time_step / Cm;
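            // sc converts a current density into a voltage increment over one
            // time step (dt / Cm, with Cm fixed to 1 here)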

            ps.Irhs->set(0.0);
            apply_stim_to_vector(s, *ps.Irhs, true);

            *ps.tmp_i1  = *ps.IIon;
            *ps.tmp_i1 -= *ps.Irhs;
            *ps.tmp_i1 *= sc; // tmp_i1 = sc * (IIon - Irhs)

            // add ionic, transmembrane and intracellular currents to rhs
            if(param_globals::parab_solve != parabolic_solver::EXPLICIT)
              ps.mass_i->mult(*ps.tmp_i1, *ps.Irhs);
            else
              *ps.Irhs = *ps.tmp_i1;
          }
          break;
        }

        case Illum: {
          sf_vec* illum_vec = ion.miif->gdata[limpet::illum];

          if(illum_vec == NULL) {
            log_msg(0,5,0, "Cannot apply illumination stim: global vector not present!");
            EXIT(EXIT_FAILURE);
          } else {
            apply_stim_to_vector(s, *illum_vec, false);
          }

          break;
        }

        default: break;
      }
    }
  }
}

void Electrics::clamp_Vm() {
  for(stimulus & s : stimuli) {
    if(s.phys.type == Vm_clmp && s.is_active())
      apply_stim_to_vector(s, *parab_solver.Vmv, false);  // (reconstructed; clamping overwrites rather than adds)
  }
}

void Electrics::stimulate_extracellular()
{
  if(param_globals::bidomain) {
    // we check if the DBC layout changed; if so, we recompute the matrix and the dbc_manager
    bool dbcs_have_updated = ellip_solver.dbc != nullptr && ellip_solver.dbc->dbc_update();
    // (definition of time_not_final elided in this listing)

    if(dbcs_have_updated && time_not_final)
      ellip_solver.rebuild_stiffness(mtype, stimuli, logger);  // (call reconstructed; elided in the listing)

    ellip_solver.phiesrc->set(0.0);

    for(const stimulus & s : stimuli) {
      if(s.is_active() && s.phys.type == I_ex)
        apply_stim_to_vector(s, *ellip_solver.phiesrc, true);  // (call reconstructed; elided in the listing)
    }
  }
}

// (signature partly elided in the listing; reconstructed from the call sites)
void compute_restr_idx(sf_mesh & mesh,
                       SF::vector<mesh_int_t> & inp_idx,
                       SF::vector<mesh_int_t> & idx)
{
  int mpi_rank = get_rank(), mpi_size = get_size();
  const SF::vector<mesh_int_t> & layout = mesh.pl.algebraic_layout();

  SF::vector<mesh_int_t> sndbuff;

  size_t buffsize = 0;
  idx.resize(0);

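  // each rank broadcasts its requested indices in turn; every rank keeps the
  // entries falling into its algebraic ownership range [start, stop) and
  // stores them shifted to local indexing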
  for(int pid=0; pid < mpi_size; pid++) {
    if(mpi_rank == pid) {
      sndbuff  = inp_idx;
      buffsize = sndbuff.size();
    }

    MPI_Bcast(&buffsize, sizeof(size_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
    sndbuff.resize(buffsize);
    MPI_Bcast(sndbuff.data(), buffsize*sizeof(mesh_int_t), MPI_BYTE, pid, PETSC_COMM_WORLD);

    mesh_int_t start = layout[mpi_rank], stop = layout[mpi_rank+1];

    for(mesh_int_t i : sndbuff) {
      if(i >= start && i < stop)
        idx.push_back(i - start);
    }
  }

  binary_sort(idx); unique_resize(idx);
}

// (signature partly elided in the listing; reconstructed from the call sites)
void compute_restr_idx_async(sf_mesh & mesh,
                             SF::vector<mesh_int_t> & inp_idx,
                             SF::vector<mesh_int_t> & idx)
{
  int mpi_rank = get_rank(), mpi_size = get_size();
  const SF::vector<mesh_int_t> & alg_nod = mesh.pl.algebraic_nodes();
  const SF::vector<mesh_int_t> & nbr     = mesh.get_numbering(SF::NBR_SUBMESH);  // (reconstructed; elided in the listing)

  std::map<mesh_int_t, mesh_int_t> amap;  // (exact map type elided in the listing)
  for(mesh_int_t ii : alg_nod)
    amap[nbr[ii]] = ii;

  SF::vector<mesh_int_t> sndbuff;
  size_t buffsize = 0;
  idx.resize(0);

  for(int pid=0; pid < mpi_size; pid++) {
    if(mpi_rank == pid) {
      sndbuff  = inp_idx;
      buffsize = sndbuff.size();
    }

    MPI_Bcast(&buffsize, sizeof(size_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
    sndbuff.resize(buffsize);
    MPI_Bcast(sndbuff.data(), buffsize*sizeof(mesh_int_t), MPI_BYTE, pid, PETSC_COMM_WORLD);

    for(mesh_int_t i : sndbuff) {
      if(amap.count(i))
        idx.push_back(amap[i]);
    }
  }

  binary_sort(idx); unique_resize(idx);
}

void setup_dataout(const int dataout, std::string dataout_vtx, mesh_t grid,
                   SF::vector<mesh_int_t>* & restr, bool async)
{
  sf_mesh & mesh = get_mesh(grid);

  switch(dataout) {

    case DATAOUT_SURF: {
      sf_mesh surfmesh;
      compute_surface_mesh(mesh, SF::NBR_SUBMESH, surfmesh);

      SF::vector<mesh_int_t> idxbuff(surfmesh.con);
      binary_sort(idxbuff); unique_resize(idxbuff);

      restr = new SF::vector<mesh_int_t>();

      // for sync output, we need restr to hold the local indices into the petsc vectors
      // that have been permuted to canonical numbering. For async output, we need the
      // non-overlapping decomposition of indices in NBR_SUBMESH numbering; the petsc
      // indices will be computed at a later stage. The only reason we need to call
      // compute_restr_idx_async is that surface nodes in NBR_SUBMESH may reside on
      // partitions where they are not part of the algebraic nodes, thus we need to
      // recommunicate to make sure the data layout is correct. We do not have this
      // problem for DATAOUT_VTX.
      if(!async)
        compute_restr_idx(mesh, idxbuff, *restr);
      else
        compute_restr_idx_async(mesh, idxbuff, *restr);

      break;
    }

    case DATAOUT_VTX: {
      SF::vector<mesh_int_t> idxbuff;

      update_cwd();

      set_dir(INPUT);
      read_indices(idxbuff, dataout_vtx, mesh, SF::NBR_REF, true, PETSC_COMM_WORLD);
      set_dir(CURDIR);

      restr = new SF::vector<mesh_int_t>();

      if(!async) {
        // (retrieval of the numbering `nbr` used below is elided in this listing)
        for(mesh_int_t & i : idxbuff) i = nbr[i];

        compute_restr_idx(mesh, idxbuff, *restr);
      } else {
        *restr = idxbuff;
      }

      break;
    }

    case DATAOUT_NONE:
    case DATAOUT_VOL:
    default: break;
  }
}

void Electrics::setup_output()
{
  int rank = get_rank();
  SF::vector<mesh_int_t>* restr_i = NULL;
  SF::vector<mesh_int_t>* restr_e = NULL;
  set_dir(OUTPUT);

  setup_dataout(param_globals::dataout_i, param_globals::dataout_i_vtx, intra_elec_msh,
                restr_i, param_globals::num_io_nodes > 0);

  if(param_globals::dataout_i)
    output_manager.register_output(parab_solver.Vmv, intra_elec_msh, 1, param_globals::vofile, "mV", restr_i);

  if(param_globals::bidomain) {
    setup_dataout(param_globals::dataout_e, param_globals::dataout_e_vtx, extra_elec_msh,
                  restr_e, param_globals::num_io_nodes > 0);

    if(param_globals::dataout_i)
      output_manager.register_output(ellip_solver.phie_i, intra_elec_msh, 1, param_globals::phieifile, "mV", restr_i);
    if(param_globals::dataout_e)
      output_manager.register_output(ellip_solver.phie, extra_elec_msh, 1, param_globals::phiefile, "mV", restr_e);
  }

  if(param_globals::dump_data & DUMP_IC) {
    output_manager.register_output(parab_solver.Ic,   intra_elec_msh, 1, "Ic.igb",   "uA/cm^2", restr_i);
    output_manager.register_output(parab_solver.IIon, intra_elec_msh, 1, "Iion.igb", "uA/cm^2", restr_i);
  }

  if(param_globals::dump_data & DUMP_IVOL)
    output_manager.register_output(parab_solver.Ivol, intra_elec_msh, 1, "Ivol.igb", "uA", restr_i);
  if(param_globals::dump_data & DUMP_IACT)
    output_manager.register_output(parab_solver.Iact, intra_elec_msh, 1, "Iact.igb", "uA", restr_i);

  if(phie_rcv.pts.size())
    output_manager.register_output_sync(phie_rcv.phie_rec, phie_recv_msh, 1, param_globals::phie_recovery_file, "mV");

  if(param_globals::num_trace) {
    sf_mesh & imesh = get_mesh(intra_elec_msh);
    open_trace(ion.miif, param_globals::num_trace, param_globals::trace_node, NULL, &imesh);
  }

  // initialize generic logger for IO timings per time_dt
  IO_stats.init_logger("IO_stats.dat");
}

void Electrics::dump_matrices()
{
  std::string bsname = param_globals::dump_basename;
  std::string fn;

  set_dir(OUTPUT);

  // dump monodomain matrices
  if ( param_globals::parab_solve==1 ) {
    // using Crank-Nicolson
    fn = bsname + "_Ki_CN.bin";
    parab_solver.lhs_parab->write(fn.c_str());
  }
  fn = bsname + "_Ki.bin";
  parab_solver.rhs_parab->write(fn.c_str());

  fn = bsname + "_Mi.bin";
  parab_solver.mass_i->write(fn.c_str());

  if ( param_globals::bidomain ) {
    fn = bsname + "_Kie.bin";
    ellip_solver.phie_mat->write(fn.c_str());

    fn = bsname + "_Me.bin";
    ellip_solver.mass_e->write(fn.c_str());
  }
}


double Electrics::timer_val(const int timer_id)
{
  // determine the stimulus linked to this timer, if any
  int sidx = stimidx_from_timeridx(stimuli, timer_id);
  double val = 0.0;
  if(sidx != -1) {
    stimuli[sidx].value(val);
  }
  else
    val = std::nan("NaN");

  return val;
}

std::string Electrics::timer_unit(const int timer_id)
{
  int sidx = stimidx_from_timeridx(stimuli, timer_id);
  std::string s_unit;

  if(sidx != -1)
    // found a timer-linked stimulus
    s_unit = stimuli[sidx].pulse.wave.f_unit;

  return s_unit;
}

int stimidx_from_timeridx(const SF::vector<stimulus> & stimuli, const int timer_id)
{
  // the only electrical quantities linked to a timer are stimuli,
  // thus we search for timer links only among the stimuli for now

  // iterate over stimuli
  for(size_t i = 0; i<stimuli.size(); i++)
  {
    const stimulus & s = stimuli[i];

    if(s.ptcl.timer_id == timer_id)
      return s.idx;
  }

  // the timer index is not linked to any stimulus
  return -1;
}

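// Build the nodal kappa vector used by the parabolic solver: for each node,
// kappa = k * (cell surface-to-volume ratio) * (tissue volume fraction) of its
// IMP region, looked up through the per-node region mask IIFmask.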
void get_kappa(sf_vec & kappa, IMPregion *ir, limpet::MULTI_IF & miif, double k)
{
  double* reg_kappa = new double[miif.N_IIF];

  for(int i=0; i<miif.N_IIF; i++)
    reg_kappa[i] = k * miif.IIF[i]->cgeom().SVratio * ir[i].volFrac;

  double *kd = kappa.ptr();

  for(int i = 0; i < miif.numNode; i++)
    kd[i] = reg_kappa[(int) miif.IIFmask[i]];

  kappa.release_ptr(kd);
  delete [] reg_kappa;
}

void set_cond_type(MaterialType & m, cond_t type)  // (signature reconstructed; elided in the listing)
{
  for(size_t i=0; i < m.regions.size(); i++) {
    elecMaterial *emat = static_cast<elecMaterial*>(m.regions[i].material);
    emat->g = type;
  }
}

void Electrics::setup_solvers()
{
  set_dir(OUTPUT);
  parab_solver.init();
  parab_solver.rebuild_matrices(mtype, *ion.miif, logger);  // (call reconstructed; elided in the listing)

  if (param_globals::bidomain) {
    ellip_solver.init();
    ellip_solver.rebuild_matrices(mtype, stimuli, logger);  // (call reconstructed; elided in the listing)
  }

  if(param_globals::dump2MatLab)
    dump_matrices();
}

bool have_dbc_stims(const SF::vector<stimulus> & stimuli)  // (signature reconstructed; elided in the listing)
{
  for(const stimulus & s : stimuli) {
    if(is_dbc(s.phys.type))
      return true;
  }
  return false;
}

const char* get_tsav_ext(double time)
{
  int min_idx = -1;
  double min_diff = 1e100;

  for(int i=0; i<param_globals::num_tsav; i++)
  {
    double diff = fabs(param_globals::tsav[i] - time);
    if(min_diff > diff) {
      min_diff = diff;
      min_idx  = i;
    }
  }

  if(min_idx == -1)
    min_idx = 0;

  return param_globals::tsav_ext[min_idx];
}

void Electrics::checkpointing()
{
  const timer_manager & tm = *user_globals::tm_manager;

  // regular user selected state save
  if (tm.trigger(iotm_chkpt_list)) {
    char save_fnm[1024];
    const char* tsav_ext = get_tsav_ext(tm.time);

    snprintf(save_fnm, sizeof save_fnm, "%s.%s.roe", param_globals::write_statef, tsav_ext);

    ion.miif->dump_state(save_fnm, tm.time, intra_elec_msh, false, GIT_COMMIT_COUNT);
  }

  // checkpointing based on interval
  if (tm.trigger(iotm_chkpt_intv)) {
    char save_fnm[1024];
    snprintf(save_fnm, sizeof save_fnm, "checkpoint.%.1f.roe", tm.time);
    ion.miif->dump_state(save_fnm, tm.time, intra_elec_msh, false, GIT_COMMIT_COUNT);
  }
}

void elliptic_solver::init()
{
  double t0, t1, dur;
  get_time(t0);
  stats.init_logger("ell_stats.dat");

  // here we can differentiate the solvers (flavor-specific instantiation elided in this listing)
  sf_mesh & extra_mesh = get_mesh(extra_elec_msh);
  sf_vec::ltype alg_type = sf_vec::algebraic;
  const int dpn = 1;

  SF::init_vector(&phie,    extra_mesh, dpn, alg_type);
  SF::init_vector(&phiesrc, extra_mesh, dpn, alg_type);
  SF::init_vector(&currtmp, extra_mesh, dpn, alg_type);

  if(mesh_is_registered(intra_elec_msh)) {  // (condition reconstructed; elided in the listing)
    sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
    SF::init_vector(&phie_i, intra_mesh, dpn, alg_type);
  }

  int max_row_entries = max_nodal_edgecount(extra_mesh);

  // (instantiation of the phie_mat and mass_e matrix objects elided in this listing)

  // alloc stiffness matrix
  phie_mat->init(extra_mesh, dpn, dpn, max_row_entries);
  // alloc mass matrix
  mass_e  ->init(extra_mesh, dpn, dpn, param_globals::mass_lumping ? 1 : max_row_entries);
  dur = timing(t1, t0);
}

// (first signature line elided in the listing; reconstructed)
void elliptic_solver::rebuild_matrices(MaterialType* mtype,
                                       SF::vector<stimulus> & stimuli,
                                       FILE_SPEC logger)
{
  double t0, t1, dur;
  get_time(t0);
  rebuild_stiffness(mtype, stimuli, logger);
  rebuild_mass(logger);
  dur = timing(t1, t0);
}

// (first signature line elided in the listing; reconstructed)
void elliptic_solver::rebuild_stiffness(MaterialType* mtype,
                                        SF::vector<stimulus> & stimuli,
                                        FILE_SPEC logger)
{
  double t0, t1, dur;
  int log_flag = param_globals::output_level > 1 ? ECHO : 0;

  MaterialType & mt = mtype[Electrics::extra_grid];
  const bool have_dbc = have_dbc_stims(stimuli);

  cond_t condType = sum_cond;
  set_cond_type(mt, condType);

  // get mesh reference
  sf_mesh & mesh = get_mesh(extra_elec_msh);

  get_time(t0);

  // fill the system
  elec_stiffness_integrator stfn_integ(mt);

  phie_mat->zero();
  SF::assemble_matrix(*phie_mat, mesh, stfn_integ);
  phie_mat->scale(-1.0);

  dur = timing(t1,t0);
  log_msg(logger,0,log_flag, "Computed elliptic stiffness matrix in %.3f seconds.", dur);

  // set boundary conditions
  if(have_dbc) {
    log_msg(logger,0,log_flag, "Elliptic lhs matrix enforcing Dirichlet boundaries.");
    get_time(t0);

    if(dbc == nullptr)
      dbc = new dbc_manager(*phie_mat, stimuli);
    else
      dbc->recompute_dbcs();

    dbc->enforce_dbc_lhs();

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Elliptic lhs matrix Dirichlet enforcing done in %.3f seconds.", dur);
  }
  else {
    log_msg(logger,1,ECHO, "Elliptic lhs matrix is singular!");
    // we are dealing with a singular system
    phie_mat_has_nullspace = true;
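    // without any Dirichlet boundary the pure-Neumann operator has the constant
    // vector in its null space; the flag is passed on to the linear solver setup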
  }

  // the solver has not been initialized yet
  set_dir(INPUT);
  get_time(t0);

  setup_linear_solver(logger);

  dur = timing(t1,t0);
  log_msg(logger,0,log_flag, "Initialized elliptic solver in %.5f seconds.", dur);
  set_dir(OUTPUT);
}

void elliptic_solver::rebuild_mass(FILE_SPEC logger)  // (signature reconstructed; elided in the listing)
{
  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
  double t0, t1, dur;
  mass_integrator mass_integ;

  // get mesh reference
  sf_mesh & mesh = get_mesh(extra_elec_msh);
  get_time(t0);
  mass_e->zero();

  if(param_globals::mass_lumping) {
    SF::assemble_lumped_matrix(*mass_e, mesh, mass_integ);
  } else {
    SF::assemble_matrix(*mass_e, mesh, mass_integ);
  }
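  // mass lumping collapses each row onto the diagonal, which is why the lumped
  // matrix was allocated with a single entry per row in init()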

  dur = timing(t1,t0);
  log_msg(logger,0,log_flag, "Computed elliptic mass matrix in %.3f seconds.", dur);
}

void elliptic_solver::setup_linear_solver(FILE_SPEC logger)
{
  tol    = param_globals::cg_tol_ellip;
  max_it = param_globals::cg_maxit_ellip;

  std::string default_opts;
  std::string solver_file;
  solver_file = param_globals::ellip_options_file;
  if (param_globals::flavor == std::string("ginkgo")) {
    default_opts = std::string(
R"({
  "type": "solver::Cg",
  "criteria": [
    {
      "type": "Iteration",
      "max_iters": 100
    },
    {
      "type": "ResidualNorm",
      "reduction_factor": 1e-4
    }
  ],
  "preconditioner": {
    "type": "solver::Multigrid",
    "mg_level": [
      {
        "type": "multigrid::Pgm",
        "deterministic": true
      }
    ],
    "criteria": [
      {
        "type": "Iteration",
        "max_iters": 1
      }
    ],
    "coarsest_solver": {
      "type": "preconditioner::Schwarz",
      "local_solver": {
        "type": "preconditioner::Ilu"
      }
    },
    "max_levels": 10,
    "min_coarse_rows": 8,
    "default_initial_guess": "zero"
  }
})");
  } else if (param_globals::flavor == std::string("petsc")) {
    default_opts = std::string("-ksp_type cg -pc_type hypre -pc_hypre_type boomeramg -pc_hypre_boomeramg_max_iter 1 -pc_hypre_boomeramg_strong_threshold 0.0 -options_left");
  }

  lin_solver->setup_solver(*phie_mat, tol, max_it, param_globals::cg_norm_ellip,
                           "elliptic PDE", phie_mat_has_nullspace,
                           logger, solver_file.c_str(), default_opts.c_str());
}

void elliptic_solver::solve(sf_mat & Ki, sf_vec & Vmv, sf_vec & tmp_i)
{
  double t0,t1;

  // assembly of rhs for FE
  if (phiesrc->mag() > 0.0) {
    mass_e->mult(*phiesrc, *currtmp);
    // (copy of currtmp back into phiesrc elided in this listing)
  }

  Ki.mult(Vmv, tmp_i);

  bool add = true;
  i2e->forward(tmp_i, *phiesrc, add);

  if(dbc != nullptr) {
    // (Dirichlet enforcement on the rhs elided in this listing)
  }

  get_time(t0);
  (*lin_solver)(*phie, *phiesrc);

  // treat solver statistics
  auto dur = timing(t1, t0);
  lin_solver->time += dur;
  stats.slvtime    += dur;

  if(lin_solver->reason < 0) {
    log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
            petsc_get_converged_reason_str(lin_solver->reason));
    EXIT(1);
  }

  add = false;
  i2e->backward(*phie, *phie_i, add);
}

void elliptic_solver::solve()  // (signature elided in the listing; overload name presumed)
{
  double t0,t1;

  if(dbc != nullptr) {
    // (Dirichlet enforcement on the rhs elided in this listing)
  }

  get_time(t0);
  (*lin_solver)(*phie, *phiesrc);

  // treat solver statistics
  auto dur = timing(t1, t0);
  lin_solver->time += dur;
  stats.slvtime    += dur;

  if(lin_solver->reason < 0) {
    log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
            petsc_get_converged_reason_str(lin_solver->reason));
    EXIT(1);
  }

  // phie_i is only set up when we have an IntraMesh registered
  if(is_init(phie_i)) {
    bool add = false;
    i2e->backward(*phie, *phie_i, add);
  }
}

void parabolic_solver::init()
{
  double t0, t1, dur;
  get_time(t0);
  stats.init_logger("par_stats.dat");

  // here we can differentiate the solvers (flavor-specific instantiation elided in this listing)

  sf_vec* vm_ptr   = get_data(vm_vec);
  sf_vec* iion_ptr = get_data(iion_vec);

  if(!(vm_ptr != NULL && iion_ptr != NULL)) {
    log_msg(0,5,0, "%s error: global Vm and Iion vectors not properly set up! Ionics seem invalid! Aborting!",
            __func__);
    EXIT(1);
  }

  SF::init_vector(&Vmv);
  SF::init_vector(&IIon);  // (reconstructed; elided in the listing)
  Vmv ->shallow_copy(*vm_ptr);
  IIon->shallow_copy(*iion_ptr);

  if(param_globals::dump_data & DUMP_IC)   SF::init_vector(&Ic,   Vmv);
  if(param_globals::dump_data & DUMP_IVOL) SF::init_vector(&Ivol, Vmv);
  if(param_globals::dump_data & DUMP_IACT) SF::init_vector(&Iact, Vmv);

  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
  sf_vec::ltype alg_type = sf_vec::algebraic;

  int dpn = 1;
  SF::init_vector(&kappa_i, intra_mesh, dpn, alg_type);
  SF::init_vector(&tmp_i1,  intra_mesh, dpn, alg_type);
  SF::init_vector(&tmp_i2,  intra_mesh, dpn, alg_type);
  SF::init_vector(&old_vm,  intra_mesh, dpn, alg_type);

  if(!param_globals::operator_splitting)
    SF::init_vector(&Irhs, intra_mesh, dpn, alg_type);

  // alloc matrices
  int max_row_entries = max_nodal_edgecount(intra_mesh);

  // (instantiation of the rhs_parab, mass_i and lhs_parab matrix objects elided in this listing)

  rhs_parab->init(intra_mesh, dpn, dpn, max_row_entries);
  mass_i   ->init(intra_mesh, dpn, dpn, param_globals::mass_lumping ? 1 : max_row_entries);

  parab_tech = static_cast<parabolic_solver::parabolic_t>(param_globals::parab_solve);
  dur = timing(t1, t0);
}

// (signature elided in the listing; parameter list reconstructed from the body)
void parabolic_solver::rebuild_matrices(MaterialType* mtype, limpet::MULTI_IF & miif, FILE_SPEC logger)
{
  double start, end, period;
  get_time(start);
  double t0, t1, dur;
  mass_integrator mass_integ;
  int dpn = 1;

  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
  MaterialType & mt = mtype[Electrics::intra_grid];

  double Dt = user_globals::tm_manager->time_step;
  get_kappa(*kappa_i, param_globals::imp_region, miif, UM2_to_CM2 / Dt);

  cond_t condType = intra_cond;
  sf_mesh & mesh = get_mesh(intra_elec_msh);

  if( (param_globals::bidomain == MONODOMAIN && param_globals::bidm_eqv_mono) ||
      (param_globals::bidomain == PSEUDO_BIDM) )
    condType = para_cond;

  // set material and conductivity type
  set_cond_type(mt, condType);

  // fill the system
  {
    get_time(t0);

    elec_stiffness_integrator stfn_integ(mt);
    SF::assemble_matrix(*rhs_parab, mesh, stfn_integ);

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed parabolic stiffness matrix in %.3f seconds.", dur);
    get_time(t0);

    if(param_globals::mass_lumping)
      SF::assemble_lumped_matrix(*mass_i, mesh, mass_integ);
    else
      SF::assemble_matrix(*mass_i, mesh, mass_integ);

    sf_vec* empty; SF::init_vector(&empty);
    mass_i->mult_LR(*kappa_i, *empty);

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Computed parabolic mass matrix in %.3f seconds.", dur);
    delete empty;
  }

  // initialize parab lhs
  if(parab_tech != EXPLICIT) {
    // (initialization of lhs_parab from the stiffness matrix elided in this listing)
    // if we have mass lumping, then the nonzero pattern between Mi and Ki is different
    bool same_nonzero = param_globals::mass_lumping == false;

    if (parab_tech==CN) {
      lhs_parab->scale(-param_globals::theta);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
    }
    else if (parab_tech==O2dT) {
      lhs_parab->scale(-0.5);
      mass_i->scale(0.5);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
      lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
    }
  }
  else {
    // (creation of inv_mass_diag from the diagonal of the mass matrix elided in this listing)

    SF_real* p = inv_mass_diag->ptr();

    for(int i=0; i<inv_mass_diag->lsize(); i++)
      p[i] = 1.0 / p[i];

    inv_mass_diag->release_ptr(p);  // (reconstructed; elided in the listing)
  }

  if(parab_tech == CN || parab_tech == O2dT) {
    set_dir(INPUT);
    get_time(t0);

    setup_linear_solver(logger);

    dur = timing(t1,t0);
    log_msg(logger,0,log_flag, "Initialized parabolic solver in %.5f seconds.", dur);
    set_dir(OUTPUT);
  }
  period = timing(end, start);
}

void parabolic_solver::setup_linear_solver(FILE_SPEC logger)
{
  tol    = param_globals::cg_tol_parab;
  max_it = param_globals::cg_maxit_parab;

  std::string default_opts;
  std::string solver_file;
  solver_file = param_globals::parab_options_file;
  if (param_globals::flavor == std::string("ginkgo")) {
    default_opts = std::string(
R"(
{
  "type": "solver::Cg",
  "criteria": [
    {
      "type": "Iteration",
      "max_iters": 100
    },
    {
      "type": "ResidualNorm",
      "reduction_factor": 1e-4
    }
  ],
  "preconditioner": {
    "type": "preconditioner::Schwarz",
    "local_solver": {
      "type": "preconditioner::Ilu"
    }
  }
}
)");
  } else if (param_globals::flavor == std::string("petsc")) {
    default_opts = std::string("-pc_type bjacobi -sub_pc_type ilu -ksp_type cg");
  }

  lin_solver->setup_solver(*lhs_parab, tol, max_it, param_globals::cg_norm_parab,
                           "parabolic PDE", false, logger, solver_file.c_str(),
                           default_opts.c_str());
}

void parabolic_solver::solve(sf_vec & phie_i)
{
  switch (parab_tech) {
    case CN:   solve_CN(phie_i);   break;
    case O2dT: solve_O2dT(phie_i); break;
    default:   solve_EF(phie_i);   break;
  }
}

void parabolic_solver::solve_CN(sf_vec & phie_i)
{
  double t0,t1;
  // assembly of rhs for CN
  if (param_globals::bidomain == BIDOMAIN) {
    tmp_i1->deep_copy(phie_i);
    tmp_i1->add_scaled(*Vmv, 1.0 - param_globals::theta);
    rhs_parab->mult(*tmp_i1, *tmp_i2);
  }
  else {
    rhs_parab->mult(*Vmv, *tmp_i2);
    *tmp_i2 *= 1.0 - param_globals::theta;
  }
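  // Crank-Nicolson rhs: M*Vm + (1 - theta) * K * (Vm [+ phie]); the matching
  // lhs, M - theta*K, was assembled once in rebuild_matrices()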

  mass_i->mult(*Vmv, *tmp_i1);
  *tmp_i1 += *tmp_i2;

  // add current contributions to rhs
  if(!param_globals::operator_splitting)
    tmp_i1->add_scaled(*Irhs, -1.0);

  get_time(t0);

  (*lin_solver)(*Vmv, *tmp_i1);

  if(lin_solver->reason < 0) {
    log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
            petsc_get_converged_reason_str(lin_solver->reason));
    EXIT(1);
  }

  // treat solver statistics
  auto dur = timing(t1, t0);
  lin_solver->time += dur;
  stats.slvtime    += dur;
}

void parabolic_solver::solve_O2dT(sf_vec & phie_i)
{
  double t0,t1;
  // assembly of rhs for the O2dT scheme
  if (param_globals::bidomain == BIDOMAIN) {
    tmp_i2->deep_copy(phie_i);
    tmp_i2->add_scaled(*Vmv, 0.5);
    rhs_parab->mult(*tmp_i2, *tmp_i1);  // tmp_i1 = K_i(Vm^t * 0.5 + phi_e)
  }
  else {
    rhs_parab->mult(*Vmv, *tmp_i1);
    *tmp_i1 *= 0.5;                     // tmp_i1 = 0.5 * K_i Vm^t
  }

  mass_i->mult(*Vmv, *tmp_i2);          // tmp_i2 = M/2 Vm^t
  tmp_i1->add_scaled(*tmp_i2, 4.0);     // tmp_i1 = (2M + K_i/2) Vm^t
  mass_i->mult(*old_vm, *tmp_i2);       // tmp_i2 = M/2 Vm^{t-1}

  tmp_i1->add_scaled(*tmp_i2, -1.0);    // tmp_i1 = (2M + K_i/2) Vm^t - M/2 Vm^{t-1}
  *old_vm = *Vmv;

  get_time(t0);

  // solve
  (*lin_solver)(*Vmv, *tmp_i1);

  // treat solver statistics
  stats.slvtime += timing(t1, t0);
}

void parabolic_solver::solve_EF(sf_vec & phie_i)
{
  double t0,t1,t2;
  get_time(t0);

  // assembly of rhs for FE
  if (param_globals::bidomain == BIDOMAIN) {
    tmp_i2->deep_copy(phie_i);
    *tmp_i2 += *Vmv;
    rhs_parab->mult(*tmp_i2, *tmp_i1);
  }
  else {
    rhs_parab->mult(*Vmv, *tmp_i1);
  }
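  // explicit (forward Euler) update: Vm += M^{-1} K (Vm [+ phie]); the time step
  // is already folded into the mass matrix through the kappa scaling (kappa ~ 1/dt)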
  *tmp_i1 *= *inv_mass_diag;
  Vmv->add_scaled(*tmp_i1, 1.0);

  if(param_globals::operator_splitting == false)
    Vmv->add_scaled(*Irhs, -1.0);

  // record rhs timing
  stats.slvtime += timing(t1, t0);
}

LAT_detector::LAT_detector()  // (signature reconstructed; elided in the listing)
{
  char* prvSimDir = strlen(param_globals::start_statef) ?
                    get_file_dir(param_globals::start_statef) : NULL;

  const char* extn = ".dat";

  // if compute_APD is set, we need an extra 2 activation detectors
  int addLATs = param_globals::compute_APD ? 2 : 0;

  bool have_sentinel        = param_globals::t_sentinel > 0.0;
  bool need_to_add_sentinel = have_sentinel && (param_globals::sentinel_ID < 0);

  addLATs += need_to_add_sentinel ? 1 : 0;
  acts.resize(param_globals::num_LATs + addLATs);

  int j=0;
  for (int i = 0; i < param_globals::num_LATs; i++ )
  {
    // Phie can be used only with bidomain runs
    if (param_globals::lats[i].method <= 0 || (param_globals::lats[i].measurand == PHIE && !param_globals::bidomain)) {
      log_msg(NULL, 3, 0, "Phie-based LAT measurement requires bidomain >= 1. Ignoring lats[%d].", i);
      continue;
    }

    acts[j].method    = (ActMethod)param_globals::lats[i].method;
    acts[j].threshold = param_globals::lats[i].threshold;
    acts[j].mode      = param_globals::lats[i].mode;
    acts[j].all       = param_globals::lats[i].all;
    acts[j].measurand = (PotType)param_globals::lats[i].measurand;
    acts[j].ID        = param_globals::lats[i].ID;
    acts[j].fout      = NULL;

    if(param_globals::lats[i].all) {
      acts[j].fname = (char*) malloc((strlen(param_globals::lats[i].ID)+strlen(extn)+1)*sizeof(char));
      snprintf(acts[j].fname, strlen(param_globals::lats[i].ID)+strlen(extn)+1, "%s%s", param_globals::lats[i].ID, extn);
    }
    else {
      char prfx[] = "init_acts_";
      int max_len = strlen(prfx) + strlen(param_globals::lats[i].ID) + strlen(extn) + 1;

      acts[j].fname = (char*) malloc(max_len*sizeof(char));
      snprintf(acts[j].fname, max_len, "%s%s%s", prfx, param_globals::lats[i].ID, extn);
    }

    // restarting
    if(prvSimDir != NULL) {
      int len_fname = strlen(prvSimDir)+strlen(acts[j].fname)+2;
      acts[j].prv_fname = (char*) malloc(len_fname*sizeof(char));
      snprintf(acts[j].prv_fname, len_fname, "%s/%s", prvSimDir, acts[j].fname);
    }

    j++;
  }

  if(param_globals::compute_APD) {
    acts[j].method    = ACT_THRESH; // threshold crossing
    acts[j].threshold = param_globals::actthresh;
    acts[j].mode      = 0;          // upstroke
    acts[j].all       = true;
    acts[j].measurand = VM;         // Vm
    //acts[j].ID      = dupstr("Vm_Activation");
    acts[j].fout      = NULL;
    acts[j].fname     = dupstr("vm_activation.dat");

    j++;
    acts[j].method    = ACT_THRESH; // threshold crossing
    acts[j].threshold = param_globals::recovery_thresh;
    acts[j].mode      = 1;          // repolarisation
    acts[j].all       = true;
    acts[j].measurand = VM;         // Vm
    //(*acts)[j+1].ID = param_globals::lats[i].ID;
    acts[j].fout      = NULL;
    acts[j].fname     = dupstr("vm_repolarisation.dat");

    j++;
  }

  // set up sentinel for activity checking
  sntl.activated = have_sentinel;
  sntl.t_start   = param_globals::t_sentinel_start;
  sntl.t_window  = param_globals::t_sentinel;
  sntl.t_quiesc  = -1.;
  sntl.ID        = param_globals::sentinel_ID;

  if(need_to_add_sentinel) {
    // add a default LAT detector as sentinel
    acts[j].method    = ACT_THRESH; // threshold crossing
    acts[j].threshold = param_globals::actthresh;
    acts[j].mode      = 0;          // upstroke
    acts[j].all       = true;
    acts[j].measurand = VM;         // Vm
    //(*acts)[j].ID   = dupstr("Vm_Activation");
    acts[j].fout      = NULL;
    acts[j].fname     = dupstr("vm_sentinel.dat");
    // set sentinel index
    sntl.ID = j;
    j++;
  }

  if(prvSimDir) free(prvSimDir);
}

void print_act_log(FILE_SPEC logger, const SF::vector<Activation> & acts, int idx)
{
  const Activation & act = acts[idx];

  log_msg(logger, 0, 0, "\n");
  log_msg(logger, 0, 0, "LAT detector [%2d]", idx);
  log_msg(logger, 0, 0, "-----------------\n");

  log_msg(logger, 0, 0, "Measurand: %s", act.measurand ? "Phie" : "Vm");
  log_msg(logger, 0, 0, "All: %s", act.all ? "All" : "Only first");
  log_msg(logger, 0, 0, "Method: %s", act.method==ACT_DT ? "Derivative" : "Threshold crossing");

  char buf[64], gt[2], sgn[2];
  snprintf(sgn, sizeof sgn, "%s", act.mode ? "-" : "+");
  snprintf(gt,  sizeof gt,  "%s", act.mode ? "<" : ">");

  if(act.method==ACT_DT)
    snprintf(buf, sizeof buf, "Maximum %sdf/dt %s %.2f mV", sgn, gt, act.threshold);
  else
    snprintf(buf, sizeof buf, "Intersection %sdf/dt with %.2f", sgn, act.threshold);

  log_msg(logger, 0, 0, "Mode: %s", buf);
  log_msg(logger, 0, 0, "Threshold: %.2f mV\n", act.threshold);
}

void LAT_detector::init(sf_vec & vm, sf_vec & phie, int offset, enum physic_t phys_t)
{
  if(!get_physics(phys_t)) {
    log_msg(0,0,5, "There seems to be no EP defined. The LAT detector requires active EP! Aborting LAT setup!");
    return;
  }

  // we use the electrics logger for output
  FILE_SPEC logger = get_physics(phys_t)->logger;

  // TODO(init): except for the shallow copies, shouldn't these be deleted?
  // When to delete them?
  for(size_t i = 0; i < acts.size(); ++i) {
    acts[i].init = 1;
    SF::init_vector(&(acts[i].phi));
    acts[i].phi->shallow_copy(!acts[i].measurand ? vm : phie);
    acts[i].offset = offset;

    SF::init_vector(&(acts[i].phip), acts[i].phi);
    *acts[i].phip = *acts[i].phi;

    // derivative based detector
    if (acts[i].method == ACT_DT) {
      SF::init_vector(&(acts[i].dvp0), acts[i].phi);
      SF::init_vector(&(acts[i].dvp1), acts[i].phi);
      if(acts[i].mode)
        log_msg(NULL,2,0, "Detection of -df/dt|max not implemented, +df/dt|max will be detected.");
    }

    // allocate additional local buffers
    acts[i].ibuf   = (int *)   malloc(acts[i].phi->lsize()*sizeof(int));
    acts[i].actbuf = (double *)malloc(acts[i].phi->lsize()*sizeof(double));

    if (!acts[i].all) {
      SF::init_vector(&acts[i].tm, acts[i].phi->gsize(), acts[i].phi->lsize());
      acts[i].tm->set(-1.);

      // initialize with previous initial activations
      if(acts[i].prv_fname != NULL) {
        set_dir(INPUT);
        size_t nread = acts[i].tm->read_ascii(acts[i].prv_fname);

        if(nread == 0)
          log_msg(NULL,2,ECHO,"Warning: Initialization of LAT[%2d] failed.", i);

        set_dir(OUTPUT);
      }
    }
    else {
      if ( !get_rank() ) {
        // here we should copy over the previous file and open it in append mode
        if(acts[i].prv_fname != NULL) {
          set_dir(INPUT);
          FILE_SPEC in = f_open( acts[i].prv_fname, "r" );
          if(in) {
            log_msg(NULL,2,0, "Copying over of previous activation file not implemented.\n");
            f_close(in);
          }
          else
            log_msg(NULL,3,0,"Warning: Initialization in %s - \n"
                    "Failed to read activation file %s.\n", __func__, acts[i].prv_fname);

          set_dir(OUTPUT);
        }
        acts[i].fout = f_open( acts[i].fname, acts[i].prv_fname==NULL ? "w" : "a" );
      }
    }
    print_act_log(logger, acts, i);
  }

  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
  // (setup of the petsc_to_nodal mapping used by check_acts() is elided in this listing)
}


int output_all_activations(FILE_SPEC fp, int *ibuf, double *act_tbuf, int nlacts)
{
  int rank = get_rank(), gacts = 0, numProc = get_size();

  if (rank == 0) {
    // rank 0 writes directly to the table
    for (int i=0; i<nlacts; i++)
      fprintf(fp->fd, "%d\t%.6f\n", ibuf[i], act_tbuf[i]);

    gacts += nlacts;

    SF::vector<int>    buf_inds;
    SF::vector<double> buf_acts;

    for (int j=1; j<numProc; j++) {
      int acts = 0;
      MPI_Status status;
      MPI_Recv(&acts, 1, MPI_INT, j, 110, PETSC_COMM_WORLD, &status);

      if (acts) {
        buf_inds.resize(acts);
        buf_acts.resize(acts);

        MPI_Recv(buf_inds.data(), acts, MPI_INT,    j, 110, PETSC_COMM_WORLD, &status);
        MPI_Recv(buf_acts.data(), acts, MPI_DOUBLE, j, 110, PETSC_COMM_WORLD, &status);

        for(int ii=0; ii<acts; ii++)
          fprintf(fp->fd, "%d\t%.6f\n", buf_inds[ii], buf_acts[ii]);

        gacts += acts;
      }
    }
    fflush(fp->fd);
  }
  else {
    MPI_Send(&nlacts, 1, MPI_INT, 0, 110, PETSC_COMM_WORLD);
    if (nlacts) {
      MPI_Send(ibuf,     nlacts, MPI_INT,    0, 110, PETSC_COMM_WORLD);
      MPI_Send(act_tbuf, nlacts, MPI_DOUBLE, 0, 110, PETSC_COMM_WORLD);
    }
  }

  MPI_Bcast(&gacts, 1, MPI_INT, 0, PETSC_COMM_WORLD);
  return gacts;
}

int LAT_detector::check_acts(double tm)
{
  int nacts = 0;
  double *a;

  for(Activation* aptr = acts.data(); aptr != acts.end(); aptr++)
  {
    int lacts = 0;
    switch (aptr->method) {
      case ACT_THRESH:
        lacts = check_cross_threshold(*aptr->phi, *aptr->phip, tm,
                                      aptr->ibuf, aptr->actbuf, aptr->threshold, aptr->mode);
        break;

      case ACT_DT:
        lacts = check_mx_derivative(*aptr->phi, *aptr->phip, tm,
                                    aptr->ibuf, aptr->actbuf, *aptr->dvp0, *aptr->dvp1,
                                    aptr->threshold, aptr->mode);
        break;

      default:
        break;
    }

    if (!aptr->all)
      a = aptr->tm->ptr();

    // (retrieval of the canonical node numbering `canon_nbr` elided in this listing)

    for(int j=0; j<lacts; j++) {
      if(aptr->all) {
        int nodal_idx = this->petsc_to_nodal.forward_map(aptr->ibuf[j]);
        aptr->ibuf[j] = canon_nbr[nodal_idx] + aptr->offset;
      }
      else {
        if(a[aptr->ibuf[j]] == -1)
          a[aptr->ibuf[j]] = aptr->actbuf[j];
      }
    }

    if(aptr->all)
      output_all_activations(aptr->fout, aptr->ibuf, aptr->actbuf, lacts);
    else
      aptr->tm->release_ptr(a);

    MPI_Allreduce(MPI_IN_PLACE, &lacts, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);
    nacts += lacts;

    aptr->nacts = nacts;
  }

  return nacts > 0;
}


int LAT_detector::check_quiescence(double tm, double dt)
{
  static int savequitFlag = 0;
  int numNodesActivated = -1;

  if(sntl.activated) {
    // initialization
    if(sntl.t_quiesc < 0. && sntl.t_window >= 0.0 ) {
      log_msg(0,0,ECHO | NONL, "================================================================================================\n");
      log_msg(0,0,ECHO | NONL, "%s() WARNING: simulation is configured to savequit() after %.2f ms of quiescence\n", __func__, sntl.t_window);
      log_msg(0,0,ECHO | NONL, "================================================================================================\n");
      sntl.t_quiesc = 0.0;
    }

    if(tm >= sntl.t_start && !savequitFlag)
    {
      numNodesActivated = acts[sntl.ID].nacts;

      if(numNodesActivated) sntl.t_quiesc  = 0.0;
      else                  sntl.t_quiesc += dt;

      if(sntl.t_window >= 0.0 && sntl.t_quiesc > sntl.t_window && !savequitFlag) {
        savequitFlag++;
        savequit();
      }
    }
  }

  return numNodesActivated;
}


int LAT_detector::check_cross_threshold(sf_vec & vm, sf_vec & vmp, double tm,
                                        int *ibuf, double *actbuf, float threshold, int mode)
{
  SF_real *c = vm.ptr();
  SF_real *p = vmp.ptr();
  int lsize = vm.lsize();
  int nacts = 0, gnacts = 0;

  for (int i=0; i<lsize; i++) {
    int sgn = 1;
    bool triggered = false;
    if(mode==0) { // detect +slope crossing
      triggered = p[i] <= threshold && c[i] > threshold;
    }
    else {        // detect -slope crossing
      triggered = p[i] >= threshold && c[i] < threshold;
      sgn = -1;
    }
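    // the crossing time is located by linear interpolation between the previous
    // sample p[i] (taken at tm - dt) and the current sample c[i] (taken at tm)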

    if (triggered) {
      double tact = tm - param_globals::dt + (threshold-p[i])/(c[i]-p[i])*sgn*param_globals::dt;
      ibuf  [nacts] = i;
      actbuf[nacts] = tact;
      nacts++;
    }
    p[i] = c[i];
  }

  vm.release_ptr(c);
  vmp.release_ptr(p);
  return nacts;
}

int LAT_detector::check_mx_derivative(sf_vec & vm, sf_vec & vmp, double tm,
                                      int *ibuf, double *actbuf, sf_vec & dvp0, sf_vec & dvp1,
                                      float threshold, int mode)
{
  int nacts = 0, gnacts = 0;
  double tact, dt2 = 2 * param_globals::dt;
  int lsize = vm.lsize();
  SF_real ddv0, ddv1, dv, dvdt;
  SF_real *c, *p, *pd0, *pd1;

  c   = vm.ptr();
  p   = vmp.ptr();
  pd0 = dvp0.ptr();
  pd1 = dvp1.ptr();

  for (int i=0; i<lsize; i++ ) {
    dv   = (c[i]-p[i]);
    dvdt = dv/param_globals::dt;
    ddv0 = pd1[i]-pd0[i];
    ddv1 = dv   -pd1[i];

    if (dvdt>=threshold && ddv0>0 && ddv1<0) {
      tact = tm-dt2+(ddv0/(ddv0-ddv1))*param_globals::dt;
      ibuf  [nacts] = i;
      actbuf[nacts] = tact;
      nacts++;
    }
    p  [i] = c[i];
    pd0[i] = pd1[i];
    pd1[i] = dv;
  }

  vm  .release_ptr(c);
  vmp .release_ptr(p);
  dvp0.release_ptr(pd0);
  dvp1.release_ptr(pd1);

  return nacts;
}

void LAT_detector::output_initial_acts()  // (name presumed; signature elided in the listing)
{
  SF::scattering* sc = get_permutation(intra_elec_msh, PETSC_TO_CANONICAL, 1);  // (reconstructed; elided in the listing)
  assert(sc != NULL);

  bool forward = true;

  for (size_t i = 0; i < acts.size(); i++) {
    if (is_init(acts[i].tm)) {
      (*sc)(*acts[i].tm, forward);
      acts[i].tm->write_ascii(acts[i].fname, false);
    }
  }
}

1911 void Electrics::prepace() {
1912  log_msg(NULL, 0, 0, "Using activation times from file %s to distribute prepacing states\n",
1913  param_globals::prepacing_lats);
1914  log_msg(NULL, 0, 0, "Assuming stimulus strength %f uA/uF with duration %f ms for prepacing\n",
1915  param_globals::prepacing_stimstr, param_globals::prepacing_stimdur);
1916 
1917  limpet::MULTI_IF* miif = this->ion.miif;
1918 
1919  const sf_mesh & mesh = get_mesh(intra_elec_msh);
1920  sf_vec* read_lats; SF::init_vector(&read_lats, mesh, 1, sf_vec::algebraic);
1921 
1922  // read in the global distributed vector of all activation times
1923  set_dir(INPUT);
1924  size_t numread = read_lats->read_ascii(param_globals::prepacing_lats);
1925  if (numread == 0) {
1926  log_msg(NULL, 5, 0, "Failed reading required LATs! Skipping prepacing!");
1927  return;
1928  }
1929  set_dir(OUTPUT);
1930 
1932  assert(sc != NULL);
1933 
1934  // permute in-place to petsc permutation
1935  bool forward = false;
1936  (*sc)(*read_lats, forward);
1937 
1938  // take care of negative LAT values
1939  {
1940  PetscReal* lp = read_lats->ptr();
1941  for(int i=0; i<read_lats->lsize(); i++)
1942  if(lp[i] < 0.0) lp[i] = param_globals::tend + 10.0;
1943 
1944  read_lats->release_ptr(lp);
1945  }
1946 
1947  // make LATs relative and figure out the first LAT
1948  // so we know when to save state of each point
1949  SF_real LATmin = read_lats->min();
1950 
1951  if(LATmin < 0.0) {
1952  log_msg(0,3,0, "LAT data is not complete. Skipping prepacing.");
1953  return;
1954  }
1956  SF_real offset = floor(LATmin / param_globals::prepacing_bcl) * param_globals::prepacing_bcl;
1957  SF_real last_tm = param_globals::prepacing_bcl * param_globals::prepacing_beats;
1958 
1959  // compute read_lats[i] = last_tm - (read_lats[i] - offset)
1960  *read_lats += -offset;
1961  *read_lats *= -1.;
1962  *read_lats += last_tm;
1963 
1964  miif->getRealData();
1965  SF_real *save_tm = read_lats->ptr();
1966  SF_real *vm = miif->gdata[limpet::Vm]->ptr();
1967 
1968  for (int ii = 0; ii < miif->N_IIF; ii++) {
1969  if (!miif->N_Nodes[ii]) continue;
1970 
1971  // create sorted array of save times.
1972  SF::vector<SF::mixed_tuple<double,int>> sorted_save(miif->N_Nodes[ii]); // v1 = time, v2 = index
1973  for (int kk = 0; kk < miif->N_Nodes[ii]; kk++) {
1974  sorted_save[kk].v1 = save_tm[miif->NodeLists[ii][kk]];
1975  sorted_save[kk].v2 = kk;
1976  }
1977  std::sort(sorted_save.begin(), sorted_save.end());
1978 
1979  size_t lastidx = sorted_save.size() - 1;
1980  int paced = sorted_save[lastidx].v2; // IMP index of latest node
1981  int csav = 0;
1982 
1983  for (double t = 0; t < sorted_save[lastidx].v1; t += param_globals::dt) {
1984  if (fmod(t, param_globals::prepacing_bcl) < param_globals::prepacing_stimdur &&
1985  t < param_globals::prepacing_bcl * param_globals::prepacing_beats - 1)
1986  miif->ldata[ii][limpet::Vm][paced] += param_globals::prepacing_stimstr * param_globals::dt;
1987 
1988  compute_IIF(*miif->IIF[ii], miif->ldata[ii], paced);
1989 
1990  // the Vm update now always happens outside of the IMP
1991  miif->ldata[ii][limpet::Vm][paced] -= miif->ldata[ii][limpet::Iion][paced] * param_globals::dt;
1992  vm[miif->NodeLists[ii][paced]] = miif->ldata[ii][limpet::Vm][paced];
1993 
1994  while (csav < miif->N_Nodes[ii] - 1 && t >= sorted_save[csav].v1)
1995  dup_IMP_node_state(*miif->IIF[ii], paced, sorted_save[csav++].v2, miif->ldata[ii]);
1996  }
1997 
1998  // get nodes which may be tied for last
1999  while (csav < miif->N_Nodes[ii] - 1)
2000  dup_IMP_node_state(*miif->IIF[ii], paced, sorted_save[csav++].v2, miif->ldata[ii]);
2001  // update global Vm vector
2002  for (int k = 0; k < miif->N_Nodes[ii]; k++) vm[miif->NodeLists[ii][k]] = miif->ldata[ii][limpet::Vm][k];
2003  }
2004 
2005  read_lats->release_ptr(save_tm);
2006  miif->gdata[limpet::Vm]->release_ptr(vm);
2007  miif->releaseRealData();
2008 }
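// Strategy of the loop above, per IMP region: only the node with the latest
// save time ("paced") is actually stimulated and integrated through the whole
// pacing schedule; whenever the clock passes another node's save time, that
// node receives a copy of the paced node's current state via
// dup_IMP_node_state(). In pseudocode:
//
//   sort nodes by save time (ascending)
//   for t in [0, latest save time):
//     if fmod(t, bcl) < stimdur: stimulate the paced node
//     integrate the paced node by one dt
//     while next save time <= t: copy the paced state to that node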
2009 
2010 
2011 void recover_phie_std(sf_vec & vm, phie_recovery_data & rcv)
2012 {
2014  if (!rcv.pts.size())
2015  return;
2016 
2017  int rank = get_rank();
2018 
2019  if(!get_physics(elec_phys)) {
2020  log_msg(0,0,5, "There seems to be no EP defined. Phie recovery requires active EP! Aborting!");
2021  return;
2022  }
2023 
2024  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys));
2025  sf_mat & Ki = *elec->parab_solver.rhs_parab;
2026 
2027  const sf_mesh & imesh = get_mesh(intra_elec_msh);
2028  const SF::vector<mesh_int_t> & alg_nod = imesh.pl.algebraic_nodes();
2029 
2030  SF_int start, end;
2031  vm.get_ownership_range(start, end);
2032 
2033  if(!rcv.Im) {
2034  SF::init_vector(&rcv.Im, &vm);
2035  SF::init_vector(&rcv.dphi, &vm);
2036  }
2037 
2038  SF_int r_start, r_end;
2039  rcv.phie_rec->get_ownership_range(r_start, r_end);
2040 
2041  SF_real *ph_r = rcv.phie_rec->ptr();
2042 
2043  // use minimum distance to ensure r>0
2044  // consistent with the line source approximation, the "cable radius"
2045  // is used as a lower limit for the source-field point distance
2046  float minDist = 2. / param_globals::imp_region[0].cellSurfVolRatio; // radius in um
2047 
2048  Ki.mult(vm, *rcv.Im);
2049  int numpts = rcv.pts.size() / 3;
2050  Point fpt, cpt;
2051 
2052  for (int j=0; j<numpts; j++) {
2053  fpt = rcv.pts.data() + j*3;
2054 
2055  *rcv.dphi = *rcv.Im;
2056  SF_real* dp = rcv.dphi->ptr();
2057 
2058  for (size_t i = 0; i<alg_nod.size(); i++)
2059  {
2060  mesh_int_t loc_nodal_idx = alg_nod[i];
2061  mesh_int_t loc_petsc_idx = local_nodal_to_local_petsc(imesh, rank, loc_nodal_idx);
2062  cpt = imesh.xyz.data()+loc_nodal_idx*3;
2063 
2064  double r = dist(fpt, cpt) + minDist;
2065  dp[loc_petsc_idx] /= r;
2066  }
2067 
2068  rcv.dphi->release_ptr(dp);
2069 
2070  SF_real phi = rcv.dphi->sum() / 4. / M_PI / rcv.gBath;
2071  if ( (j>=r_start) && (j<r_end) )
2072  ph_r[j-r_start] = phi;
2073  }
2074 
2075  rcv.phie_rec->release_ptr(ph_r);
2076 }
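// The recovery above evaluates, for every field point x_f, the line-source
// style sum
//
//   phie(x_f) = 1/(4*pi*gBath) * sum_i Im_i / (r_i + minDist),  Im = Ki * Vm,
//
// i.e. the transmembrane currents obtained from the parabolic stiffness
// matrix act as point sources in an unbounded bath of conductivity gBath,
// with the cable radius added to each distance to keep r > 0. A standalone
// sketch of the summation step (plain C++, inputs assumed precomputed):
//
//   double recover_phie_at_point(const std::vector<double>& Im,
//                                const std::vector<double>& r, double g_bath)
//   {
//     double sum = 0.0;
//     for (size_t i = 0; i < Im.size(); i++)
//       sum += Im[i] / r[i];               // r[i] = dist(fpt, cpt) + minDist
//     return sum / (4.0 * M_PI * g_bath);
//   }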
2077 
2078 int postproc_recover_phie()
2079 {
2080  int err = 0, rank = get_rank();
2081 
2082  if(!get_physics(elec_phys)) {
2083  log_msg(0,0,5, "There seems to be no EP defined. Phie recovery requires active EP! Aborting!");
2084  return 1;
2085  }
2086 
2087  sf_mesh & imesh = get_mesh(intra_elec_msh);
2088  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys));
2089  phie_recovery_data & phie_rcv = elec->phie_rcv;
2090 
2091  // we close the files of the default electrics if there are any open
2092  elec->output_manager.close_files_and_cleanup();
2093 
2094  // register output
2095  set_dir(POSTPROC);
2096  igb_output_manager phie_rec_out;
2097  phie_rec_out.register_output(phie_rcv.phie_rec, phie_recv_msh, 1,
2098  param_globals::phie_recovery_file, "mV");
2099 
2100  // Buffer for Vm data
2101  sf_vec* vm = get_data(vm_vec); assert(vm);
2102 
2103  // set up igb header and point fd to start of Vm file
2104  set_dir(OUTPUT);
2105  IGBheader vm_igb;
2106  if(rank == 0) {
2107  FILE_SPEC file = f_open(param_globals::vofile, "r");
2108  if(file != NULL) {
2109  vm_igb.fileptr(file->fd);
2110  vm_igb.read();
2111 
2112  if(vm_igb.x() != vm->gsize()) {
2113  log_msg(0,4,0, "%s error: Vm dimension does not match the %s file. Aborting recovery!\n",
2114  __func__, param_globals::vofile);
2115  err++;
2116  }
2117 
2118  delete file;
2119  }
2120  else err++;
2121  }
2122 
2123  err = get_global(err, MPI_MAX);
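// Only rank 0 inspects the IGB header, so the error flag is synchronized with
// a MPI_MAX reduction (get_global) before any rank proceeds. The bare MPI
// equivalent of this pattern is (a sketch, outside openCARP's wrappers):
//
//   int err = 0;
//   if (rank == 0) err = check_header();   // check_header() is hypothetical
//   MPI_Allreduce(MPI_IN_PLACE, &err, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
//   if (err) return err;                   // all ranks abort together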
2124 
2125  if(err == 0) {
2126  FILE* fd = static_cast<FILE*>(vm_igb.fileptr());
2127 
2128  // number of data slices
2129  const int num_io = user_globals::tm_manager->timers[iotm_spacedt]->numIOs;
2130 
2131  // scatterers
2132  SF::scattering* petsc_to_canonical = get_permutation(intra_elec_msh, PETSC_TO_CANONICAL, 1);
2133  assert(petsc_to_canonical != NULL);
2134 
2135  // loop over vm slices and recover phie
2136  for(int i=0; i<num_io; i++) {
2137  log_msg(0,0,0, "Step %d / %d", i+1, num_io);
2138  size_t nread = vm->read_binary<float>(fd);
2139 
2140  if(nread != size_t(vm->gsize())) {
2141  log_msg(0,3,0, "%s warning: read incomplete data slice! Aborting!", __func__);
2142  err++;
2143  break;
2144  }
2145 
2146  // permute vm_buff
2147  bool forward = false;
2148  (*petsc_to_canonical)(*vm, forward);
2149 
2150  // do phie computation
2151  recover_phie_std(*vm, phie_rcv);
2152 
2153  phie_rec_out.write_data();
2154  }
2155 
2156  phie_rec_out.close_files_and_cleanup();
2157  }
2158  return err;
2159 }
2160 
2161 void setup_phie_recovery_data(phie_recovery_data & data)
2162 {
2164  if(!get_physics(elec_phys) ) {
2165  log_msg(0,0,5, "There seems to be no EP defined. Phie recovery requires active EP! Aborting!");
2166  return;
2167  }
2168 
2169  int rank = get_rank(), size = get_size();
2170  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys));
2171 
2172  sf_mesh & imesh = get_mesh(intra_elec_msh);
2173  const std::string basename = param_globals::phie_rec_ptf;
2174  SF::vector<mesh_int_t> ptsidx;
2175 
2176  set_dir(INPUT);
2177  SF::read_points(basename, imesh.comm, data.pts, ptsidx);
2178  make_global(data.pts, imesh.comm); // we want all ranks to have all points
2179 
2180  // set up parallel layout of recovery points
2181  SF::vector<mesh_int_t> layout;
2182  layout_from_count(mesh_int_t(ptsidx.size()), layout, imesh.comm);
2183 
2184  // set up petsc_vector for recovered potentials
2185  SF::init_vector(&data.phie_rec, layout[size], layout[rank+1]-layout[rank], 1, sf_vec::algebraic);
2186 
2187  // get conductivity
2188  SF::vector<RegionSpecs> & intra_regions = elec->mtype[Electrics::intra_grid].regions;
2189  data.gBath = static_cast<elecMaterial*>(intra_regions[0].material)->BathVal[0];
2190 }
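// layout_from_count above presumably builds the usual prefix-sum layout:
// layout[r] is the first recovery point owned by rank r, layout[size] the
// global count, and layout[rank+1] - layout[rank] the local count, which is
// exactly how it is consumed by init_vector. A sketch of such a construction
// (hypothetical, not the SF implementation):
//
//   std::vector<int> layout(size + 1, 0);
//   MPI_Allgather(&my_count, 1, MPI_INT, layout.data() + 1, 1, MPI_INT, comm);
//   for (int r = 0; r < size; r++)
//     layout[r + 1] += layout[r];          // exclusive prefix sum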
2191 
2192 void Laplace::initialize()
2193 {
2195  int rank = get_rank();
2196 
2197  assert(param_globals::bidomain == BIDOMAIN);
2198  double t1, t2;
2199  get_time(t1);
2200 
2201  // set up Extracellular tissue
2202  set_elec_tissue_properties(mtype, Electrics::extra_grid, logger);
2203  region_mask(extra_elec_msh, mtype[Electrics::extra_grid].regions,
2204  mtype[Electrics::extra_grid].regionIDs, true, "gregion_e");
2205 
2206  // set up a subset of the complete electrical mappings
2207  int dpn = 1;
2209 
2210  if(mesh_is_registered(intra_elec_msh)) {
2211  // set up Intracellular tissue
2212  set_elec_tissue_properties(mtype, Electrics::intra_grid, logger);
2213  region_mask(intra_elec_msh, mtype[Electrics::intra_grid].regions,
2214  mtype[Electrics::intra_grid].regionIDs, true, "gregion_i");
2215 
2218  }
2219 
2220  // set up stimuli
2221  init_stim_info();
2222  stimuli.resize(param_globals::num_stim);
2224  for(int i=0; i<param_globals::num_stim; i++) {
2225  // construct new stimulus
2226  stimulus & s = stimuli[i];
2227 
2228  if(user_globals::using_legacy_stimuli)
2229  s.translate(i);
2230 
2231  s.setup(i);
2232 
2233  if(s.phys.type == Phi_ex) {
2234  s.pulse.wform = constPulse;
2235  sample_wave_form(s.pulse, i);
2236  }
2237  }
2238 
2239  set_dir(OUTPUT);
2240 
2241  ellip_solver.init();
2242  ellip_solver.rebuild_matrices(mtype, stimuli, logger);
2243 
2244  if(param_globals::dump2MatLab) {
2245  std::string bsname = param_globals::dump_basename;
2246  std::string fn;
2247 
2248  set_dir(OUTPUT);
2249  fn = bsname + "_Kie.bin";
2250  ellip_solver.phie_mat->write(fn.c_str());
2251  }
2252 
2253  // the Laplace solver executes only once, thus we need a single-step timer
2254  timer_idx = user_globals::tm_manager->add_singlestep_timer(0.0, 0.0, "laplace trigger", nullptr);
2255 
2256  SF::vector<mesh_int_t>* restr_i = NULL;
2257  SF::vector<mesh_int_t>* restr_e = NULL;
2258 
2259  setup_dataout(param_globals::dataout_e, param_globals::dataout_e_vtx, extra_elec_msh,
2260  restr_e, param_globals::num_io_nodes > 0);
2261  if(param_globals::dataout_e)
2262  output_manager.register_output(ellip_solver.phie, extra_elec_msh, 1, param_globals::phiefile, "mV", restr_e);
2263 
2264  if(mesh_is_registered(intra_elec_msh)) {
2265  setup_dataout(param_globals::dataout_i, param_globals::dataout_i_vtx, intra_elec_msh,
2266  restr_i, param_globals::num_io_nodes > 0);
2267  if(param_globals::dataout_i)
2268  output_manager.register_output(ellip_solver.phie_i, intra_elec_msh, 1, param_globals::phieifile, "mV", restr_i);
2269  }
2270 
2271  this->initialize_time += timing(t2, t1);
2273  this->compute_step();
2274 }
2275 
2276 void Laplace::destroy()
2277 {}
2278 
2279 void Laplace::compute_step()
2280 {
2282  // Laplace compute might be called multiple times, but we only want to run once.
2283  if(!ellip_solver.lin_solver) return;
2284 
2285  double t0, t1, dur;
2286  log_msg(0,0,0, "Solving Laplace problem ..");
2287 
2288  get_time(t0);
2290  dur = timing(t1,t0);
2291 
2292  log_msg(0,0,0, "Done in %.5f seconds.", dur);
2293 
2295  this->compute_time += timing(t1, t0);
2296  set_dir(OUTPUT);
2297  output_manager.write_data();
2298  output_manager.close_files_and_cleanup();
2299 
2300  // we clear the elliptic matrices and solver to save some memory when computing
2301  // the laplace solution on-the-fly
2302  delete ellip_solver.mass_e; ellip_solver.mass_e = NULL;
2303  delete ellip_solver.phie_mat; ellip_solver.phie_mat = NULL;
2304  delete ellip_solver.lin_solver; ellip_solver.lin_solver = NULL;
2305 }
2306 
2307 void Laplace::output_step()
2308 {}
2309 
2310 double Laplace::timer_val(const int timer_id)
2311 {
2312  int sidx = stimidx_from_timeridx(stimuli, timer_id);
2313  double val = 0.0;
2314 
2315  if(sidx != -1) stimuli[sidx].value(val);
2316  else val = std::nan("NaN");
2317  return val;
2318 }
2319 
2320 std::string Laplace::timer_unit(const int timer_id)
2321 {
2322  int sidx = stimidx_from_timeridx(stimuli, timer_id);
2323  std::string s_unit;
2324  if(sidx != -1) s_unit = stimuli[sidx].pulse.wave.f_unit;
2325  return s_unit;
2326 }
2327 
2328 void constant_total_stimulus_current(SF::vector<stimulus> & stimuli,
2329  sf_mat & mass_i,
2330  sf_mat & mass_e,
2331  limpet::MULTI_IF *miif,
2332  FILE_SPEC logger)
2333 {
2334  static hashmap::unordered_map<mesh_int_t, mesh_int_t> alg_idx_map;
2335 
2336  for(stimulus & s : stimuli) {
2337  if(is_current(s.phys.type) && s.phys.total_current) {
2338  // extracellular current injection
2339  if (s.phys.type == I_ex) {
2340  // compute affected volume in um^3
2341  SF_real vol = get_volume_from_nodes(mass_e, s.electrode.vertices);
2342 
2343  // s.pulse.strength holds the total current in uA; compute the current density in uA/cm^3.
2344  // In theory we would not need to scale the volume to cm^3 here, since we later
2345  // multiply by the mass matrix and obtain um^3 * uA/um^3 = uA.
2346  // However, for I_ex there is an additional um^3 to cm^3 scaling in phys.scale,
2347  // since I_e is expected to be in uA/cm^3, so we compensate for that to arrive at uA later.
2348  float scale = 1.e12/vol;
2349 
2350  s.pulse.strength *= scale;
2351 
2352  log_msg(logger,0,ECHO,
2353  "%s [Stimulus %d]: current density scaled to %.4g uA/cm^3\n",
2354  s.name.c_str(), s.idx, s.pulse.strength);
2355  }
2356  else if (s.phys.type == I_tm) {
2357  // compute affected volume in um^3
2358  SF_real vol = get_volume_from_nodes(mass_i, s.electrode.vertices);
2359  const sf_mesh & imesh = get_mesh(intra_elec_msh);
2360  const SF::vector<mesh_int_t> & alg_nod = imesh.pl.algebraic_nodes();
2361 
2362  if(alg_idx_map.size() == 0) {
2363  mesh_int_t lidx = 0;
2364  for(mesh_int_t n : alg_nod) {
2365  alg_idx_map[n] = lidx;
2366  lidx++;
2367  }
2368  }
2369 
2370  SF_real surf = 0.0;
2371  for(mesh_int_t n : s.electrode.vertices) {
2372  if(alg_idx_map.count(n)) {
2373  mesh_int_t lidx = alg_idx_map[n];
2374  int r = miif->IIFmask[lidx];
2375  // surf = vol*beta [1/um], surf is in [um^2]
2376  surf = vol * miif->IIF[r]->cgeom().SVratio * param_globals::imp_region[r].volFrac;
2377  //convert to cm^2
2378  surf /= 1.e8;
2379  break;
2380  }
2381  }
2382  surf = get_global(surf, MPI_MAX, PETSC_COMM_WORLD);
2383 
2384  // scale surface density now to result in correct total current
2385  s.pulse.strength /= surf;
2386  log_msg(logger, 0, ECHO,
2387  "%s [Stimulus %d]: current density scaled to %.4g uA/cm^2\n",
2388  s.name.c_str(), s.idx, s.pulse.strength);
2389  }
2390  }
2391  }
2392 }
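// Unit bookkeeping for the two scalings above: node volumes from the mass
// matrix come in um^3 and 1 cm^3 = 1e12 um^3, so for I_ex a total current of
// I uA over a volume V um^3 becomes a density of I * 1e12 / V uA/cm^3 (e.g.
// V = 2e6 um^3 and I = 1 uA give 5e5 uA/cm^3). For I_tm the membrane surface
// is surf = V * SVratio * volFrac um^2 and 1 cm^2 = 1e8 um^2, hence the
// division by 1e8 before forming the density I / surf in uA/cm^2.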
2393 
2394 
2395 
2396 } // namespace opencarp
Definition: fem_types.h:53