openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
electrics.cc
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // openCARP is an open cardiac electrophysiology simulator.
3 //
4 // Copyright (C) 2020 openCARP project
5 //
6 // This program is licensed under the openCARP Academic Public License (APL)
7 // v1.0: You can use and redistribute it and/or modify it in non-commercial
8 // academic environments under the terms of APL as published by the openCARP
9 // project v1.0, or (at your option) any later version. Commercial use requires
10 // a commercial license (info@opencarp.org).
11 //
12 // This program is distributed without any warranty; see the openCARP APL for
13 // more details.
14 //
15 // You should have received a copy of the openCARP APL along with this program
16 // and can find it online: http://www.opencarp.org/license
17 // ----------------------------------------------------------------------------
18 
26 #include "electrics.h"
27 #include "petsc_utils.h"
28 #include "timers.h"
29 #include "stimulate.h"
30 #include "electric_integrators.h"

#include <vector> // std::vector<double> in get_kappa()
31 
32 #include "SF_init.h" // for SF::init_xxx()
33 
34 #ifdef WITH_CALIPER
35 #include "caliper/cali.h"
36 #else
37 #include "caliper_hooks.h"
38 #endif
39 
40 
41 namespace opencarp {
42 
// NOTE(review): body of the Electrics initialization routine. The signature
// line is elided in this listing (the embedded line numbering jumps), as are
// several statements below; each gap is marked. Setup order matters: stimuli
// must exist before the solvers (boundary-condition info), and electrode
// balancing needs the solver matrices.
44 {
46  double t1, t2;
47  get_time(t1);
48 
49  set_dir(OUTPUT);
50 
51  // open logger
// NOTE(review): experiment == 4 opens the log read-only — presumably the
// post-processing mode; confirm against param_globals::experiment docs.
52  logger = f_open("electrics.log", param_globals::experiment != 4 ? "w" : "r");
53 
54  // setup mappings between extra and intra grids, algebraic and nodal,
55  // and between PETSc and canonical orderings
56  setup_mappings();
57 
58  // the ionic physics is currently triggered from inside the Electrics to have tighter
59  // control over it
60  ion.logger = logger;
61  ion.initialize();
62 
63  // set up Intracellular tissue
// NOTE(review): source line 64 elided in this listing
65  region_mask(intra_elec_msh, mtype[intra_grid].regions, mtype[intra_grid].regionIDs, true, "gregion_i");
66 
67  if (param_globals::bidomain || param_globals::extracell_monodomain_stim) {
68  // set up Extracellular tissue
// NOTE(review): source line 69 elided in this listing
70  region_mask(extra_elec_msh, mtype[extra_grid].regions, mtype[extra_grid].regionIDs, true, "gregion_e");
71  }
72 
73  // add electrics timer for time stepping, add to time stepper tool (TS)
74  double global_time = user_globals::tm_manager->time;
75  timer_idx = user_globals::tm_manager->add_eq_timer(global_time, param_globals::tend, 0,
76  param_globals::dt, 0, "elec::ref_dt", "TS");
77 
78  // electrics stimuli setup
79  setup_stimuli();
80 
81  // set up the linear equation systems. this needs to happen after the stimuli have been
82  // set up, since we need boundary condition info
83  setup_solvers();
84 
85  // the next setup steps require the solvers to be set up, since they use the matrices
86  // generated by those
87 
88  // balance electrodes, we may need the extracellular mass matrix
89  balance_electrodes();
90  // total current scaling
// NOTE(review): statements for current scaling and LAT-detector init
// (source lines 91 and 93) elided in this listing
92  // initialize the LATs detector
94 
95  // initialize phie recovery data
96  if(strlen(param_globals::phie_rec_ptf) > 0)
// NOTE(review): the phie recovery setup call (source line 97) elided here
98 
99  // prepare the electrics output. we skip it if we do post-processing
100  if(param_globals::experiment != EXP_POSTPROCESS)
101  setup_output();
102 
103  if (param_globals::prepacing_bcl > 0)
104  prepace();
105  this->initialize_time += timing(t2, t1);
106 }
107 
// NOTE(review): per-grid conductivity/material setup body — the signature is
// elided in this listing, but the code reads grid index `g` (intra/extra) and
// fills mtype[g] from the param_globals::gregion definitions. Also loads an
// optional per-element conductivity scaling vector from file.
109 {
110  MaterialType *m = mtype+g;
111 
112  // initialize random conductivity fluctuation structure with PrM values
113  m->regions.resize(param_globals::num_gregions);
114 
115  const char* grid_name = g == Electrics::intra_grid ? "intracellular" : "extracellular";
116  log_msg(logger, 0, 0, "Setting up %s tissue properties for %d regions ..", grid_name,
117  param_globals::num_gregions);
118 
119  char buf[64];
120  RegionSpecs* reg = m->regions.data();
121 
122  for (size_t i=0; i<m->regions.size(); i++, reg++) {
// give unnamed regions a generated name so later log output stays readable
123  if(!strcmp(param_globals::gregion[i].name, "")) {
124  snprintf(buf, sizeof buf, ", gregion_%d", int(i));
125  param_globals::gregion[i].name = dupstr(buf);
126  }
127 
128  reg->regname = strdup(param_globals::gregion[i].name);
129  reg->regID = i;
130  reg->nsubregs = param_globals::gregion[i].num_IDs;
131  if(!reg->nsubregs)
132  reg->subregtags = NULL;
133  else {
134  reg->subregtags = new int[reg->nsubregs];
135  for (int j=0;j<reg->nsubregs;j++) {
136  reg->subregtags[j] = param_globals::gregion[i].ID[j];
// -1 is the unset sentinel in the gregion ID list
137  if(reg->subregtags[j]==-1)
138  log_msg(NULL,3,ECHO, "Warning: not all %u IDs provided for gregion[%u]!\n", reg->nsubregs, i);
139  }
140  }
141 
142  // describe material in given region
143  elecMaterial *emat = new elecMaterial();
144  emat->material_type = ElecMat;
145 
// longitudinal / transverse / normal intracellular conductivities
146  emat->InVal[0] = param_globals::gregion[i].g_il;
147  emat->InVal[1] = param_globals::gregion[i].g_it;
148  emat->InVal[2] = param_globals::gregion[i].g_in;
149 
150  emat->ExVal[0] = param_globals::gregion[i].g_el;
151  emat->ExVal[1] = param_globals::gregion[i].g_et;
152  emat->ExVal[2] = param_globals::gregion[i].g_en;
153 
// bath conductivity is isotropic: same value for all three directions
154  emat->BathVal[0] = param_globals::gregion[i].g_bath;
155  emat->BathVal[1] = param_globals::gregion[i].g_bath;
156  emat->BathVal[2] = param_globals::gregion[i].g_bath;
157 
158  // convert units from S/m -> mS/um
159  for (int j=0; j<3; j++) {
160  emat->InVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
161  emat->ExVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
162  emat->BathVal[j] *= 1e-3 * param_globals::gregion[i].g_mult;
163  }
164  reg->material = emat;
165  }
166 
// optional element-wise conductivity scaling read from an ASCII file
167  if((g == Electrics::intra_grid && strlen(param_globals::gi_scale_vec)) ||
168  (g == Electrics::extra_grid && strlen(param_globals::ge_scale_vec)) )
169  {
// NOTE(review): declaration of the mesh type `mt` (source line 170) elided
171  sf_mesh & mesh = get_mesh(mt);
172  const char* file = g == Electrics::intra_grid ? param_globals::gi_scale_vec : param_globals::ge_scale_vec;
173 
174  size_t num_file_entries = SF::root_count_ascii_lines(file, mesh.comm);
175 
176  if(num_file_entries != mesh.g_numelem)
177  log_msg(0,4,0, "%s warning: number of %s conductivity scaling entries does not match number of elements!",
178  __func__, get_mesh_type_name(mt));
179 
180  // set up parallel element vector and read data
181  sf_vec *escale;
182  SF::init_vector(&escale, get_mesh(mt), 1, sf_vec::elemwise);
183  escale->read_ascii(g == Electrics::intra_grid ? param_globals::gi_scale_vec : param_globals::ge_scale_vec);
184 
185  if(get_size() > 1) {
186  // set up element vector permutation and permute
187  if(get_permutation(mt, ELEM_PETSC_TO_CANONICAL, 1) == NULL) {
// NOTE(review): permutation-registration statement (source line 188) elided
189  }
// NOTE(review): declaration of the permutation functor `sc` (line 190) elided
191  sc(*escale, false);
192  }
193 
194  // copy data into SF::vector
195  SF_real* p = escale->ptr();
196  m->el_scale.assign(p, p + escale->lsize());
197  escale->release_ptr(p);
198  }
199 }
200 
// Set up the index mappings used by the electrics: algebraic<->nodal
// scatterings, PETSc<->canonical permutations, and the intra-to-extra grid
// scattering. NOTE(review): several registration calls are elided in this
// listing (the embedded numbering jumps); the remaining log messages show
// which mapping each elided call created.
201 void Electrics::setup_mappings()
202 {
// NOTE(review): source line 203 elided
204  bool intra_exits = mesh_is_registered(intra_elec_msh), extra_exists = mesh_is_registered(extra_elec_msh);
205  assert(intra_exits);
206  const int dpn = 1;
207 
208  // It may be that another physic (e.g. ionic models) has already computed the intracellular mappings,
209  // thus we first test their existence
210  if(get_scattering(intra_elec_msh, ALG_TO_NODAL, dpn) == NULL) {
211  log_msg(logger, 0, 0, "%s: Setting up intracellular algebraic-to-nodal scattering.", __func__);
// NOTE(review): scattering-registration call (source line 212) elided
213  }
// NOTE(review): the matching `if(...) {` (source line 214) elided
215  log_msg(logger, 0, 0, "%s: Setting up intracellular PETSc to canonical permutation.", __func__);
// NOTE(review): permutation-registration call (source line 216) elided
217  }
218 
219  // extracellular mappings
220  if(extra_exists) {
221  log_msg(logger, 0, 0, "%s: Setting up extracellular algebraic-to-nodal scattering.", __func__);
// NOTE(review): registration calls (source lines 222, 224, 226) elided
223  log_msg(logger, 0, 0, "%s: Setting up extracellular PETSc to canonical permutation.", __func__);
225  log_msg(logger, 0, 0, "%s: Setting up intra-to-extra scattering.", __func__);
227  }
228 
// self-test of the intra-to-extra mapping; disabled by default
229  bool check_i2e = false;
230  if(check_i2e && extra_exists) {
231  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
232  sf_mesh & extra_mesh = get_mesh(extra_elec_msh);
233  int rank = get_rank();
234 
// NOTE(review): retrieval of the i2e scattering (source line 235) elided;
// `i2e` is used below
236 
237  const SF::vector<mesh_int_t> & intra_alg_nod = intra_mesh.pl.algebraic_nodes();
238  const SF::vector<mesh_int_t> & extra_alg_nod = extra_mesh.pl.algebraic_nodes();
239  const SF::vector<mesh_int_t> & extra_petsc_nbr = extra_mesh.get_numbering(SF::NBR_PETSC);
240  const SF::vector<mesh_int_t> & intra_ref_nbr = intra_mesh.get_numbering(SF::NBR_REF);
241  const SF::vector<mesh_int_t> & extra_ref_nbr = extra_mesh.get_numbering(SF::NBR_REF);
242 
243  // TODO(init) : delete these three at the end of this section?
244  sf_vec *intra_testvec; SF::init_vector(&intra_testvec, intra_mesh, 1, sf_vec::algebraic);
245  sf_vec *extra_testvec; SF::init_vector(&extra_testvec, extra_mesh, 1, sf_vec::algebraic);
246  sf_vec *i2e_testvec; SF::init_vector(&i2e_testvec, extra_mesh, 1, sf_vec::algebraic);
247 
// fill the intra test vector with the reference numbering of each node
248  SF_real* id = intra_testvec->ptr();
249  for(size_t i=0; i<intra_alg_nod.size(); i++) {
250  int lpidx = local_nodal_to_local_petsc(intra_mesh, rank, intra_alg_nod[i]);
251  id[lpidx] = intra_ref_nbr[intra_alg_nod[i]];
252  }
253  intra_testvec->release_ptr(id);
254 
// same for the extra grid: reference numbering per algebraic node
255  SF_real* ed = extra_testvec->ptr();
256  for(size_t i=0; i<extra_alg_nod.size(); i++) {
257  int lpidx = local_nodal_to_local_petsc(extra_mesh, rank, extra_alg_nod[i]);
258  ed[lpidx] = extra_ref_nbr[extra_alg_nod[i]];
259  }
260  extra_testvec->release_ptr(ed);
261 
// forward-scatter intra numbering onto the extra grid; -1 marks nodes
// that receive no intra contribution
262  i2e_testvec->set(-1.0);
263  i2e.forward(*intra_testvec, *i2e_testvec);
264 
// every scattered entry must match the extra grid's own reference number
265  int err = 0;
266  for(size_t i=0; i<extra_alg_nod.size(); i++) {
267  auto id = i2e_testvec->get(i);
268  auto ed = extra_testvec->get(i);
269  if(id > -1 && id != ed)
270  err++;
271  }
272 
273  if(get_global(err, MPI_SUM))
274  log_msg(0,5,0, "Electrics mapping test failed!");
275  else
276  log_msg(0,5,0, "Electrics mapping test succeeded!");
277  }
278 }
279 
// NOTE(review): body of the per-timestep electrics compute routine — the
// signature is elided in this listing, as are the actual solver calls
// (gaps are marked). Order per step: checkpoint, LAT checks, extracellular
// stimuli/DBCs, elliptic solve (bidomain), clamp, ionics, intracellular
// stimuli, parabolic solve, clamp, console stats.
281 {
283  double t1, t2;
284  get_time(t1);
285 
286  // if requested, we checkpoint the current state
287  checkpointing();
288 
289  // activation checking
290  const double time = user_globals::tm_manager->time,
291  time_step = user_globals::tm_manager->time_step;
292  lat.check_acts(time);
293  lat.check_quiescence(time, time_step);
294 
295  // I believe that we need to treat the stimuli in two ways:
296  // - Extracellular potential stimuli (this includes ground) affect the
297  // elliptic solver in a more delicate way, as such, there is a dbc_manager
298  // to take care of that.
299  // - Extracellular and Intracellular current stimuli are applied to the rhs vectors
300  // and can be managed by the stimulate() code directly.
301  stimulate_extracellular();
302 
303 if(param_globals::bidomain == BIDOMAIN)
// NOTE(review): the elliptic solve call (source line 304) elided here
305 
306  clamp_Vm();
307 
308  // compute ionics update
309  ion.compute_step();
310 
311  stimulate_intracellular();
312 
313  // store Vm before parabolic step, the full Ic we compute in the output step
314  if(param_globals::dump_data & DUMP_IC)
// NOTE(review): the Vm copy statement (source line 315) elided here
316 
317  // solver parabolic system
// NOTE(review): the parabolic solve call (source line 318) elided here
319 
320  clamp_Vm();
321 
322  if(user_globals::tm_manager->trigger(iotm_console)) {
323  // output lin solver stats
// NOTE(review): stats logging statements (source lines 324, 326) elided
325  if(param_globals::bidomain == BIDOMAIN)
327  }
328  this->compute_time += timing(t2, t1);
329 
330  // since the traces have their own timing, we check for trace dumps in the compute step loop
// NOTE(review): trace dump statements (source lines 331-332) elided
333 
334 }
335 
// NOTE(review): body of the electrics output routine — the signature is
// elided in this listing, as are several output/dump statements (gaps are
// marked). Computes derived quantities (Ic, phie recovery) and pushes the
// registered vectors through the output manager.
337 {
339  double t1, t2;
340  get_time(t1);
341 
342  const double time = user_globals::tm_manager->time,
343  time_step = user_globals::tm_manager->time_step;
344 
345  // for pseudo-bidomain we compute extracellular potential only for output
346  if(param_globals::bidomain == PSEUDO_BIDM) {
// NOTE(review): the elliptic solve statements (source lines 347-348) elided
349  ellip_solver.stats.log_stats(time, false);
350  }
351 
352  if(param_globals::dump_data & DUMP_IVOL)
// NOTE(review): the Ivol computation (source line 353) elided here
354 
355  if(param_globals::bidomain && (param_globals::dump_data & DUMP_IACT)) {
// NOTE(review): the Iact computation (source line 356) elided here
357  }
358 
359  if(param_globals::dump_data & DUMP_IC) {
360  PetscReal *Ic = parab_solver.Ic->ptr(), *Vmv = parab_solver.Vmv->ptr();
361 
// capacitive current from the Vm delta over the last parabolic step:
// Ic holds the pre-step Vm at this point (stored in the compute step)
362  for(PetscInt i=0; i < parab_solver.Ic->lsize(); i++)
363  Ic[i] = (Ic[i] - Vmv[i]) / (-time_step);
364 
// NOTE(review): pointer release statement(s) (source line 365) elided
366  }
367 
368  // recover phie
369  if(phie_rcv.pts.size()) {
// NOTE(review): the phie recovery call (source line 370) elided here
371  }
372 
// NOTE(review): the output-manager write call (source lines 373-374) elided
375 
376  double curtime = timing(t2, t1);
377  this->output_time += curtime;
378 
379  IO_stats.calls++;
380  IO_stats.tot_time += curtime;
381 
// NOTE(review): source line 382 elided
383  IO_stats.log_stats(time, false);
384 }
385 
// NOTE(review): body of the electrics teardown routine — the signature is
// elided in this listing, as are the LAT-output and file-closing statements
// (gaps are marked). Releases logger and ionics resources.
390 {
392  // output LAT data
// NOTE(review): the LAT output call (source lines 391/393) elided here
394 
395  // close logger
396  f_close(logger);
397 
398  // close output files
// NOTE(review): the output-close call (source line 399) elided here
400 
401  // destroy ionics
402  ion.destroy();
403 }
404 
// Make stimulus `balance_to` the balanced counterpart of `balance_from`:
// it copies pulse/protocol/physics from the source and inverts the strength.
// For extracellular current stimuli that are not defined as total current,
// the strength is additionally scaled by the electrode volume ratio so the
// same total current enters and leaves the tissue.
405 void balance_electrode(elliptic_solver & ellip, SF::vector<stimulus> & stimuli, int balance_from, int balance_to)
406 {
407  log_msg( NULL, 0, 0, "Balancing stimulus %d with %d %s-wise.",balance_from, balance_to,
408  is_current(stimuli[balance_from].phys.type) ? "current" : "voltage" );
409 
410  stimulus & from = stimuli[balance_from];
411  stimulus & to = stimuli[balance_to];
412 
// the balanced electrode mirrors the source with opposite polarity
413  to.pulse = from.pulse;
414  to.ptcl = from.ptcl;
415  to.phys = from.phys;
416  to.pulse.strength *= -1.0;
417 
418  if (from.phys.type == I_ex)
419  {
420  // if from is total current, skip volume based adjustment of strength
421  // otherwise, calling constant_total_stimulus_current() will undo the balanced scaling of to.pulse.strength
422  // constant_total_stimulus_current() will do the scaling based on the volume
423  if (!from.phys.total_current) {
// electrode volumes are integrated via the extracellular mass matrix
424  sf_mat& mass = *ellip.mass_e;
425  SF_real vol0 = get_volume_from_nodes(mass, from.electrode.vertices);
// NOTE(review): declaration of `vol1` (source line 426, presumably the
// volume of the `to` electrode) elided in this listing
427 
428  to.pulse.strength *= fabs(vol0 / vol1);
429  }
430  }
431 }
432 
433 void Electrics::balance_electrodes()
434 {
435  for(int i=0; i<param_globals::num_stim; i++) {
436  if(param_globals::stim[i].crct.balance != -1) {
437  int from = param_globals::stim[i].crct.balance;
438  int to = i;
439 
440  balance_electrode(this->ellip_solver, stimuli, from, to);
441  }
442  }
443 }
444 
// Build the stimulus list from the param_globals::stim definitions:
// translate legacy parameters, set up electrode geometry, and optionally
// dump vertex lists and pulse traces.
445 void Electrics::setup_stimuli()
446 {
447  // initialize basic stim info data (used units, supported types, etc)
448  init_stim_info();
449 
450  stimuli.resize(param_globals::num_stim);
451  for(int i=0; i<param_globals::num_stim; i++)
452  {
453  // construct new stimulus
454  stimulus & s = stimuli[i];
// NOTE(review): source lines 455-456 elided in this listing
// translate legacy/deprecated stimulus options, then build the stimulus
457  s.translate(i);
458 
459  s.setup(i);
460 
461  if (s.electrode.dump_vtx)
462  s.dump_vtx_file(i);
463 
// pulse trace is written by rank 0 only
464  if(param_globals::stim[i].pulse.dumpTrace && get_rank() == 0) {
465  set_dir(OUTPUT);
466  s.pulse.wave.write_trace(s.name+".trc");
467  }
468  }
469 }
470 
471 void apply_stim_to_vector(const stimulus & s, sf_vec & vec, bool add)
472 {
473  double val; s.value(val);
474  const SF::vector<mesh_int_t> & idx = s.electrode.vertices;
475  const int rank = get_rank();
476  SF::vector<mesh_int_t> local_idx = idx;
477  for (size_t i = 0; i < idx.size(); i++) {
478  local_idx[i] = local_nodal_to_local_petsc(*vec.mesh, rank, idx[i]);
479  }
480  vec.set(local_idx, val, add, true);
481 }
482 
// Apply all currently active intracellular-side stimuli.
// - I_tm (transmembrane current): with operator splitting the stimulus is
//   added directly to Vm; without it, it is folded into the parabolic rhs
//   together with the ionic current, scaled by dt/Cm.
// - Illum: writes the illumination field into the ionics' global vector.
// The vector-arithmetic order below is significant; do not reorder.
483 void Electrics::stimulate_intracellular()
484 {
485  parabolic_solver & ps = parab_solver;
486 
487  // iterate over stimuli
488  for(stimulus & s : stimuli) {
489  if(s.is_active()) {
490  // for active stimuli, deal with the stimuli-type specific stimulus application
491  switch(s.phys.type)
492  {
493  case I_tm: {
494  if(param_globals::operator_splitting) {
// operator splitting: inject the stimulus directly into Vm
495  apply_stim_to_vector(s, *ps.Vmv, true);
496  }
497  else {
// no splitting: build the rhs contribution sc*(IIon - Istim)
// NOTE(review): Cm is fixed to 1 here — presumably the membrane
// capacitance is normalized elsewhere; confirm
498  SF_real Cm = 1.0;
499  timer_manager & tm = *user_globals::tm_manager;
500  SF_real sc = tm.time_step / Cm;
501 
502  ps.Irhs->set(0.0);
503  apply_stim_to_vector(s, *ps.Irhs, true);
504 
505  *ps.tmp_i1 = *ps.IIon;
506  *ps.tmp_i1 -= *ps.Irhs;
507  *ps.tmp_i1 *= sc; // tmp_i1 = sc * (IIon - Irhs)
508 
509  // add ionic, transmembrane and intracellular currents to rhs
// implicit schemes need the mass-matrix weighting; explicit does not
510  if(param_globals::parab_solve != parabolic_solver::EXPLICIT)
511  ps.mass_i->mult(*ps.tmp_i1, *ps.Irhs);
512  else
513  *ps.Irhs = *ps.tmp_i1;
514  }
515  break;
516  }
517 
518  case Illum: {
519  sf_vec* illum_vec = ion.miif->gdata[limpet::illum];
520 
521  if(illum_vec == NULL) {
522  log_msg(0,5,0, "Cannot apply illumination stim: global vector not present!");
523  EXIT(EXIT_FAILURE);
524  } else {
// overwrite (add == false): illumination is a state, not a current
525  apply_stim_to_vector(s, *illum_vec, false);
526  }
527 
528  break;
529  }
530 
// other stimulus types are handled elsewhere (e.g. extracellular side)
531  default: break;
532  }
533  }
534  }
535 }
536 
// Apply all active Vm-clamp stimuli to the transmembrane voltage.
// NOTE(review): the clamp action itself (source line 540, the body of the
// if below) is elided in this listing.
537 void Electrics::clamp_Vm() {
538  for(stimulus & s : stimuli) {
539  if(s.phys.type == Vm_clmp && s.is_active())
541  }
542 }
543 
// Apply extracellular stimuli (bidomain only): refresh the Dirichlet
// boundary state when the DBC layout changed, then accumulate all active
// extracellular current stimuli into the elliptic rhs (phiesrc).
// NOTE(review): several statements are elided in this listing (gaps marked).
544 void Electrics::stimulate_extracellular()
545 {
546  if(param_globals::bidomain) {
547  // we check if the DBC layout changed, if so we recompute the matrix and the dbc_manager
548  bool dbcs_have_updated = ellip_solver.dbc != nullptr && ellip_solver.dbc->dbc_update();
// NOTE(review): declaration of `time_not_final` (source line 549) elided
550 
551  if(dbcs_have_updated && time_not_final)
// NOTE(review): the matrix/dbc rebuild call (source line 552) elided
553 
554  ellip_solver.phiesrc->set(0.0);
555 
556  for(const stimulus & s : stimuli) {
557  if(s.is_active() && s.phys.type == I_ex)
// NOTE(review): the apply_stim_to_vector call (source line 558) elided
559  }
560  }
561 }
562 
// NOTE(review): the first and last signature lines (source lines 563/565)
// are elided in this listing — the function takes a mesh, a global index
// list `inp_idx`, and an output list `idx`. Every rank broadcasts its input
// indices; each rank keeps those falling into its own slice of the
// algebraic (PETSc) layout, converted to local offsets, sorted and deduped.
564  SF::vector<mesh_int_t> & inp_idx,
566 {
567  int mpi_rank = get_rank(), mpi_size = get_size();
568  const SF::vector<mesh_int_t> & layout = mesh.pl.algebraic_layout();
569 
570  SF::vector<mesh_int_t> sndbuff;
571 
572  size_t buffsize = 0;
573  idx.resize(0);
574 
// round-robin broadcast: every rank contributes its input index set once
575  for(int pid=0; pid < mpi_size; pid++) {
576  if(mpi_rank == pid) {
577  sndbuff = inp_idx;
578  buffsize = sndbuff.size();
579  }
580 
// size first, then payload (raw bytes, so mesh_int_t width is irrelevant)
581  MPI_Bcast(&buffsize, sizeof(size_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
582  sndbuff.resize(buffsize);
583  MPI_Bcast(sndbuff.data(), buffsize*sizeof(mesh_int_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
584 
// [start, stop) is this rank's slice of the algebraic layout
585  mesh_int_t start = layout[mpi_rank], stop = layout[mpi_rank+1];
586 
587  for(mesh_int_t i : sndbuff) {
588  if(i >= start && i < stop)
589  idx.push_back(i - start);
590  }
591  }
592 
593  binary_sort(idx); unique_resize(idx);
594 }
595 
// NOTE(review): async variant of compute_restr_idx — signature lines
// (source lines 596/598) and the declarations of the numbering `nbr` and
// the lookup map `amap` (source lines 602/604) are elided in this listing.
// Each rank maps the broadcast global indices back to its own algebraic
// node indices via `amap`, then sorts and dedupes.
597  SF::vector<mesh_int_t> & inp_idx,
599 {
600  int mpi_rank = get_rank(), mpi_size = get_size();
601  const SF::vector<mesh_int_t> & alg_nod = mesh.pl.algebraic_nodes();
603 
// build reverse map: global numbering -> local algebraic node index
605  for(mesh_int_t ii : alg_nod)
606  amap[nbr[ii]] = ii;
607 
608  SF::vector<mesh_int_t> sndbuff;
609  size_t buffsize = 0;
610  idx.resize(0);
611 
// round-robin broadcast of every rank's input index set
612  for(int pid=0; pid < mpi_size; pid++) {
613  if(mpi_rank == pid) {
614  sndbuff = inp_idx;
615  buffsize = sndbuff.size();
616  }
617 
618  MPI_Bcast(&buffsize, sizeof(size_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
619  sndbuff.resize(buffsize);
620  MPI_Bcast(sndbuff.data(), buffsize*sizeof(mesh_int_t), MPI_BYTE, pid, PETSC_COMM_WORLD);
621 
// keep only indices that are algebraic nodes on this rank
622  for(mesh_int_t i : sndbuff) {
623  if(amap.count(i))
624  idx.push_back(amap[i]);
625  }
626  }
627 
628  binary_sort(idx); unique_resize(idx);
629 }
630 
// Build the output restriction index set `restr` for a grid, according to
// the dataout mode: full volume (no restriction), surface nodes, or an
// explicit vertex list read from file. `async` selects the numbering the
// restriction must be expressed in (see comment below).
631 void setup_dataout(const int dataout, std::string dataout_vtx, mesh_t grid,
632  SF::vector<mesh_int_t>* & restr, bool async)
633 {
634  sf_mesh & mesh = get_mesh(grid);
635 
636  switch(dataout) {
637 
638  case DATAOUT_SURF: {
// derive the restriction from the surface of the mesh
639  sf_mesh surfmesh;
640  compute_surface_mesh(mesh, SF::NBR_SUBMESH, surfmesh);
641 
// surface connectivity -> unique sorted node set
642  SF::vector<mesh_int_t> idxbuff(surfmesh.con);
643  binary_sort(idxbuff); unique_resize(idxbuff);
644 
645  restr = new SF::vector<mesh_int_t>();
646 
647  // for sync output, we need restr to hold the local indices in the petsc vectors
648  // that have been permuted to canonical numbering. For async, we need the
649  // non-overlapping decomposition of indices in NBR_SUBMESH numbering. The petsc indices will be
650  // computed at a later stage. The only reason we need to call compute_restr_idx_async,
651  // is that surface nodes in NBR_SUBMESH, may
652  // reside on partitions where they are not part of the algebraic nodes. thus we need to
653  // recommunicate to make sure the data layout is correct. We do not have this problem for
654  // DATAOUT_VTX.
655  if(!async)
656  compute_restr_idx(mesh, idxbuff, *restr);
657  else
658  compute_restr_idx_async(mesh, idxbuff, *restr);
659 
660  break;
661  }
662 
663  case DATAOUT_VTX: {
// restriction comes from a user-provided vertex file
664  SF::vector<mesh_int_t> idxbuff;
665 
666  update_cwd();
667 
668  set_dir(INPUT);
669  read_indices(idxbuff, dataout_vtx, mesh, SF::NBR_REF, true, PETSC_COMM_WORLD);
670  set_dir(CURDIR);
671 
672  restr = new SF::vector<mesh_int_t>();
673 
674  if(!async) {
// NOTE(review): declaration of the numbering `nbr` (source line 675,
// presumably the PETSc numbering) elided in this listing
676  for(mesh_int_t & i : idxbuff) i = nbr[i];
677 
678  compute_restr_idx(mesh, idxbuff, *restr);
679  } else {
680  *restr = idxbuff;
681  }
682 
683  break;
684  }
685 
// no restriction: full-volume or disabled output
686  case DATAOUT_NONE:
687  case DATAOUT_VOL:
688  default: break;
689  }
690 }
691 
// Register all electrics output vectors (Vm, phie, current dumps, phie
// recovery) with the output manager, restricted by the dataout settings,
// and open ionics traces. NOTE(review): one statement (source line 728)
// is elided in this listing.
692 void Electrics::setup_output()
693 {
694  int rank = get_rank();
695  SF::vector<mesh_int_t>* restr_i = NULL;
696  SF::vector<mesh_int_t>* restr_e = NULL;
697  set_dir(OUTPUT);
698 
// num_io_nodes > 0 selects asynchronous IO via dedicated IO nodes
699  setup_dataout(param_globals::dataout_i, param_globals::dataout_i_vtx, intra_elec_msh,
700  restr_i, param_globals::num_io_nodes > 0);
701 
702  if(param_globals::dataout_i)
703  output_manager.register_output(parab_solver.Vmv, intra_elec_msh, 1, param_globals::vofile, "mV", restr_i);
704 
705  if(param_globals::bidomain) {
706  setup_dataout(param_globals::dataout_e, param_globals::dataout_e_vtx, extra_elec_msh,
707  restr_e, param_globals::num_io_nodes > 0);
708 
// phie_i lives on the intra grid, phie on the extra grid
709  if(param_globals::dataout_i)
710  output_manager.register_output(ellip_solver.phie_i, intra_elec_msh, 1, param_globals::phieifile, "mV", restr_i);
711  if(param_globals::dataout_e)
712  output_manager.register_output(ellip_solver.phie, extra_elec_msh, 1, param_globals::phiefile, "mV", restr_e);
713  }
714 
// optional current dumps selected via the dump_data bitmask
715  if(param_globals::dump_data & DUMP_IC) {
716  output_manager.register_output(parab_solver.Ic, intra_elec_msh, 1, "Ic.igb", "uA/cm^2", restr_i);
717  output_manager.register_output(parab_solver.IIon, intra_elec_msh, 1, "Iion.igb","uA/cm^2", restr_i);
718  }
719 
720  if(param_globals::dump_data & DUMP_IVOL)
721  output_manager.register_output(parab_solver.Ivol, intra_elec_msh, 1, "Ivol.igb", "uA", restr_i);
722  if(param_globals::dump_data & DUMP_IACT)
723  output_manager.register_output(parab_solver.Iact, intra_elec_msh, 1, "Iact.igb", "uA", restr_i);
724 
725  if(phie_rcv.pts.size())
726  output_manager.register_output_sync(phie_rcv.phie_rec, phie_recv_msh, 1, param_globals::phie_recovery_file, "mV");
727 
// NOTE(review): source line 728 elided in this listing
729 
730  if(param_globals::num_trace) {
731  sf_mesh & imesh = get_mesh(intra_elec_msh);
732  open_trace(ion.miif, param_globals::num_trace, param_globals::trace_node, NULL, &imesh);
733  }
734 
735  // initialize generic logger for IO timings per time_dt
736  IO_stats.init_logger("IO_stats.dat");
737 }
738 
739 void Electrics::dump_matrices()
740 {
741  std::string bsname = param_globals::dump_basename;
742  std::string fn;
743 
744  set_dir(OUTPUT);
745 
746  // dump monodomain matrices
747  if ( param_globals::parab_solve==1 ) {
748  // using Crank-Nicolson
749  fn = bsname + "_Ki_CN.bin";
750  parab_solver.lhs_parab->write(fn.c_str());
751  }
752  fn = bsname + "_Ki.bin";
753  parab_solver.rhs_parab->write(fn.c_str());
754 
755  fn = bsname + "_Mi.bin";
756  parab_solver.mass_i->write(fn.c_str());
757 
758  if ( param_globals::bidomain ) {
759  fn = bsname + "_Kie.bin";
760  ellip_solver.phie_mat->write(fn.c_str());
761 
762  fn = bsname + "_Me.bin";
763  ellip_solver.mass_e->write(fn.c_str());
764  }
765 }
766 
767 
770 double Electrics::timer_val(const int timer_id)
771 {
772  // determine
773  int sidx = stimidx_from_timeridx(stimuli, timer_id);
774  double val = 0.0;
775  if(sidx != -1) {
776  stimuli[sidx].value(val);
777  }
778  else
779  val = std::nan("NaN");
780 
781  return val;
782 }
783 
786 std::string Electrics::timer_unit(const int timer_id)
787 {
788  int sidx = stimidx_from_timeridx(stimuli, timer_id);
789  std::string s_unit;
790 
791  if(sidx != -1)
792  // found a timer-linked stimulus
793  s_unit = stimuli[sidx].pulse.wave.f_unit;
794 
795  return s_unit;
796 }
797 
800 int stimidx_from_timeridx(const SF::vector<stimulus> & stimuli, const int timer_id)
801 {
802  // the only electrical quantities linked to a timer are stimuli
803  // thus we search for timer links only among stimuli for now
804 
805  // iterate over stimuli
806  for(size_t i = 0; i<stimuli.size(); i++)
807  {
808  const stimulus & s = stimuli[i];
809 
810  if(s.ptcl.timer_id == timer_id)
811  return s.idx;
812  }
813 
814  // invalid timer index not linked to any stimulus
815  return -1;
816 }
817 
828 void get_kappa(sf_vec & kappa, IMPregion *ir, limpet::MULTI_IF & miif, double k)
829 {
830  double* reg_kappa = new double[miif.N_IIF];
831 
832  for(int i=0; i<miif.N_IIF; i++)
833  reg_kappa[i] = k * miif.IIF[i]->cgeom().SVratio * ir[i].volFrac;
834 
835  double *kd = kappa.ptr();
836 
837  for(int i = 0; i < miif.numNode; i++)
838  kd[i] = reg_kappa[(int) miif.IIFmask[i]];
839 
840  kappa.release_ptr(kd);
841  delete [] reg_kappa;
842 }
843 
844 
// NOTE(review): body of set_cond_type — the signature (source line 852) is
// elided in this listing; the code takes a MaterialType `m` and a
// conductivity type `type` and stamps it onto every region's material.
853 {
854  for(size_t i=0; i < m.regions.size(); i++) {
// materials in electric regions are always elecMaterial instances
855  elecMaterial *emat = static_cast<elecMaterial*>(m.regions[i].material);
856  emat->g = type;
857  }
858 }
859 
// Initialize the parabolic (and for bidomain also the elliptic) linear
// systems, then optionally dump the assembled matrices.
// NOTE(review): the matrix-assembly calls after each init() (source lines
// 864 and 868) are elided in this listing.
860 void Electrics::setup_solvers()
861 {
862  set_dir(OUTPUT);
863  parab_solver.init();
// NOTE(review): source line 864 elided
865 
866  if (param_globals::bidomain) {
867  ellip_solver.init();
// NOTE(review): source line 868 elided
869  }
870 
871  if(param_globals::dump2MatLab)
872  dump_matrices();
873 }
874 
// NOTE(review): body of have_dbc_stims — the signature (source line 875) is
// elided in this listing. Returns true if any stimulus imposes a Dirichlet
// boundary condition (potential-type stimulus incl. ground).
876 {
877  for(const stimulus & s : stimuli) {
878  if(is_dbc(s.phys.type))
879  return true;
880  }
881  return false;
882 }
883 
884 const char* get_tsav_ext(double time)
885 {
886  int min_idx = -1;
887  double min_diff = 1e100;
888 
889  for(int i=0; i<param_globals::num_tsav; i++)
890  {
891  double diff = fabs(param_globals::tsav[i] - time);
892  if(min_diff > diff) {
893  min_diff = diff;
894  min_idx = i;
895  }
896  }
897 
898  if(min_idx == -1)
899  min_idx = 0;
900 
901  return param_globals::tsav_ext[min_idx];
902 }
903 
904 void Electrics::checkpointing()
905 {
906  const timer_manager & tm = *user_globals::tm_manager;
907 
908  // regular user selected state save
909  if (tm.trigger(iotm_chkpt_list)) {
910  char save_fnm[1024];
911  const char* tsav_ext = get_tsav_ext(tm.time);
912 
913  snprintf(save_fnm, sizeof save_fnm, "%s.%s.roe", param_globals::write_statef, tsav_ext);
914 
915  ion.miif->dump_state(save_fnm, tm.time, intra_elec_msh, false, GIT_COMMIT_COUNT);
916  }
917 
918  // checkpointing based on interval
919  if (tm.trigger(iotm_chkpt_intv)) {
920  char save_fnm[1024];
921  snprintf(save_fnm, sizeof save_fnm, "checkpoint.%.1f.roe", tm.time);
922  ion.miif->dump_state(save_fnm, tm.time, intra_elec_msh, false, GIT_COMMIT_COUNT);
923  }
924 }
925 
// NOTE(review): body of the elliptic solver initialization — the signature
// (source line 926) is elided, as are several statements (gaps marked).
// Allocates the extracellular solution/rhs vectors and the stiffness and
// mass matrices.
927 {
929  double t0, t1, dur;
930  get_time(t0);
931  stats.init_logger("ell_stats.dat");
932 
933  // here we can differentiate the solvers
// NOTE(review): source line 934 elided
935  sf_mesh & extra_mesh = get_mesh(extra_elec_msh);
936  sf_vec::ltype alg_type = sf_vec::algebraic;
937  const int dpn = 1;
938 
939  SF::init_vector(&phie, extra_mesh, dpn, alg_type);
940  SF::init_vector(&phiesrc, extra_mesh, dpn, alg_type);
941  SF::init_vector(&currtmp, extra_mesh, dpn, alg_type);
942 
// NOTE(review): the opening `if(...) {` guarding the intra-grid branch
// (source line 943) is elided; the matching `}` is at line 946 below
944  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
945  SF::init_vector(&phie_i, intra_mesh, dpn, alg_type);
946  }
947 
// worst-case nonzeros per matrix row follows the nodal connectivity
948  int max_row_entries = max_nodal_edgecount(extra_mesh);
949 
// NOTE(review): matrix object creation statements (source lines 950-951)
// elided in this listing
952 
953  // alloc stiffness matrix
954  phie_mat->init(extra_mesh, dpn, dpn, max_row_entries);
955  // alloc mass matrix
// lumped mass needs only the diagonal, i.e. one entry per row
956  mass_e ->init(extra_mesh, dpn, dpn, param_globals::mass_lumping ? 1 : max_row_entries);
957  dur = timing(t1, t0);
958 }
959 
// NOTE(review): the first signature line (source line 960) is elided in
// this listing — this is the elliptic solver's matrix rebuild entry point,
// delegating to rebuild_stiffness() and rebuild_mass().
961  SF::vector<stimulus> & stimuli,
962  FILE_SPEC logger)
963 {
965  double t0, t1, dur;
966  get_time(t0);
967  rebuild_stiffness(mtype, stimuli, logger);
968  rebuild_mass(logger);
969  dur = timing(t1, t0);
970 }
971 
// NOTE(review): the first signature line (source line 972) is elided in
// this listing — this is the elliptic stiffness rebuild. It assembles the
// (negated) extracellular stiffness matrix with summed conductivities,
// enforces Dirichlet boundaries when potential stimuli exist (otherwise
// the system is singular and the nullspace flag is set), and finally
// (re)initializes the linear solver.
// Fix: corrected the misspelled log message "ellipitc" -> "elliptic".
973  SF::vector<stimulus> & stimuli,
974  FILE_SPEC logger)
975 {
977  double t0, t1, dur;
978  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
979 
980  MaterialType & mt = mtype[Electrics::extra_grid];
981  const bool have_dbc = have_dbc_stims(stimuli);
982 
// extracellular assembly uses the summed (intra + extra) conductivities
983  cond_t condType = sum_cond;
984  set_cond_type(mt, condType);
985 
986  // get mesh reference
987  sf_mesh & mesh = get_mesh(extra_elec_msh);
988 
989  get_time(t0);
990 
991  // fill the system
992  elec_stiffness_integrator stfn_integ(mt);
993 
994  phie_mat->zero();
995  SF::assemble_matrix(*phie_mat, mesh, stfn_integ);
996  phie_mat->scale(-1.0);
997 
998  dur = timing(t1,t0);
999  log_msg(logger,0,log_flag, "Computed elliptic stiffness matrix in %.3f seconds.", dur);
1000 
1001  // set boundary conditions
1002  if(have_dbc) {
1003  log_msg(logger,0,log_flag, "Elliptic lhs matrix enforcing Dirichlet boundaries.");
1004  get_time(t0);
1005 
// create the DBC manager lazily; afterwards only recompute the entries
1006  if(dbc == nullptr)
1007  dbc = new dbc_manager(*phie_mat, stimuli);
1008  else
1009  dbc->recompute_dbcs();
1010 
1011  dbc->enforce_dbc_lhs();
1012 
1013  dur = timing(t1,t0);
1014  log_msg(logger,0,log_flag, "Elliptic lhs matrix Dirichlet enforcing done in %.3f seconds.", dur);
1015  }
1016  else {
1017  log_msg(logger,1,ECHO, "Elliptic lhs matrix is singular!");
1018  // we are dealing with a singular system
1019  phie_mat_has_nullspace = true;
1020  }
1021 
1022  // solver has not been initialized yet
1023  set_dir(INPUT);
1024  get_time(t0);
1025 
1026  setup_linear_solver(logger);
1027 
1028  dur = timing(t1,t0);
1029  log_msg(logger,0,log_flag, "Initializing elliptic solver in %.5f seconds.", dur);
1030  set_dir(OUTPUT);
1031 }
1032 
// NOTE(review): body of the elliptic mass-matrix rebuild — the signature
// (source line 1033, taking the FILE_SPEC logger) is elided in this
// listing. Assembles the extracellular mass matrix, lumped or consistent
// depending on param_globals::mass_lumping.
1034 {
1036  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
1037  double t0, t1, dur;
1038  mass_integrator mass_integ;
1039 
1040  // get mesh reference
1041  sf_mesh & mesh = get_mesh(extra_elec_msh);
1042  get_time(t0);
1043  mass_e->zero();
1044 
// lumped mass condenses each row onto the diagonal
1045  if(param_globals::mass_lumping) {
1046  SF::assemble_lumped_matrix(*mass_e, mesh, mass_integ);
1047  } else {
1048  SF::assemble_matrix(*mass_e, mesh, mass_integ);
1049  }
1050 
1051  dur = timing(t1,t0);
1052  log_msg(logger,0,log_flag, "Computed elliptic mass matrix in %.3f seconds.", dur);
1053 }
1054 
// Configure the elliptic linear solver: tolerances from param_globals and
// flavor-specific default options (Ginkgo CG+AMG as JSON config, or a
// PETSc CG/BoomerAMG option string), overridable via ellip_options_file.
// NOTE(review): source line 1057 elided in this listing.
1055 void elliptic_solver::setup_linear_solver(FILE_SPEC logger)
1056 {
1058 
1059  tol = param_globals::cg_tol_ellip;
1060  max_it = param_globals::cg_maxit_ellip;
1061 
1062  std::string default_opts;
1063  std::string solver_file;
1064  solver_file = param_globals::ellip_options_file;
// Ginkgo flavor: CG with a PGM-multigrid preconditioner (JSON config)
1065  if (param_globals::flavor == std::string("ginkgo")) {
1066  default_opts = std::string(
1067  R"({
1068  "type": "solver::Cg",
1069  "criteria": [
1070  {
1071  "type": "Iteration",
1072  "max_iters": 100
1073  },
1074  {
1075  "type": "ResidualNorm",
1076  "reduction_factor": 1e-4
1077  }
1078  ],
1079  "preconditioner": {
1080  "type": "solver::Multigrid",
1081  "mg_level": [
1082  {
1083  "type": "multigrid::Pgm",
1084  "deterministic": true
1085  }
1086  ],
1087  "criteria": [
1088  {
1089  "type": "Iteration",
1090  "max_iters": 1
1091  }
1092  ],
1093  "coarsest_solver": {
1094  "type": "preconditioner::Schwarz",
1095  "local_solver": {
1096  "type": "preconditioner::Ilu"
1097  }
1098  },
1099  "max_levels": 10,
1100  "min_coarse_rows": 8,
1101  "default_initial_guess": "zero"
1102  }
1103 })");
// PETSc flavor: CG preconditioned with hypre BoomerAMG
1104  } else if (param_globals::flavor == std::string("petsc")) {
1105  default_opts = std::string("-ksp_type cg -pc_type hypre -pc_hypre_type boomeramg -pc_hypre_boomeramg_max_iter 1 -pc_hypre_boomeramg_strong_threshold 0.0 -options_left");
1106  }
1107 
1108  lin_solver->setup_solver(*phie_mat, tol, max_it, param_globals::cg_norm_ellip,
1109  "elliptic PDE", phie_mat_has_nullspace,
1110  logger, solver_file.c_str(), default_opts.c_str());
1111 }
1112 
// Solve the elliptic system with the bidomain coupling term: the rhs is
// built from the extracellular stimulus currents plus Ki*Vm scattered from
// the intra to the extra grid; the solution phie is scattered back to the
// intra-grid vector phie_i.
// NOTE(review): the opening brace (source lines 1114-1115) and several
// statements are elided in this listing (gaps marked).
1113 void elliptic_solver::solve(sf_mat & Ki, sf_vec & Vmv, sf_vec & tmp_i)
1116  double t0,t1;
// NOTE(review): source line 1117 elided
1118  // assembly of rhs for FE
1119  if (phiesrc->mag() > 0.0) {
1120  mass_e->mult(*phiesrc, *currtmp);
// NOTE(review): the copy of currtmp into phiesrc (source line 1121) elided
1122  }
1123 
// coupling term: intracellular stiffness applied to Vm
1124  Ki.mult(Vmv, tmp_i);
1125 
1126  bool add = true;
1127  i2e->forward(tmp_i, *phiesrc, add);
1128 
1129  if(dbc != nullptr)
// NOTE(review): the rhs Dirichlet-enforcement call (source line 1130) elided
1131 
1132  get_time(t0);
1133  (*lin_solver)(*phie, *phiesrc);
1134 
1135  // treat solver statistics
1136  auto dur = timing(t1, t0);
1137  lin_solver->time += dur;
1138  stats.slvtime += dur;
// NOTE(review): source line 1139 elided
1140  if(lin_solver->reason < 0) {
1141  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
1142  petsc_get_converged_reason_str(lin_solver->reason));
1143  EXIT(1);
1144  }
// NOTE(review): source line 1145 elided
// scatter the extracellular solution back onto the intra grid
1146  add = false;
1147  i2e->backward(*phie, *phie_i, add);
1148 }
1149 
// NOTE(review): body of the rhs-only elliptic solve overload — the
// signature (source line 1150) is elided in this listing, as are a few
// statements (gaps marked). Solves phie from the already-assembled
// phiesrc and scatters to phie_i when an intra mesh is registered.
1151 {
1153  double t0,t1;
1154 
1155  if(dbc != nullptr)
// NOTE(review): the rhs Dirichlet-enforcement call (source line 1156) elided
1157 
1158  get_time(t0);
1159  (*lin_solver)(*phie, *phiesrc);
1160 
1161  // treat solver statistics
1162  auto dur = timing(t1, t0);
1163  lin_solver->time += dur;
1164  stats.slvtime += dur;
// NOTE(review): source line 1165 elided
1166 
1167  if(lin_solver->reason < 0) {
1168  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
1169  petsc_get_converged_reason_str(lin_solver->reason));
1170  EXIT(1);
1171  }
1172 
1173  // phie_i is only set up when we have an IntraMesh registered
1174  if(is_init(phie_i)) {
1175  bool add = false;
// NOTE(review): source line 1176 elided
1177  i2e->backward(*phie, *phie_i, add);
1178  }
1179 }
1180 
1182 {
1184  double t0, t1, dur;
1185  get_time(t0);
1186  stats.init_logger("par_stats.dat");
1187 
1188  // here we can differentiate the solvers
1190 
1191  sf_vec* vm_ptr = get_data(vm_vec);
1192  sf_vec* iion_ptr = get_data(iion_vec);
1193 
1194  if(!(vm_ptr != NULL && iion_ptr != NULL)) {
1195  log_msg(0,5,0, "%s error: global Vm and Iion vectors not properly set up! Ionics seem invalid! Aborting!",
1196  __func__);
1197  EXIT(1);
1198  }
1199 
1200  SF::init_vector(&Vmv);
1202  Vmv-> shallow_copy(*vm_ptr);
1203  IIon->shallow_copy(*iion_ptr);
1204 
1205  if(param_globals::dump_data & DUMP_IC) SF::init_vector(&Ic , Vmv);
1206  if(param_globals::dump_data & DUMP_IVOL) SF::init_vector(&Ivol, Vmv);
1207  if(param_globals::dump_data & DUMP_IACT) SF::init_vector(&Iact, Vmv);
1208 
1211 
1212  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
1213  sf_vec::ltype alg_type = sf_vec::algebraic;
1214 
1215  int dpn = 1;
1216  SF::init_vector(&kappa_i, intra_mesh, dpn, alg_type);
1217  SF::init_vector(&tmp_i1, intra_mesh, dpn, alg_type);
1218  SF::init_vector(&tmp_i2, intra_mesh, dpn, alg_type);
1219  SF::init_vector(&old_vm, intra_mesh, dpn, alg_type);
1220 
1221  if(!param_globals::operator_splitting)
1222  SF::init_vector(&Irhs, intra_mesh, dpn, alg_type);
1223 
1224  // alloc matrices
1225  int max_row_entries = max_nodal_edgecount(intra_mesh);
1226 
1230 
1231  rhs_parab->init(intra_mesh, dpn, dpn, max_row_entries);
1232  mass_i ->init(intra_mesh, dpn, dpn, param_globals::mass_lumping ? 1 : max_row_entries);
1233 
1234  parab_tech = static_cast<parabolic_solver::parabolic_t>(param_globals::parab_solve);
1235  dur = timing(t1, t0);
1236 }
1237 
1239 {
1241  double start, end, period;
1242  get_time(start);
1243  double t0, t1, dur;
1244  mass_integrator mass_integ;
1245  int dpn = 1;
1246 
1247  int log_flag = param_globals::output_level > 1 ? ECHO : 0;
1248  MaterialType & mt = mtype[Electrics::intra_grid];
1249 
1250  double Dt = user_globals::tm_manager->time_step;
1251  get_kappa(*kappa_i, param_globals::imp_region, miif, UM2_to_CM2 / Dt);
1252 
1253  cond_t condType = intra_cond;
1254  sf_mesh & mesh = get_mesh(intra_elec_msh);
1255 
1256  if( (param_globals::bidomain == MONODOMAIN && param_globals::bidm_eqv_mono) ||
1257  (param_globals::bidomain == PSEUDO_BIDM) )
1258  condType = para_cond;
1259 
1260  // set material and conductivity type
1261  set_cond_type(mt, condType);
1262 
1263  // fill the system
1264  {
1265  get_time(t0);
1266 
1267  elec_stiffness_integrator stfn_integ(mt);
1268  SF::assemble_matrix(*rhs_parab, mesh, stfn_integ);
1269 
1270  dur = timing(t1,t0);
1271  log_msg(logger,0,log_flag, "Computed parabolic stiffness matrix in %.3f seconds.", dur);
1272  get_time(t0);
1273 
1274  if(param_globals::mass_lumping)
1275  SF::assemble_lumped_matrix(*mass_i, mesh, mass_integ);
1276  else
1277  SF::assemble_matrix(*mass_i, mesh, mass_integ);
1278 
1279  sf_vec* empty; SF::init_vector(&empty);
1280  mass_i->mult_LR(*kappa_i, *empty);
1281 
1282  dur = timing(t1,t0);
1283  log_msg(logger,0,log_flag, "Computed parabolic mass matrix in %.3f seconds.", dur);
1284  delete empty;
1285  }
1286 
1287  // initialize parab lhs
1288  if(parab_tech != EXPLICIT) {
1290  // if we have mass lumping, then the nonzero pattern between Mi and Ki is different
1291  bool same_nonzero = param_globals::mass_lumping == false;
1292 
1293  if (parab_tech==CN) {
1294  lhs_parab->scale(-param_globals::theta);
1295  lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
1296  }
1297  else if (parab_tech==O2dT) {
1298  lhs_parab->scale(-0.5);
1299  mass_i->scale(0.5);
1300  lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
1301  lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
1302  lhs_parab->add_scaled_matrix(*mass_i, 1.0, same_nonzero);
1303  }
1304  }
1305  else {
1308 
1309  SF_real* p = inv_mass_diag->ptr();
1310 
1311  for(int i=0; i<inv_mass_diag->lsize(); i++)
1312  p[i] = 1.0 / p[i];
1313 
1315  }
1316 
1317  if(parab_tech == CN || parab_tech == O2dT) {
1318  set_dir(INPUT);
1319  get_time(t0);
1320 
1321  setup_linear_solver(logger);
1322 
1323  dur = timing(t1,t0);
1324  log_msg(logger,0,log_flag, "Initializing parabolic solver in %.5f seconds.", dur);
1325  set_dir(OUTPUT);
1326  }
1327  period = timing(end, start);
1328 }
1329 
// Configure the linear solver for the parabolic system.
//
// Backend-specific defaults are provided — a Ginkgo JSON configuration or a
// PETSc options string, both CG with block-Jacobi/ILU-style preconditioning —
// and can be overridden via the parab_options_file parameter.
//
// @param logger  log file that solver setup messages are written to
void parabolic_solver::setup_linear_solver(FILE_SPEC logger)
{
  // convergence criteria taken from the global parameters
  tol = param_globals::cg_tol_parab;
  max_it = param_globals::cg_maxit_parab;

  std::string default_opts;
  std::string solver_file;
  solver_file = param_globals::parab_options_file;
  if (param_globals::flavor == std::string("ginkgo")) {
    default_opts = std::string(
     R"(
{
 "type": "solver::Cg",
 "criteria": [
 {
 "type": "Iteration",
 "max_iters": 100
 },
 {
 "type": "ResidualNorm",
 "reduction_factor": 1e-4
 }
 ],
 "preconditioner": {
 "type": "preconditioner::Schwarz",
 "local_solver": {
 "type": "preconditioner::Ilu"
 }
 }
}
 )");
  } else if (param_globals::flavor == std::string("petsc")) {
    default_opts = std::string("-pc_type bjacobi -sub_pc_type ilu -ksp_type cg");
  }

  // nullspace flag is false: the parabolic system is non-singular
  lin_solver->setup_solver(*lhs_parab, tol, max_it, param_globals::cg_norm_parab,
                           "parabolic PDE", false, logger, solver_file.c_str(),
                           default_opts.c_str());
}
1370 
1371 void parabolic_solver::solve(sf_vec & phie_i)
1372 {
1374  switch (parab_tech) {
1375  case CN: solve_CN(phie_i); break;
1376  case O2dT: solve_O2dT(phie_i); break;
1377  default: solve_EF(phie_i); break;
1378  }
1379 }
1380 
1381 void parabolic_solver::solve_CN(sf_vec & phie_i)
1382 {
1384  double t0,t1;
1385  // assembly of rhs for CN
1386  if (param_globals::bidomain == BIDOMAIN) {
1387  tmp_i1->deep_copy(phie_i);
1388  tmp_i1->add_scaled(*Vmv, 1.0 - param_globals::theta);
1389  rhs_parab->mult(*tmp_i1, *tmp_i2);
1390  }
1391  else {
1392  rhs_parab->mult(*Vmv, *tmp_i2);
1393  *tmp_i2 *= 1.0 - param_globals::theta;
1394  }
1395 
1396  mass_i->mult(*Vmv, *tmp_i1);
1397  *tmp_i1 += *tmp_i2;
1398 
1399  // add current contributions to rhs
1400  if(!param_globals::operator_splitting)
1401  tmp_i1->add_scaled(*Irhs, -1.0);
1402 
1403  get_time(t0);
1404 
1405  (*lin_solver)(*Vmv, *tmp_i1);
1406 
1407  if(lin_solver->reason < 0) {
1408  log_msg(0, 5, 0,"%s solver diverged. Reason: %s.", lin_solver->name.c_str(),
1409  petsc_get_converged_reason_str(lin_solver->reason));
1410  EXIT(1);
1411  }
1412 
1413  // treat solver statistics
1414  auto dur = timing(t1, t0);
1415  lin_solver->time += dur;
1416  stats.slvtime += dur;
1418 }
1419 
1420 void parabolic_solver::solve_O2dT(sf_vec & phie_i)
1421 {
1423  double t0,t1;
1424  // assembly of rhs for FE
1425  if (param_globals::bidomain == BIDOMAIN) {
1426  tmp_i2->deep_copy(phie_i);
1427  tmp_i2->add_scaled(*Vmv, 0.5);
1428  rhs_parab->mult(*tmp_i2, *tmp_i1); // tmp_i1 = K_i(Vm^t * 0.5 + phi_e)
1429  }
1430  else {
1431  rhs_parab->mult(*Vmv, *tmp_i1);
1432  *tmp_i1 *= 0.5; // tmp_i1 = 0.5 * K_i Vm^t
1433  }
1434 
1435  mass_i->mult(*Vmv, *tmp_i2); // tmp_i2 = M/2 Vm^t
1436  tmp_i1->add_scaled(*tmp_i2, 4.0); // tmp_i1 = (2M+K_i/2)Vm^t
1437  mass_i->mult(*old_vm, *tmp_i2); // tmp_i2 = M/2 Vm^{t-1}
1438 
1439  tmp_i1->add_scaled(*tmp_i2, -1.0); // tmp_i1 = (2M+K_i/2)Vm^t-M/2 Vm^{t-1}
1440  *old_vm = *Vmv;
1441 
1442  get_time(t0);
1443 
1444  // solve
1445  (*lin_solver)(*Vmv, *tmp_i1);
1446 
1447  // treat solver statistics
1448  stats.slvtime += timing(t1, t0);
1450 }
1451 
1452 void parabolic_solver::solve_EF(sf_vec & phie_i)
1453 {
1455  double t0,t1,t2;
1456  get_time(t0);
1457 
1458  // assembly of rhs for FE
1459  if (param_globals::bidomain == BIDOMAIN) {
1460  tmp_i2->deep_copy(phie_i);
1461  *tmp_i2 += *Vmv;
1462  rhs_parab->mult(*tmp_i2, *tmp_i1);
1463  }
1464  else {
1465  rhs_parab->mult(*Vmv, *tmp_i1);
1466  }
1467 
1468  *tmp_i1 *= *inv_mass_diag;
1469  Vmv->add_scaled(*tmp_i1, 1.0);
1470 
1471  if(param_globals::operator_splitting == false)
1472  Vmv->add_scaled(*Irhs, -1.0);
1473 
1474  // record rhs timing
1475  stats.slvtime += timing(t1, t0);
1476 }
1477 
1479 {
1480  char* prvSimDir = strlen(param_globals::start_statef) ?
1481  get_file_dir(param_globals::start_statef) : NULL;
1482 
1483  const char* extn = ".dat";
1484 
1485  // if compute_APD we need an extra 2 acts
1486  int addLATs = param_globals::compute_APD ? 2 : 0;
1487 
1488  bool have_sentinel = param_globals::t_sentinel > 0.0;
1489  bool need_to_add_sentinel = have_sentinel && (param_globals::sentinel_ID < 0);
1490 
1491  addLATs += need_to_add_sentinel ? 1 : 0;
1492  acts.resize(param_globals::num_LATs + addLATs);
1493 
1494  int j=0;
1495  for (int i = 0; i < param_globals::num_LATs; i++ )
1496  {
1497  // using Ph only with bidomain runs
1498  if (param_globals::lats[i].method <= 0 || (param_globals::lats[i].measurand == PHIE && !param_globals::bidomain)) {
1499  log_msg(NULL, 3, 0, "Phie-based LAT measurement requires bidomain >=1 Ignoring lats[%d].", i);
1500  continue;
1501  }
1502 
1503  acts[j].method = (ActMethod)param_globals::lats[i].method;
1504  acts[j].threshold = param_globals::lats[i].threshold;
1505  acts[j].mode = param_globals::lats[i].mode;
1506  acts[j].all = param_globals::lats[i].all;
1507  acts[j].measurand = (PotType)param_globals::lats[i].measurand;
1508  acts[j].ID = param_globals::lats[i].ID;
1509  acts[j].fout = NULL;
1510 
1511  if(param_globals::lats[i].all) {
1512  acts[j].fname = (char*) malloc((strlen(param_globals::lats[i].ID)+strlen(extn)+1)*sizeof(char));
1513  snprintf(acts[j].fname, strlen(param_globals::lats[i].ID)+strlen(extn)+1, "%s%s", param_globals::lats[i].ID, extn);
1514  }
1515  else {
1516  char prfx[] = "init_acts_";
1517  int max_len = strlen(prfx) + strlen(param_globals::lats[i].ID) + strlen(extn) + 1;
1518 
1519  acts[j].fname = (char*) malloc(max_len*sizeof(char));
1520  snprintf(acts[j].fname, max_len, "%s%s%s", prfx, param_globals::lats[i].ID, extn);
1521  }
1522 
1523  // restarting
1524  if(prvSimDir != NULL) {
1525  int len_fname = strlen(prvSimDir)+strlen(acts[j].fname)+2;
1526  acts[j].prv_fname = (char*) malloc(len_fname*sizeof(char));
1527  snprintf(acts[j].prv_fname, len_fname, "%s/%s", prvSimDir, acts[j].fname);
1528  }
1529 
1530  j++;
1531  }
1532 
1533  if(param_globals::compute_APD) {
1534  acts[j].method = ACT_THRESH; // threshold crossing
1535  acts[j].threshold = param_globals::actthresh;
1536  acts[j].mode = 0; // upstroke
1537  acts[j].all = true;
1538  acts[j].measurand = VM; // Vm
1539  //acts[j].ID = dupstr("Vm_Activation");
1540  acts[j].fout = NULL;
1541  acts[j].fname = dupstr("vm_activation.dat");
1542 
1543  j++;
1544  acts[j].method = ACT_THRESH; // threshold crossing
1545  acts[j].threshold = param_globals::recovery_thresh;
1546  acts[j].mode = 1; // repol
1547  acts[j].all = true;
1548  acts[j].measurand = VM; // Vm
1549  //(*acts)[j+1].ID = param_globals::lats[i].ID;
1550  acts[j].fout = NULL;
1551  acts[j].fname = dupstr("vm_repolarisation.dat");
1552 
1553  j++;
1554  }
1555 
1556  // set up sentinel for activity checking
1557  sntl.activated = have_sentinel;
1558  sntl.t_start = param_globals::t_sentinel_start;
1559  sntl.t_window = param_globals::t_sentinel;
1560  sntl.t_quiesc =-1.;
1561  sntl.ID = param_globals::sentinel_ID;
1562 
1563  if(need_to_add_sentinel) {
1564  // add a default LAT detector as sentinel
1565  acts[j].method = ACT_THRESH; // threshold crossing
1566  acts[j].threshold = param_globals::actthresh;
1567  acts[j].mode = 0; // upstroke
1568  acts[j].all = true;
1569  acts[j].measurand = VM; // Vm
1570  //(*acts)[j].ID = dupstr("Vm_Activation");
1571  acts[j].fout = NULL;
1572  acts[j].fname = dupstr("vm_sentinel.dat");
1573  // set sentinel index
1574  sntl.ID = j;
1575  j++;
1576  }
1577 
1578  if(prvSimDir) free(prvSimDir);
1579 }
1580 
1581 void print_act_log(FILE_SPEC logger, const SF::vector<Activation> & acts, int idx)
1582 {
1583  const Activation & act = acts[idx];
1584 
1585  log_msg(logger, 0, 0, "\n");
1586  log_msg(logger, 0, 0, "LAT detector [%2d]", idx);
1587  log_msg(logger, 0, 0, "-----------------\n");
1588 
1589  log_msg(logger, 0, 0, "Measurand: %s", act.measurand ? "Phie" : "Vm");
1590  log_msg(logger, 0, 0, "All: %s", act.all ? "All" : "Only first");
1591  log_msg(logger, 0, 0, "Method: %s", act.method==ACT_DT ? "Derivative" : "Threshold crossing");
1592 
1593  char buf[64], gt[2], sgn[2];
1594  snprintf(sgn, sizeof sgn, "%s", act.mode?"-":"+");
1595  snprintf(gt, sizeof gt, "%s", act.mode?"<":">");
1596 
1597  if(act.method==ACT_DT)
1598  snprintf(buf, sizeof buf, "Maximum %sdf/dt %s %.2f mV", sgn, gt, act.threshold);
1599  else
1600  snprintf(buf, sizeof buf, "Intersection %sdf/dt with %.2f", sgn, act.threshold);
1601 
1602  log_msg(logger, 0, 0, "Mode: %s", buf);
1603  log_msg(logger, 0, 0, "Threshold: %.2f mV\n", act.threshold);
1604 }
1605 
// Initialize all configured LAT detectors: attach each one to the signal it
// observes (Vm or phie), allocate history and scratch buffers, and restore /
// reopen output files when restarting from a previous simulation.
//
// @param vm      transmembrane voltage vector to observe
// @param phie    extracellular potential vector to observe
// @param offset  global node-number offset added to reported indices
// @param phys_t  physics whose logger is used for reporting
//
// NOTE(review): the final statement(s) after fetching the intra mesh (likely
// the petsc_to_nodal scatterer setup) are not visible in this view.
void LAT_detector::init(sf_vec & vm, sf_vec & phie, int offset, enum physic_t phys_t)
{
  if(!get_physics(phys_t)) {
    log_msg(0,0,5, "There seems to be no EP is defined. LAT detector requires active EP! Aborting LAT setup!");
    return;
  }

  // we use the electrics logger for output
  FILE_SPEC logger = get_physics(phys_t)->logger;

  // TODO(init): except for the shallow copies, shouldn't these be deleted?
  // When to delete them?
  for(size_t i = 0; i < acts.size(); ++i) {
    acts[i].init = 1;
    // phi aliases the observed signal; phip holds the previous time step
    SF::init_vector(&(acts[i].phi));
    acts[i].phi->shallow_copy(!acts[i].measurand ? vm : phie);
    acts[i].offset = offset;

    SF::init_vector(&(acts[i].phip), acts[i].phi);
    *acts[i].phip = *acts[i].phi;

    // derivative based detector needs two additional history vectors
    if (acts[i].method == ACT_DT) {
      SF::init_vector(&(acts[i].dvp0), acts[i].phi);
      SF::init_vector(&(acts[i].dvp1), acts[i].phi);
      if(acts[i].mode)
        log_msg(NULL,2,0, "Detection of -df/dt|max not implemented, +df/dt|max will be detected.");
    }

    // allocate additional local buffers for per-step detections
    acts[i].ibuf = (int *)malloc(acts[i].phi->lsize()*sizeof(int));
    acts[i].actbuf = (double *)malloc(acts[i].phi->lsize()*sizeof(double));

    if (!acts[i].all) {
      // first-activation mode: one activation time per node, -1 == not yet
      SF::init_vector(&acts[i].tm, acts[i].phi->gsize(), acts[i].phi->lsize());
      acts[i].tm->set(-1.);

      // initialize with previous initial activations
      if(acts[i].prv_fname != NULL) {
        set_dir(INPUT);
        size_t nread = acts[i].tm->read_ascii(acts[i].prv_fname);

        if(nread == 0)
          log_msg(NULL,2,ECHO,"Warning: Initialization of LAT[%2d] failed.", i);

        set_dir(OUTPUT);
      }
    }
    else {
      // all-activations mode: rank 0 streams detections to an ascii file
      if ( !get_rank() ) {
        // here we should copy over previous file and open in append mode
        if(acts[i].prv_fname!=NULL) {
          set_dir(INPUT);
          FILE_SPEC in = f_open( acts[i].prv_fname, "r" );
          if(in) {
            log_msg(NULL,2,0, "Copying over of previous activation file not implemented.\n"); f_close(in);
          }
          else
            log_msg(NULL,3,0,"Warning: Initialization in %s - \n"
                    "Failed to read activation file %s.\n", __func__, acts[i].prv_fname);

          set_dir(OUTPUT);
        }
        acts[i].fout = f_open( acts[i].fname, acts[i].prv_fname==NULL?"w":"a" );
      }
    }
    print_act_log(logger, acts, i);
  }

  sf_mesh & intra_mesh = get_mesh(intra_elec_msh);
}
1678 
1679 
1680 int output_all_activations(FILE_SPEC fp, int *ibuf, double *act_tbuf, int nlacts)
1681 {
1682  int rank = get_rank(), gacts = 0, numProc = get_size();
1683 
1684  if (rank == 0) {
1685  // rank 0 writes directly to the table
1686  for (int i=0; i<nlacts; i++)
1687  fprintf(fp->fd, "%d\t%.6f\n", ibuf[i], act_tbuf[i]);
1688 
1689  gacts += nlacts;
1690 
1691  SF::vector<int> buf_inds;
1692  SF::vector<double> buf_acts;
1693 
1694  for (int j=1; j<numProc; j++) {
1695  int acts = 0;
1696  MPI_Status status;
1697  MPI_Recv(&acts, 1, MPI_INT, j, 110, PETSC_COMM_WORLD, &status);
1698 
1699  if (acts) {
1700  buf_inds.resize(acts);
1701  buf_acts.resize(acts);
1702 
1703  MPI_Recv(buf_inds.data(), acts, MPI_INT, j, 110, PETSC_COMM_WORLD, &status);
1704  MPI_Recv(buf_acts.data(), acts, MPI_DOUBLE, j, 110, PETSC_COMM_WORLD, &status);
1705 
1706  for(int ii=0; ii<acts; ii++)
1707  fprintf(fp->fd, "%d\t%.6f\n", buf_inds[ii], buf_acts[ii]);
1708 
1709  gacts += acts;
1710  }
1711  }
1712  fflush(fp->fd);
1713  }
1714  else {
1715  MPI_Send(&nlacts, 1, MPI_INT, 0, 110, PETSC_COMM_WORLD);
1716  if (nlacts) {
1717  MPI_Send(ibuf, nlacts, MPI_INT, 0, 110, PETSC_COMM_WORLD);
1718  MPI_Send(act_tbuf, nlacts, MPI_DOUBLE, 0, 110, PETSC_COMM_WORLD);
1719  }
1720  }
1721 
1722  MPI_Bcast(&gacts, 1, MPI_INT, 0, PETSC_COMM_WORLD);
1723  return gacts;
1724 }
// Run all LAT detectors for the current time step.
//
// For each detector, local crossings are found, indices are translated to
// canonical global numbering (all-activations mode) or stored as first
// activation times (first-only mode), and the global count is reduced.
//
// @param tm  current simulation time in ms
// @return    1 if any detector fired anywhere this step, 0 otherwise
//
// NOTE(review): the definition of canon_nbr (presumably the canonical node
// numbering of the intra mesh) is on a line not visible in this view.
int LAT_detector::check_acts(double tm)
{
  int nacts = 0;
  double *a;

  for(Activation* aptr = acts.data(); aptr != acts.end(); aptr++)
  {
    int lacts = 0;
    switch (aptr->method) {
      case ACT_THRESH:
        lacts = check_cross_threshold(*aptr->phi, *aptr->phip, tm,
                  aptr->ibuf, aptr->actbuf, aptr->threshold, aptr->mode);
        break;

      case ACT_DT:
        lacts = check_mx_derivative (*aptr->phi, *aptr->phip, tm,
                  aptr->ibuf, aptr->actbuf, *aptr->dvp0, *aptr->dvp1,
                  aptr->threshold, aptr->mode);
        break;

      default:
        break;
    }

    // first-only mode writes into the per-node activation-time vector
    if (!aptr->all)
      a = aptr->tm->ptr();

    for(int j=0; j<lacts; j++) {
      if(aptr->all) {
        // translate local petsc index to canonical global node number
        int nodal_idx = this->petsc_to_nodal.forward_map(aptr->ibuf[j]);
        aptr->ibuf[j] = canon_nbr[nodal_idx] + aptr->offset;
      }
      else {
        // record only the first activation of each node (-1 == unset)
        if(a[aptr->ibuf[j]] == -1)
          a[aptr->ibuf[j]] = aptr->actbuf[j];
      }
    }

    if(aptr->all)
      output_all_activations(aptr->fout, aptr->ibuf, aptr->actbuf, lacts);
    else
      aptr->tm->release_ptr(a);

    // global detection count for this detector in this step
    MPI_Allreduce(MPI_IN_PLACE, &lacts, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);
    nacts += lacts;

    // NOTE(review): nacts is the running total over all detectors, so later
    // detectors store a cumulative count — confirm this is intended
    aptr->nacts = nacts;
  }

  return nacts > 0;
}
1779 
1780 
1781 int LAT_detector::check_quiescence(double tm, double dt)
1782 {
1783  static int savequitFlag = 0;
1784  int numNodesActivated = -1;
1785 
1786  if(sntl.activated) {
1787  // initialization
1788  if(sntl.t_quiesc < 0. && sntl.t_window >= 0.0 ) {
1789  log_msg(0,0,ECHO | NONL, "================================================================================================\n");
1790  log_msg(0,0,ECHO | NONL, "%s() WARNING: simulation is configured to savequit() after %.2f ms of quiescence\n", __func__, sntl.t_window);
1791  log_msg(0,0,ECHO | NONL, "================================================================================================\n");
1792  sntl.t_quiesc = 0.0;
1793  }
1794 
1795  if(tm >= sntl.t_start && !savequitFlag)
1796  {
1797  numNodesActivated = acts[sntl.ID].nacts;
1798 
1799  if(numNodesActivated) sntl.t_quiesc = 0.0;
1800  else sntl.t_quiesc += dt;
1801 
1802  if(sntl.t_window >= 0.0 && sntl.t_quiesc > sntl.t_window && !savequitFlag) {
1803  savequitFlag++;
1804  savequit();
1805  }
1806  }
1807  }
1808 
1809  return numNodesActivated;
1810 }
1811 
1812 
1813 
1814 
1815 int LAT_detector::check_cross_threshold(sf_vec & vm, sf_vec & vmp, double tm,
1816  int *ibuf, double *actbuf, float threshold, int mode)
1817 {
1818  SF_real *c = vm.ptr();
1819  SF_real *p = vmp.ptr();
1820  int lsize = vm.lsize();
1821  int nacts = 0, gnacts = 0;
1822 
1823  for (int i=0; i<lsize; i++) {
1824  int sgn = 1;
1825  bool triggered = false;
1826  if(mode==0) {// detect +slope crossing
1827  triggered = p[i] <= threshold && c[i] > threshold; }
1828  else { // detect -slope crossing
1829  triggered = p[i] >= threshold && c[i] < threshold;
1830  sgn = -1;
1831  }
1832 
1833  if (triggered) {
1834  double tact = tm - param_globals::dt + (threshold-p[i])/(c[i]-p[i])*sgn*param_globals::dt;
1835  ibuf [nacts] = i;
1836  actbuf[nacts] = tact;
1837  nacts++;
1838  }
1839  p[i] = c[i];
1840  }
1841 
1842  vm.release_ptr(c);
1843  vmp.release_ptr(p);
1844  return nacts;
1845 }
1846 
1847 int LAT_detector::check_mx_derivative(sf_vec & vm, sf_vec & vmp, double tm,
1848  int *ibuf, double *actbuf, sf_vec & dvp0, sf_vec & dvp1,
1849  float threshold, int mode)
1850 {
1851  int nacts = 0, gnacts = 0;
1852  double tact, dt2 = 2 * param_globals::dt;
1853  int lsize = vm.lsize();
1854  SF_real ddv0, ddv1, dv, dvdt;
1855  SF_real *c, *p, *pd0, *pd1;
1856 
1857  c = vm.ptr();
1858  p = vmp.ptr();
1859  pd0 = dvp0.ptr();
1860  pd1 = dvp1.ptr();
1861 
1862  for (int i=0; i<lsize; i++ ) {
1863  dv = (c[i]-p[i]);
1864  dvdt = dv/param_globals::dt;
1865  ddv0 = pd1[i]-pd0[i];
1866  ddv1 = dv -pd1[i];
1867 
1868  if (dvdt>=threshold && ddv0>0 && ddv1<0) {
1869  tact = tm-dt2+(ddv0/(ddv0-ddv1))*param_globals::dt;
1870  ibuf [nacts] = i;
1871  actbuf[nacts] = tact;
1872  nacts++;
1873  }
1874  p [i] = c[i];
1875  pd0[i] = pd1[i];
1876  pd1[i] = dv;
1877  }
1878 
1879  vm .release_ptr(c);
1880  vmp .release_ptr(p);
1881  dvp0.release_ptr(pd0);
1882  dvp1.release_ptr(pd1);
1883 
1884  return nacts;
1885 }
1886 
1891 {
1893  assert(sc != NULL);
1894 
1895  bool forward = true;
1896 
1897  for (size_t i = 0; i < acts.size(); i++) {
1898  if (is_init(acts[i].tm)) {
1899  (*sc)(*acts[i].tm, forward);
1900  acts[i].tm->write_ascii(acts[i].fname, false);
1901  }
1902  }
1903 }
1904 
// Prepace the tissue using precomputed activation times: each IMP region
// paces a single representative node through prepacing_beats cycles and
// duplicates its state onto the other nodes at the point in the pacing
// protocol matching their (LAT-derived) save times.
// NOTE(review): the statement obtaining the scatterer sc is on a line not
// visible in this view — confirm against upstream.
void Electrics::prepace() {
  log_msg(NULL, 0, 0, "Using activation times from file %s to distribute prepacing states\n",
          param_globals::prepacing_lats);
  log_msg(NULL, 0, 0, "Assuming stimulus strength %f uA/uF with duration %f ms for prepacing\n",
          param_globals::prepacing_stimstr, param_globals::prepacing_stimdur);

  limpet::MULTI_IF* miif = this->ion.miif;

  const sf_mesh & mesh = get_mesh(intra_elec_msh);
  sf_vec* read_lats; SF::init_vector(&read_lats, mesh, 1, sf_vec::algebraic);

  // read in the global distributed vector of all activation times
  set_dir(INPUT);
  size_t numread = read_lats->read_ascii(param_globals::prepacing_lats);
  if (numread == 0) {
    log_msg(NULL, 5, 0, "Failed reading required LATs! Skipping prepacing!");
    return;
  }
  set_dir(OUTPUT);

  assert(sc != NULL);

  // permute in-place to petsc permutation
  bool forward = false;
  (*sc)(*read_lats, forward);

  // take care of negative LAT values: push them past the end of the run
  {
    PetscReal* lp = read_lats->ptr();
    for(int i=0; i<read_lats->lsize(); i++)
      if(lp[i] < 0.0) lp[i] = param_globals::tend + 10.0;

    read_lats->release_ptr(lp);
  }

  // make LATs relative and figure out the first LAT
  // so we know when to save state of each point
  SF_real LATmin = read_lats->min();

  if(LATmin < 0.0) {
    log_msg(0,3,0, "LAT data is not complete. Skipping prepacing.");
    return;
  }
  // align the earliest LAT with the pacing cycle
  SF_real offset = floor(LATmin / param_globals::prepacing_bcl) * param_globals::prepacing_bcl;
  SF_real last_tm = param_globals::prepacing_bcl * param_globals::prepacing_beats;

  // compute read_lats[i] = last_tm - (read_lats[i] - offset)
  *read_lats += -offset;
  *read_lats *= -1.;
  *read_lats += last_tm;

  miif->getRealData();
  SF_real *save_tm = read_lats->ptr();
  SF_real *vm = miif->gdata[limpet::Vm]->ptr();

  for (int ii = 0; ii < miif->N_IIF; ii++) {
    if (!miif->N_Nodes[ii]) continue;

    // create sorted array of save times.
    SF::vector<SF::mixed_tuple<double,int>> sorted_save(miif->N_Nodes[ii]); // v1 = time, v2 = index
    for (int kk = 0; kk < miif->N_Nodes[ii]; kk++) {
      sorted_save[kk].v1 = save_tm[miif->NodeLists[ii][kk]];
      sorted_save[kk].v2 = kk;
    }
    std::sort(sorted_save.begin(), sorted_save.end());

    size_t lastidx = sorted_save.size() - 1;
    int paced = sorted_save[lastidx].v2; // IMP index of latest node
    int csav = 0;

    // pace the representative node; snapshot its state onto each other node
    // once the protocol time reaches that node's save time
    for (double t = 0; t < sorted_save[lastidx].v1; t += param_globals::dt) {
      if (fmod(t, param_globals::prepacing_bcl) < param_globals::prepacing_stimdur &&
          t < param_globals::prepacing_bcl * param_globals::prepacing_beats - 1)
        miif->ldata[ii][limpet::Vm][paced] += param_globals::prepacing_stimstr * param_globals::dt;

      compute_IIF(*miif->IIF[ii], miif->ldata[ii], paced);

      // Vm update always happens now outside of the imp
      miif->ldata[ii][limpet::Vm][paced] -= miif->ldata[ii][limpet::Iion][paced] * param_globals::dt;
      vm[miif->NodeLists[ii][paced]] = miif->ldata[ii][limpet::Vm][paced];

      while (csav < miif->N_Nodes[ii] - 1 && t >= sorted_save[csav].v1)
        dup_IMP_node_state(*miif->IIF[ii], paced, sorted_save[csav++].v2, miif->ldata[ii]);
    }

    // get nodes which may be tied for last
    while (csav < miif->N_Nodes[ii] - 1)
      dup_IMP_node_state(*miif->IIF[ii], paced, sorted_save[csav++].v2, miif->ldata[ii]);
    // update global Vm vector
    for (int k = 0; k < miif->N_Nodes[ii]; k++) vm[miif->NodeLists[ii][k]] = miif->ldata[ii][limpet::Vm][k];
  }

  read_lats->release_ptr(save_tm);
  miif->gdata[limpet::Vm]->release_ptr(vm);
  miif->releaseRealData();
}
2003 
2004 
// Recover the extracellular potential phie at a set of field points from a
// monodomain Vm distribution, using the line-source approximation:
// phi(x) = 1/(4 pi gBath) * sum_i Im_i / r_i, with Im = Ki * Vm.
//
// @param vm   transmembrane voltage in petsc ordering (intra grid)
// @param rcv  recovery data: field points, scratch vectors, result vector
void recover_phie_std(sf_vec & vm, phie_recovery_data & rcv)
{
  // nothing to do without recovery points
  if (!rcv.pts.size())
    return;

  int rank = get_rank();

  if(!get_physics(elec_phys)) {
    log_msg(0,0,5, "There seems to be no EP is defined. Phie recovery requires active EP! Aborting!");
    return;
  }

  // reuse the parabolic stiffness matrix as source operator
  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys));
  sf_mat & Ki = *elec->parab_solver.rhs_parab;

  const sf_mesh & imesh = get_mesh(intra_elec_msh);
  const SF::vector<mesh_int_t> & alg_nod = imesh.pl.algebraic_nodes();

  SF_int start, end;
  vm.get_ownership_range(start, end);

  // lazily allocate the scratch vectors on first use
  if(!rcv.Im) {
    SF::init_vector(&rcv.Im, &vm);
    SF::init_vector(&rcv.dphi, &vm);
  }

  SF_int r_start, r_end;
  rcv.phie_rec->get_ownership_range(r_start, r_end);

  SF_real *ph_r = rcv.phie_rec->ptr();

  // use minimum distance to ensure r>0
  // consistent with the line source approximation, the "cable radius"
  // is used as a lower limit for the source-field point distance
  float minDist = 2. / param_globals::imp_region[0].cellSurfVolRatio; // radius in um

  // membrane current density: Im = Ki * Vm
  Ki.mult(vm, *rcv.Im);
  int numpts = rcv.pts.size() / 3;
  Point fpt, cpt;

  for (int j=0; j<numpts; j++) {
    // field point j (points stored as flat xyz triples)
    fpt = rcv.pts.data() + j*3;

    // dphi starts as a copy of Im, then each entry is divided by its
    // distance to the field point
    *rcv.dphi = *rcv.Im;
    SF_real* dp = rcv.dphi->ptr();

    for (size_t i = 0; i<alg_nod.size(); i++)
    {
      mesh_int_t loc_nodal_idx = alg_nod[i];
      mesh_int_t loc_petsc_idx = local_nodal_to_local_petsc(imesh, rank, loc_nodal_idx);
      cpt = imesh.xyz.data()+loc_nodal_idx*3;

      double r = dist(fpt, cpt) + minDist;
      dp[loc_petsc_idx] /= r;
    }

    rcv.dphi->release_ptr(dp);

    // global reduction over all sources, scaled by the bath conductivity;
    // the owning rank stores the result
    SF_real phi = rcv.dphi->sum() / 4. / M_PI / rcv.gBath;
    if ( (j>=r_start) && (j<r_end) )
      ph_r[j-r_start] = phi;
  }

  rcv.phie_rec->release_ptr(ph_r);
}
2071 
2073 {
2074  int err = 0, rank = get_rank();
2075 
2077  log_msg(0,0,5, "There seems to be no EP is defined. Phie recovery requires active EP! Aborting!");
2078  return 1;
2079  }
2080 
2081  sf_mesh & imesh = get_mesh(intra_elec_msh);
2082  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys));
2083  phie_recovery_data & phie_rcv = elec->phie_rcv;
2084 
2085  // we close the files of the default electrics if there are any open
2086  elec->output_manager.close_files_and_cleanup();
2087 
2088  // register output
2089  set_dir(POSTPROC);
2090  igb_output_manager phie_rec_out;
2091  phie_rec_out.register_output(phie_rcv.phie_rec, phie_recv_msh, 1,
2092  param_globals::phie_recovery_file, "mV");
2093 
2094  // Buffer for Vm data
2095  sf_vec* vm = get_data(vm_vec); assert(vm);
2096 
2097  // set up igb header and point fd to start of Vm file
2098  set_dir(OUTPUT);
2099  IGBheader vm_igb;
2100  if(rank == 0) {
2101  FILE_SPEC file = f_open(param_globals::vofile, "r");
2102  if(file != NULL) {
2103  vm_igb.fileptr(file->fd);
2104  vm_igb.read();
2105 
2106  if(vm_igb.x() != vm->gsize()) {
2107  log_msg(0,4,0, "%s error: Vm dimension does not fit to %s file. Aborting recovery! \n",
2108  __func__, param_globals::vofile);
2109  err++;
2110  }
2111 
2112  delete file;
2113  }
2114  else err++;
2115  }
2116 
2117  err = get_global(err, MPI_MAX);
2118 
2119  if(err == 0) {
2120  FILE* fd = static_cast<FILE*>(vm_igb.fileptr());
2121 
2122  // number of data slices
2123  const int num_io = user_globals::tm_manager->timers[iotm_spacedt]->numIOs;
2124 
2125  // scatterers
2127  assert(petsc_to_canonical != NULL);
2128 
2129  // loop over vm slices and recover phie
2130  for(int i=0; i<num_io; i++) {
2131  log_msg(0,0,0, "Step %d / %d", i+1, num_io);
2132  size_t nread = vm->read_binary<float>(fd);
2133 
2134  if(nread != size_t(vm->gsize())) {
2135  log_msg(0,3,0, "%s warning: read incomplete data slice! Aborting!", __func__);
2136  err++;
2137  break;
2138  }
2139 
2140  // permute vm_buff
2141  bool forward = false;
2142  (*petsc_to_canonical)(*vm, forward);
2143 
2144  // do phie computation
2145  recover_phie_std(*vm, phie_rcv);
2146 
2147  phie_rec_out.write_data();
2148  }
2149 
2150  phie_rec_out.close_files_and_cleanup();
2151  }
2152  return err;
2153 }
2154 
2155 void setup_phie_recovery_data(phie_recovery_data & data)
2156 {
2158  if(!get_physics(elec_phys) ) {
2159  log_msg(0,0,5, "There seems to be no EP is defined. Phie recovery requires active EP! Aborting!");
2160  return;
2161  }
2162 
2163  int rank = get_rank(), size = get_size();
2164  Electrics* elec = static_cast<Electrics*>(get_physics(elec_phys));
2165 
2166  sf_mesh & imesh = get_mesh(intra_elec_msh);
2167  const std::string basename = param_globals::phie_rec_ptf;
2168  SF::vector<mesh_int_t> ptsidx;
2169 
2170  set_dir(INPUT);
2171  SF::read_points(basename, imesh.comm, data.pts, ptsidx);
2172  make_global(data.pts, imesh.comm); // we want all ranks to have all points
2173 
2174  // set up parallel layout of recovery points
2175  SF::vector<mesh_int_t> layout;
2176  layout_from_count(mesh_int_t(ptsidx.size()), layout, imesh.comm);
2177 
2178  // set up petsc_vector for recovered potentials
2179  SF::init_vector(&data.phie_rec, layout[size], layout[rank+1]-layout[rank], 1, sf_vec::algebraic);
2180 
2181  // get conductivty
2182  SF::vector<RegionSpecs> & intra_regions = elec->mtype[Electrics::intra_grid].regions;
2183  data.gBath = static_cast<elecMaterial*>(intra_regions[0].material)->BathVal[0];
2184 }
2185 
// Initialize the Laplace physic: set up extra/intra tissue regions, stimuli
// (only Phi_ex type is sampled), the elliptic solver, output registration,
// and a single-step trigger timer; then immediately run the solve.
// NOTE(review): several statements are missing from this view (the
// region_mask call heads, mapping setup, an enclosing conditional whose `}`
// appears below, and the elliptic rebuild call) — confirm against upstream.
void Laplace::initialize()
{
  int rank = get_rank();

  // Laplace problems are only meaningful in a bidomain setup
  assert(param_globals::bidomain == BIDOMAIN);
  double t1, t2;
  get_time(t1);

  // set up Extracellular tissue
    mtype[Electrics::extra_grid].regionIDs, true, "gregion_e");

  // set up a subset of the complete electrical mappings
  int dpn = 1;

  // set up Intracellular tissue
    mtype[Electrics::intra_grid].regionIDs, true, "gregion_i");

  }

  // set up stimuli
  init_stim_info();
  stimuli.resize(param_globals::num_stim);
  for(int i=0; i<param_globals::num_stim; i++) {
    // construct new stimulus
    stimulus & s = stimuli[i];

    s.translate(i);

    s.setup(i);

    // only extracellular potential stimuli are relevant for Laplace runs
    if(s.phys.type == Phi_ex) {
      s.pulse.wform = constPulse;
      sample_wave_form(s.pulse, i);
    }
  }

  set_dir(OUTPUT);

  ellip_solver.init();

  // optionally dump the elliptic system matrix for external inspection
  if(param_globals::dump2MatLab) {
    std::string bsname = param_globals::dump_basename;
    std::string fn;

    set_dir(OUTPUT);
    fn = bsname + "_Kie.bin";
    ellip_solver.phie_mat->write(fn.c_str());
  }

  // the laplace solver executes only once, thus we need a singlestep timer
  timer_idx = user_globals::tm_manager->add_singlestep_timer(0.0, 0.0, "laplace trigger", nullptr);

  // optional node restrictions for the output vectors
  SF::vector<mesh_int_t>* restr_i = NULL;
  SF::vector<mesh_int_t>* restr_e = NULL;

  setup_dataout(param_globals::dataout_e, param_globals::dataout_e_vtx, extra_elec_msh,
                restr_e, param_globals::num_io_nodes > 0);
  if(param_globals::dataout_e)
    output_manager.register_output(ellip_solver.phie, extra_elec_msh, 1, param_globals::phiefile, "mV", restr_e);

  setup_dataout(param_globals::dataout_i, param_globals::dataout_i_vtx, intra_elec_msh,
                restr_i, param_globals::num_io_nodes > 0);
  if(param_globals::dataout_i)
    output_manager.register_output(ellip_solver.phie_i, intra_elec_msh, 1, param_globals::phieifile, "mV", restr_i);
  }

  this->initialize_time += timing(t2, t1);
  // Laplace runs once: solve immediately after initialization
  this->compute_step();
}
2269 
// Nothing to tear down here: the elliptic matrices (and, presumably, the
// linear solver — TODO confirm against the full source) are already freed at
// the end of compute_step().
2270 void Laplace::destroy()
2271 {}
2272 
// Solve the Laplace problem once and release the elliptic system afterwards.
//
// NOTE(review): this Doxygen-rendered listing omits original lines 2275,
// 2283, 2288, 2291-2292 and 2298 — in particular the elliptic solve call
// itself (between get_time(t0) and the timing) is not visible here.
2273 void Laplace::compute_step()
2274 {
2276  // Laplace compute might be called multiple times, we want to run only once..
// presumably lin_solver is NULLed after the first run (orig. line 2298,
// not visible here) — TODO confirm against the full source
2277  if(!ellip_solver.lin_solver) return;
2278 
2279  double t0, t1, dur;
2280  log_msg(0,0,0, "Solving Laplace problem ..");
2281 
2282  get_time(t0);
// (the actual solve happens here; the line is missing from this listing)
2284  dur = timing(t1,t0);
2285 
2286  log_msg(0,0,0, "Done in %.5f seconds.", dur);
2287 
// account solve time and switch to the output directory for writing results
2289  this->compute_time += timing(t1, t0);
2290  set_dir(OUTPUT);
2293 
2294  // we clear the elliptic matrices and solver to save some memory when computing
2295  // the laplace solution on-the-fly
2296  delete ellip_solver.mass_e; ellip_solver.mass_e = NULL;
2297  delete ellip_solver.phie_mat; ellip_solver.phie_mat = NULL;
2299 }
2300 
// No periodic output: the Laplace problem is solved only once (see
// compute_step()), so this per-timestep hook is intentionally empty.
2301 void Laplace::output_step()
2302 {}
2303 
2304 double Laplace::timer_val(const int timer_id)
2305 {
2306  int sidx = stimidx_from_timeridx(stimuli, timer_id);
2307  double val = 0.0;
2308 
2309  if(sidx != -1) stimuli[sidx].value(val);
2310  else val = std::nan("NaN");
2311  return val;
2312 }
2313 
2314 std::string Laplace::timer_unit(const int timer_id)
2315 {
2316  int sidx = stimidx_from_timeridx(stimuli, timer_id);
2317  std::string s_unit;
2318  if(sidx != -1) s_unit = stimuli[sidx].pulse.wave.f_unit;
2319  return s_unit;
2320 }
2321 
// Scale stimulus strengths so that a prescribed *total* current is delivered,
// independent of the (mesh-dependent) size of the electrode.
//
// NOTE(review): the signature line of this function (orig. line 2322,
// constant_total_stimulus_current(SF::vector<stimulus>& stimuli, ...)) and
// orig. line 2328 are missing from this Doxygen-rendered listing.
2323  sf_mat & mass_i,
2324  sf_mat & mass_e,
2325  limpet::MULTI_IF *miif,
2326  FILE_SPEC logger)
2327 {
2329 
// only current-type stimuli with the total_current flag set are rescaled
2330  for(stimulus & s : stimuli) {
2331  if(is_current(s.phys.type) && s.phys.total_current) {
2332  // extracellular current injection
2333  if (s.phys.type == I_ex) {
2334  // compute affected volume in um^3
2335  SF_real vol = get_volume_from_nodes(mass_e, s.electrode.vertices);
2336 
2337  // s->strength holds the total current in uA, compute current density in uA/cm^3
2338  // Theoretically, we don't need to scale the volume to cm^3 here since we later
2339  // multiply with the mass matrix and we get um^3 * uA/um^3 = uA.
2340  // However, for I_ex there is an additional um^3 to cm^3 scaling in phys.scale,
2341  // since I_e is expected to be in uA/cm^3. Therefore, we need to compensate for that to arrive at uA later.
2342  float scale = 1.e12/vol;
2343 
2344  s.pulse.strength *= scale;
2345 
2346  log_msg(logger,0,ECHO,
2347  "%s [Stimulus %d]: current density scaled to %.4g uA/cm^3\n",
2348  s.name.c_str(), s.idx, s.pulse.strength);
2349  }
// transmembrane current injection: convert the total current into a
// density per membrane surface area
2350  else if (s.phys.type == I_tm) {
2351  // compute affected volume in um^3
2352  SF_real vol = get_volume_from_nodes(mass_i, s.electrode.vertices);
2353  const sf_mesh & imesh = get_mesh(intra_elec_msh);
2354  const SF::vector<mesh_int_t> & alg_nod = imesh.pl.algebraic_nodes();
2355 
// lazily build the map from global algebraic node index to local index
// (alg_idx_map is presumably a file-scope cache — its declaration is
// not visible in this listing)
2356  if(alg_idx_map.size() == 0) {
2357  mesh_int_t lidx = 0;
2358  for(mesh_int_t n : alg_nod) {
2359  alg_idx_map[n] = lidx;
2360  lidx++;
2361  }
2362  }
2363 
// derive the membrane surface from the first electrode node local to
// this rank; the loop breaks after one node, so that node's imp region
// properties (SVratio, volFrac) are assumed representative for the
// whole electrode — TODO confirm
2364  SF_real surf = 0.0;
2365  for(mesh_int_t n : s.electrode.vertices) {
2366  if(alg_idx_map.count(n)) {
2367  mesh_int_t lidx = alg_idx_map[n];
2368  int r = miif->IIFmask[lidx];
2369  // surf = vol*beta [1/um], surf is in [um^2]
2370  surf = vol * miif->IIF[r]->cgeom().SVratio * param_globals::imp_region[r].volFrac;
2371  //convert to cm^2
2372  surf /= 1.e8;
2373  break;
2374  }
2375  }
// ranks without local electrode nodes contribute surf == 0.0, so a
// MPI_MAX reduction distributes the one computed value to all ranks
2376  surf = get_global(surf, MPI_MAX, PETSC_COMM_WORLD);
2377 
2378  // scale surface density now to result in correct total current
2379  s.pulse.strength /= surf;
2380  log_msg(logger, 0, ECHO,
2381  "%s [Stimulus %d]: current density scaled to %.4g uA/cm^2\n",
2382  s.name.c_str(), s.idx, s.pulse.strength);
2383  }
2384  }
2385  }
2386 }
2387 
2388 
2389 
2390 } // namespace opencarp
#define M_PI
Definition: ION_IF.h:52
int mesh_int_t
Definition: SF_container.h:46
double SF_real
Use the general double as real type.
Definition: SF_globals.h:38
std::int32_t SF_int
Use the general std::int32_t as int type.
Definition: SF_globals.h:37
#define ECHO
Definition: basics.h:308
#define NONL
Definition: basics.h:312
#define CALI_CXX_MARK_FUNCTION
Definition: caliper_hooks.h:6
virtual void mult(const abstract_vector< T, S > &x, abstract_vector< T, S > &b) const =0
virtual void scale(S s)=0
virtual void zero()=0
virtual void get_diagonal(abstract_vector< T, S > &vec) const =0
virtual void mult_LR(const abstract_vector< T, S > &L, const abstract_vector< T, S > &R)=0
virtual void init(T iNRows, T iNCols, T ilrows, T ilcols, T loc_offset, T mxent)
virtual void duplicate(const abstract_matrix< T, S > &M)=0
virtual void add_scaled_matrix(const abstract_matrix< T, S > &A, const S s, const bool same_nnz)=0
virtual void write(const char *filename) const =0
size_t read_ascii(FILE *fd)
virtual S mag() const =0
virtual S * ptr()=0
virtual void release_ptr(S *&p)=0
virtual void deep_copy(const abstract_vector< T, S > &v)=0
virtual void shallow_copy(const abstract_vector< T, S > &v)=0
virtual void add_scaled(const abstract_vector< T, S > &vec, S k)=0
virtual T lsize() const =0
virtual void set(const vector< T > &idx, const vector< S > &vals, const bool additive=false, const bool local=false)=0
const meshdata< mesh_int_t, mesh_real_t > * mesh
the connected mesh
T forward_map(T idx) const
Map one index from a to b.
Definition: SF_container.h:264
overlapping_layout< T > pl
nodal parallel layout
Definition: SF_container.h:429
vector< T > con
Definition: SF_container.h:412
size_t g_numelem
global number of elements
Definition: SF_container.h:398
MPI_Comm comm
the parallel mesh is defined on a MPI world
Definition: SF_container.h:404
vector< T > & get_numbering(SF_nbr nbr_type)
Get the vector defining a certain numbering.
Definition: SF_container.h:464
Container for a PETSc VecScatter.
void forward(abstract_vector< T, S > &in, abstract_vector< T, S > &out, bool add=false)
Forward scattering.
void backward(abstract_vector< T, S > &in, abstract_vector< T, S > &out, bool add=false)
Backward scattering.
size_t size() const
The current size of the vector.
Definition: SF_vector.h:104
void resize(size_t n)
Resize a vector.
Definition: SF_vector.h:209
const T * end() const
Pointer to the vector's end.
Definition: SF_vector.h:128
void assign(InputIterator s, InputIterator e)
Assign a memory range.
Definition: SF_vector.h:161
const T * begin() const
Pointer to the vector's start.
Definition: SF_vector.h:116
T * data()
Pointer to the vector's start.
Definition: SF_vector.h:91
T & push_back(T val)
Definition: SF_vector.h:283
hm_int count(const K &key) const
Check if key exists.
Definition: hashmap.hpp:579
size_t size() const
Definition: hashmap.hpp:687
int numNode
local number of nodes
Definition: MULTI_ION_IF.h:210
std::vector< IonIfBase * > IIF
array of IIF's
Definition: MULTI_ION_IF.h:202
opencarp::sf_vec * gdata[NUM_IMP_DATA_TYPES]
data used by all IMPs
Definition: MULTI_ION_IF.h:216
void dump_state(char *, float, opencarp::mesh_t gid, bool, unsigned int)
GlobalData_t *** ldata
data local to each IMP
Definition: MULTI_ION_IF.h:205
int N_IIF
how many different IIF's
Definition: MULTI_ION_IF.h:211
int * N_Nodes
#nodes for each IMP
Definition: MULTI_ION_IF.h:200
int ** NodeLists
local partitioned node lists for each IMP stored
Definition: MULTI_ION_IF.h:201
IIF_Mask_t * IIFmask
region for each node
Definition: MULTI_ION_IF.h:214
int timer_idx
the timer index received from the timer manager
Definition: physics_types.h:66
FILE_SPEC logger
The logger of the physic, each physic should have one.
Definition: physics_types.h:64
SF::vector< stimulus > stimuli
the electrical stimuli
Definition: electrics.h:263
LAT_detector lat
the activation time detector
Definition: electrics.h:275
grid_t
An electrics grid identifier to distinguish between intra and extra grids.
Definition: electrics.h:257
phie_recovery_data phie_rcv
struct holding helper data for phie recovery
Definition: electrics.h:284
generic_timing_stats IO_stats
Definition: electrics.h:286
void destroy()
Currently we only need to close the file logger.
Definition: electrics.cc:389
gvec_data gvec
datastruct holding global IMP state variable output
Definition: electrics.h:278
elliptic_solver ellip_solver
Solver for the elliptic bidomain equation.
Definition: electrics.h:270
MaterialType mtype[2]
the material types of intra_grid and extra_grid grids.
Definition: electrics.h:261
std::string timer_unit(const int timer_id)
figure out units of a signal linked to a given timer
Definition: electrics.cc:786
parabolic_solver parab_solver
Solver for the parabolic bidomain equation.
Definition: electrics.h:272
double timer_val(const int timer_id)
figure out current value of a signal linked to a given timer
Definition: electrics.cc:770
void initialize()
Initialize the Electrics.
Definition: electrics.cc:43
igb_output_manager output_manager
class handling the igb output
Definition: electrics.h:281
int read(bool quiet=false)
Definition: IGBheader.cc:761
void fileptr(gzFile f)
Definition: IGBheader.cc:336
limpet::MULTI_IF * miif
Definition: ionics.h:66
void compute_step()
Definition: ionics.cc:35
void initialize()
Definition: ionics.cc:60
void destroy()
Definition: ionics.cc:52
SF::index_mapping< mesh_int_t > petsc_to_nodal
Definition: electrics.h:220
int check_quiescence(double tm, double dt)
check for quiescence
Definition: electrics.cc:1725
void output_initial_activations()
output one nodal vector of initial activation time
Definition: electrics.cc:1834
void init(sf_vec &vm, sf_vec &phie, int offset, enum physic_t=elec_phys)
initializes all datastructs after electric solver setup
Definition: electrics.cc:1550
int check_acts(double tm)
check activations at sim time tm
Definition: electrics.cc:1670
SF::vector< Activation > acts
Definition: electrics.h:219
LAT_detector()
constructor, sets up basic datastructs from global_params
Definition: electrics.cc:1422
SF::vector< stimulus > stimuli
the electrical stimuli
Definition: electrics.h:390
elliptic_solver ellip_solver
Solver for the elliptic bidomain equation.
Definition: electrics.h:393
double timer_val(const int timer_id)
figure out current value of a signal linked to a given timer
Definition: electrics.cc:2248
std::string timer_unit(const int timer_id)
figure out units of a signal linked to a given timer
Definition: electrics.cc:2258
MaterialType mtype[2]
the material types of intra_grid and extra_grid grids.
Definition: electrics.h:388
igb_output_manager output_manager
class handling the igb output
Definition: electrics.h:395
manager for dirichlet boundary conditions
Definition: stimulate.h:205
void enforce_dbc_rhs(sf_vec &rhs)
Definition: stimulate.cc:675
void recompute_dbcs()
recompute the dbc data.
Definition: stimulate.cc:596
bool dbc_update()
check if dbcs have updated
Definition: stimulate.cc:635
sf_mat * phie_mat
lhs matrix to solve elliptic
Definition: electrics.h:54
void rebuild_stiffness(MaterialType *mtype, SF::vector< stimulus > &stimuli, FILE_SPEC logger)
Definition: electrics.cc:972
void rebuild_matrices(MaterialType *mtype, SF::vector< stimulus > &stimuli, FILE_SPEC logger)
Definition: electrics.cc:960
lin_solver_stats stats
Definition: electrics.h:60
sf_vec * phie_i
phi_e on intracellular grid
Definition: electrics.h:49
void solve(sf_mat &Ki, sf_vec &Vmv, sf_vec &tmp_i)
Definition: electrics.cc:1077
sf_vec * phie
phi_e
Definition: electrics.h:48
sf_sol * lin_solver
petsc or ginkgo lin_solver
Definition: electrics.h:57
sf_mat * mass_e
mass matrix for RHS elliptic calc
Definition: electrics.h:53
double tol
CG stopping tolerance.
Definition: electrics.h:67
sf_vec * currtmp
temp vector for phiesrc
Definition: electrics.h:51
dbc_manager * dbc
dbcs require a dbc manager
Definition: electrics.h:63
int max_it
maximum number of iterations
Definition: electrics.h:68
sf_vec * phiesrc
I_e.
Definition: electrics.h:50
void rebuild_mass(FILE_SPEC logger)
Definition: electrics.cc:1033
void write_data()
write registered data to disk
Definition: sim_utils.cc:1494
void register_output_sync(sf_vec *inp_data, const mesh_t inp_meshid, const int dpn, const char *name, const char *units, const SF::vector< mesh_int_t > *idx=NULL, bool elem_data=false)
Definition: sim_utils.cc:1330
void close_files_and_cleanup()
close file descriptors
Definition: sim_utils.cc:1541
void register_output(sf_vec *inp_data, const mesh_t inp_meshid, const int dpn, const char *name, const char *units, const SF::vector< mesh_int_t > *idx=NULL, bool elem_data=false)
Register a data vector for output.
Definition: sim_utils.cc:1461
sf_vec * Ivol
global Vm vector
Definition: electrics.h:107
double tol
CG stopping tolerance.
Definition: electrics.h:132
sf_vec * Iact
global Vm vector
Definition: electrics.h:108
sf_vec * Diff_term
Diffusion current.
Definition: electrics.h:123
sf_mat * rhs_parab
rhs matrix to solve parabolic
Definition: electrics.h:119
sf_vec * kappa_i
scaling vector for intracellular mass matrix, M
Definition: electrics.h:111
lin_solver_stats stats
Definition: electrics.h:129
void rebuild_matrices(MaterialType *mtype, limpet::MULTI_IF &miif, FILE_SPEC logger)
Definition: electrics.cc:1202
parabolic_t parab_tech
manner in which parabolic equations are solved
Definition: electrics.h:134
void solve(sf_vec &phie_i)
Definition: electrics.cc:1315
sf_vec * inv_mass_diag
inverse diagonal of mass matrix, for EXPLICIT solving
Definition: electrics.h:115
sf_mat * mass_i
lumped for parabolic problem
Definition: electrics.h:118
sf_vec * Ic
global Vm vector
Definition: electrics.h:106
sf_vec * tmp_i2
scratch vector for i-grid
Definition: electrics.h:113
int max_it
maximum number of iterations
Definition: electrics.h:133
sf_vec * tmp_i1
scratch vector for i-grid
Definition: electrics.h:112
sf_mat * lhs_parab
lhs matrix (CN) to solve parabolic
Definition: electrics.h:120
sf_vec * Vmv
global Vm vector
Definition: electrics.h:104
sf_vec * Irhs
weighted transmembrane currents
Definition: electrics.h:114
sf_vec * old_vm
older Vm needed for 2nd order dT
Definition: electrics.h:110
sf_sol * lin_solver
petsc or ginkgo lin_solver
Definition: electrics.h:126
sf_vec * IIon
ionic currents
Definition: electrics.h:103
SF::vector< mesh_int_t > vertices
Definition: stimulate.h:152
bool total_current
whether we apply total current scaling
Definition: stimulate.h:140
stim_t type
type of stimulus
Definition: stimulate.h:137
int timer_id
timer for stimulus
Definition: stimulate.h:122
waveform_t wform
wave form of stimulus
Definition: stimulate.h:95
double strength
strength of stimulus
Definition: stimulate.h:93
stim_protocol ptcl
applied stimulation protocol used
Definition: stimulate.h:168
int idx
index in global input stimulus array
Definition: stimulate.h:164
stim_electrode electrode
electrode geometry
Definition: stimulate.h:170
stim_pulse pulse
stimulus wave form
Definition: stimulate.h:167
void translate(int id)
convert legacy definitions to new format
Definition: stimulate.cc:105
void setup(int idx)
Setup from a param stimulus index.
Definition: stimulate.cc:166
stim_physics phys
physics of stimulus
Definition: stimulate.h:169
bool value(double &v) const
Get the current value if the stimulus is active.
Definition: stimulate.cc:437
long d_time
current time instance index
Definition: timer_utils.h:77
double time_step
global reference time step
Definition: timer_utils.h:78
int add_eq_timer(double istart, double iend, int ntrig, double iintv, double idur, const char *iname, const char *poolname=nullptr)
Add a equidistant step timer to the array of timers.
Definition: timer_utils.cc:78
int add_singlestep_timer(double tg, double idur, const char *iname, const char *poolname=nullptr)
Definition: timer_utils.h:143
long d_end
final index in multiples of dt
Definition: timer_utils.h:82
std::vector< base_timer * > timers
vector containing individual timers
Definition: timer_utils.h:84
double time
current time
Definition: timer_utils.h:76
Tissue level electrics, main Electrics physics class.
#define DUMP_IC
Definition: electrics.h:39
#define DUMP_IACT
Definition: electrics.h:41
#define DUMP_IVOL
Definition: electrics.h:40
void init_solver(SF::abstract_linear_solver< T, S > **sol)
Definition: SF_init.h:220
void compute_surface_mesh(const meshdata< T, S > &mesh, const SF_nbr numbering, const hashmap::unordered_set< T > &tags, meshdata< T, S > &surfmesh)
Compute the surface of a given mesh.
void read_points(const std::string basename, const MPI_Comm comm, vector< S > &pts, vector< T > &ptsidx)
Read the points and insert them into a list of meshes.
Definition: SF_mesh_io.h:824
void make_global(const vector< T > &vec, vector< T > &out, MPI_Comm comm)
make a parallel vector global
Definition: SF_network.h:225
void unique_resize(vector< T > &_P)
Definition: SF_sort.h:348
void assemble_matrix(abstract_matrix< T, S > &mat, meshdata< mesh_int_t, mesh_real_t > &domain, matrix_integrator< mesh_int_t, mesh_real_t > &integrator)
Generalized matrix assembly.
int max_nodal_edgecount(const meshdata< T, S > &mesh)
Compute the maximum number of node-to-node edges for a mesh.
Definition: SF_container.h:608
void local_petsc_to_nodal_mapping(const meshdata< T, S > &mesh, index_mapping< T > &petsc_to_nodal)
T local_nodal_to_local_petsc(const meshdata< T, S > &mesh, int rank, T local_nodal)
size_t root_count_ascii_lines(std::string file, MPI_Comm comm)
count the lines in a ascii file
void assemble_lumped_matrix(abstract_matrix< T, S > &mat, meshdata< mesh_int_t, mesh_real_t > &domain, matrix_integrator< mesh_int_t, mesh_real_t > &integrator)
bool is_init(const abstract_vector< T, S > *v)
void layout_from_count(const T count, vector< T > &layout, MPI_Comm comm)
Definition: SF_network.h:201
void init_vector(SF::abstract_vector< T, S > **vec)
Definition: SF_init.h:99
void binary_sort(vector< T > &_V)
Definition: SF_sort.h:284
void init_matrix(SF::abstract_matrix< T, S > **mat)
Definition: SF_init.h:199
@ NBR_PETSC
PETSc numbering of nodes.
Definition: SF_container.h:203
@ NBR_REF
The nodal numbering of the reference mesh (the one stored on HD).
Definition: SF_container.h:201
@ NBR_SUBMESH
Submesh nodal numbering: The globally ascending sorted reference indices are reindexed.
Definition: SF_container.h:202
void dup_IMP_node_state(IonIfBase &IF, int from, int to, GlobalData_t **localdata)
void dump_trace(MULTI_IF *MIIF, limpet::Real time)
void open_trace(MULTI_IF *MIIF, int n_traceNodes, int *traceNodes, int *label, opencarp::sf_mesh *imesh)
Set up ionic model traces at some global node numbers.
timer_manager * tm_manager
a manager for the various physics timers
Definition: main.cc:58
bool using_legacy_stimuli
flag storing whether legacy stimuli are used
Definition: main.cc:64
void get_kappa(sf_vec &kappa, IMPregion *ir, limpet::MULTI_IF &miif, double k)
compute the vector
Definition: electrics.cc:828
physic_t
Identifier for the different physics we want to set up.
Definition: physics_types.h:51
int stimidx_from_timeridx(const SF::vector< stimulus > &stimuli, const int timer_id)
determine link between timer and stimulus
Definition: electrics.cc:800
@ iotm_chkpt_list
Definition: timer_utils.h:44
@ iotm_console
Definition: timer_utils.h:44
@ iotm_spacedt
Definition: timer_utils.h:44
@ iotm_trace
Definition: timer_utils.h:44
@ iotm_chkpt_intv
Definition: timer_utils.h:44
sf_vec * get_data(datavec_t d)
Retrieve a petsc data vector from the data registry.
Definition: sim_utils.cc:887
SF::scattering * get_scattering(const int from, const int to, const SF::SF_nbr nbr, const int dpn)
Get a scattering from the global scatter registry.
void set_cond_type(MaterialType &m, cond_t type)
Definition: electrics.cc:852
void sample_wave_form(stim_pulse &sp, int idx)
sample a signal given in analytic form
Definition: stimulate.cc:337
SF_real get_volume_from_nodes(sf_mat &mass, SF::vector< mesh_int_t > &local_idx)
Definition: fem_utils.cc:217
sf_mesh & get_mesh(const mesh_t gt)
Get a mesh by specifying the gridID.
Definition: sf_interface.cc:33
SF::scattering * register_scattering(const int from, const int to, const SF::SF_nbr nbr, const int dpn)
Register a scattering between to grids, or between algebraic and nodal representation of data on the ...
Definition: sf_interface.cc:69
cond_t
description of electrical tissue properties
Definition: fem_types.h:42
@ sum_cond
Definition: fem_types.h:43
@ intra_cond
Definition: fem_types.h:43
@ para_cond
Definition: fem_types.h:43
void print_act_log(FILE_SPEC logger, const SF::vector< Activation > &acts, int idx)
Definition: electrics.cc:1525
SF::scattering * get_permutation(const int mesh_id, const int perm_id, const int dpn)
Get the PETSC to canonical permutation scattering for a given mesh and number of dpn.
bool is_dbc(stim_t type)
whether stimulus is a dirichlet type. implies boundary conditions on matrix
Definition: stimulate.cc:77
SF::meshdata< mesh_int_t, mesh_real_t > sf_mesh
Definition: sf_interface.h:48
@ constPulse
Definition: stimulate.h:74
void compute_restr_idx_async(sf_mesh &mesh, SF::vector< mesh_int_t > &inp_idx, SF::vector< mesh_int_t > &idx)
Definition: electrics.cc:596
void apply_stim_to_vector(const stimulus &s, sf_vec &vec, bool add)
Definition: electrics.cc:471
void recover_phie_std(sf_vec &vm, phie_recovery_data &rcv)
Definition: electrics.cc:1949
int set_dir(IO_t dest)
Definition: sim_utils.cc:490
@ ACT_THRESH
Definition: electrics.h:172
int get_rank(MPI_Comm comm=PETSC_COMM_WORLD)
Definition: basics.h:276
T get_global(T in, MPI_Op OP, MPI_Comm comm=PETSC_COMM_WORLD)
Do a global reduction on a variable.
Definition: basics.h:233
V dist(const vec3< V > &p1, const vec3< V > &p2)
Definition: vect.h:114
@ Phi_ex
Definition: stimulate.h:78
@ Vm_clmp
Definition: stimulate.h:78
const char * get_mesh_type_name(mesh_t t)
get a char* to the name of a mesh type
Definition: sf_interface.cc:46
void region_mask(mesh_t meshspec, SF::vector< RegionSpecs > &regspec, SF::vector< int > &regionIDs, bool mask_elem, const char *reglist)
classify elements/points as belonging to a region
Definition: ionics.cc:363
void init_stim_info(void)
uses potential for stimulation
Definition: stimulate.cc:49
int output_all_activations(FILE_SPEC fp, int *ibuf, double *act_tbuf, int nlacts)
Definition: electrics.cc:1624
FILE_SPEC f_open(const char *fname, const char *mode)
Open a FILE_SPEC.
Definition: basics.cc:135
void savequit()
save state and quit simulator
Definition: sim_utils.cc:1660
bool have_dbc_stims(const SF::vector< stimulus > &stimuli)
return whether any stimuli require dirichlet boundary conditions
Definition: electrics.cc:875
SF::scattering * register_permutation(const int mesh_id, const int perm_id, const int dpn)
Register a permutation between two orderings for a mesh.
bool is_current(stim_t type)
uses current as stimulation
Definition: stimulate.cc:72
void setup_dataout(const int dataout, std::string dataout_vtx, mesh_t grid, SF::vector< mesh_int_t > *&restr, bool async)
Definition: electrics.cc:631
char * get_file_dir(const char *file)
Definition: sim_utils.cc:1289
@ POSTPROC
Definition: sim_utils.h:53
@ CURDIR
Definition: sim_utils.h:53
@ OUTPUT
Definition: sim_utils.h:53
void init_sv_gvec(gvec_data &GVs, limpet::MULTI_IF *miif, sf_vec &tmpl, igb_output_manager &output_manager)
Definition: ionics.cc:567
void assemble_sv_gvec(gvec_data &gvecs, limpet::MULTI_IF *miif)
Definition: ionics.cc:638
void constant_total_stimulus_current(SF::vector< stimulus > &stimuli, sf_mat &mass_i, sf_mat &mass_e, limpet::MULTI_IF *miif, FILE_SPEC logger)
Scales stimulus current to maintain constant total current across affected regions.
Definition: electrics.cc:2266
int postproc_recover_phie()
Definition: electrics.cc:2016
char * dupstr(const char *old_str)
Definition: basics.cc:44
void balance_electrode(elliptic_solver &ellip, SF::vector< stimulus > &stimuli, int balance_from, int balance_to)
Definition: electrics.cc:405
void set_elec_tissue_properties(MaterialType *mtype, Electrics::grid_t g, FILE_SPEC logger)
Fill the RegionSpec of an electrics grid with the associated inputs from the param parameters.
Definition: electrics.cc:108
void compute_restr_idx(sf_mesh &mesh, SF::vector< mesh_int_t > &inp_idx, SF::vector< mesh_int_t > &idx)
Definition: electrics.cc:563
void log_msg(FILE_SPEC out, int level, unsigned char flag, const char *fmt,...)
Definition: basics.cc:72
mesh_t
The enum identifying the different meshes we might want to load.
Definition: sf_interface.h:59
@ extra_elec_msh
Definition: sf_interface.h:61
@ phie_recv_msh
Definition: sf_interface.h:70
@ intra_elec_msh
Definition: sf_interface.h:60
void get_time(double &tm)
Definition: basics.h:436
bool mesh_is_registered(const mesh_t gt)
check wheter a SF mesh is set
Definition: sf_interface.cc:63
void setup_phie_recovery_data(phie_recovery_data &data)
Definition: electrics.cc:2099
SF::abstract_vector< SF_int, SF_real > sf_vec
Definition: sf_interface.h:50
int get_size(MPI_Comm comm=PETSC_COMM_WORLD)
Definition: basics.h:290
Basic_physic * get_physics(physic_t p, bool error_if_missing)
Convenience function to get a physics.
Definition: sim_utils.cc:871
const char * get_tsav_ext(double time)
Definition: electrics.cc:884
SF::abstract_matrix< SF_int, SF_real > sf_mat
Definition: sf_interface.h:52
void compute_IIF(limpet::IonIfBase &pIF, limpet::GlobalData_t **impdata, int n)
Definition: ionics.cc:465
V timing(V &t2, const V &t1)
Definition: basics.h:448
void read_indices(SF::vector< T > &idx, const std::string filename, const hashmap::unordered_map< mesh_int_t, mesh_int_t > &dd_map, MPI_Comm comm)
Read indices from a file.
Definition: fem_utils.h:120
void update_cwd()
save the current working directory to curdir so that we can switch back to it if needed.
Definition: sim_utils.cc:485
void f_close(FILE_SPEC &f)
Close a FILE_SPEC.
Definition: basics.cc:162
@ ElecMat
Definition: fem_types.h:39
vec3< POINT_REAL > Point
Definition: vect.h:93
file_desc * FILE_SPEC
Definition: basics.h:138
#define UM2_to_CM2
convert um^2 to cm^2
Definition: physics_types.h:35
#define PETSC_TO_CANONICAL
Permute algebraic data from PETSC to canonical ordering.
Definition: sf_interface.h:79
#define ALG_TO_NODAL
Scatter algebraic to nodal.
Definition: sf_interface.h:77
#define ELEM_PETSC_TO_CANONICAL
Permute algebraic element data from PETSC to canonical ordering.
Definition: sf_interface.h:81
#define DATAOUT_SURF
Definition: sim_utils.h:58
#define BIDOMAIN
Definition: sim_utils.h:145
#define DATAOUT_VOL
Definition: sim_utils.h:59
#define MONODOMAIN
Definition: sim_utils.h:144
#define EXP_POSTPROCESS
Definition: sim_utils.h:162
#define DATAOUT_NONE
Definition: sim_utils.h:57
#define PSEUDO_BIDM
Definition: sim_utils.h:146
#define DATAOUT_VTX
Definition: sim_utils.h:60
Electrical stimulation functions.
SF_int niter
number of iterations
SF_int reason
number of iterations
std::string name
the solver name
virtual void setup_solver(abstract_matrix< T, S > &mat, double tol, int max_it, short norm, std::string name, bool has_nullspace, void *logger, const char *solver_opts_file, const char *default_opts)=0
event detection data structures
Definition: electrics.h:175
description of material properties in a mesh
Definition: fem_types.h:121
SF::vector< RegionSpecs > regions
array with region params
Definition: fem_types.h:126
SF::vector< double > el_scale
optionally provided per-element params scale
Definition: fem_types.h:127
region based variations of arbitrary material parameters
Definition: fem_types.h:93
physMaterial * material
material parameter description
Definition: fem_types.h:98
int nsubregs
#subregions forming this region
Definition: fem_types.h:96
int * subregtags
FEM tags forming this region.
Definition: fem_types.h:97
char * regname
name of region
Definition: fem_types.h:94
int regID
region ID
Definition: fem_types.h:95
bool activated
flag sentinel activation
Definition: electrics.h:199
int ID
ID of LAT detector used as sentinel.
Definition: electrics.h:203
double t_start
start of observation window
Definition: electrics.h:200
double t_window
duration of observation window
Definition: electrics.h:201
double t_quiesc
measure current duration of quiescence
Definition: electrics.h:202
double ExVal[3]
extracellular conductivity eigenvalues
Definition: fem_types.h:62
cond_t g
rule to build conductivity tensor
Definition: fem_types.h:64
double InVal[3]
intracellular conductivity eigenvalues
Definition: fem_types.h:61
double BathVal[3]
bath conductivity eigenvalues
Definition: fem_types.h:63
File descriptor struct.
Definition: basics.h:133
void log_stats(double tm, bool cflg)
Definition: timers.cc:93
void init_logger(const char *filename)
Definition: timers.cc:77
int calls
# calls for this interval, this is incremented externally
Definition: timers.h:70
double tot_time
total time, this is incremented externally
Definition: timers.h:72
void init_logger(const char *filename)
Definition: timers.cc:11
void log_stats(double tm, bool cflg)
Definition: timers.cc:27
void update_iter(const int curiter)
Definition: timers.cc:69
double slvtime
total solver time
Definition: timers.h:21
sf_vec * phie_rec
The phie recovery output vector buffer.
Definition: electrics.h:242
SF::vector< mesh_real_t > pts
The phie recovery locations.
Definition: electrics.h:241
physMat_t material_type
ID of physics material.
Definition: fem_types.h:53