  41  static char input_dir[1024], output_dir[1024], postproc_dir[1024], current_dir[1024];
  52    for(int i=0; i < cpy_argc; i++) {
  53      if(strcmp(argv[i], "+") != 0) {
  54        cpy_argv[i] = dupstr(argv[i]);

  63    int param_argc = cpy_argc;

  67      status = param(PARAMETERS, &param_argc, cpy_argv.data());
  68      if ( status==PrMERROR||status==PrMFATAL )
  69        fprintf( stderr, "\n*** Error reading parameters\n\n");
  70      else if ( status == PrMQUIT )
  71        fprintf( stderr, "\n*** Quitting by user's request\n\n");
  72    } while (status == PrMERROR);

  76    if (param_globals::output_setup)
  77      param_output(PARAMETERS, 1);

  80    for(int i=0; i<param_argc; i++)

  84    if (status & PrMERROR) exit(status);
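The fragment above copies argv with dupstr() before handing it to param() and retries while the parser reports PrMERROR. A minimal sketch of that copy-then-retry pattern, assuming a hypothetical parse_cli() stand-in (not the openCARP param() API):

// Minimal sketch, assuming a parser that may modify the argv copy it is given.
// parse_cli() and its return codes are hypothetical stand-ins for param()/PrM*.
#include <cstdlib>
#include <cstring>
#include <vector>

enum parse_status { PARSE_OK, PARSE_RETRY, PARSE_FATAL };

static parse_status parse_cli(int* argc, char** argv) {
  (void)argc; (void)argv;
  return PARSE_OK;                     // stub: always succeeds
}

static void parse_params_copy(int argc, char** argv) {
  std::vector<char*> cpy(argc);
  for (int i = 0; i < argc; i++)
    cpy[i] = strdup(argv[i]);          // keep the caller's argv untouched

  int cpy_argc = argc;
  parse_status s;
  do {                                 // retry while the error is recoverable
    s = parse_cli(&cpy_argc, cpy.data());
  } while (s == PARSE_RETRY);

  for (char* p : cpy) free(p);
}

int main(int argc, char** argv) { parse_params_copy(argc, argv); return 0; }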
 105   log_msg(0,0,0, "\n *** Initializing physics ***\n");

 109   for (int ii = 0; ii < param_globals::num_external_imp; ii++) {

 111     assert(loading_succeeded);

 114   if(param_globals::num_external_imp)
 115     log_msg(NULL, 4, ECHO, "Loading of external LIMPET modules not enabled.\n"
 116                            "Recompile with DLOPEN set.\n" );

 122     log_msg(NULL, 0, 0, "Initializing %s ..", p->name);

 129   log_msg(0,0,0, "\n *** Destroying physics ***\n");

 157   for (int i=0; i<ns; i++ ) {

 165       log_msg( NULL, 1, 0, "Extracellular stimulus %d ignored for monodomain", i );

 168       log_msg( NULL, 1, 0, "Intracellular stimulus %d converted to transmembrane", i );
 204   if(param_globals::floating_ground)

 207   Stimulus* s = param_globals::stimulus;

 209   for(int i=0; i < param_globals::num_stim; i++) {

 217     log_msg( NULL, 4, 0, "Elliptic system is singular!\n"
 218              "Either set floating_ground=1 or use an explicit ground:voltage (stimulus[X].stimtype=3)\n"
 219              "Do not trust the elliptic solution of this simulation run!\n");

 224   printf("\n" "*** GIT tag: %s\n", GIT_COMMIT_TAG);
 225   printf("*** GIT hash: %s\n", GIT_COMMIT_HASH);
 226   printf("*** GIT repo: %s\n", GIT_PATH);
 227   printf("*** dependency commits: %s\n\n", SUBREPO_COMMITS);
233 param_globals::dt /= 1000.;
236 if(param_globals::mass_lumping == 0 && param_globals::parab_solve==0) {
238 "Warning: explicit solve not possible without mass lumping. \n"
239 "Switching to Crank-Nicolson!\n\n");
241 param_globals::parab_solve = 1;
245 if(!param_globals::extracell_monodomain_stim)
254 if(param_globals::t_sentinel > 0 && param_globals::sentinel_ID < 0 ) {
 255     log_msg(0,4,0, "Warning: t_sentinel is set but no sentinel_ID has been specified; check_quiescence() behavior may not be as expected");

 258   if(param_globals::num_external_imp > 0 ) {
 259     for(int ext_imp_i = 0; ext_imp_i < param_globals::num_external_imp; ext_imp_i++) {
 260       if(param_globals::external_imp[ext_imp_i][0] != '/') {
 261         log_msg(0,5,0, "external_imp[%d] error: absolute paths must be used for .so file loading (\'%s\')",
 262                 ext_imp_i, param_globals::external_imp[ext_imp_i]);
 268   if(param_globals::experiment == EXP_LAPLACE && param_globals::bidomain != 1) {
 269     log_msg(0,4,0, "Warning: Laplace experiment mode requires bidomain = 1. Setting bidomain = 1.");
 270     param_globals::bidomain = 1;

 273   if(param_globals::num_phys_regions == 0) {
 274     log_msg(0,4,0, "Warning: No physics region defined! Please set phys_region parameters to correctly define physics.");

 277       log_msg(0,4,0, "Intra-elec and Extra-elec domains will be derived from fibers.\n");
 278       param_globals::num_phys_regions = param_globals::bidomain ? 2 : 1;
 279       param_globals::phys_region = (p_region*) malloc(param_globals::num_phys_regions * sizeof(p_region));

 281       param_globals::phys_region[0].name = strdup("Autogenerated intracellular Electrics");
 282       param_globals::phys_region[0].num_IDs = 0;

 284       if(param_globals::bidomain) {

 286         param_globals::phys_region[1].name = strdup("Autogenerated extracellular Electrics");
 287         param_globals::phys_region[1].num_IDs = 0;

 290       log_msg(0,4,0, "Laplace domain will be derived from fibers.\n");
 291       param_globals::num_phys_regions = 1;
 292       param_globals::phys_region = (p_region*) malloc(param_globals::num_phys_regions * sizeof(p_region));

 294       param_globals::phys_region[0].name = strdup("Autogenerated Laplace");
 295       param_globals::phys_region[0].num_IDs = 0;
 300     log_msg(0,4,0, "Warning: Laplace experiment mode requires a Laplace physics region to be defined.");
 304       log_msg(0,4,0, "Converting the defined extracellular-electrics-region to laplace-region.");

 307       log_msg(0,4,0, "Converting the defined intracellular-electrics-region to laplace-region.");

 310       param_globals::num_phys_regions += 1;
 311       param_globals::phys_region = (p_region*) realloc(param_globals::phys_region, param_globals::num_phys_regions * sizeof(p_region));

 313       param_globals::phys_region[param_globals::num_phys_regions - 1].ptype = PHYSREG_LAPLACE;
 314       param_globals::phys_region[param_globals::num_phys_regions - 1].name = strdup("Autogenerated Laplace");
 315       param_globals::phys_region[param_globals::num_phys_regions - 1].num_IDs = 0;
319 #ifndef WITH_PARMETIS
320 if(param_globals::pstrat == 1) {
 321     log_msg(0,3,0, "openCARP was built without Parmetis support. Switching to KDtree.");
322 param_globals::pstrat = 2;
 327   bool legacy_stim_set = false, new_stim_set = false;

 329   for(int i=0; i<param_globals::num_stim; i++) {
330 Stimulus & legacy_stim = param_globals::stimulus[i];
331 Stim & new_stim = param_globals::stim[i];
333 if(legacy_stim.stimtype || legacy_stim.strength)
 334       legacy_stim_set = true;

 336     if(new_stim.crct.type || new_stim.pulse.strength)

 340   if(legacy_stim_set || new_stim_set) {
 341     if(legacy_stim_set && new_stim_set) {
 342       log_msg(0,4,0, "Warning: Legacy stimuli and default stimuli are defined. Only default stimuli will be used!");

 344     else if (legacy_stim_set) {
 345       log_msg(0,1,0, "Warning: Legacy stimuli defined. Please consider switching to stimulus definition \"stim[]\"!");

 350     log_msg(0,4,0, "Warning: No potential or current stimuli found!");
 356   int flg = 0, err = 0, rank = get_rank();
358 char *ptr = getcwd(current_dir, 1024);
359 if (ptr == NULL) err++;
360 ptr = getcwd(input_dir, 1024);
361 if (ptr == NULL) err++;
 367   if (strcmp(sim_ID, "OUTPUT_DIR")) {
368 if (mkdir(sim_ID, 0775)) {
369 if (errno == EEXIST ) {
 370         log_msg(NULL, 2, 0, "Output directory exists: %s\n", sim_ID);

 372         log_msg(NULL, 5, 0, "Unable to make output directory\n");

 376     } else if (mkdir(sim_ID, 0775) && errno != EEXIST) {
 377       log_msg(NULL, 5, 0, "Unable to make output directory\n");
385 err += chdir(sim_ID);
386 ptr = getcwd(output_dir, 1024);
387 if (ptr == NULL) err++;
392 err += chdir(output_dir);
 397     if (strcmp(param_globals::ppID, "POSTPROC_DIR")) {
398 if (mkdir(param_globals::ppID, 0775)) {
399 if (errno == EEXIST ) {
 400           log_msg(NULL, 2, ECHO, "Postprocessing directory exists: %s\n\n", param_globals::ppID);

 402           log_msg(NULL, 5, ECHO, "Unable to make postprocessing directory\n\n");

 406       } else if (mkdir(param_globals::ppID, 0775) && errno != EEXIST) {
 407         log_msg(NULL, 5, ECHO, "Unable to make postprocessing directory\n\n");
415 err += chdir(param_globals::ppID);
416 ptr = getcwd(postproc_dir, 1024);
417 if (ptr == NULL) err++;
418 err = chdir(output_dir);
 427   bool io_node = false;
430 if (param_globals::num_io_nodes > 0) {
 433       log_msg(NULL, 5, 0, "You cannot run with async IO on only one core.\n");
437 if (2 * param_globals::num_io_nodes >= psize) {
 438       log_msg(NULL, 5, 0, "The number of IO cores must be less than the number of compute cores.");
442 if (param_globals::num_PS_nodes && param_globals::num_io_nodes > param_globals::num_PS_nodes) {
444 "The number of IO cores (%d) should not "
445 "exceed the number of PS compute cores (%d).\n",
446 param_globals::num_io_nodes, param_globals::num_PS_nodes);
451 io_node = prank < param_globals::num_io_nodes;
 454     MPI_Comm_split(PETSC_COMM_WORLD, io_node, get_rank(), &comm);
 455     MPI_Comm_set_name(comm, io_node ? "IO" : "compute");
457 PETSC_COMM_WORLD = comm;
461 MPI_Intercomm_create(comm, 0, MPI_COMM_WORLD, io_node ? param_globals::num_io_nodes : 0,
 465         log_msg(NULL, 4, 0, "Global node %d, Comm rank %d != Intercomm rank %d\n",
 469     MPI_Comm_set_name(PETSC_COMM_WORLD, "compute");
473 if((io_node || !param_globals::num_io_nodes) && !prank)
480 getcwd(current_dir, 1024);
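For context, the IO/compute split above can be reproduced in isolation: the rank's membership acts as the MPI_Comm_split color and the world rank as the key. A minimal, self-contained sketch (the threshold of 2 IO ranks is purely illustrative, not an openCARP default):

// Minimal sketch of an IO/compute split with MPI_Comm_split.
// The "2 IO ranks" threshold is illustrative only.
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int world_rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  int num_io = 2;                           // illustrative
  int is_io  = world_rank < num_io;         // color: 1 = IO, 0 = compute

  MPI_Comm split;
  MPI_Comm_split(MPI_COMM_WORLD, is_io, world_rank, &split);
  MPI_Comm_set_name(split, is_io ? "IO" : "compute");

  int local_rank;
  MPI_Comm_rank(split, &local_rank);
  printf("world %d -> %s rank %d\n", world_rank, is_io ? "IO" : "compute", local_rank);

  MPI_Comm_free(&split);
  MPI_Finalize();
  return 0;
}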
 487   if      (dest == OUTPUT)   err = chdir(output_dir);
 488   else if (dest == POSTPROC) err = chdir(postproc_dir);
 489   else if (dest == CURDIR)   err = chdir(current_dir);
 490   else                       err = chdir(input_dir);
498 double start_time = 0.0;
501 double end_time = param_globals::tend;
515 if(param_globals::num_tsav) {
516 std::vector<double> trig(param_globals::num_tsav);
 517     for(size_t i=0; i<trig.size(); i++) trig[i] = param_globals::tsav[i];
522 if(param_globals::chkpt_intv)
 524                             param_globals::chkpt_intv, 0, iotm_chkpt_intv, "interval checkpointing");
526 if(param_globals::num_trace)
533 const short padding = 4;
 538   if(col_width[0] < int(strlen(buff)+padding))
 539     col_width[0] = strlen(buff)+padding;

 542   if(col_width[1] < int(strlen(buff)+padding))
 543     col_width[1] = strlen(buff)+padding;

 546   for (size_t tid = 0; tid < used_timer_ids.size(); tid++)

 548     int timer_id = used_timer_ids[tid];

 558     snprintf(buff, sizeof buff, "%.3lf", val);
 559     if(col_width[col] < int(strlen(buff)+padding))
 560       col_width[col] = strlen(buff)+padding;
 577   const char* smpl_endl = "\n";

 585     log_msg(0,5,0, "Protocol file %s could not be opened for writing!\n", fname);

 598   std::vector<std::string> col_labels       = {"time", "tick"};
 599   std::vector<std::string> col_short_labels = {"A", "B"};
 600   std::vector<std::string> col_unit_labels  = {"ms", "--"};
 601   std::vector<int>         col_width        = {4, 4};

 603   char c_label      = {'C'};
 604   std::string label = {""};
 605   std::string unit  = {""};
609 std::vector<int> used_timer_ids;
610 std::vector<int> used_stim_ids;
620 used_stim_ids.push_back(sidx);
 630   for (size_t tid = 0; tid < used_timer_ids.size(); tid++)

 632     int timer_id = used_timer_ids[tid];

 635     int llen = strlen(t->name);
 636     mx_llen = llen > mx_llen ? llen : mx_llen;

 639   for (size_t tid = 0; tid < used_timer_ids.size(); tid++)

 641     int timer_id = used_timer_ids[tid];

 644     col_labels.push_back(t->name);

 646     col_short_labels.push_back(label);

 651     if(unit.empty()) unit = "--";
652 col_unit_labels.push_back(unit);
653 col_width.push_back(4);
 661   fh << "# Protocol header\n#\n" << "# Legend:\n";

 662   for(size_t i = 0; i<col_short_labels.size(); i++)

 664     fh << "#" << std::setw(2) << col_short_labels[i] << " = " << std::setw(mx_llen) << col_labels[i];
 665     fh << " [" << std::setw(10) << col_unit_labels[i] << "]";

 667     if(i >= 2 && used_stim_ids[i-2] > -1) {

 672         fh << " ground stim" << smpl_endl;

 674         fh << " applied: " << std::to_string(s.pulse.strength) << smpl_endl;

 685   for(size_t i = 0; i<col_short_labels.size(); i++)
 686     fh << std::setw(col_width[i] - 3) << col_short_labels[i].c_str() << std::setw(3) << " ";

 689   fh << smpl_endl << "#";
 690   for(size_t i = 0; i<col_unit_labels.size(); i++)
 691     fh << "[" << std::setw(col_width[i]-2) << col_unit_labels[i].c_str() << "]";
694 fh << smpl_endl << std::fixed;
 702   for (size_t tid = 0; tid < used_timer_ids.size(); tid++)

 704     int timer_id = used_timer_ids[tid];
710 fh << std::setw(col_width[col]) << On;
718 fh << std::setw(col_width[col]) << std::setprecision(3) << val;
 740   const char* h1_prog = "PROG\t----- \t----\t-------\t-------|";
 741   const char* h2_prog = "time\t%%comp\ttime\t ctime \t ETA |";
 742   const char* h1_wc   = "\tELAPS |";
 743   const char* h2_wc   = "\twc |";

 748   log_msg(NULL, 0, 0, "%s", h1_prog );
756 int req_hours = ((int)(time)) / 3600;
757 int req_min = (((int)(time)) % 3600) / 60;
758 int req_sec = (((int)(time)) % 3600) % 60;
 760   snprintf(str, str_size, "%d:%02d:%02d", req_hours, req_min, req_sec);
773 char elapsed_time_str[256];
774 char req_time_str[256];
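The H:MM:SS formatting shown above in time_to_string() (line 760) is what fills buffers like req_time_str and elapsed_time_str. As a small, self-contained worked example (the helper name to_hms is a stand-in):

// Worked example of the H:MM:SS conversion above: 3725 s -> "1:02:05".
#include <cstdio>

static void to_hms(double time, char* str, size_t str_size) {
  int h = ((int)time) / 3600;
  int m = (((int)time) % 3600) / 60;
  int s = (((int)time) % 3600) % 60;
  snprintf(str, str_size, "%d:%02d:%02d", h, m, s);
}

int main() {
  char buf[32];
  to_hms(3725.0, buf, sizeof buf);   // 1 h, 2 min, 5 s
  printf("%s\n", buf);               // prints 1:02:05
  return 0;
}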
 778   log_msg( NULL, 0, NONL, "%.2f\t%.1f\t%.1f\t%s\t%s",
796 if(!have_timedependent_phys) {
 797     log_msg(0,0,0, "\n no time-dependent physics region registered, skipping simulate loop..\n");

 801     log_msg(0,0,0, "\n *** Launching simulation ***\n");
805 if(param_globals::dump_protocol)
824 it.second->output_step();
 838   log_msg(0,0,0, "\n\nTimings of individual physics:");
 839   log_msg(0,0,0, "------------------------------\n");
 850   if(param_globals::post_processing_opts & RECOVER_PHIE) {
 851     log_msg(NULL,0,ECHO, "\nPOSTPROCESSOR: Recovering Phie ...");
 852     log_msg(NULL,0,ECHO, "----------------------------------\n");

 858     log_msg(NULL,0,ECHO, "\n-----------------------------------------");
 859     log_msg(NULL,0,ECHO, "POSTPROCESSOR: Successfully recovered Phie.\n");
871 if(error_if_missing) {
 872     log_msg(0,5,0, "%s error: required physic is not active! Usually this is due to an inconsistent experiment configuration. Aborting!", __func__);
 896     log_msg(0,5,0, "%s warning: trying to register already registered data vector.", __func__);
 910   auto register_new_mesh = [&] (mesh_t mt, int pidx) {
911 if(!mesh_registry.count(mt)) {
913 mesh_registry[mt].name = param_globals::phys_region[pidx].name;
915 return &mesh_registry[mt];
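The register_new_mesh lambda above creates a registry entry only on first use of a mesh type and hands back a pointer into the map. The same idiom reduced to standard containers (the enum values and stub struct are placeholders for illustration):

// Minimal sketch of a lazily-populated registry keyed by an enum,
// mirroring the register_new_mesh lambda above. Types are illustrative.
#include <map>
#include <string>

enum mesh_t { intra_elec_msh, extra_elec_msh };
struct sf_mesh_stub { std::string name; };

int main() {
  std::map<mesh_t, sf_mesh_stub> mesh_registry;

  auto register_new_mesh = [&](mesh_t mt, const std::string& nm) {
    if (!mesh_registry.count(mt))      // create the entry only on first request
      mesh_registry[mt].name = nm;
    return &mesh_registry[mt];         // std::map pointers stay valid on insert
  };

  sf_mesh_stub* m = register_new_mesh(intra_elec_msh, "Autogenerated intracellular Electrics");
  (void)m;
  return 0;
}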
 919   for(int i=0; i<param_globals::num_phys_regions; i++)
923 switch(param_globals::phys_region[i].ptype) {
 935         log_msg(0,5,0, "Unsupported mesh type %d! Aborting!", param_globals::phys_region[i].ptype);

 941     for(int j=0; j<param_globals::phys_region[i].num_IDs; j++)
 964   for (int i=0; i<ntr; i++) {

 973         shape.p0.x = tagRegs[i].p0[0];
 974         shape.p0.y = tagRegs[i].p0[1];
 975         shape.p0.z = tagRegs[i].p0[2];
 976         shape.p1.x = tagRegs[i].p1[0];
 977         shape.p1.y = tagRegs[i].p1[1];
 978         shape.p1.z = tagRegs[i].p1[2];
 979         shape.radius = tagRegs[i].radius;

 986       log_msg(0,3,0, "Tag region %d is empty", i);

 988     for(size_t j=0; j<elem_indices.size(); j++)
 989       mesh.tag[elem_indices[j]] = tagRegs[i].tag;
993 if(strlen(param_globals::retagfile))
1008 size_t renormalised_count = 0;
1014  for (size_t i = 0; i < l_numelem; i++)
1016 const mesh_real_t f0 = fib[3*i+0], f1 = fib[3*i+1], f2 = fib[3*i+2];
1017 mesh_real_t fibre_len = sqrt(f0*f0 + f1*f1 + f2*f2);
1019 if (fibre_len && fabs(fibre_len - 1) > 1e-3) {
1020 fib[3 * i + 0] /= fibre_len;
1021 fib[3 * i + 1] /= fibre_len;
1022 fib[3 * i + 2] /= fibre_len;
1023 renormalised_count++;
1027 return renormalised_count;
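As a standalone usage sketch of renormalise_fibres() above: fibres stored as flat (x,y,z) triples are rescaled to unit length whenever their length deviates from 1 by more than the 1e-3 tolerance (std::vector stands in for SF::vector<mesh_real_t> here):

// Usage sketch of the fibre renormalisation above, with std::vector
// standing in for SF::vector<mesh_real_t>.
#include <cmath>
#include <cstdio>
#include <vector>

static size_t renormalise(std::vector<double>& fib, size_t numelem) {
  size_t fixed = 0;
  for (size_t i = 0; i < numelem; i++) {
    double f0 = fib[3*i+0], f1 = fib[3*i+1], f2 = fib[3*i+2];
    double len = std::sqrt(f0*f0 + f1*f1 + f2*f2);
    if (len && std::fabs(len - 1.0) > 1e-3) {   // tolerate ~0.1% deviation
      fib[3*i+0] /= len; fib[3*i+1] /= len; fib[3*i+2] /= len;
      fixed++;
    }
  }
  return fixed;
}

int main() {
  std::vector<double> fib = {2.0, 0.0, 0.0,    // rescaled to (1,0,0)
                             0.0, 1.0, 0.0};   // already unit length
  printf("renormalised %zu of 2 fibres\n", renormalise(fib, 2));
  return 0;
}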
1032  log_msg(0,0,0, "\n *** Processing meshes ***\n");
1034 const std::string basename = param_globals::meshname;
1035 const int verb = param_globals::output_level;
1043  MPI_Comm comm = ref_mesh.comm;
1046 double t1, t2, s1, s2;
1047 MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank);
1053 std::list< sf_mesh* > ptsread_list;
1056 t1 = MPI_Wtime(); s1 = t1;
1057  if(verb) log_msg(NULL, 0, 0, "Reading reference mesh: %s.*", basename.c_str());

1063  if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));

1065  bool check_fibre_normality = true;
1066 if (check_fibre_normality) {
1072 size_t l_num_fixed_she = 0;
1076    unsigned long fixed[2] = {(unsigned long) l_num_fixed_fib, (unsigned long) l_num_fixed_she};
1077 MPI_Allreduce(MPI_IN_PLACE, fixed, 2, MPI_UNSIGNED_LONG, MPI_SUM, comm);
1079 if (fixed[0] + fixed[1] > 0)
1080      log_msg(NULL, 0, 0, "Renormalised %ld longitudinal and %ld sheet-transverse fibre vectors.", fixed[0], fixed[1]);

1083    if(verb) log_msg(NULL, 0, 0, "Done in %f sec.", float(t2 - t1));
1086 if(param_globals::numtagreg > 0) {
1087    log_msg(0, 0, 0, "Re-tagging reference mesh");
1091 ptsread_list.push_back(&ref_mesh);
1094 retag_elements(ref_mesh, param_globals::tagreg, param_globals::numtagreg);
1097 ptsread_list.clear();
1100  if(verb) log_msg(NULL, 0, 0, "Processing submeshes");

1102  for(auto it = mesh_registry.begin(); it != mesh_registry.end(); ++it) {
1103 mesh_t grid_type = it->first;
1104 sf_mesh & submesh = it->second;
1107    if(verb > 1) log_msg(NULL, 0, 0, "\nSubmesh name: %s", submesh.name.c_str());

1123    if(verb > 1) log_msg(NULL, 0, 0, "Extraction done in %f sec.", float(t2 - t1));
1125 ptsread_list.push_back(&submesh);
1130 if(param_globals::pstrat == 2)
1133  for(auto it = mesh_registry.begin(); it != mesh_registry.end(); ++it)

1135    mesh_t grid_type = it->first;
1136    sf_mesh & submesh = it->second;

1138    if(verb > 2) log_msg(NULL, 0, 0, "\nSubmesh name: %s", submesh.name.c_str());
1143 switch(param_globals::pstrat) {
1145      if(verb > 2) log_msg(NULL, 0, 0, "Using linear partitioning ..");

1148 #ifdef WITH_PARMETIS

1151      if(verb > 2) log_msg(NULL, 0, 0, "Using Parmetis partitioner ..");
1152      SF::parmetis_partitioner<mesh_int_t, mesh_real_t> partitioner(param_globals::pstrat_imbalance, 2);
1153      partitioner(submesh, part);

1159      if(verb > 2) log_msg(NULL, 0, 0, "Using KDtree partitioner ..");

1161      partitioner(submesh, part);

1166    if(verb > 2) log_msg(NULL, 0, 0, "Partitioning done in %f sec.", float(t2 - t1));
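For orientation, the simplest of the three strategies above (pstrat 0, "linear partitioning") amounts to slicing the element range into contiguous, roughly equal blocks per rank. A conceptual sketch only, not SF's partitioner interface:

// Minimal sketch of linear partitioning: element i of N goes to rank i*P/N.
// Illustrative only; openCARP's SF partitioners operate on distributed meshes.
#include <cstdio>
#include <vector>

int main() {
  size_t num_elem = 10;
  int    num_rank = 4;

  std::vector<int> part(num_elem);
  for (size_t i = 0; i < num_elem; i++)
    part[i] = (int)((i * num_rank) / num_elem);   // contiguous, balanced blocks

  for (size_t i = 0; i < num_elem; i++)
    printf("elem %zu -> rank %d\n", i, part[i]);
  return 0;
}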
1168 if(param_globals::pstrat > 0) {
1169 if(param_globals::gridout_p) {
1170      std::string out_name = get_basename(param_globals::meshname);

1175      log_msg(0,0,0, "Writing \"%s\" partitioning to: %s", submesh.name.c_str(), out_name.c_str());

1182    if(verb > 2) log_msg(NULL, 0, 0, "Redistributing done in %f sec.", float(t2 - t1));
1187 sm_numbering(submesh);
1189    if(verb > 2) log_msg(NULL, 0, 0, "Canonical numbering done in %f sec.", float(t2 - t1));

1194    p_numbering(submesh);

1196    if(verb > 2) log_msg(NULL, 0, 0, "PETSc numbering done in %f sec.", float(t2 - t1));

1205  if(verb) log_msg(NULL, 0, 0, "All done in %f sec.", float(s2 - s1));
1214  std::string output_base = get_basename(param_globals::meshname);
1216 if(write_intra_elec) {
1219 if(param_globals::gridout_i & 1) {
1221 if(param_globals::output_level > 1)
1222        log_msg(0,0,0, "Computing \"%s\" surface ..", mesh.name.c_str());

1225      std::string output_file = output_base + "_i.surf";
1226      log_msg(0,0,0, "Writing \"%s\" surface: %s", mesh.name.c_str(), output_file.c_str());

1229    if(param_globals::gridout_i & 2) {
1230      bool write_binary = false;

1232      std::string output_file = output_base + "_i";
1233      log_msg(0,0,0, "Writing \"%s\" mesh: %s", mesh.name.c_str(), output_file.c_str());
1238 if(write_extra_elec) {
1241 if(param_globals::gridout_e & 1) {
1243 if(param_globals::output_level > 1)
1244        log_msg(0,0,0, "Computing \"%s\" surface ..", mesh.name.c_str());

1247      std::string output_file = output_base + "_e.surf";
1248      log_msg(0,0,0, "Writing \"%s\" surface: %s", mesh.name.c_str(), output_file.c_str());

1251    if(param_globals::gridout_e & 2) {
1252      bool write_binary = false;
1253      std::string output_file = output_base + "_e";
1254      log_msg(0,0,0, "Writing \"%s\" mesh: %s", mesh.name.c_str(), output_file.c_str());
1277  char* filecopy = dupstr(file);
1278  char* dir      = dupstr(dirname(filecopy));
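The double dupstr() above matters because POSIX dirname() may modify the buffer it is given and may return a pointer into it, so both the input and the result are copied. A small sketch of that pitfall (file_dir() is an illustrative stand-in for get_file_dir()):

// Sketch: dirname() may modify its argument and return a pointer to static
// or in-place storage, so work on copies as get_file_dir() does above.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <libgen.h>

static char* file_dir(const char* file) {
  char* filecopy = strdup(file);            // dirname() may write into this
  char* dir      = strdup(dirname(filecopy));
  free(filecopy);
  return dir;                               // caller frees
}

int main() {
  char* d = file_dir("/opt/openCARP/external/imp.so");
  printf("%s\n", d);                        // prints /opt/openCARP/external
  free(d);
  return 0;
}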
1295 PetscErrorPrintf = PetscErrorPrintfNone;
1305  for(size_t eidx = 0; eidx < mesh.l_numelem; eidx++) {
1308 if(mindim < cdim) mindim = cdim;
1335  int gsize = inp_data->gsize();

1341  regigb.x(gsize / dpn);
1342  regigb.dim_x(regigb.x()-1);

1345  regigb.y(1); regigb.z(1);

1361  regigb.inc_t(param_globals::spacedt);

1375    log_msg(0,5,0, "%s error: Could not set up data output! Aborting!", __func__);

1382  IO.spec = mesh_spec;
1392 buffmap[mesh_spec] = inp_copy;
1399 void igb_output_manager::register_output_async(sf_vec* inp_data,

1427    for(size_t i=0; i<alg_nod.size(); i++)
1428      ioidx[i] = nbr[alg_nod[i]];

1433    for(size_t i=0; i<idx->size(); i++) {
1435 ioidx[i] = nbr[loc_nodal];
1455 if(param_globals::num_io_nodes == 0)
1458    register_output_async(inp_data, inp_meshid, dpn, name, units, idx, elem_data);

1473  sc->forward(*data_vec, *perm_vec);

1488    FILE* fd = static_cast<FILE*>(it.igb.fileptr());
1491 sf_vec* buff = fill_output_buffer(it);
1504 root_write(fd, restr_buff, PETSC_COMM_WORLD);
1532    FILE* fd = static_cast<FILE*>(it.igb.fileptr());
1563 const int max_line_len = 128;
1564  const char* file_sep = "#=======================================================";

1571  fprintf(out->fd, "# CARP GIT commit hash: %s\n", GIT_COMMIT_HASH);
1572  fprintf(out->fd, "# dependency hashes: %s\n", SUBREPO_COMMITS);
1573  fprintf(out->fd, "\n");

1576  char line[8196] = "# ";

1578  for (int j=0; j<argc; j++) {
1579    strcat(line, argv[j]);
1580    if(strlen(line) > max_line_len) {
1581      fprintf(out->fd, "%s\n", line);

1587  fprintf(out->fd, "%s\n\n", line);
1591  for (int i=1; i<argc; i++) {
1592    std::string argument(argv[i]);
1593    if (argument == "+F" || argument.find("_options_file") != std::string::npos) {

1595      std::string init = "";
1596      if (argument.find("_options_file") != std::string::npos) {
1597        fprintf(out->fd, "%s = %s\n", argument.substr(1).c_str(), argv[i+1]);

1600        fprintf(out->fd, "%s>>\n", file_sep);

1603      fprintf(out->fd, "## %s ##\n", argv[i]);
1604      FILE *in = fopen(argv[i], "r");
1605      while (fgets(line, 8196, in))
1606        fprintf(out->fd, "%s%s", init.c_str(), line);

1608      fprintf(out->fd, "\n##END of %s\n", argv[i]);
1609      fprintf(out->fd, "%s<<\n\n", file_sep);

1611    else if(argv[i][0] == '-')
1613 bool prelast = (i==argc-1);
1614      bool paramFollows = !prelast && ((argv[i+1][0] != '-') ||
1615                          ((argv[i+1][1] >= '0') && (argv[i+1][1] <= '9')));

1621        char *optcpy = strdup(argv[i+1]);
1622        char *front  = optcpy;

1624        while(*front == ' ' && *front) front++;

1626          while(*++front == ' ');
1627        char *back = optcpy+strlen(optcpy)-1;
1628        while(*back == ' ' && back > front) back--;

1632        if (strstr(front, "=") != nullptr)
1633          fprintf(out->fd, "%-40s= \"%s\"\n", argv[i]+1, front);

1635          fprintf(out->fd, "%-40s= %s\n", argv[i]+1, front);

1640        fprintf(out->fd, "%-40s= 1\n", argv[i]);
1655  snprintf(save_fn, sizeof save_fn, "exit.save.%.3f.roe", time);
1656  log_msg(NULL, 0, 0, "savequit called at time %g\n", time);