openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
SF_abstract_vector.h
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // openCARP is an open cardiac electrophysiology simulator.
3 //
4 // Copyright (C) 2020 openCARP project
5 //
6 // This program is licensed under the openCARP Academic Public License (APL)
7 // v1.0: You can use and redistribute it and/or modify it in non-commercial
8 // academic environments under the terms of APL as published by the openCARP
9 // project v1.0, or (at your option) any later version. Commercial use requires
10 // a commercial license (info@opencarp.org).
11 //
12 // This program is distributed without any warranty; see the openCARP APL for
13 // more details.
14 //
15 // You should have received a copy of the openCARP APL along with this program
16 // and can find it online: http://www.opencarp.org/license
17 // ----------------------------------------------------------------------------
18 
19 #ifndef _SF_ABSTRACT_VECTOR_H
20 #define _SF_ABSTRACT_VECTOR_H
21 
22 #include <mpi.h>
23 
24 #include <petscvec.h> // TODO: For scattering, VecScatter and friends
25 #include <petscis.h>
26 #include <petscsys.h> // TODO: For PETSC_COMM_WORLD
27 
28 #include "SF_container.h"
29 #include "SF_globals.h"
30 #include "SF_parallel_layout.h"
31 #include "SF_parallel_utils.h"
32 
33 #include "hashmap.hpp"
34 
35 namespace SF {
36 
37 // Forward declare the scattering class
38 class scattering;
39 
53 template<class T, class S>
55 
56  public:
57 
60 
62  int dpn = 0;
64 
66  virtual ~abstract_vector() = default;
67 
75  virtual void init(const meshdata<mesh_int_t, mesh_real_t>& imesh,
76  int idpn,
77  ltype inp_layout) = 0;
78 
84  virtual void init(const abstract_vector<T, S>& vec) = 0;
85 
94  virtual void init(int igsize, int ilsize, int idpn = 1, ltype ilayout = unset) = 0;
95 
  /** Shared initialization logic for the concrete vector classes.
   *
   * Stores the mesh pointer and the number of d.o.f. per mesh vertex, sets
   * the layout member and computes the global / local entry counts implied
   * by the requested layout.
   *
   * @param imesh       The mesh this vector is associated to.
   * @param idpn        Number of d.o.f. per mesh vertex.
   * @param inp_layout  Requested parallel layout (algebraic, nodal or elemwise).
   *
   * @return Tuple (N, n) with the global and local number of entries.
   *         Both are 0 if inp_layout matched none of the known layouts.
   */
  inline std::tuple<int, int> init_common(const meshdata<mesh_int_t, mesh_real_t>& imesh,
                                          int idpn,
                                          ltype inp_layout)
  {
    mesh = &imesh;
    dpn = idpn;
    int N = 0, n = 0;

    switch(inp_layout) {
      case algebraic:
        // algebraic layout: sizes derive from the parallel index layout of the mesh
        N = mesh->pl.num_global_idx()*dpn;
        n = mesh->pl.num_algebraic_idx()*dpn;
        layout = algebraic;
        break;

      case nodal:
        // nodal layout: each rank holds all of its local points; the global
        // size is the sum of the local sizes over all ranks
        N = mesh->l_numpts*dpn;
        MPI_Allreduce(MPI_IN_PLACE, &N, 1, MPI_INT, MPI_SUM, mesh->comm);
        n = mesh->l_numpts*dpn;
        layout = nodal;
        break;

      case elemwise:
        // element-wise layout: one entry per element.
        // NOTE(review): dpn is not applied here, unlike the other two cases --
        // presumably intentional (elemwise data is scalar per element); confirm.
        N = mesh->g_numelem;
        n = mesh->l_numelem;
        layout = elemwise;
        break;

      default: break;
    }

    return std::tuple<int, int>(N, n);
  }
138 
  /** Set values at the given indices.
   *  @param idx       Indices to write to.
   *  @param vals      One value per index.
   *  @param additive  If true, add to the present values instead of overwriting.
   */
  virtual void set(const vector<T>& idx, const vector<S>& vals, const bool additive = false) = 0;

  /// Set the single value val at every index in idx.
  virtual void set(const vector<T>& idx, const S val) = 0;

  /// Set every entry of the vector to val.
  virtual void set(const S val) = 0;

  /// Set the entry at index idx to val.
  virtual void set(const T idx, const S val) = 0;

  /// Read the values at the indices in idx into out (out must be large enough).
  virtual void get(const vector<T> & idx, S *out) = 0;

  /// Return the value stored at index idx.
  virtual S get(const T idx) = 0;

  /// Scale every entry by sca.
  virtual void operator*=(const S sca) = 0;

  /// Divide every entry by sca.
  virtual void operator /= (const S sca) = 0;

  /// Entry-wise multiplication with vec.
  virtual void operator*=(const abstract_vector<T, S>& vec) = 0;

  /// Add k * vec to this vector.
  virtual void add_scaled(const abstract_vector<T, S>& vec, S k) = 0;

  /// Entry-wise addition of vec.
  virtual void operator += (const abstract_vector<T,S> & vec) = 0;

  /// Entry-wise subtraction of vec.
  virtual void operator-=(const abstract_vector<T, S>& vec) = 0;

  /// Add the scalar c to every entry.
  virtual void operator+=(S c) = 0;

  /// Assign the entries of rhs to this vector.
  virtual void operator=(const vector<S>& rhs) = 0;

  /// Assign the contents of rhs to this vector.
  virtual void operator=(const abstract_vector<T, S>& rhs) = 0;

  /// Make this vector a shallow copy of v.
  /// NOTE(review): exact sharing semantics depend on the concrete class -- verify there.
  virtual void shallow_copy(const abstract_vector<T, S>& v) = 0;

  /// Copy the full contents of v into this vector.
  virtual void deep_copy(const abstract_vector<T, S>& v) = 0;

  /// Overshadow (alias a sub-range of) the vector sub.
  /// NOTE(review): semantics of member/offset/sz/share are defined by the
  /// concrete implementations -- confirm against those.
  virtual void overshadow(const abstract_vector<T, S>& sub, bool member, int offset, int sz, bool share) = 0;

  /// Local (per-rank) number of entries.
  virtual T lsize() const = 0;

  /// Global number of entries.
  virtual T gsize() const = 0;

  /// Get the globally indexed range owned by this rank into (start, stop).
  virtual void get_ownership_range(T& start, T& stop) const = 0;

  /// Raw read-write access to the local data array. Must be paired with release_ptr().
  virtual S* ptr() = 0;

  /// Raw read-only access to the local data array. Must be paired with const_release_ptr().
  virtual const S* const_ptr() const = 0;

  /// Release a pointer obtained from ptr().
  virtual void release_ptr(S*& p) = 0;

  /// Release a pointer obtained from const_ptr().
  virtual void const_release_ptr(const S*& p) const = 0;

  /// The magnitude (norm) of the vector.
  virtual S mag() const = 0;

  /// The sum of all entries.
  virtual S sum() const = 0;

  /// The minimum entry.
  virtual S min() const = 0;

  /// The dot product of this vector with v.
  virtual S dot(const abstract_vector<T, S>& v) const = 0;

  /// Whether the vector has been initialized.
  virtual bool is_init() const = 0;

  /// A string representation of the vector contents.
  virtual std::string to_string() const = 0;

  /// Whether this vector and rhs hold equal data.
  virtual bool equals(const abstract_vector<T,S> & rhs) const = 0;

  /// Finish a pending assembly phase (see the concrete implementations).
  virtual void finish_assembly() = 0;

  /** Forward-scatter this vector into out using the scattering sc.
   *  @param out  Destination vector.
   *  @param sc   The scattering to apply.
   *  @param add  If true, values are added to out instead of overwriting.
   */
  virtual void forward(abstract_vector<T, S>& out, scattering &sc, bool add = false) = 0;

  /** Backward-scatter this vector into out using the scattering sc.
   *  @param out  Destination vector.
   *  @param sc   The scattering to apply.
   *  @param add  If true, values are added to out instead of overwriting.
   */
  virtual void backward(abstract_vector<T,S> & out, scattering &sc, bool add = false) = 0;

  /// Apply the scattering sc to this vector: forward if fwd is true, else backward.
  virtual void apply_scattering(scattering& sc, bool fwd) = 0;
427 
  /** Write the vector to an ASCII file, dpn values per line.
   *
   *  Rank 0 opens the file, writes its own local data, then receives the
   *  local data of every other rank (in rank order) and writes it. The file
   *  therefore holds the entries in rank-block order.
   *
   *  @param file          Path of the output file.
   *  @param write_header  If true, write the global vector size as a first line.
   *
   *  @return Number of characters written (broadcast to all ranks);
   *          0 if the file could not be opened.
   */
  inline size_t write_ascii(const char* file, bool write_header)
  {
    int size, rank;
    MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    int glb_size = this->gsize();
    int loc_size = this->lsize();

    int err = 0;
    FILE* fd = NULL;
    long int nwr = 0;

    // only rank 0 touches the file system
    if(rank == 0) {
      fd = fopen(file, "w");
      if(!fd) err = 1;
    }

    // share the open status so that all ranks take the same code path
    MPI_Allreduce(MPI_IN_PLACE, &err, 1, MPI_INT, MPI_MAX, comm);
    if(err) {
      treat_file_open_error(file, __func__, errno, false, rank);
      return nwr;
    }

    S* p = this->ptr();

    if(rank == 0) {
      if(write_header)
        fprintf(fd, "%d\n", glb_size);

      // write rank 0's local data, dpn values per line.
      // NOTE(review): "%f" assumes S is a floating-point type.
      for(int i=0; i<loc_size/this->dpn; i++) {
        for(int j=0; j<this->dpn; j++)
          nwr += fprintf(fd, "%f ", p[i*this->dpn+j]);
        nwr += fprintf(fd, "\n");
      }

      // receive and write the data of all other ranks, in rank order
      vector<S> wbuff;
      for(int pid=1; pid < size; pid++)
      {
        int rsize;
        MPI_Status stat;

        // protocol: first the local size, then the raw bytes
        MPI_Recv(&rsize, 1, MPI_INT, pid, SF_MPITAG, comm, &stat);
        wbuff.resize(rsize);
        MPI_Recv(wbuff.data(), rsize*sizeof(S), MPI_BYTE, pid, SF_MPITAG, comm, &stat);

        for(int i=0; i<rsize/this->dpn; i++) {
          for(int j=0; j<this->dpn; j++)
            nwr += fprintf(fd, "%f ", wbuff[i*this->dpn+j]);
          nwr += fprintf(fd, "\n");
        }
      }
      fclose(fd);
    }
    else {
      // non-root ranks ship size and data to rank 0
      MPI_Send(&loc_size, 1, MPI_INT, 0, SF_MPITAG, comm);
      MPI_Send(p, loc_size*sizeof(S), MPI_BYTE, 0, SF_MPITAG, comm);
    }

    this->release_ptr(p);
    // let every rank return the same character count
    MPI_Bcast(&nwr, 1, MPI_LONG, 0, comm);

    return nwr;
  }
501 
509  template<typename V>
510  inline size_t write_binary(FILE* fd)
511  {
512  int size, rank;
513  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
514  MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size);
515 
516  long int loc_size = this->lsize();
517  S* p = this->ptr();
518 
519  vector<V> buff(loc_size);
520  for(long int i=0; i<loc_size; i++) buff[i] = p[i];
521 
522  long int nwr = root_write(fd, buff, comm);
523 
524  this->release_ptr(p);
525  return nwr;
526  }
527 
529  template<typename V>
530  inline size_t write_binary(std::string file)
531  {
532  size_t nwr = 0;
533  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
534  int rank; MPI_Comm_rank(comm, &rank);
535 
536  FILE* fd = NULL;
537  int error = 0;
538 
539  if(rank == 0) {
540  fd = fopen(file.c_str(), "w");
541  if(fd == NULL)
542  error++;
543  }
544 
545  MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_INT, MPI_SUM, comm);
546 
547  if(error == 0) {
548  nwr = this->write_binary<V>(fd);
549  fclose(fd);
550  }
551  else {
552  treat_file_open_error(file.c_str(), __func__, errno, false, rank);
553  }
554 
555  return nwr;
556  }
557 
558  template<typename V>
559  inline size_t read_binary(FILE* fd)
560  {
561  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
562 
563  size_t loc_size = this->lsize();
564  S* p = this->ptr();
565  vector<V> buff(loc_size);
566 
567  size_t nrd = root_read(fd, buff, comm);
568 
569  for(size_t i=0; i<loc_size; i++)
570  p[i] = buff[i];
571 
572  this->release_ptr(p);
573 
574  return nrd;
575  }
576 
577  template<typename V>
578  inline size_t read_binary(std::string file)
579  {
580  MPI_Comm comm = mesh != NULL ? mesh->comm : PETSC_COMM_WORLD;
581 
582  size_t nrd = 0;
583  int rank; MPI_Comm_rank(comm, &rank);
584 
585  FILE* fd = NULL;
586  int error = 0;
587 
588  if(rank == 0) {
589  fd = fopen(file.c_str(), "r");
590  if(fd == NULL)
591  error++;
592  }
593 
594  MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_INT, MPI_SUM, comm);
595 
596  if(error == 0) {
597  nrd = read_binary<V>(fd);
598  fclose(fd);
599  }
600  else {
601  treat_file_open_error(file.c_str(), __func__, errno, false, rank);
602  }
603 
604  return nrd;
605  }
606 
607  inline size_t read_ascii(FILE* fd)
608  {
609  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
610 
611  size_t loc_size = this->lsize();
612  S* p = this->ptr();
613 
614  size_t nrd = root_read_ascii(fd, p, loc_size, comm, false);
615  return nrd;
616  }
617 
618  inline size_t read_ascii(std::string file)
619  {
620  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
621  int rank; MPI_Comm_rank(comm, &rank);
622 
623  size_t nrd = 0;
624 
625  FILE* fd = NULL;
626  int error = 0;
627 
628  if(rank == 0) {
629  fd = fopen(file.c_str(), "r");
630  if(fd == NULL)
631  error++;
632  }
633 
634  MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_INT, MPI_SUM, comm);
635 
636  if(error == 0) {
637  nrd = read_ascii(fd);
638  if(fd) fclose(fd);
639  }
640  else {
641  treat_file_open_error(file.c_str(), __func__, errno, false, rank);
642  }
643 
644  return nrd;
645  }
646 };
647 
648 
651 template<class T, class S>
652 inline bool is_init(const abstract_vector<T,S>* v)
653 {
654  return (v && v->is_init());
655 }
656 
657 
658 // TODO: This is a type of Vector and can most likely be integrated into the
659 // `abstract_vector` type in some way. Everything here and also in
660 // `SF_parallel_layout.h` probably need to be abstracted away, or made less
661 // PETSc specific?
670 {
671  public:
672  Vec b_buff;
673  VecScatter vec_sc;
674 
676 
  /// Constructor. Initializes the PETSc handles to NULL so that the
  /// destructor can safely check whether they were ever created.
  scattering() : b_buff(NULL), vec_sc(NULL)
  {}
680 
683  {
684  if(b_buff) VecDestroy(&b_buff);
685  if(vec_sc) VecScatterDestroy(&vec_sc);
686  }
  /** Forward scattering: scatter `in` into `out` using this scattering.
   *
   * @param in   Input vector.
   * @param out  Output vector.
   * @param add  If true, values are added to `out` (see abstract_vector::forward).
   */
  template<class T, class S>
  inline void forward(abstract_vector<T,S> & in, abstract_vector<T,S> & out, bool add = false)
  {
    // the concrete vector class knows how to apply the scatter to itself
    in.forward(out, *this, add);
  }
698 
  /** Backward scattering: scatter `in` into `out` using this scattering.
   *
   * @param in   Input vector.
   * @param out  Output vector.
   * @param add  If true, values are added to `out` (see abstract_vector::backward).
   */
  template<class T, class S>
  inline void backward(abstract_vector<T,S> & in, abstract_vector<T,S> & out, bool add = false)
  {
    // delegate to the concrete vector implementation
    in.backward(out, *this, add);
  }
710 
  /** Apply the scattering in-place on a data vector.
   *
   * @param v    The vector to scatter.
   * @param fwd  true for forward scattering, false for backward.
   */
  template<class T, class S>
  inline void operator()(abstract_vector<T,S> & v, bool fwd)
  {
    // delegate to the concrete vector implementation
    v.apply_scattering(*this, fwd);
  }
722 };
723 
732 template<class T>
734 {
735  private:
738 
739  public:
752  inline
754  const vector<T> & nbr_a,
755  const vector<T> & nbr_b,
756  const size_t gsize_a,
757  const size_t gsize_b,
758  const short dpn)
759  {
760  assert(_registry.count(spec) == 0);
761 
762  scattering* sc = new scattering();
763  _registry[spec] = sc;
764 
765  IS is_a, is_b;
766  int err = 0;
767  vector<SF_int> idx_a(nbr_a.size() * dpn);
768  vector<SF_int> idx_b(nbr_b.size() * dpn);
769 
770  // we copy indexing into new containers because of dpn and also
771  // because of the implicit typecast between T and SF_int
772  for(size_t i=0; i<nbr_a.size(); i++)
773  for(short j=0; j<dpn; j++) idx_a[i*dpn+j] = nbr_a[i]*dpn+j;
774 
775  for(size_t i=0; i<nbr_b.size(); i++)
776  for(short j=0; j<dpn; j++) idx_b[i*dpn+j] = nbr_b[i]*dpn+j;
777 
778  err += ISCreateGeneral(PETSC_COMM_WORLD, idx_a.size(), idx_a.data(), PETSC_COPY_VALUES, &is_a);
779  err += ISCreateGeneral(PETSC_COMM_WORLD, idx_b.size(), idx_b.data(), PETSC_COPY_VALUES, &is_b);
780 
781  err += ISSetPermutation(is_b);
782 
783  Vec a;
784  err += VecCreateMPI(PETSC_COMM_WORLD, idx_a.size(), gsize_a*dpn, &a);
785  err += VecCreateMPI(PETSC_COMM_WORLD, idx_b.size(), gsize_b*dpn, &sc->b_buff);
786 
787  err += VecScatterCreate(a, is_a, sc->b_buff, is_b, &sc->vec_sc);
788 
789  err += VecDestroy(&a);
790  err += ISDestroy (&is_a);
791  err += ISDestroy (&is_b);
792 
793  if(err)
794  fprintf(stderr, "%s : an error has occurred! Scattering spec: Mesh ID %d, permutation ID %d, numbering %d, dpn %d!\n",
795  __func__, int(spec.v1), int(spec.v2), int(spec.v3), int(spec.v4));
796 
797  sc->idx_a = idx_a;
798  sc->idx_b = idx_b;
799 
800  return sc;
801  }
802 
803 
809  inline
811  const vector<T> & layout_a,
812  const vector<T> & layout_b,
813  const vector<T> & idx_a,
814  const vector<T> & idx_b,
815  const int rank,
816  const int dpn)
817  {
818  // scattering spec must not exist yet
819  assert(_registry.count(spec) == 0);
820 
821  // the local index sets have to be of equal size.
822  assert(idx_a.size() == idx_b.size());
823 
824  scattering* sc = new scattering();
825  _registry[spec] = sc;
826 
827  IS is_a, is_b;
828  Vec a;
829  int err = 0;
830  vector<SF_int> sidx_a(idx_a.size() * dpn);
831  vector<SF_int> sidx_b(idx_b.size() * dpn);
832 
833  T lsize_a = layout_a[rank+1] - layout_a[rank];
834  T lsize_b = layout_b[rank+1] - layout_b[rank];
835  T gsize_a = layout_a[layout_a.size()-1];
836  T gsize_b = layout_b[layout_a.size()-1];
837 
838  // we copy indexing into new containers because of dpn and also
839  // because of the implicit typecast between T and SF_int
840  for(size_t i=0; i<idx_a.size(); i++)
841  for(short j=0; j<dpn; j++) sidx_a[i*dpn+j] = idx_a[i]*dpn+j;
842 
843  for(size_t i=0; i<idx_b.size(); i++)
844  for(short j=0; j<dpn; j++) sidx_b[i*dpn+j] = idx_b[i]*dpn+j;
845 
846  err += ISCreateGeneral(PETSC_COMM_WORLD, sidx_a.size(), sidx_a.data(), PETSC_COPY_VALUES, &is_a);
847  err += ISCreateGeneral(PETSC_COMM_WORLD, sidx_b.size(), sidx_b.data(), PETSC_COPY_VALUES, &is_b);
848 
849  err += VecCreateMPI(PETSC_COMM_WORLD, lsize_a*dpn, gsize_a*dpn, &a);
850  err += VecCreateMPI(PETSC_COMM_WORLD, lsize_b*dpn, gsize_b*dpn, &sc->b_buff);
851 
852  err += VecScatterCreate(a, is_a, sc->b_buff, is_b, &sc->vec_sc);
853 
854  err += VecDestroy(&a);
855  err += ISDestroy (&is_a);
856  err += ISDestroy (&is_b);
857 
858  if(err)
859  fprintf(stderr, "%s : an error has occurred! Scattering spec: To ID %d, From ID %d, numbering %d, dpn %d!\n",
860  __func__, int(spec.v1), int(spec.v2), int(spec.v3), int(spec.v4));
861 
862  sc->idx_a = idx_a;
863  sc->idx_b = idx_b;
864 
865  return sc;
866  }
867 
879  {
880  scattering* ret = NULL;
881 
882  // scattering must be present
883  if(_registry.count(spec))
884  ret = _registry[spec];
885 
886  return ret;
887  }
888 
892  inline void free_scatterings()
893  {
894  typename hashmap::unordered_map<quadruple<int>,scattering*>::iterator it;
895  for(it = _registry.begin(); it != _registry.end(); ++it) {
896  if(it->second) {
897  delete it->second;
898  it->second = NULL;
899  }
900  }
901  }
902 };
903 
904 
905 } // namespace SF
906 
907 
908 #endif // _SF_ABSTRACT_VECTOR_H
scattering()
Constructor.
Definition: dense_mat.hpp:34
virtual void overshadow(const abstract_vector< T, S > &sub, bool member, int offset, int sz, bool share)=0
void operator()(abstract_vector< T, S > &v, bool fwd)
Apply the scattering on a data vector.
virtual void operator-=(const abstract_vector< T, S > &vec)=0
virtual void release_ptr(S *&p)=0
virtual S min() const =0
int dpn
d.o.f. per mesh vertex.
virtual void add_scaled(const abstract_vector< T, S > &vec, S k)=0
virtual void finish_assembly()=0
MPI_Comm comm
the parallel mesh is defined on a MPI world
Definition: SF_container.h:392
size_t root_read_ascii(FILE *fd, vector< V > &vec, MPI_Comm comm, bool int_data)
Read ASCII data into a vector.
virtual void forward(abstract_vector< T, S > &out, scattering &sc, bool add=false)=0
VecScatter vec_sc
The scatterer.
overlapping_layout< T > pl
nodal parallel layout
Definition: SF_container.h:417
virtual T lsize() const =0
virtual ~abstract_vector()=default
virtual void operator*=(const S sca)=0
Basic containers.
#define SF_MPITAG
the MPI tag when communicating
Definition: SF_globals.h:29
virtual void const_release_ptr(const S *&p) const =0
size_t read_ascii(FILE *fd)
void free_scatterings()
Free the registered scatterings.
scattering * register_scattering(const quadruple< int > spec, const vector< T > &layout_a, const vector< T > &layout_b, const vector< T > &idx_a, const vector< T > &idx_b, const int rank, const int dpn)
Register a scattering.
Simple utility functions for parallel data.
~scattering()
Destructor.
ltype layout
used vector layout (nodal, algebraic, unset)
T * data()
Pointer to the vector's start.
Definition: SF_vector.h:91
size_t write_ascii(const char *file, bool write_header)
virtual S sum() const =0
vector< SF_int > idx_b
virtual const S * const_ptr() const =0
size_t root_read(FILE *fd, vector< V > &vec, MPI_Comm comm)
Read binary data into a vector.
virtual S * ptr()=0
virtual void operator+=(const abstract_vector< T, S > &vec)=0
virtual std::string to_string() const =0
virtual void init(const meshdata< mesh_int_t, mesh_real_t > &imesh, int idpn, ltype inp_layout)=0
virtual S mag() const =0
void forward(abstract_vector< T, S > &in, abstract_vector< T, S > &out, bool add=false)
Forward scattering.
virtual S dot(const abstract_vector< T, S > &v) const =0
virtual bool equals(const abstract_vector< T, S > &rhs) const =0
std::tuple< int, int > init_common(const meshdata< mesh_int_t, mesh_real_t > &imesh, int idpn, ltype inp_layout)
size_t write_binary(FILE *fd)
Write a vector to HD in binary. File descriptor is already set up.
size_t read_binary(FILE *fd)
size_t read_ascii(std::string file)
virtual void apply_scattering(scattering &sc, bool fwd)=0
virtual bool is_init() const =0
size_t write_binary(std::string file)
write binary. Open file descriptor myself.
hm_int count(const K &key) const
Check if key exists.
Definition: hashmap.hpp:579
size_t size() const
The current size of the vector.
Definition: SF_vector.h:104
size_t num_global_idx() const
Retrieve the global number of indices.
virtual void shallow_copy(const abstract_vector< T, S > &v)=0
size_t l_numpts
local number of points
Definition: SF_container.h:389
virtual void operator=(const vector< S > &rhs)=0
A vector storing arbitrary data.
Definition: SF_vector.h:42
virtual void get_ownership_range(T &start, T &stop) const =0
size_t l_numelem
local number of elements
Definition: SF_container.h:387
size_t read_binary(std::string file)
Classes and algorithms related to the layout of distributed meshes.
const meshdata< mesh_int_t, mesh_real_t > * mesh
the connected mesh
virtual void backward(abstract_vector< T, S > &out, scattering &sc, bool add=false)=0
size_t num_algebraic_idx() const
Retrieve the number of local algebraic indices.
size_t g_numelem
global number of elements
Definition: SF_container.h:386
void treat_file_open_error(const char *file, const char *caller, const int errnum, const bool do_exit, int rank)
treat a file open error by displaying the errnum string interpretation and the caller ...
Definition: SF_io_base.h:83
size_t root_write(FILE *fd, const vector< V > &vec, MPI_Comm comm)
Write vector data binary to disk.
virtual void deep_copy(const abstract_vector< T, S > &v)=0
void resize(size_t n)
Resize a vector.
Definition: SF_vector.h:209
virtual void operator/=(const S sca)=0
scattering * get_scattering(const quadruple< int > spec)
Access a previously registered scattering.
Classes similar to unordered_set and unordered_map, but with better performance.
void backward(abstract_vector< T, S > &in, abstract_vector< T, S > &out, bool add=false)
Backward scattering.
The scatterer registry class.
virtual T gsize() const =0
Vec b_buff
A buffer vector which also defines the parallel layout of the "b" side.
scattering * register_permutation(const quadruple< int > spec, const vector< T > &nbr_a, const vector< T > &nbr_b, const size_t gsize_a, const size_t gsize_b, const short dpn)
Register a permutation scattering.
Container for a PETSc VecScatter.