openCARP
Doxygen code documentation for the open cardiac electrophysiology simulator openCARP
SF_abstract_vector.h
1 // ----------------------------------------------------------------------------
2 // openCARP is an open cardiac electrophysiology simulator.
3 //
4 // Copyright (C) 2020 openCARP project
5 //
6 // This program is licensed under the openCARP Academic Public License (APL)
7 // v1.0: You can use and redistribute it and/or modify it in non-commercial
8 // academic environments under the terms of APL as published by the openCARP
9 // project v1.0, or (at your option) any later version. Commercial use requires
10 // a commercial license (info@opencarp.org).
11 //
12 // This program is distributed without any warranty; see the openCARP APL for
13 // more details.
14 //
15 // You should have received a copy of the openCARP APL along with this program
16 // and can find it online: http://www.opencarp.org/license
17 // ----------------------------------------------------------------------------
18 
19 #ifndef _SF_ABSTRACT_VECTOR_H
20 #define _SF_ABSTRACT_VECTOR_H
21 
22 #include <mpi.h>
23 
24 #include <petscvec.h> // TODO: For scattering, VecScatter and friends
25 #include <petscis.h>
26 #include <petscsys.h> // TODO: For PETSC_COMM_WORLD
27 
28 #include "SF_container.h"
29 #include "SF_globals.h"
30 #include "SF_parallel_layout.h"
31 #include "SF_parallel_utils.h"
32 
33 #include "hashmap.hpp"
34 
35 namespace SF {
36 
37 // Forward declare the scattering class
38 class scattering;
39 
53 template<class T, class S>
54 class abstract_vector
55 {
56  public:
57 
58  enum ltype {algebraic, nodal, elemwise, unset};
60 
61  const meshdata<mesh_int_t, mesh_real_t>* mesh = NULL;  ///< the connected mesh
62  int dpn = 0;                                           ///< d.o.f. per mesh vertex
63  ltype layout = unset;                                  ///< used vector layout (nodal, algebraic, unset)
64 
66  virtual ~abstract_vector() = default;
67 
75  virtual void init(const meshdata<mesh_int_t, mesh_real_t>& imesh,
76  int idpn,
77  ltype inp_layout) = 0;
78 
84  virtual void init(const abstract_vector<T, S>& vec) = 0;
85 
94  virtual void init(int igsize, int ilsize, int idpn = 1, ltype ilayout = unset) = 0;
95 
105  inline std::tuple<int, int> init_common(const meshdata<mesh_int_t, mesh_real_t>& imesh,
106  int idpn,
107  ltype inp_layout)
108  {
109  mesh = &imesh;
110  dpn = idpn;
111  int N = 0, n = 0;
112 
113  switch(inp_layout) {
114  case algebraic:
115  N = mesh->pl.num_global_idx()*dpn;
116  n = mesh->pl.num_algebraic_idx()*dpn;
117  layout = algebraic;
118  break;
119 
120  case nodal:
121  N = mesh->l_numpts*dpn;
122  MPI_Allreduce(MPI_IN_PLACE, &N, 1, MPI_INT, MPI_SUM, mesh->comm);
123  n = mesh->l_numpts*dpn;
124  layout = nodal;
125  break;
126 
127  case elemwise:
128  N = mesh->g_numelem;
129  n = mesh->l_numelem;
130  layout = elemwise;
131  break;
132 
133  default: break;
134  }
135 
136  return std::tuple<int, int>(N, n);
137  }
138 
147  virtual void set(const vector<T>& idx, const vector<S>& vals, const bool additive = false, const bool local = false) = 0;
148 
157  virtual void set(const vector<T>& idx, const S val, const bool additive = false, const bool local = false) = 0;
158 
164  virtual void set(const S val) = 0;
165 
174  virtual void set(const T idx, const S val) = 0;
175 
183  virtual void get(const vector<T> & idx, S *out) = 0;
184 
192  virtual S get(const T idx) = 0;
193 
199  virtual void operator*=(const S sca) = 0;
200 
206  virtual void operator /= (const S sca) = 0;
207 
213  virtual void operator*=(const abstract_vector<T, S>& vec) = 0;
214 
221  virtual void add_scaled(const abstract_vector<T, S>& vec, S k) = 0;
222 
228  virtual void operator += (const abstract_vector<T,S> & vec) = 0;
229 
235  virtual void operator-=(const abstract_vector<T, S>& vec) = 0;
236 
242  virtual void operator+=(S c) = 0;
243 
249  virtual void operator=(const vector<S>& rhs) = 0;
250 
256  virtual void operator=(const abstract_vector<T, S>& rhs) = 0;
257 
265  virtual void shallow_copy(const abstract_vector<T, S>& v) = 0;
266 
274  virtual void deep_copy(const abstract_vector<T, S>& v) = 0;
275 
288  virtual void overshadow(const abstract_vector<T, S>& sub, bool member, int offset, int sz, bool share) = 0;
289 
295  virtual T lsize() const = 0;
296 
302  virtual T gsize() const = 0;
303 
310  virtual void get_ownership_range(T& start, T& stop) const = 0;
311 
319  virtual S* ptr() = 0;
320 
329  virtual const S* const_ptr() const = 0;
330 
339  virtual S* device_ptr() = 0;
340 
348  virtual void release_ptr(S*& p) = 0;
349 
357  virtual void const_release_ptr(const S*& p) const = 0;
358 
366  virtual void release_device_ptr(S*& p) = 0;
367 
373  virtual S mag() const = 0;
374 
380  virtual S sum() const = 0;
381 
387  virtual S min() const = 0;
388 
396  virtual S dot(const abstract_vector<T, S>& v) const = 0;
397 
403  virtual bool is_init() const = 0;
404 
410  virtual std::string to_string() const = 0;
411 
417  virtual bool equals(const abstract_vector<T,S> & rhs) const = 0;
418 
422  virtual void finish_assembly() = 0;
423 
431  virtual void forward(abstract_vector<T, S>& out, scattering &sc, bool add = false) = 0;
432 
440  virtual void backward(abstract_vector<T,S> & out, scattering &sc, bool add = false) = 0;
441 
448  virtual void apply_scattering(scattering& sc, bool fwd) = 0;
449 
458  inline size_t write_ascii(const char* file, bool write_header)
459  {
460  int size, rank;
461  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
462  MPI_Comm_rank(comm, &rank);
463  MPI_Comm_size(comm, &size);
464 
465  int glb_size = this->gsize();
466  int loc_size = this->lsize();
467 
468  int err = 0;
469  FILE* fd = NULL;
470  long int nwr = 0;
471 
472  if(rank == 0) {
473  fd = fopen(file, "w");
474  if(!fd) err = 1;
475  }
476 
477  MPI_Allreduce(MPI_IN_PLACE, &err, 1, MPI_INT, MPI_MAX, comm);
478  if(err) {
479  treat_file_open_error(file, __func__, errno, false, rank);
480  return nwr;
481  }
482 
483  S* p = this->ptr();
484 
485  if(rank == 0) {
486  if(write_header)
487  fprintf(fd, "%d\n", glb_size);
488 
489  for(int i=0; i<loc_size/this->dpn; i++) {
490  for(int j=0; j<this->dpn; j++)
491  nwr += fprintf(fd, "%f ", p[i*this->dpn+j]);
492  nwr += fprintf(fd, "\n");
493  }
494 
495  vector<S> wbuff;
496  for(int pid=1; pid < size; pid++)
497  {
498  int rsize;
499  MPI_Status stat;
500 
501  MPI_Recv(&rsize, 1, MPI_INT, pid, SF_MPITAG, comm, &stat);
502  wbuff.resize(rsize);
503  MPI_Recv(wbuff.data(), rsize*sizeof(S), MPI_BYTE, pid, SF_MPITAG, comm, &stat);
504 
505  for(int i=0; i<rsize/this->dpn; i++) {
506  for(int j=0; j<this->dpn; j++)
507  nwr += fprintf(fd, "%f ", wbuff[i*this->dpn+j]);
508  nwr += fprintf(fd, "\n");
509  }
510  }
511  fclose(fd);
512  }
513  else {
514  MPI_Send(&loc_size, 1, MPI_INT, 0, SF_MPITAG, comm);
515  MPI_Send(p, loc_size*sizeof(S), MPI_BYTE, 0, SF_MPITAG, comm);
516  }
517 
518  this->release_ptr(p);
519  MPI_Bcast(&nwr, 1, MPI_LONG, 0, comm);
520 
521  return nwr;
522  }
523 
531  template<typename V>
532  inline size_t write_binary(FILE* fd)
533  {
534  int size, rank;
535  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
536  MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size);
537 
538  long int loc_size = this->lsize();
539  S* p = this->ptr();
540 
541  vector<V> buff(loc_size);
542  for(long int i=0; i<loc_size; i++) buff[i] = p[i];
543 
544  long int nwr = root_write(fd, buff, comm);
545 
546  this->release_ptr(p);
547  return nwr;
548  }
549 
551  template<typename V>
552  inline size_t write_binary(std::string file)
553  {
554  size_t nwr = 0;
555  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
556  int rank; MPI_Comm_rank(comm, &rank);
557 
558  FILE* fd = NULL;
559  int error = 0;
560 
561  if(rank == 0) {
562  fd = fopen(file.c_str(), "w");
563  if(fd == NULL)
564  error++;
565  }
566 
567  MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_INT, MPI_SUM, comm);
568 
569  if(error == 0) {
570  nwr = this->write_binary<V>(fd);
571  fclose(fd);
572  }
573  else {
574  treat_file_open_error(file.c_str(), __func__, errno, false, rank);
575  }
576 
577  return nwr;
578  }
579 
580  template<typename V>
581  inline size_t read_binary(FILE* fd)
582  {
583  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
584 
585  size_t loc_size = this->lsize();
586  S* p = this->ptr();
587  vector<V> buff(loc_size);
588 
589  size_t nrd = root_read(fd, buff, comm);
590 
591  for(size_t i=0; i<loc_size; i++)
592  p[i] = buff[i];
593 
594  this->release_ptr(p);
595 
596  return nrd;
597  }
598 
599  template<typename V>
600  inline size_t read_binary(std::string file)
601  {
602  MPI_Comm comm = mesh != NULL ? mesh->comm : PETSC_COMM_WORLD;
603 
604  size_t nrd = 0;
605  int rank; MPI_Comm_rank(comm, &rank);
606 
607  FILE* fd = NULL;
608  int error = 0;
609 
610  if(rank == 0) {
611  fd = fopen(file.c_str(), "r");
612  if(fd == NULL)
613  error++;
614  }
615 
616  MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_INT, MPI_SUM, comm);
617 
618  if(error == 0) {
619  nrd = read_binary<V>(fd);
620  fclose(fd);
621  }
622  else {
623  treat_file_open_error(file.c_str(), __func__, errno, false, rank);
624  }
625 
626  return nrd;
627  }
628 
629  inline size_t read_ascii(FILE* fd)
630  {
631  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
632 
633  size_t loc_size = this->lsize();
634  S* p = this->ptr();
635 
636  size_t nrd = root_read_ascii(fd, p, loc_size, comm, false);
637  this->release_ptr(p);
638  return nrd;
639  }
640 
641  inline size_t read_ascii(std::string file)
642  {
643  MPI_Comm comm = this->mesh != NULL ? this->mesh->comm : PETSC_COMM_WORLD;
644  int rank; MPI_Comm_rank(comm, &rank);
645 
646  size_t nrd = 0;
647 
648  FILE* fd = NULL;
649  int error = 0;
650 
651  if(rank == 0) {
652  fd = fopen(file.c_str(), "r");
653  if(fd == NULL)
654  error++;
655  }
656 
657  MPI_Allreduce(MPI_IN_PLACE, &error, 1, MPI_INT, MPI_SUM, comm);
658 
659  if(error == 0) {
660  nrd = read_ascii(fd);
661  if(fd) fclose(fd);
662  }
663  else {
664  treat_file_open_error(file.c_str(), __func__, errno, false, rank);
665  }
666 
667  return nrd;
668  }
669 };
670 
671 
674 template<class T, class S>
675 inline bool is_init(const abstract_vector<T,S>* v)
676 {
677  return (v && v->is_init());
678 }
679 
680 
681 // TODO: This is a type of Vector and can most likely be integrated into the
682 // `abstract_vector` type in some way. Everything here and also in
683 // `SF_parallel_layout.h` probably needs to be abstracted away, or made less
684 // PETSc specific?
692 class scattering
693 {
694  public:
695  Vec b_buff;
696  VecScatter vec_sc;
697 
698  vector<SF_int> idx_a, idx_b;
699 
701  scattering() : b_buff(NULL), vec_sc(NULL)
702  {}
703 
705  ~scattering()
706  {
707  if(b_buff) VecDestroy(&b_buff);
708  if(vec_sc) VecScatterDestroy(&vec_sc);
709  }
716  template<class T, class S>
717  inline void forward(abstract_vector<T,S> & in, abstract_vector<T,S> & out, bool add = false)
718  {
719  in.forward(out, *this, add);
720  }
721 
728  template<class T, class S>
729  inline void backward(abstract_vector<T,S> & in, abstract_vector<T,S> & out, bool add = false)
730  {
731  in.backward(out, *this, add);
732  }
733 
740  template<class T, class S>
741  inline void operator()(abstract_vector<T,S> & v, bool fwd)
742  {
743  v.apply_scattering(*this, fwd);
744  }
745 };
746 
755 class scatter_registry
756 {
757  private:
758  hashmap::unordered_map<quadruple<int>, scattering*> _registry;
760 
761  public:
774  template<class T> inline scattering*
775  register_permutation(const quadruple<int> spec,
776                       const vector<T> & nbr_a,
777  const vector<T> & nbr_b,
778  const size_t gsize_a,
779  const size_t gsize_b,
780  const short dpn)
781  {
782  assert(_registry.count(spec) == 0);
783 
784  scattering* sc = new scattering();
785  _registry[spec] = sc;
786 
787  IS is_a, is_b;
788  int err = 0;
789  vector<SF_int> idx_a(nbr_a.size() * dpn);
790  vector<SF_int> idx_b(nbr_b.size() * dpn);
791 
792  // we copy indexing into new containers because of dpn and also
793  // because of the implicit typecast between T and SF_int
794  for(size_t i=0; i<nbr_a.size(); i++)
795  for(short j=0; j<dpn; j++) idx_a[i*dpn+j] = nbr_a[i]*dpn+j;
796 
797  for(size_t i=0; i<nbr_b.size(); i++)
798  for(short j=0; j<dpn; j++) idx_b[i*dpn+j] = nbr_b[i]*dpn+j;
799 
800  err += ISCreateGeneral(PETSC_COMM_WORLD, idx_a.size(), idx_a.data(), PETSC_COPY_VALUES, &is_a);
801  err += ISCreateGeneral(PETSC_COMM_WORLD, idx_b.size(), idx_b.data(), PETSC_COPY_VALUES, &is_b);
802 
803  err += ISSetPermutation(is_b);
804 
805  Vec a;
806  err += VecCreateMPI(PETSC_COMM_WORLD, idx_a.size(), gsize_a*dpn, &a);
807  err += VecCreateMPI(PETSC_COMM_WORLD, idx_b.size(), gsize_b*dpn, &sc->b_buff);
808 
809  err += VecScatterCreate(a, is_a, sc->b_buff, is_b, &sc->vec_sc);
810 
811  err += VecDestroy(&a);
812  err += ISDestroy (&is_a);
813  err += ISDestroy (&is_b);
814 
815  if(err)
816  fprintf(stderr, "%s : an error has occurred! Scattering spec: Mesh ID %d, permutation ID %d, numbering %d, dpn %d!\n",
817  __func__, int(spec.v1), int(spec.v2), int(spec.v3), int(spec.v4));
818 
819  sc->idx_a.assign(idx_a.begin(), idx_a.end());
820  sc->idx_b.assign(idx_b.begin(), idx_b.end());
821 
822  return sc;
823  }
824 
825 
831  template<class T> inline scattering*
832  register_scattering(const quadruple<int> spec,
833                      const vector<T> & layout_a,
834  const vector<T> & layout_b,
835  const vector<T> & idx_a,
836  const vector<T> & idx_b,
837  const int rank,
838  const int dpn)
839  {
840  // scattering spec must not exist yet
841  assert(_registry.count(spec) == 0);
842 
843  // the local index sets have to be of equal size.
844  assert(idx_a.size() == idx_b.size());
845 
846  scattering* sc = new scattering();
847  _registry[spec] = sc;
848 
849  IS is_a, is_b;
850  Vec a;
851  int err = 0;
852  vector<SF_int> sidx_a(idx_a.size() * dpn);
853  vector<SF_int> sidx_b(idx_b.size() * dpn);
854 
855  T lsize_a = layout_a[rank+1] - layout_a[rank];
856  T lsize_b = layout_b[rank+1] - layout_b[rank];
857  T gsize_a = layout_a[layout_a.size()-1];
858  T gsize_b = layout_b[layout_b.size()-1];
859 
860  // we copy indexing into new containers because of dpn and also
861  // because of the implicit typecast between T and SF_int
862  for(size_t i=0; i<idx_a.size(); i++)
863  for(short j=0; j<dpn; j++) sidx_a[i*dpn+j] = idx_a[i]*dpn+j;
864 
865  for(size_t i=0; i<idx_b.size(); i++)
866  for(short j=0; j<dpn; j++) sidx_b[i*dpn+j] = idx_b[i]*dpn+j;
867 
868  err += ISCreateGeneral(PETSC_COMM_WORLD, sidx_a.size(), sidx_a.data(), PETSC_COPY_VALUES, &is_a);
869  err += ISCreateGeneral(PETSC_COMM_WORLD, sidx_b.size(), sidx_b.data(), PETSC_COPY_VALUES, &is_b);
870 
871  err += VecCreateMPI(PETSC_COMM_WORLD, lsize_a*dpn, gsize_a*dpn, &a);
872  err += VecCreateMPI(PETSC_COMM_WORLD, lsize_b*dpn, gsize_b*dpn, &sc->b_buff);
873 
874  err += VecScatterCreate(a, is_a, sc->b_buff, is_b, &sc->vec_sc);
875 
876  err += VecDestroy(&a);
877  err += ISDestroy (&is_a);
878  err += ISDestroy (&is_b);
879 
880  if(err)
881  fprintf(stderr, "%s : an error has occurred! Scattering spec: To ID %d, From ID %d, numbering %d, dpn %d!\n",
882  __func__, int(spec.v1), int(spec.v2), int(spec.v3), int(spec.v4));
883 
884  sc->idx_a.assign(idx_a.begin(), idx_a.end());
885  sc->idx_b.assign(idx_b.begin(), idx_b.end());
886 
887  return sc;
888  }
889 
900  inline scattering* get_scattering(const quadruple<int> spec)
901  {
902  scattering* ret = NULL;
903 
904  // scattering must be present
905  if(_registry.count(spec))
906  ret = _registry[spec];
907 
908  return ret;
909  }
910 
914  inline void free_scatterings()
915  {
916  typename hashmap::unordered_map<quadruple<int>,scattering*>::iterator it;
917  for(it = _registry.begin(); it != _registry.end(); ++it) {
918  if(it->second) {
919  delete it->second;
920  it->second = NULL;
921  }
922  }
923  }
924 };
925 
926 
927 } // namespace SF
928 
929 
930 #endif // _SF_ABSTRACT_VECTOR_H
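
A minimal usage sketch (not part of the header above): it relies only on the interface declared in this file and assumes the vector has already been created and init()'ed by one of the concrete backends implementing abstract_vector. The helper name demo_write and the output file name are illustrative.

#include "SF_abstract_vector.h"

// Sketch only: operate on an already-initialized vector through the abstract interface.
template<class T, class S>
void demo_write(SF::abstract_vector<T, S>& v, const char* fname)
{
  if(!SF::is_init(&v))      // free function defined near the end of this header
    return;

  v.set(S(0));              // set every entry to zero
  v += S(1);                // add a constant to every entry
  S nrm = v.mag();          // magnitude (norm) of the distributed vector
  (void) nrm;

  // rank 0 gathers the per-rank blocks and writes a single ASCII file,
  // prepending the global size when write_header is true
  v.write_ascii(fname, true);
}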