for(int pid = 0; pid < size; pid++) {
printf("IO rank %d / %d: polling ..\n", rank+1, size);
MPI_Barrier(PETSC_COMM_WORLD);
bool do_continue = true;
MPI_Bcast(&cmd_buff, 1, MPI_INT, 0, PETSC_COMM_WORLD);
for(int pid = 0; pid < size; pid++) {
printf("IO rank %d / %d: exiting ..\n", rank+1, size);
MPI_Barrier(PETSC_COMM_WORLD);
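These lines are the IO-side polling path: every IO rank reports its status, synchronizes, and then waits for the command that rank 0 broadcasts as an integer (cmd_buff). The loop below is a minimal, hypothetical sketch of such a command loop; the enum values and the name io_poll_sketch are placeholders, not the actual codes behind ASYNC_CMD_REGISTER_OUTPUT.

#include <mpi.h>
#include <cstdio>

// Placeholder command codes; the real ASYNC_CMD_* values are not shown in this listing.
enum { CMD_REGISTER_OUTPUT = 1, CMD_DO_OUTPUT = 2, CMD_EXIT = 3 };

// Sketch of an IO-side polling loop: rank 0 of the IO communicator decides the
// next action and broadcasts it, all IO ranks dispatch on the received code.
void io_poll_sketch(MPI_Comm io_comm)
{
  int rank, size;
  MPI_Comm_rank(io_comm, &rank);
  MPI_Comm_size(io_comm, &size);

  bool do_continue = true;
  while(do_continue) {
    int cmd_buff = 0;
    // only rank 0 knows the next command (e.g. received from the compute world);
    // here it simply exits so the sketch is self-contained
    if(rank == 0) cmd_buff = CMD_EXIT;
    MPI_Bcast(&cmd_buff, 1, MPI_INT, 0, io_comm);

    switch(cmd_buff) {
      case CMD_REGISTER_OUTPUT: /* set up a new IGB output slot */       break;
      case CMD_DO_OUTPUT:       /* receive and write one time slice */   break;
      case CMD_EXIT:            do_continue = false;                     break;
    }
  }
  printf("IO rank %d / %d: exiting ..\n", rank+1, size);
}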
long int loc_size = idx.size();
const double dimt = tm.end - tm.start;
snprintf(header, sizeof header, "%d %d %lf %s %s", dpn, num_io, dimt, name, units);
MPI_Barrier(PETSC_COMM_WORLD);
MPI_Bcast(&id, 1, MPI_INT, 0, PETSC_COMM_WORLD);
for(size_t i=0; i<dest.size(); i++)
  dest[i] = (idx[i] - gmin) / bsize;
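The last loop computes, for every global index, the rank whose contiguous block it falls into, presumably the destination vector later passed to commgraph::configure. A small stand-alone sketch of that bucketing, assuming an even block partitioning of size bsize starting at the global minimum gmin (destinations_sketch is an illustrative name):

#include <vector>
#include <cstddef>

// Map each global index to the rank owning the block it falls into.
// Assumes rank r owns the index range [gmin + r*bsize, gmin + (r+1)*bsize).
void destinations_sketch(const std::vector<long> & idx, long gmin, long bsize,
                         std::vector<int> & dest)
{
  dest.resize(idx.size());
  for(size_t i = 0; i < idx.size(); i++)
    dest[i] = int((idx[i] - gmin) / bsize);
}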
for(size_t i=0; i<perm_before_comm.size(); i++)
  snd_idx[i] = idx[perm_before_comm[i]];
MPI_Exchange(grph, snd_idx, recv_idx, PETSC_COMM_WORLD);
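SF::MPI_Exchange is invoked here with the commgraph grph describing how much data each rank sends and receives. Its implementation is not part of this listing; the sketch below is a generic equivalent built on MPI_Alltoallv, under the assumption that the graph boils down to per-rank send and receive counts (scnt/rcnt; only rcnt is documented in the index below). exchange_sketch is an illustrative name, not the SF routine.

#include <mpi.h>
#include <vector>
#include <numeric>

// Generic all-to-all exchange sketch: scnt/rcnt are per-rank element counts,
// one entry per rank of comm (so both vectors are non-empty).
void exchange_sketch(const std::vector<int> & scnt, const std::vector<int> & rcnt,
                     const std::vector<int> & send, std::vector<int> & recv, MPI_Comm comm)
{
  std::vector<int> sdsp(scnt.size(), 0), rdsp(rcnt.size(), 0);
  std::partial_sum(scnt.begin(), scnt.end() - 1, sdsp.begin() + 1);  // displacements from counts
  std::partial_sum(rcnt.begin(), rcnt.end() - 1, rdsp.begin() + 1);

  recv.resize(std::accumulate(rcnt.begin(), rcnt.end(), 0));
  MPI_Alltoallv(send.data(), scnt.data(), sdsp.data(), MPI_INT,
                recv.data(), rcnt.data(), rdsp.data(), MPI_INT, comm);
}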
IGBheader* IO_open_igb(const int numIOs, const double dimt, const size_t gsize, const int dpn, const char* name, const char* units)
igb.inc_t(param_globals::spacedt);
log_msg(0,5,0, "%s error: Could not set up data output! Aborting!", __func__);
MPI_Bcast(header, 2048, MPI_CHAR, 0, PETSC_COMM_WORLD);
MPI_Bcast(data_layout.data(), data_layout.size(), MPI_LONG, 0, PETSC_COMM_WORLD);
int dpn = 0, numIOs = 0;
char name_str[2048], units_str[2048];
sscanf(header, "%d %d %lf %s %s", &dpn, &numIOs, &dimt, name_str, units_str);
size_t gsize = data_layout[data_layout.size()-1];
num_recv += data_layout[s+1] - data_layout[s];
for(size_t i=0, dsp=0; i<senders.size(); i++) {
  int send_rank = senders[i];
  size_t send_size = data_layout[send_rank+1] - data_layout[send_rank];
  MPI_Irecv(idx_buff.data() + dsp, send_size*sizeof(mesh_int_t), MPI_BYTE, send_rank,
MPI_Waitall(senders.size(), req.data(), stat.data());
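Each IO rank posts one MPI_Irecv per sending compute rank: the chunk size of sender s is the difference of two consecutive entries of the broadcast layout array, the receive offset dsp accumulates over the senders, and a single MPI_Waitall completes all transfers. A sketch under the assumption that layout is a prefix array with one entry per rank plus a trailing total (receive_chunks_sketch and the tag parameter are placeholders):

#include <mpi.h>
#include <vector>

// Post one non-blocking receive per sender; sender s contributes
// layout[s+1] - layout[s] float entries, placed contiguously into buff.
void receive_chunks_sketch(const std::vector<long> & layout, const std::vector<int> & senders,
                           std::vector<float> & buff, MPI_Comm intercomm, int tag)
{
  size_t num_recv = 0;
  for(int s : senders)
    num_recv += layout[s+1] - layout[s];
  buff.resize(num_recv);

  std::vector<MPI_Request> req(senders.size());
  std::vector<MPI_Status>  stat(senders.size());

  size_t dsp = 0;
  for(size_t i = 0; i < senders.size(); i++) {
    int    send_rank = senders[i];
    size_t send_size = layout[send_rank+1] - layout[send_rank];
    MPI_Irecv(buff.data() + dsp, int(send_size), MPI_FLOAT, send_rank, tag, intercomm, &req[i]);
    dsp += send_size;
  }
  MPI_Waitall(int(senders.size()), req.data(), stat.data());
}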
int id = io_queue.add(igb, data_layout, cg, pbefore, pafter);
int rank_cnt = remote_dist[remote_idx];
while(remote_idx < int(remote_dist.size()) && il.loc_rank >= rank_cnt) {
  rank_cnt += remote_dist[remote_idx];
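COMPUTE_get_receive_rank walks a distribution array: remote_dist[r] holds how many compute ranks are assigned to IO rank r, and the loop advances until the cumulative count exceeds the local rank il.loc_rank. A stand-alone sketch of the same lookup (receive_rank_sketch is an illustrative name):

#include <vector>

// Given remote_dist[r] = number of local ranks assigned to remote rank r,
// return the remote rank that serves local rank loc_rank, or -1 if out of range.
int receive_rank_sketch(const std::vector<int> & remote_dist, int loc_rank)
{
  int remote_idx = 0;
  int rank_cnt = remote_dist.empty() ? 0 : remote_dist[0];

  while(remote_idx < int(remote_dist.size()) && loc_rank >= rank_cnt) {
    remote_idx++;
    if(remote_idx < int(remote_dist.size()))
      rank_cnt += remote_dist[remote_idx];
  }
  return remote_idx < int(remote_dist.size()) ? remote_idx : -1;
}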
for(int i=start; i<stop; i++)
for(size_t i=0; i<perm_b.size(); i++)
  snd_data[i] = data[perm_b[i]];
for(size_t i=0; i<perm_a.size(); i++)
  data[i] = recv_data[perm_a[i]];
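IO_sort_data wraps the exchange in two permutations: perm_b gathers the local values into the send order required by the communication graph, and perm_a rebuilds the final ordering from the received buffer. The sketch below shows just the two gather steps, assuming both permutations contain valid positions; the actual MPI exchange in between is omitted.

#include <vector>
#include <cstddef>

// Reorder data into send order via perm_b, then rebuild the output order
// from the received buffer via perm_a (the exchange itself is left out).
void permute_sketch(std::vector<float> & data,
                    const std::vector<int> & perm_b,
                    const std::vector<int> & perm_a,
                    std::vector<float> & snd_data,
                    const std::vector<float> & recv_data)
{
  snd_data.resize(perm_b.size());
  for(size_t i = 0; i < perm_b.size(); i++)   // gather into send order
    snd_data[i] = data[perm_b[i]];

  // ... exchange snd_data -> recv_data over MPI here ...

  data.resize(perm_a.size());
  for(size_t i = 0; i < perm_a.size(); i++)   // gather back into output order
    data[i] = recv_data[perm_a[i]];
}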
MPI_Bcast(&id, 1, MPI_INT, 0, PETSC_COMM_WORLD);
assert(id > -1 && id < int(io_queue.IGBs.size()));
num_recv += layout[s+1] - layout[s];
for(size_t i=0, dsp=0; i<senders.size(); i++) {
  int send_rank = senders[i];
  size_t send_size = layout[send_rank+1] - layout[send_rank];
  MPI_Irecv(buff.data() + dsp, send_size, MPI_FLOAT, send_rank,
MPI_Waitall(senders.size(), req.data(), stat.data());
data.assign(dat, dat+lsize);
intercomm_layout
minimal information needed for communication between MPI_Comms
T global_min(const vector< T > &vec, MPI_Comm comm)
Compute the global minimum of a distributed vector.
int COMPUTE_get_receive_rank(const intercomm_layout &il)
get the IO node rank that will receive our data chunk
void IO_register_output(async_IO_queue &io_queue)
void layout_from_count(const T count, vector< T > &layout, MPI_Comm comm)
void MPI_Exchange(commgraph< T > &grph, vector< S > &send, vector< S > &recv, MPI_Comm comm)
Exchange data in parallel over MPI.
void IO_sort_data(SF::vector< float > &data, const SF::vector< mesh_int_t > &perm_b, const SF::vector< mesh_int_t > &perm_a, SF::commgraph< size_t > &cg)
void interval(vector< T > &vec, size_t start, size_t end)
Create an integer interval between start and end.
void IO_do_output(async_IO_queue &io_queue)
timer_manager * tm_manager
a manager for the various physics timers
void divide(const size_t gsize, const size_t num_parts, vector< T > &loc_sizes)
divide gsize into num_parts local parts with even distribution of the remainder (sketched after this list)
void COMPUTE_send_exit_flag()
this function sends the exit flag from a compute node to an io node.
void IO_poll_for_output(async_IO_queue &io_queue)
T * data()
Pointer to the vector's start.
SF::vector< SF::vector< mesh_int_t > > perm_a
permutation after MPI_Exchange
int loc_rank
the local rank
#define ASYNC_CMD_REGISTER_OUTPUT
SF::vector< SF::vector< mesh_int_t > > perm_b
permutation before MPI_Exchange
void COMPUTE_do_output(SF_real *dat, const int lsize, const int IO_id)
SF::vector< SF::vector< long int > > layouts
data layouts
int loc_size
the local communicator size
int COMPUTE_register_output(const SF::vector< mesh_int_t > &idx, const int dpn, const char *name, const char *units)
Top-level header of FEM module.
int rem_size
the remote communicator size
int add(IGBheader *igb, const SF::vector< long int > &lt, const SF::commgraph< size_t > &c, const SF::vector< mesh_int_t > &pb, const SF::vector< mesh_int_t > &pa)
add one slice of IO info to the queue
SF::vector< SF::commgraph< size_t > > cg
commgraphs for MPI_Exchange
void IO_get_sender_ranks(const intercomm_layout &il, SF::vector< int > &sender)
get the compute node ranks that will send their data chunk to us
void setup(MPI_Comm ic)
setup routine for the members
void configure(const vector< V > &dest, MPI_Comm comm)
Set up the communication graph.
T sum(const vector< T > &vec)
Compute sum of a vector's entries.
T get_global(T in, MPI_Op OP, MPI_Comm comm=PETSC_COMM_WORLD)
Do a global reduction on a variable.
MPI_Comm IO_Intercomm
Communicator between IO and compute worlds.
IGBheader * IO_open_igb(const int numIOs, const double dimt, const size_t gsize, const int dpn, const char *name, const char *units)
void binary_sort_copy(vector< T > &_V, vector< S > &_W)
T global_max(const vector< T > &vec, MPI_Comm comm)
Compute the global maximum of a distributed vector.
size_t size() const
The current size of the vector.
Top-level header of physics module.
void log_msg(FILE_SPEC out, int level, unsigned char flag, const char *fmt,...)
std::vector< base_timer * > timers
vector containing individual timers
void assign(InputIterator s, InputIterator e)
Assign a memory range.
std::map< int, std::string > units
double SF_real
Use the general double as real type.
Basic utility structs and functions, mostly IO related.
vector< T > rcnt
Number of elements received from each rank.
size_t root_write(FILE *fd, const vector< V > &vec, MPI_Comm comm)
Write vector data binary to disk.
void resize(size_t n)
Resize a vector.
int get_size(MPI_Comm comm=PETSC_COMM_WORLD)
Simulator-level utility execution control functions.
double start
initial time (nonzero when restarting)
void dsp_from_cnt(const vector< T > &cnt, vector< T > &dsp)
Compute displacements from counts (sketched after this list).
centralize time management and output triggering
int get_rank(MPI_Comm comm=PETSC_COMM_WORLD)
void IO_prepare_sort(const SF::vector< mesh_int_t > &inp_idx, SF::commgraph< size_t > &grph, SF::vector< mesh_int_t > &perm_before_comm, SF::vector< mesh_int_t > &perm_after_comm)
SF::vector< IGBheader * > IGBs
IGBs with open filehandles on rank 0.
async_IO_queue
queue with the data required for performing async IO writes to IGB
FILE_SPEC f_open(const char *fname, const char *mode)
Open a FILE_SPEC.
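Two of the helpers listed above, divide() and dsp_from_cnt(), are small enough to sketch in full. These are generic reimplementations for illustration, not the SF sources: divide splits gsize into num_parts chunks whose sizes differ by at most one, and dsp_from_cnt turns such a count vector into the displacement (prefix) array that serves as a data layout.

#include <vector>
#include <cstddef>

// Split gsize into num_parts chunks, spreading the remainder over the first chunks.
void divide_sketch(size_t gsize, size_t num_parts, std::vector<size_t> & loc_sizes)
{
  loc_sizes.assign(num_parts, gsize / num_parts);
  for(size_t i = 0; i < gsize % num_parts; i++)
    loc_sizes[i] += 1;
}

// Displacements from counts: dsp[0] = 0, dsp[i+1] = dsp[i] + cnt[i];
// the last entry equals the total size.
void dsp_from_cnt_sketch(const std::vector<size_t> & cnt, std::vector<size_t> & dsp)
{
  dsp.resize(cnt.size() + 1);
  dsp[0] = 0;
  for(size_t i = 0; i < cnt.size(); i++)
    dsp[i+1] = dsp[i] + cnt[i];
}

For example, 10 elements over 4 parts give the counts {3, 3, 2, 2} and the displacements {0, 3, 6, 8, 10}; a trailing total of this kind is exactly what the receive loops above read via layout[s+1] - layout[s] and data_layout[data_layout.size()-1].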