Commit ac74d8fd authored by Pedro Gonnet's avatar Pedro Gonnet
Browse files

wrap tag IDs to prevent going into bad territory, clean out runner_dosend.


Former-commit-id: 67253fd5d24b593bdd7822863d0b25781cb1e744
parent 324bd359
......@@ -181,7 +181,7 @@ int cell_pack ( struct cell *c , struct pcell *pc ) {
pc->dt_min = c->dt_min;
pc->dt_max = c->dt_max;
pc->count = c->count;
c->tag = pc->tag = cell_next_tag++;
c->tag = pc->tag = atomic_inc(cell_next_tag) % cell_max_tag;
/* Fill in the progeny, depth-first recursion. */
for ( k = 0 ; k < 8 ; k++ )
......
......@@ -19,6 +19,7 @@
/* Some constants. */
#define cell_sid_dt 13
#define cell_max_tag (1 << 16)
/* Global variables. */
......
......@@ -101,43 +101,6 @@ const char runner_flip[27] = { 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1
#include "runner_doiact_grav.h"
/**
* @brief Send a local cell's particle data to another node.
*
* @param r The #runner.
* @param c The #cell.
* @param nodeID The destination node's ID.
* @param tag bit to distinguish between xv and rho sends.
*/
void runner_dosend ( struct runner *r , struct cell *c , int nodeID , int tag ) {

#ifdef WITH_MPI

    MPI_Request request;

    /* Odd tags carry density (rho) data: refuse to ship densities
       that the ghost task has not yet filled in. */
    if ( ( tag & 1 ) && c->parts[0].rho == 0.0 )
        error( "Attempting to send rhos before ghost task completed." );

    /* Fire off a non-blocking send of the raw particle array. */
    int res = MPI_Isend( c->parts , sizeof(struct part) * c->count , MPI_BYTE , nodeID , tag , MPI_COMM_WORLD , &request );
    if ( res != MPI_SUCCESS )
        error( "Failed to isend particle data." );

    message( "sending %i parts with tag=%i from %i to %i." ,
        c->count , tag , r->e->nodeID , nodeID );
    fflush(stdout);

    /* Fire-and-forget: we never wait on this send, so release the
       request handle immediately (MPI still completes the transfer). */
    MPI_Request_free( &request );

#else
    error( "SWIFT was not compiled with MPI support." );
#endif

}
/**
* @brief Sort the entries in ascending order using QuickSort.
*
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.