Commit 1687d3bb authored by Pedro Gonnet

only communicate strays if there are any.

Former-commit-id: 2b8a6bc19c45b2dc4ca2c8275420bf203370b086
parent 0d1a109b
@@ -609,7 +609,7 @@ int engine_exchange_strays ( struct engine *e , struct part *parts , struct xpar
 #ifdef WITH_MPI
-    int k, pid, count = 0;
+    int k, pid, count = 0, nr_in = 0, nr_out = 0;
     MPI_Request reqs_in[ engine_maxproxies ];
     MPI_Request reqs_out[ engine_maxproxies ];
     MPI_Status status;
@@ -644,13 +644,23 @@ int engine_exchange_strays ( struct engine *e , struct part *parts , struct xpar
     /* Set the requests for the particle data. */
     for ( k = 0 ; k < e->nr_proxies ; k++ ) {
-        reqs_in[k] = e->proxies[k].req_xparts_in;
-        reqs_out[k] = e->proxies[k].req_xparts_out;
+        if ( e->proxies[k].nr_parts_in > 0 ) {
+            reqs_in[k] = e->proxies[k].req_xparts_in;
+            nr_in += 1;
+            }
+        else
+            reqs_in[k] = MPI_REQUEST_NULL;
+        if ( e->proxies[k].nr_parts_out > 0 ) {
+            reqs_out[k] = e->proxies[k].req_xparts_out;
+            nr_out += 1;
+            }
+        else
+            reqs_out[k] = MPI_REQUEST_NULL;
         }

     /* Wait for each part array to come in and collect the new
        parts from the proxies. */
-    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
+    for ( k = 0 ; k < nr_in ; k++ ) {
         if ( MPI_Waitany( e->nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
              pid == MPI_UNDEFINED )
             error( "MPI_Waitany failed." );
@@ -666,8 +676,9 @@ int engine_exchange_strays ( struct engine *e , struct part *parts , struct xpar
         }

     /* Wait for all the sends to have finnished too. */
-    if ( MPI_Waitall( e->nr_proxies , reqs_out , &status ) != MPI_SUCCESS )
-        error( "MPI_Waitall on sends failed." );
+    if ( nr_out > 0 )
+        if ( MPI_Waitall( e->nr_proxies , reqs_out , &status ) != MPI_SUCCESS )
+            error( "MPI_Waitall on sends failed." );

     /* Return the number of harvested parts. */
     return count;
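
The subtle point in the hunk above is that MPI_Waitany skips entries equal to MPI_REQUEST_NULL, so waiting nr_in times (instead of e->nr_proxies times) services exactly the receives that were actually posted and never blocks on an empty slot. Below is a minimal, self-contained sketch of that pattern; it is not taken from the SWIFT sources, and the proxy bookkeeping is faked with self-messages on MPI_COMM_SELF so it runs on a single rank.

/* Sketch: wait only for the active requests; null requests are ignored. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);

    const int nr_proxies = 4;
    MPI_Request reqs_in[4];
    int buf[4], nr_in = 0;

    /* Pretend only proxies 1 and 3 actually have particles for us. */
    for (int k = 0; k < nr_proxies; k++) {
        if (k == 1 || k == 3) {
            MPI_Irecv(&buf[k], 1, MPI_INT, 0, k, MPI_COMM_SELF, &reqs_in[k]);
            nr_in += 1;
        } else {
            reqs_in[k] = MPI_REQUEST_NULL;  /* inactive slot, skipped by Waitany. */
        }
    }

    /* Matching self-sends so the posted receives can complete. */
    int payload[2] = {42, 43};
    MPI_Request reqs_out[2];
    MPI_Isend(&payload[0], 1, MPI_INT, 0, 1, MPI_COMM_SELF, &reqs_out[0]);
    MPI_Isend(&payload[1], 1, MPI_INT, 0, 3, MPI_COMM_SELF, &reqs_out[1]);

    /* Wait nr_in times, not nr_proxies times: the null requests never produce
       a completion, so any extra iteration would only return MPI_UNDEFINED. */
    for (int k = 0; k < nr_in; k++) {
        int pid;
        MPI_Status status;
        if (MPI_Waitany(nr_proxies, reqs_in, &pid, &status) != MPI_SUCCESS ||
            pid == MPI_UNDEFINED)
            fprintf(stderr, "MPI_Waitany failed.\n");
        else
            printf("got %d from proxy %d\n", buf[pid], pid);
    }

    MPI_Waitall(2, reqs_out, MPI_STATUSES_IGNORE);
    MPI_Finalize();
    return 0;
}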
@@ -211,15 +211,17 @@ void proxy_parts_exch1 ( struct proxy *p ) {
     // message( "isent particle count (%i) from node %i to node %i." , p->nr_parts_out , p->mynodeID , p->nodeID ); fflush(stdout);

     /* Send the particle buffers. */
-    if ( MPI_Isend( p->parts_out , sizeof(struct part)*p->nr_parts_out , MPI_BYTE , p->nodeID , p->mynodeID*proxy_tag_shift + proxy_tag_parts , MPI_COMM_WORLD , &p->req_parts_out ) != MPI_SUCCESS ||
-         MPI_Isend( p->xparts_out , sizeof(struct xpart)*p->nr_parts_out , MPI_BYTE , p->nodeID , p->mynodeID*proxy_tag_shift + proxy_tag_xparts , MPI_COMM_WORLD , &p->req_xparts_out ) != MPI_SUCCESS )
-        error( "Failed to isend part data." );
-    MPI_Request_free( &p->req_parts_out );
-    // message( "isent particle data (%i) to node %i." , p->nr_parts_out , p->nodeID ); fflush(stdout);
-    /* for ( int k = 0 ; k < p->nr_parts_out ; k++ )
-        message( "sending particle %lli, x=[%.3e %.3e %.3e], h=%.3e, to node %i." ,
-            p->parts_out[k].id , p->parts_out[k].x[0] , p->parts_out[k].x[1] , p->parts_out[k].x[2] ,
-            p->parts_out[k].h , p->nodeID ); */
+    if ( p->nr_parts_out > 0 ) {
+        if ( MPI_Isend( p->parts_out , sizeof(struct part)*p->nr_parts_out , MPI_BYTE , p->nodeID , p->mynodeID*proxy_tag_shift + proxy_tag_parts , MPI_COMM_WORLD , &p->req_parts_out ) != MPI_SUCCESS ||
+             MPI_Isend( p->xparts_out , sizeof(struct xpart)*p->nr_parts_out , MPI_BYTE , p->nodeID , p->mynodeID*proxy_tag_shift + proxy_tag_xparts , MPI_COMM_WORLD , &p->req_xparts_out ) != MPI_SUCCESS )
+            error( "Failed to isend part data." );
+        MPI_Request_free( &p->req_parts_out );
+        // message( "isent particle data (%i) to node %i." , p->nr_parts_out , p->nodeID ); fflush(stdout);
+        /* for ( int k = 0 ; k < p->nr_parts_out ; k++ )
+            message( "sending particle %lli, x=[%.3e %.3e %.3e], h=%.3e, to node %i." ,
+                p->parts_out[k].id , p->parts_out[k].x[0] , p->parts_out[k].x[1] , p->parts_out[k].x[2] ,
+                p->parts_out[k].h , p->nodeID ); */
+        }

     /* Receive the number of particles. */
     if ( MPI_Irecv( &p->nr_parts_in , 1 , MPI_INT , p->nodeID , p->nodeID*proxy_tag_shift + proxy_tag_count , MPI_COMM_WORLD , &p->req_parts_count_in ) != MPI_SUCCESS )
@@ -249,10 +251,12 @@ void proxy_parts_exch2 ( struct proxy *p ) {
         }

     /* Receive the particle buffers. */
-    if ( MPI_Irecv( p->parts_in , sizeof(struct part)*p->nr_parts_in , MPI_BYTE , p->nodeID , p->nodeID*proxy_tag_shift + proxy_tag_parts , MPI_COMM_WORLD , &p->req_parts_in ) != MPI_SUCCESS ||
-         MPI_Irecv( p->xparts_in , sizeof(struct xpart)*p->nr_parts_in , MPI_BYTE , p->nodeID , p->nodeID*proxy_tag_shift + proxy_tag_xparts , MPI_COMM_WORLD , &p->req_xparts_in ) != MPI_SUCCESS )
-        error( "Failed to irecv part data." );
-    // message( "irecv particle data (%i) from node %i." , p->nr_parts_in , p->nodeID ); fflush(stdout);
+    if ( p->nr_parts_in > 0 ) {
+        if ( MPI_Irecv( p->parts_in , sizeof(struct part)*p->nr_parts_in , MPI_BYTE , p->nodeID , p->nodeID*proxy_tag_shift + proxy_tag_parts , MPI_COMM_WORLD , &p->req_parts_in ) != MPI_SUCCESS ||
+             MPI_Irecv( p->xparts_in , sizeof(struct xpart)*p->nr_parts_in , MPI_BYTE , p->nodeID , p->nodeID*proxy_tag_shift + proxy_tag_xparts , MPI_COMM_WORLD , &p->req_xparts_in ) != MPI_SUCCESS )
+            error( "Failed to irecv part data." );
+        // message( "irecv particle data (%i) from node %i." , p->nr_parts_in , p->nodeID ); fflush(stdout);
+        }

 #else
     error( "SWIFT was not compiled with MPI support." );