/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 * 
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <omp.h>
#include <sched.h>

/* MPI headers. */
#ifdef WITH_MPI
    #include <mpi.h>
#endif

/* METIS headers. */
#ifdef HAVE_METIS
    #include <metis.h>
#endif

/* Local headers. */
#include "const.h"
#include "cycle.h"
#include "atomic.h"
#include "timers.h"
#include "vector.h"
#include "vector.h"
#include "lock.h"
#include "task.h"
#include "part.h"
#include "debug.h"
#include "space.h"
#include "multipole.h"
#include "cell.h"
#include "queue.h"
#include "scheduler.h"
#include "engine.h"
#include "runner.h"
#include "proxy.h"
#include "error.h"

#ifdef LEGACY_GADGET2_SPH
#include "runner_iact_legacy.h"
#else
#include "runner_iact.h"
#endif


/* Convert cell location to ID. */
#define cell_getid( cdim , i , j , k ) ( (int)(k) + (cdim)[2]*( (int)(j) + (cdim)[1]*(int)(i) ) )


/** The rank of the engine as a global variable (for messages). */
int engine_rank;


/**
 * @brief Link a density/force task to a cell.
 *
 * @param e The #engine.
 * @param l The #link.
 * @param t The #task.
 *
 * @return The new #link pointer.
 */
 
struct link *engine_addlink( struct engine *e , struct link *l , struct task *t ) {
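    /* Reserve the next free entry in the engine's pre-allocated links buffer. */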

    struct link *res = &e->links[ atomic_inc( &e->nr_links ) ];
    res->next = l;
    res->t = t;
    return res;

    }


/**
 * @brief Generate the ghost and kick tasks for a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The #cell.
 * @param super The super #cell.
 */
 
void engine_mkghosts ( struct engine *e , struct cell *c , struct cell *super ) {

    int k;
    struct scheduler *s = &e->sched;

    /* Am I the super-cell? */
    if ( super == NULL && c->nr_tasks > 0 ) {
    
        /* Remember me. */
        super = c;
        
        /* Local tasks only... */
        if ( c->nodeID == e->nodeID ) {
        
            /* Generate the ghost task. */
            c->ghost = scheduler_addtask( s , task_type_ghost , task_subtype_none , 0 , 0 , c , NULL , 0 );

            /* Add the kick2 task. */
            c->kick2 = scheduler_addtask( s , task_type_kick2 , task_subtype_none , 0 , 0 , c , NULL , 0 );

            /* Add the kick1 task if needed. */
            if ( !(e->policy & engine_policy_fixdt) )
                c->kick1 = scheduler_addtask( s , task_type_kick1 , task_subtype_none , 0 , 0 , c , NULL , 0 );
                
            }
            
        }
        
    /* Set the super-cell. */
    c->super = super;
        
    /* Recurse. */
    if ( c->split )
        for ( k = 0 ; k < 8 ; k++ )
            if ( c->progeny[k] != NULL )
                engine_mkghosts( e , c->progeny[k] , super );
    
    }


/**
 * @brief Redistribute the particles amongst the nodes according
 *      to their cells' node IDs.
 *
 * @param e The #engine.
 */
 
void engine_redistribute ( struct engine *e ) {

#ifdef WITH_MPI

    int i, j, k, cid;
    int nr_nodes = e->nr_nodes, nodeID = e->nodeID;
    struct space *s = e->s;
    int my_cells = 0;
    int *cdim = s->cdim;
    struct cell *cells = s->cells;
    int nr_cells = s->nr_cells;

    /* Start by sorting the particles according to their nodes and
       getting the counts. */
    int *counts, *dest;
    struct part *parts = s->parts;
    double ih[3], dim[3];
    ih[0] = s->ih[0]; ih[1] = s->ih[1]; ih[2] = s->ih[2];
    dim[0] = s->dim[0]; dim[1] = s->dim[1]; dim[2] = s->dim[2];
    if ( ( counts = (int *)malloc( sizeof(int) * nr_nodes * nr_nodes ) ) == NULL ||
         ( dest = (int *)malloc( sizeof(int) * s->nr_parts ) ) == NULL )
        error( "Failed to allocate count and dest buffers." );
    bzero( counts , sizeof(int) * nr_nodes * nr_nodes );
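    /* counts is an nr_nodes x nr_nodes matrix: entry [i*nr_nodes + j] counts
       the particles moving from node i to node j. Each node fills only its
       own row here; the full matrix is assembled by the allreduce below. */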
    for ( k = 0 ; k < s->nr_parts ; k++ ) {
        for ( j = 0 ; j < 3 ; j++ ) {
            if ( parts[k].x[j] < 0.0 ) parts[k].x[j] += dim[j];
            else if ( parts[k].x[j] >= dim[j] ) parts[k].x[j] -= dim[j];
            }
        cid = cell_getid( cdim , parts[k].x[0]*ih[0] , parts[k].x[1]*ih[1] , parts[k].x[2]*ih[2] );
        dest[k] = cells[ cid ].nodeID;
        counts[ nodeID*nr_nodes + dest[k] ] += 1;
        }
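    /* Sort the particles by destination node so that each node's outgoing
       particles form a contiguous block. */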
    parts_sort( s->parts , s->xparts , dest , s->nr_parts , 0 , nr_nodes-1 );
    
    /* Get all the counts from all the nodes. */
    if ( MPI_Allreduce( MPI_IN_PLACE , counts , nr_nodes * nr_nodes , MPI_INT , MPI_SUM , MPI_COMM_WORLD ) != MPI_SUCCESS )
        error( "Failed to allreduce particle transfer counts." );

    /* Get the new number of parts for this node, be generous in allocating. */
    int nr_parts = 0;
    for ( k = 0 ; k < nr_nodes ; k++ )
        nr_parts += counts[ k*nr_nodes + nodeID ];
    struct part *parts_new;
    struct xpart *xparts_new, *xparts = s->xparts;
    if ( posix_memalign( (void **)&parts_new , part_align , sizeof(struct part) * nr_parts * 1.2 ) != 0 ||
         posix_memalign( (void **)&xparts_new , part_align , sizeof(struct xpart) * nr_parts * 1.2 ) != 0 )
        error( "Failed to allocate new part data." );
        
    /* Emit the sends and recvs for the particle data. */
    MPI_Request *reqs;
    if ( ( reqs = (MPI_Request *)malloc( sizeof(MPI_Request) * 4 * nr_nodes ) ) == NULL )
        error( "Failed to allocate MPI request list." );
    for ( k = 0 ; k < 4*nr_nodes ; k++ )
        reqs[k] = MPI_REQUEST_NULL;
    for ( i = 0 , j = 0 , k = 0 ; k < nr_nodes ; k++ ) {
        if ( k == nodeID && counts[ nodeID*nr_nodes + k ] > 0 ) {
            memcpy( &parts_new[j] , &parts[i] , sizeof(struct part) * counts[ k*nr_nodes + nodeID ] );
            memcpy( &xparts_new[j] , &xparts[i] , sizeof(struct xpart) * counts[ k*nr_nodes + nodeID ] );
            i += counts[ nodeID*nr_nodes + k ];
            j += counts[ k*nr_nodes + nodeID ];
            }
        if ( k != nodeID && counts[ nodeID*nr_nodes + k ] > 0 ) {
            if ( MPI_Isend( &parts[i] , sizeof(struct part) * counts[ nodeID*nr_nodes + k ] , MPI_BYTE , k , 2*(nodeID*nr_nodes + k) + 0 , MPI_COMM_WORLD , &reqs[4*k] ) != MPI_SUCCESS )
                error( "Failed to isend parts to node %i." , k );
            if ( MPI_Isend( &xparts[i] , sizeof(struct xpart) * counts[ nodeID*nr_nodes + k ] , MPI_BYTE , k , 2*(nodeID*nr_nodes + k) + 1 , MPI_COMM_WORLD , &reqs[4*k+1] ) != MPI_SUCCESS )
                error( "Failed to isend xparts to node %i." , k );
            i += counts[ nodeID*nr_nodes + k ];
            }
        if ( k != nodeID && counts[ k*nr_nodes + nodeID ] > 0 ) {
            if ( MPI_Irecv( &parts_new[j] , sizeof(struct part) * counts[ k*nr_nodes + nodeID ] , MPI_BYTE , k , 2*(k*nr_nodes + nodeID) + 0 , MPI_COMM_WORLD , &reqs[4*k+2] ) != MPI_SUCCESS )
                error( "Failed to emit irecv of parts from node %i." , k );
            if ( MPI_Irecv( &xparts_new[j] , sizeof(struct xpart) * counts[ k*nr_nodes + nodeID ] , MPI_BYTE , k , 2*(k*nr_nodes + nodeID) + 1 , MPI_COMM_WORLD , &reqs[4*k+3] ) != MPI_SUCCESS )
                error( "Failed to emit irecv of xparts from node %i." , k );
            j += counts[ k*nr_nodes + nodeID ];
            }
        }
    /* Wait for all the sends and recvs to tumble in. */
    MPI_Status stats[4*nr_nodes];
    int res;
    if ( ( res = MPI_Waitall( 4*nr_nodes , reqs , stats ) ) != MPI_SUCCESS ) {
        for ( k = 0 ; k < 4*nr_nodes ; k++ ) {
            char buff[ MPI_MAX_ERROR_STRING ];
            int res;
            MPI_Error_string( stats[k].MPI_ERROR , buff , &res );
            message( "request %i has error '%s'." , k , buff );
            }
        message( "counts is [ %i %i %i %i ]." , counts[0] , counts[1] , counts[2] , counts[3] );
        error( "Failed during waitall for part data." );
        }

    /* Verify that all parts are in the right place. */
    /* for ( k = 0 ; k < nr_parts ; k++ ) {
        cid = cell_getid( cdim , parts_new[k].x[0]*ih[0] , parts_new[k].x[1]*ih[1] , parts_new[k].x[2]*ih[2] );
        if ( cells[ cid ].nodeID != nodeID )
            error( "Received particle (%i) that does not belong here (nodeID=%i)." , k , cells[ cid ].nodeID );
        } */
        
    /* Set the new part data, free the old. */
    free( parts );
    free( xparts );
    s->parts = parts_new;
    s->xparts = xparts_new;
    s->nr_parts = nr_parts;
    s->size_parts = 1.2*nr_parts;

    /* Be verbose about what just happened. */
    for ( k = 0 ; k < nr_cells ; k++ )
        if ( cells[k].nodeID == nodeID )
            my_cells += 1;
    message( "node %i now has %i parts in %i cells." , nodeID , nr_parts , my_cells );
    
    /* Clean up other stuff. */
    free( reqs );
    free( counts );
    free( dest );
        
#else
    error( "SWIFT was not compiled with MPI and METIS support." );
#endif

    }


/**
 * @brief Repartition the cells amongst the nodes.
 *
 * @param e The #engine.
 */
 
void engine_repartition ( struct engine *e ) {

#if defined(WITH_MPI) && defined(HAVE_METIS)

    int i, j, k, l, cid, cjd, ii, jj, kk, res, w;
    idx_t *inds, *nodeIDs;
    idx_t *weights_v, *weights_e;
    struct space *s = e->s;
    int nr_cells = s->nr_cells, my_cells = 0;
    struct cell *cells = s->cells;
    int ind[3], *cdim = s->cdim;
    struct task *t, *tasks = e->sched.tasks;
    struct cell *ci, *cj;
    int nr_nodes = e->nr_nodes, nodeID = e->nodeID;
    float wscale = 1.0, vscale = 1e-3, wscale_buff;
    idx_t wtot = 0;
    const idx_t wmax = 1e9 / e->nr_nodes;
    
    /* Clear the repartition flag. */
    e->forcerepart = 0;
    
    /* Allocate the inds and weights. */
    if ( ( inds = (idx_t *)malloc( sizeof(idx_t) * 26*nr_cells ) ) == NULL ||
         ( weights_v = (idx_t *)malloc( sizeof(idx_t) * nr_cells ) ) == NULL ||
         ( weights_e = (idx_t *)malloc( sizeof(idx_t) * 26*nr_cells ) ) == NULL ||
         ( nodeIDs = (idx_t *)malloc( sizeof(idx_t) * nr_cells ) ) == NULL )
        error( "Failed to allocate inds and weights arrays." );
        
    /* Fill the inds array. */
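    /* Each cell is a vertex of the partition graph, connected to its 26
       neighbours (with periodic wrapping); inds[cid*26 + l] holds the cell
       ID of the l-th neighbour. */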
    for ( cid = 0 ; cid < nr_cells ; cid++ ) {
        ind[0] = cells[cid].loc[0] / s->cells[cid].h[0] + 0.5;
        ind[1] = cells[cid].loc[1] / s->cells[cid].h[1] + 0.5;
        ind[2] = cells[cid].loc[2] / s->cells[cid].h[2] + 0.5;
        l = 0;
        for ( i = -1 ; i <= 1 ; i++ ) {
            ii = ind[0] + i;
            if ( ii < 0 ) ii += cdim[0];
            else if ( ii >= cdim[0] ) ii -= cdim[0];
            for ( j = -1 ; j <= 1 ; j++ ) {
                jj = ind[1] + j;
                if ( jj < 0 ) jj += cdim[1];
                else if ( jj >= cdim[1] ) jj -= cdim[1];
                for ( k = -1 ; k <= 1 ; k++ ) {
                    kk = ind[2] + k;
                    if ( kk < 0 ) kk += cdim[2];
                    else if ( kk >= cdim[2] ) kk -= cdim[2];
                    if ( i || j || k ) {
                        inds[ cid*26 + l ] = cell_getid( cdim , ii , jj , kk );
                        l += 1;
                        }
                    }
                }
            }
        }
        
    /* Init the weights arrays. */
    bzero( weights_e , sizeof(idx_t) * 26*nr_cells );
    bzero( weights_v , sizeof(idx_t) * nr_cells );
    
    /* Loop over the tasks... */
    for ( j = 0 ; j < e->sched.nr_tasks ; j++ ) {
    
        /* Get a pointer to the kth task. */
        t = &tasks[j];
        
        /* Skip un-interesting tasks. */
        if ( t->type != task_type_self &&
             t->type != task_type_pair &&
             t->type != task_type_sub &&
             t->type != task_type_ghost &&
             t->type != task_type_kick1 &&
             t->type != task_type_kick2 )
            continue;
            
        /* Get the task weight. */
        w = ( t->toc - t->tic ) * wscale;
        if ( w < 0 )
            error( "Bad task weight (%i)." , w );
        /* Do we need to re-scale? */
        wtot += w;
        if ( wtot > wmax ) {
            wscale /= 2;
            wtot /= 2;
            for ( k = 0 ; k < 26*nr_cells ; k++ ) weights_e[k] *= 0.5;
            for ( k = 0 ; k < nr_cells ; k++ ) weights_v[k] *= 0.5;
            }
        
        /* Get the top-level cells involved. */
        for ( ci = t->ci ; ci->parent != NULL ; ci = ci->parent );
        if ( t->cj != NULL )
            for ( cj = t->cj ; cj->parent != NULL ; cj = cj->parent );
        else
            cj = NULL;
            
        /* Get the cell IDs. */
        cid = ci - cells;
            
        /* Different weights for different tasks. */
        if ( t->type == task_type_ghost ||
             t->type == task_type_kick1 || 
             t->type == task_type_kick2 ) {
             
            /* Particle updates add only to vertex weight. */
            weights_v[cid] += w;
            
            }
        
        /* Self interaction? */     
        else if ( ( t->type == task_type_self && ci->nodeID == nodeID ) ||
             ( t->type == task_type_sub && cj == NULL && ci->nodeID == nodeID ) ) {

            /* Self interactions add only to vertex weight. */
            weights_v[cid] += w;
            
            }
            
        /* Pair? */
        else if ( t->type == task_type_pair ||
                  ( t->type == task_type_sub && cj != NULL ) ) {
                  
            /* In-cell pair? */
            if ( ci == cj ) {
            
                /* Add weight to vertex for ci. */
                weights_v[cid] += w;
            
                }
                
            /* Distinct cells with local ci? */
            else if ( ci->nodeID == nodeID ) {
            
                /* Index of the jth cell. */
                cjd = cj - cells;
                
                /* Add half of weight to each cell. */
                if ( ci->nodeID == nodeID )
                    weights_v[cid] += 0.5 * w;
                if ( cj->nodeID == nodeID )
                    weights_v[cjd] += 0.5 * w;

                /* Add weight to the edge. */
                for ( k = 26*cid ; inds[k] != cjd ; k++ );
                weights_e[ k ] += w;
                for ( k = 26*cjd ; inds[k] != cid ; k++ );
                weights_e[ k ] += w;
            
                }
                  
            }
    
        }
        
    /* Get the minimum scaling and re-scale if necessary. */
    if ( ( res = MPI_Allreduce( &wscale , &wscale_buff , 1 , MPI_FLOAT , MPI_MIN , MPI_COMM_WORLD ) ) != MPI_SUCCESS ) {
        char buff[ MPI_MAX_ERROR_STRING ];
        MPI_Error_string( res , buff , &i );
        error( "Failed to allreduce the weight scales (%s)." , buff );
        }
    if ( wscale_buff != wscale ) {
        float scale = wscale_buff / wscale;
        for ( k = 0 ; k < 26*nr_cells ; k++ ) weights_e[k] *= scale;
        for ( k = 0 ; k < nr_cells ; k++ ) weights_v[k] *= scale;
        }
        
    /* Merge the weights arrays across all nodes. */
#if IDXTYPEWIDTH==32
    if ( ( res = MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_v , weights_v , nr_cells , MPI_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) ) != MPI_SUCCESS ) {
#else
    if ( ( res = MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_v , weights_v , nr_cells , MPI_LONG_LONG_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) ) != MPI_SUCCESS ) {
#endif
        char buff[ MPI_MAX_ERROR_STRING ];
        MPI_Error_string( res , buff , &i );
        error( "Failed to reduce vertex weights (%s)." , buff );
        }
#if IDXTYPEWIDTH==32
    if ( MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_e , weights_e , 26*nr_cells , MPI_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
#else
    if ( MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_e , weights_e , 26*nr_cells , MPI_LONG_LONG_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
#endif
        error( "Failed to reduce edge weights." );
        
    /* As of here, only one node needs to compute the partition. */
    if ( nodeID == 0 ) {
    
        /* Check that the edge weights are fully symmetric. */
        /* for ( cid = 0 ; cid < nr_cells ; cid++ )
            for ( k = 0 ; k < 26 ; k++ ) {
                cjd = inds[ cid*26 + k ];
                for ( j = 26*cjd ; inds[j] != cid ; j++ );
                if ( weights_e[ cid*26+k ] != weights_e[ j ] )
                    error( "Unsymmetric edge weights detected (%i vs %i)." , weights_e[ cid*26+k ] , weights_e[ j ] );
                } */
        /* int w_min = weights_e[0], w_max = weights_e[0], w_tot = weights_e[0];
        for ( k = 1 ; k < 26*nr_cells ; k++ ) {
            w_tot += weights_e[k];
            if ( weights_e[k] < w_min )
                w_min = weights_e[k];
            else if ( weights_e[k] > w_max )
                w_max = weights_e[k];
            }
        message( "edge weights in [ %i , %i ], tot=%i." , w_min , w_max , w_tot );
        w_min = weights_e[0], w_max = weights_e[0]; w_tot = weights_v[0];
        for ( k = 1 ; k < nr_cells ; k++ ) {
            w_tot += weights_v[k];
            if ( weights_v[k] < w_min )
                w_min = weights_v[k];
            else if ( weights_v[k] > w_max )
                w_max = weights_v[k];
            }
        message( "vertex weights in [ %i , %i ], tot=%i." , w_min , w_max , w_tot ); */
                
        /* Make sure there are no zero weights. */
        for ( k = 0 ; k < 26*nr_cells ; k++ )
            if ( weights_e[k] == 0 )
                weights_e[k] = 1;
        for ( k = 0 ; k < nr_cells ; k++ )
            if ( ( weights_v[k] *= vscale ) == 0 )
                weights_v[k] = 1;
    
        /* Allocate and fill the connection array. */
        idx_t *offsets;
        if ( ( offsets = (idx_t *)malloc( sizeof(idx_t) * (nr_cells + 1) ) ) == NULL )
            error( "Failed to allocate offsets buffer." );
        offsets[0] = 0;
        for ( k = 0 ; k < nr_cells ; k++ )
            offsets[k+1] = offsets[k] + 26;
            
        /* Set the METIS options. */
        idx_t options[METIS_NOPTIONS];
        METIS_SetDefaultOptions( options );
        options[ METIS_OPTION_OBJTYPE ] = METIS_OBJTYPE_CUT;
        options[ METIS_OPTION_NUMBERING ] = 0;
        options[ METIS_OPTION_CONTIG ] = 1;
        options[ METIS_OPTION_NCUTS ] = 10;
        options[ METIS_OPTION_NITER ] = 20;
        // options[ METIS_OPTION_UFACTOR ] = 1;
        /* Set the initial partition, although this is probably ignored. */
        for ( k = 0 ; k < nr_cells ; k++ )
            nodeIDs[k] = cells[k].nodeID;
            
        /* Call METIS. */
        idx_t one = 1, idx_nr_cells = nr_cells, idx_nr_nodes = nr_nodes;
        idx_t objval;
        if ( METIS_PartGraphRecursive( &idx_nr_cells , &one , offsets , inds , weights_v , NULL , weights_e , &idx_nr_nodes , NULL , NULL , options , &objval , nodeIDs ) != METIS_OK )
            error( "Call to METIS_PartGraphRecursive failed." );

        /* Dump the 3d array of cell IDs. */
        /* printf( "engine_repartition: nodeIDs = reshape( [" );
        for ( i = 0 ; i < cdim[0]*cdim[1]*cdim[2] ; i++ )
            printf( "%i " , (int)nodeIDs[ i ] );
        printf("] ,%i,%i,%i);\n",cdim[0],cdim[1],cdim[2]); */
    
        }
        
    /* Broadcast the result of the partition. */
#if IDXTYPEWIDTH==32
    if ( MPI_Bcast( nodeIDs , nr_cells , MPI_INT , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
#else
    if ( MPI_Bcast( nodeIDs , nr_cells , MPI_LONG_LONG_INT , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
#endif
        error( "Failed to bcast the node IDs." );
        
    /* Set the cell nodeIDs and clear any non-local parts. */
    for ( k = 0 ; k < nr_cells ; k++ ) {
        cells[k].nodeID = nodeIDs[k];
        if ( nodeIDs[k] == nodeID )
            my_cells += 1;
        }
        
    /* Clean up. */
    free( inds );
    free( weights_v );
    free( weights_e );
    free( nodeIDs );
        
    /* Now comes the tricky part: Exchange particles between all nodes.
       This is done in two steps, first allreducing a matrix of 
       how many particles go from where to where, then re-allocating
       the parts array, and emitting the sends and receives.
       Finally, the space, tasks, and proxies need to be rebuilt. */
       
    /* Redistribute the particles between the nodes. */
    engine_redistribute( e );
        
    /* Make the proxies. */
    engine_makeproxies( e );
        
    /* Tell the engine it should re-build whenever possible */
    e->forcerebuild = 1;
    
#else
    error( "SWIFT was not compiled with MPI and METIS support." );
#endif

    }
    
    
/**
 * @brief Add up/down gravity tasks to a cell hierarchy.
 *
 * @param e The #engine.
 * @param c The #cell
 * @param up The upward gravity #task.
 * @param down The downward gravity #task.
 */
 
void engine_addtasks_grav ( struct engine *e , struct cell *c , struct task *up , struct task *down ) {

    /* Link the tasks to this cell. */
    c->grav_up = up;
    c->grav_down = down;
    
    /* Recurse? */
    if ( c->split )
        for ( int k = 0 ; k < 8 ; k++ )
            if ( c->progeny[k] != NULL )
                engine_addtasks_grav( e , c->progeny[k] , up , down );

    }


/**
 * @brief Add send tasks to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj The receiving #cell
 */

void engine_addtasks_send ( struct engine *e , struct cell *ci , struct cell *cj ) {

    int k;
    struct link *l = NULL;
    struct scheduler *s = &e->sched;

    /* Check if any of the density tasks are for the target node. */
    for ( l = ci->density ; l != NULL ; l = l->next )
        if ( l->t->ci->nodeID == cj->nodeID ||
             ( l->t->cj != NULL && l->t->cj->nodeID == cj->nodeID ) )
            break;

    /* If so, attach send tasks. */
    if ( l != NULL ) {

        /* Create the tasks. */
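        /* The two sends for this cell use an even/odd pair of tags derived
           from the cell's tag: 2*tag for the xv data and 2*tag+1 for the
           rho data. */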
        struct task *t_xv = scheduler_addtask( &e->sched , task_type_send , task_subtype_none , 2*ci->tag , 0 , ci , cj , 0 );
        struct task *t_rho = scheduler_addtask( &e->sched , task_type_send , task_subtype_none , 2*ci->tag + 1 , 0 , ci , cj , 0 );

        /* The send_rho task depends on the cell's ghost task. */
        scheduler_addunlock( s , ci->super->ghost , t_rho );

        /* The send_rho task should unlock the super-cell's kick2 task. */
        scheduler_addunlock( s , t_rho , ci->super->kick2 );

        /* The send_xv task should unlock the super-cell's ghost task. */
        scheduler_addunlock( s , t_xv , ci->super->ghost );

        }
        
    /* Recurse? */
    else if ( ci->split )
        for ( k = 0 ; k < 8 ; k++ )
            if ( ci->progeny[k] != NULL )
                engine_addtasks_send( e , ci->progeny[k] , cj );

    }


/**
 * @brief Add recv tasks to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 */

void engine_addtasks_recv ( struct engine *e , struct cell *c , struct task *t_xv , struct task *t_rho ) {

    int k;
    struct scheduler *s = &e->sched;

    /* Do we need to construct a recv task? */
    if ( t_xv == NULL && c->nr_density > 0 ) {

        /* Create the tasks. */
        t_xv = c->recv_xv = scheduler_addtask( &e->sched , task_type_recv , task_subtype_none , 2*c->tag , 0 , c , NULL , 0 );
        t_rho = c->recv_rho = scheduler_addtask( &e->sched , task_type_recv , task_subtype_none , 2*c->tag + 1 , 0 , c , NULL , 0 );
        
        }
        
    /* Add dependencies. */
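    /* The received xv data must be in place before any local density task
       uses this cell, and the rho data is only received once those density
       tasks have completed. */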
    for ( struct link *l = c->density ; l != NULL ; l = l->next ) {
        scheduler_addunlock( s , t_xv , l->t );
        scheduler_addunlock( s , l->t , t_rho );
        }
    for ( struct link *l = c->force ; l != NULL ; l = l->next )
        scheduler_addunlock( s , t_rho , l->t );
    if ( c->sorts != NULL )
        scheduler_addunlock( s , t_xv , c->sorts );
    
    /* Recurse? */
    if ( c->split )
        for ( k = 0 ; k < 8 ; k++ )
            if ( c->progeny[k] != NULL )
                engine_addtasks_recv( e , c->progeny[k] , t_xv , t_rho );

    }


/**
 * @brief Exchange cell structures with other nodes.
 *
 * @param e The #engine.
 */
 
void engine_exchange_cells ( struct engine *e ) {

#ifdef WITH_MPI

    int j, k, pid, count = 0;
    struct pcell *pcells;
    struct space *s = e->s;
    struct cell *cells = s->cells;
    int nr_cells = s->nr_cells;
    int nr_proxies = e->nr_proxies;
    int offset[ nr_cells ];
    MPI_Request reqs_in[ engine_maxproxies ];
    MPI_Request reqs_out[ engine_maxproxies ];
    MPI_Status status;
    struct part *parts = &s->parts[ s->nr_parts ];
    
    /* Run through the cells and get the size of the ones that will be sent off. */
    for ( k = 0 ; k < nr_cells ; k++ ) {
        offset[k] = count;
        if ( cells[k].sendto )
            count += ( cells[k].pcell_size = cell_getsize( &cells[k] ) );
        }
        
    /* Allocate the pcells. */
    if ( ( pcells = (struct pcell *)malloc( sizeof(struct pcell) * count ) ) == NULL )
        error( "Failed to allocate pcell buffer." );
        
    /* Pack the cells. */
    cell_next_tag = 0;
    for ( k = 0 ; k < nr_cells ; k++ )
        if ( cells[k].sendto ) {
            cell_pack( &cells[k] , &pcells[ offset[k] ] );
            cells[k].pcell = &pcells[ offset[k] ];
            }

    /* Launch the proxies. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        proxy_cells_exch1( &e->proxies[k] );
        reqs_in[k] = e->proxies[k].req_cells_count_in;
        reqs_out[k] = e->proxies[k].req_cells_count_out;
        }
        
    /* Wait for each count to come in and start the recv. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        if ( MPI_Waitany( nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
             pid == MPI_UNDEFINED )
            error( "MPI_Waitany failed." );
        // message( "request from proxy %i has arrived." , pid );
        proxy_cells_exch2( &e->proxies[pid] );
        }
        
    /* Wait for all the sends to have finished too. */
    if ( MPI_Waitall( nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
        error( "MPI_Waitall on sends failed." );
        
    /* Set the requests for the cells. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        reqs_in[k] = e->proxies[k].req_cells_in;
        reqs_out[k] = e->proxies[k].req_cells_out;
        }
    /* Wait for each pcell array to come in from the proxies. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        if ( MPI_Waitany( nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
             pid == MPI_UNDEFINED )
            error( "MPI_Waitany failed." );
        // message( "cell data from proxy %i has arrived." , pid );
        for ( count = 0 , j = 0 ; j < e->proxies[pid].nr_cells_in ; j++ )
            count += cell_unpack( &e->proxies[pid].pcells_in[count] , e->proxies[pid].cells_in[j] , e->s );
        }
        
    /* Wait for all the sends to have finished too. */
    if ( MPI_Waitall( nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
        error( "MPI_Waitall on sends failed." );
        
    /* Count the number of particles we need to import and re-allocate
       the buffer if needed. */
    for ( count = 0 , k = 0 ; k < nr_proxies ; k++ )
        for ( j = 0 ; j < e->proxies[k].nr_cells_in ; j++ )
            count += e->proxies[k].cells_in[j]->count;
    if ( count > s->size_parts_foreign ) {
        if ( s->parts_foreign != NULL )
            free( s->parts_foreign );
        s->size_parts_foreign = 1.1 * count;
        if ( posix_memalign( (void **)&s->parts_foreign , part_align , sizeof(struct part) * s->size_parts_foreign ) != 0 )
            error( "Failed to allocate foreign part data." );
        }
        
    /* Unpack the cells and link to the particle data. */
    parts = s->parts_foreign;
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        for ( count = 0 , j = 0 ; j < e->proxies[k].nr_cells_in ; j++ ) {
            count += cell_link( e->proxies[k].cells_in[j] , parts );
            parts = &parts[ e->proxies[k].cells_in[j]->count ];
            }
        }
    s->nr_parts_foreign = parts - s->parts_foreign;
        
    /* Is the parts buffer large enough? */
    if ( s->nr_parts_foreign > s->size_parts_foreign )
        error( "Foreign parts buffer too small." );
        
    /* Free the pcell buffer. */
    free( pcells );
    
#else
    error( "SWIFT was not compiled with MPI support." );
#endif

    }


/**
 * @brief Exchange straying parts with other nodes.
 *
 * @param e The #engine.
 * @param e The #engine.
 * @param offset The index in the parts array as of which the foreign parts reside.
 * @param ind The ID of the foreign #cell.
 * @param N The number of stray parts.
 *
 * @return The number of arrived parts copied to parts and xparts.
 */
 
int engine_exchange_strays ( struct engine *e , int offset , int *ind , int N ) {

#ifdef WITH_MPI

    int k, pid, count = 0, nr_in = 0, nr_out = 0;
    MPI_Request reqs_in[ 2*engine_maxproxies ];
    MPI_Request reqs_out[ 2*engine_maxproxies ];
    MPI_Status status;
    struct proxy *p;
    struct space *s = e->s;

    /* Re-set the proxies. */
    for ( k = 0 ; k < e->nr_proxies ; k++ )
        e->proxies[k].nr_parts_out = 0;
    
    /* Put the parts into the corresponding proxies. */
    for ( k = 0 ; k < N ; k++ ) {
        pid = e->proxy_ind[ e->s->cells[ ind[k] ].nodeID ];
        if ( pid < 0 )
            error( "Do not have a proxy for the requested nodeID." );
        proxy_parts_load( &e->proxies[pid] , &s->parts[offset + k] , &s->xparts[offset + k] , 1 );
        }
    
    /* Launch the proxies. */
    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
        proxy_parts_exch1( &e->proxies[k] );
        reqs_in[k] = e->proxies[k].req_parts_count_in;
        reqs_out[k] = e->proxies[k].req_parts_count_out;
        }
        
    /* Wait for each count to come in and start the recv. */
    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
        if ( MPI_Waitany( e->nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
             pid == MPI_UNDEFINED )
            error( "MPI_Waitany failed." );
        // message( "request from proxy %i has arrived." , pid );
        proxy_parts_exch2( &e->proxies[pid] );
        }
        
    /* Wait for all the sends to have finished too. */
    if ( MPI_Waitall( e->nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
        error( "MPI_Waitall on sends failed." );
        
    /* Count the total number of incoming particles and make sure we have
       enough space to accommodate them. */
    int count_in = 0;
    for ( k = 0 ; k < e->nr_proxies ; k++ )
        count_in += e->proxies[k].nr_parts_in;
    message( "sent out %i particles, got %i back." , N , count_in );
    if ( offset + count_in > s->size_parts ) {
        s->size_parts = ( offset + count_in ) * 1.05;
        struct part *parts_new;
        struct xpart *xparts_new;
        if ( posix_memalign( (void **)&parts_new , part_align , sizeof(struct part) * s->size_parts ) != 0 ||
             posix_memalign( (void **)&xparts_new , part_align , sizeof(struct xpart) * s->size_parts ) != 0 )
            error( "Failed to allocate new part data." );
        memcpy( parts_new , s->parts , sizeof(struct part) * offset );
        memcpy( xparts_new , s->xparts , sizeof(struct xpart) * offset );
        free( s->parts );
        free( s->xparts );
        s->parts = parts_new;
        s->xparts = xparts_new;
        }
        
    /* Collect the requests for the particle data from the proxies. */
    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
        if ( e->proxies[k].nr_parts_in > 0 ) {
            reqs_in[2*k] = e->proxies[k].req_parts_in;
            reqs_in[2*k+1] = e->proxies[k].req_xparts_in;
            nr_in += 1;
            }
        else
            reqs_in[2*k] = reqs_in[2*k+1] = MPI_REQUEST_NULL;
        if ( e->proxies[k].nr_parts_out > 0 ) {
            reqs_out[2*k] = e->proxies[k].req_parts_out;
            reqs_out[2*k+1] = e->proxies[k].req_xparts_out;
            nr_out += 1;
            }
        else
            reqs_out[2*k] = reqs_out[2*k+1] = MPI_REQUEST_NULL;
        }
    
    /* Wait for each part array to come in and collect the new
       parts from the proxies. */
    for ( k = 0 ; k < 2*(nr_in + nr_out) ; k++ ) {
        int err;
        if ( ( err = MPI_Waitany( 2*e->nr_proxies , reqs_in , &pid , &status ) ) != MPI_SUCCESS ) {
            char buff[ MPI_MAX_ERROR_STRING ];
            int res;
            MPI_Error_string( err , buff , &res );
            error( "MPI_Waitany failed (%s)." , buff );
            }
        if ( pid == MPI_UNDEFINED )
            break;
        // message( "request from proxy %i has arrived." , pid );
        if ( reqs_in[pid & ~1] == MPI_REQUEST_NULL &&
             reqs_in[pid | 1 ] == MPI_REQUEST_NULL ) {
            p = &e->proxies[pid/2];
            memcpy( &s->parts[offset + count] , p->parts_in , sizeof(struct part) * p->nr_parts_in );
            memcpy( &s->xparts[offset + count] , p->xparts_in , sizeof(struct xpart) * p->nr_parts_in );
            count += p->nr_parts_in;
            /* for ( int k = 0 ; k < p->nr_parts_in ; k++ )
                message( "received particle %lli, x=[%.3e %.3e %.3e], h=%.3e, from node %i." ,
                    p->parts_in[k].id , p->parts_in[k].x[0] , p->parts_in[k].x[1] , p->parts_in[k].x[2] ,
                    p->parts_in[k].h , p->nodeID ); */
            }
        }
    
    /* Wait for all the sends to have finished too. */
    if ( nr_out > 0 )
        if ( MPI_Waitall( 2*e->nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
            error( "MPI_Waitall on sends failed." );
        
    /* Return the number of harvested parts. */
    return count;
    
#else
    error( "SWIFT was not compiled with MPI support." );
    return 0;
#endif

    }


/**
 * @brief Fill the #space's task list.
 *
 * @param e The #engine we are working with.
 */

void engine_maketasks ( struct engine *e ) {

    struct space *s = e->s;
    struct scheduler *sched = &e->sched;
    struct cell *cells = s->cells;
    int nr_cells = s->nr_cells;
    int nodeID = e->nodeID;
    int i, j, k, ii, jj, kk, iii, jjj, kkk, cid, cjd, sid;
    int *cdim = s->cdim;
    struct task *t, *t2;
    struct cell *ci, *cj;

    /* Re-set the scheduler. */
    scheduler_reset( sched , s->tot_cells * engine_maxtaskspercell );
    
    /* Run through the highest level of cells and add pairs. */
    for ( i = 0 ; i < cdim[0] ; i++ )
        for ( j = 0 ; j < cdim[1] ; j++ )
            for ( k = 0 ; k < cdim[2] ; k++ ) {
                cid = cell_getid( cdim , i , j , k );
                if ( cells[cid].count == 0 )
                    continue;
                ci = &cells[cid];
                if ( ci->nodeID == nodeID )
                    scheduler_addtask( sched , task_type_self , task_subtype_density , 0 , 0 , ci , NULL , 0 );
                for ( ii = -1 ; ii < 2 ; ii++ ) {
                    iii = i + ii;
                    if ( !s->periodic && ( iii < 0 || iii >= cdim[0] ) )
                        continue;
                    iii = ( iii + cdim[0] ) % cdim[0];
                    for ( jj = -1 ; jj < 2 ; jj++ ) {
                        jjj = j + jj;
                        if ( !s->periodic && ( jjj < 0 || jjj >= cdim[1] ) )
                            continue;
                        jjj = ( jjj + cdim[1] ) % cdim[1];
                        for ( kk = -1 ; kk < 2 ; kk++ ) {
                            kkk = k + kk;
                            if ( !s->periodic && ( kkk < 0 || kkk >= cdim[2] ) )
                                continue;
                            kkk = ( kkk + cdim[2] ) % cdim[2];
                            cjd = cell_getid( cdim , iii , jjj , kkk );
                            cj = &cells[cjd];
                            if ( cid >= cjd || cj->count == 0 ||
                                 ( ci->nodeID != nodeID && cj->nodeID != nodeID ) )
                                continue;
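                            /* Map the relative cell offset to the corresponding
                               sort/pair direction ID. */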
                            sid = sortlistID[ (kk+1) + 3*( (jj+1) + 3*(ii+1) ) ];