/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 * 
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <omp.h>
#include <sched.h>

/* MPI headers. */
#ifdef WITH_MPI
    #include <mpi.h>
#endif

/* METIS headers. */
#ifdef HAVE_METIS
    #include <metis.h>
#endif

/* Local headers. */
#include "const.h"
#include "cycle.h"
#include "atomic.h"
#include "timers.h"
#include "vector.h"
#include "lock.h"
#include "task.h"
#include "part.h"
#include "debug.h"
#include "space.h"
#include "multipole.h"
#include "cell.h"
#include "queue.h"
#include "scheduler.h"
#include "engine.h"
#include "runner.h"
#include "proxy.h"
#include "error.h"

#ifdef LEGACY_GADGET2_SPH
#include "runner_iact_legacy.h"
#else
#include "runner_iact.h"
#endif


/* Convert cell location to ID. */
#define cell_getid( cdim , i , j , k ) ( (int)(k) + (cdim)[2]*( (int)(j) + (cdim)[1]*(int)(i) ) )
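
/* Example: with cdim = { 4 , 4 , 4 }, the cell at ( i , j , k ) = ( 1 , 2 , 3 )
   gets ID 3 + 4*( 2 + 4*1 ) = 27, i.e. cells are laid out in row-major
   order with k varying fastest. */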


/** The rank of the engine as a global variable (for messages). */
int engine_rank;


/**
 * @brief Link a density/force task to a cell.
 *
 * @param e The #engine.
 * @param l The #link.
 * @param t The #task.
 *
 * @return The new #link pointer.
 */
 
struct link *engine_addlink( struct engine *e , struct link *l , struct task *t ) {

    struct link *res = &e->links[ atomic_inc( &e->nr_links ) ];
    res->next = l;
    res->t = t;
    return res;

    }
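
/* A minimal usage sketch (illustration only, not part of the build): the
   lists built with engine_addlink are singly-linked, prepended at the head,
   and walked as follows for a hypothetical cell c. */
/* int count = 0;
   for ( struct link *l = c->density ; l != NULL ; l = l->next )
       count += 1; */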


/**
 * @brief Generate the ghost and kick tasks for a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The #cell.
 * @param super The super #cell.
 */
 
void engine_mkghosts ( struct engine *e , struct cell *c , struct cell *super ) {

    int k;
    struct scheduler *s = &e->sched;

    /* Am I the super-cell? */
    if ( super == NULL && c->nr_tasks > 0 ) {
    
        /* Remember me. */
        super = c;
        
        /* Local tasks only... */
        if ( c->nodeID == e->nodeID ) {
        
            /* Generate the ghost task. */
            c->ghost = scheduler_addtask( s , task_type_ghost , task_subtype_none , 0 , 0 , c , NULL , 0 );

            /* Add the kick2 task. */
            c->kick2 = scheduler_addtask( s , task_type_kick2 , task_subtype_none , 0 , 0 , c , NULL , 0 );

            /* Add the kick1 task if needed. */
            if ( !(e->policy & engine_policy_fixdt) )
                c->kick1 = scheduler_addtask( s , task_type_kick1 , task_subtype_none , 0 , 0 , c , NULL , 0 );
                
            }
            
        }
        
    /* Set the super-cell. */
    c->super = super;
        
    /* Recurse. */
    if ( c->split )
        for ( k = 0 ; k < 8 ; k++ )
            if ( c->progeny[k] != NULL )
                engine_mkghosts( e , c->progeny[k] , super );
    
    }
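
/* A hypothetical seeding sketch (illustration only): passing super == NULL
   from each top-level cell lets the first cell with tasks become its own
   super-cell. */
/* for ( int k = 0 ; k < s->nr_cells ; k++ )
       engine_mkghosts( e , &s->cells[k] , NULL ); */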


/**
 * @brief Redistribute the particles amongst the nodes according
 *      to their cells' node IDs.
 *
 * @param e The #engine.
 */
 
void engine_redistribute ( struct engine *e ) {

#ifdef WITH_MPI

    int i, j, k, cid;
    int nr_nodes = e->nr_nodes, nodeID = e->nodeID;
    struct space *s = e->s;
    int my_cells = 0;
    int *cdim = s->cdim;
    struct cell *cells = s->cells;
    int nr_cells = s->nr_cells;

    /* Start by sorting the particles according to their nodes and
       getting the counts. */
    int *counts, *dest;
    struct part *parts = s->parts;
    double ih[3], dim[3];
    ih[0] = s->ih[0]; ih[1] = s->ih[1]; ih[2] = s->ih[2];
    dim[0] = s->dim[0]; dim[1] = s->dim[1]; dim[2] = s->dim[2];
    if ( ( counts = (int *)malloc( sizeof(int) * nr_nodes * nr_nodes ) ) == NULL ||
         ( dest = (int *)malloc( sizeof(int) * s->nr_parts ) ) == NULL )
        error( "Failed to allocate count and dest buffers." );
    bzero( counts , sizeof(int) * nr_nodes * nr_nodes );
    for ( k = 0 ; k < s->nr_parts ; k++ ) {
        for ( j = 0 ; j < 3 ; j++ ) {
            if ( parts[k].x[j] < 0.0 ) parts[k].x[j] += dim[j];
            else if ( parts[k].x[j] >= dim[j] ) parts[k].x[j] -= dim[j];
            }
        cid = cell_getid( cdim , parts[k].x[0]*ih[0] , parts[k].x[1]*ih[1] , parts[k].x[2]*ih[2] );
        dest[k] = cells[ cid ].nodeID;
        counts[ nodeID*nr_nodes + dest[k] ] += 1;
        }
    parts_sort( s->parts , s->xparts , dest , s->nr_parts , 0 , nr_nodes-1 );
    
    /* Get all the counts from all the nodes. */
    if ( MPI_Allreduce( MPI_IN_PLACE , counts , nr_nodes * nr_nodes , MPI_INT , MPI_SUM , MPI_COMM_WORLD ) != MPI_SUCCESS )
        error( "Failed to allreduce particle transfer counts." );
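
    /* Note: counts is an nr_nodes x nr_nodes matrix in row-major order,
       i.e. counts[ from*nr_nodes + to ] parts travel from node "from" to
       node "to". E.g. with two nodes, node 1 will receive
       counts[ 0*2 + 1 ] + counts[ 1*2 + 1 ] parts, which is exactly the
       sum taken below. */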

    /* Get the new number of parts for this node, be generous in allocating. */
    int nr_parts = 0;
    for ( k = 0 ; k < nr_nodes ; k++ )
        nr_parts += counts[ k*nr_nodes + nodeID ];
    struct part *parts_new;
    struct xpart *xparts_new, *xparts = s->xparts;
    if ( posix_memalign( (void **)&parts_new , part_align , sizeof(struct part) * nr_parts * 1.2 ) != 0 ||
         posix_memalign( (void **)&xparts_new , part_align , sizeof(struct xpart) * nr_parts * 1.2 ) != 0 )
        error( "Failed to allocate new part data." );
        
    /* Emit the sends and recvs for the particle data. */
    MPI_Request *reqs;
    if ( ( reqs = (MPI_Request *)malloc( sizeof(MPI_Request) * 4 * nr_nodes ) ) == NULL )
        error( "Failed to allocate MPI request list." );
    for ( k = 0 ; k < 4*nr_nodes ; k++ )
        reqs[k] = MPI_REQUEST_NULL;
    for ( i = 0 , j = 0 , k = 0 ; k < nr_nodes ; k++ ) {
        if ( k == nodeID && counts[ nodeID*nr_nodes + k ] > 0 ) {
            memcpy( &parts_new[j] , &parts[i] , sizeof(struct part) * counts[ k*nr_nodes + nodeID ] );
            memcpy( &xparts_new[j] , &xparts[i] , sizeof(struct xpart) * counts[ k*nr_nodes + nodeID ] );
            i += counts[ nodeID*nr_nodes + k ];
            j += counts[ k*nr_nodes + nodeID ];
            }
        if ( k != nodeID && counts[ nodeID*nr_nodes + k ] > 0 ) {
            if ( MPI_Isend( &parts[i] , sizeof(struct part) * counts[ nodeID*nr_nodes + k ] , MPI_BYTE , k , 2*(nodeID*nr_nodes + k) + 0 , MPI_COMM_WORLD , &reqs[4*k] ) != MPI_SUCCESS )
                error( "Failed to isend parts to node %i." , k );
            if ( MPI_Isend( &xparts[i] , sizeof(struct xpart) * counts[ nodeID*nr_nodes + k ] , MPI_BYTE , k , 2*(nodeID*nr_nodes + k) + 1 , MPI_COMM_WORLD , &reqs[4*k+1] ) != MPI_SUCCESS )
                error( "Failed to isend xparts to node %i." , k );
            i += counts[ nodeID*nr_nodes + k ];
            }
        if ( k != nodeID && counts[ k*nr_nodes + nodeID ] > 0 ) {
            if ( MPI_Irecv( &parts_new[j] , sizeof(struct part) * counts[ k*nr_nodes + nodeID ] , MPI_BYTE , k , 2*(k*nr_nodes + nodeID) + 0 , MPI_COMM_WORLD , &reqs[4*k+2] ) != MPI_SUCCESS )
                error( "Failed to emit irecv of parts from node %i." , k );
            if ( MPI_Irecv( &xparts_new[j] , sizeof(struct xpart) * counts[ k*nr_nodes + nodeID ] , MPI_BYTE , k , 2*(k*nr_nodes + nodeID) + 1 , MPI_COMM_WORLD , &reqs[4*k+3] ) != MPI_SUCCESS )
                error( "Failed to emit irecv of xparts from node %i." , k );
            j += counts[ k*nr_nodes + nodeID ];
            }
        }
        
    /* Wait for all the sends and recvs to tumble in. */
    MPI_Status stats[4*nr_nodes];
    int res;
    if ( ( res = MPI_Waitall( 4*nr_nodes , reqs , stats ) ) != MPI_SUCCESS ) {
        for ( k = 0 ; k < 4*nr_nodes ; k++ ) {
            char buff[ MPI_MAX_ERROR_STRING ];
            int len;
            MPI_Error_string( stats[k].MPI_ERROR , buff , &len );
            message( "request %i has error '%s'." , k , buff );
            }
        message( "counts is [ %i %i %i %i ]." , counts[0] , counts[1] , counts[2] , counts[3] );
        error( "Failed during waitall for part data." );
        }

    /* Verify that all parts are in the right place. */
    /* for ( k = 0 ; k < nr_parts ; k++ ) {
        cid = cell_getid( cdim , parts_new[k].x[0]*ih[0] , parts_new[k].x[1]*ih[1] , parts_new[k].x[2]*ih[2] );
        if ( cells[ cid ].nodeID != nodeID )
            error( "Received particle (%i) that does not belong here (nodeID=%i)." , k , cells[ cid ].nodeID );
        } */
        
    /* Set the new part data, free the old. */
    free( parts );
    free( xparts );
    s->parts = parts_new;
    s->xparts = xparts_new;
    s->nr_parts = nr_parts;
    s->size_parts = 1.2*nr_parts;
    
    /* Be verbose about what just happened. */
    for ( k = 0 ; k < nr_cells ; k++ )
        if ( cells[k].nodeID == nodeID )
            my_cells += 1;
    message( "node %i now has %i parts in %i cells." , nodeID , nr_parts , my_cells );
    
    /* Clean up other stuff. */
    free( reqs );
    free( counts );
    free( dest );
        
#else
    error( "SWIFT was not compiled with MPI support." );
#endif

    }


/**
 * @brief Repartition the cells amongst the nodes.
 *
 * @param e The #engine.
 */
 
void engine_repartition ( struct engine *e ) {

#if defined(WITH_MPI) && defined(HAVE_METIS)

    int i, j, k, l, cid, cjd, ii, jj, kk, res;
    idx_t *inds, *nodeIDs;
    idx_t *weights_v, *weights_e;
    struct space *s = e->s;
    int nr_cells = s->nr_cells, my_cells = 0;
    struct cell *cells = s->cells;
    int ind[3], *cdim = s->cdim;
    struct task *t, *tasks = e->sched.tasks;
    struct cell *ci, *cj;
    int nr_nodes = e->nr_nodes, nodeID = e->nodeID;
    float wscale = 1e-3, vscale = 1e-3, wscale_buff;
    idx_t wtot = 0;
    const idx_t wmax = 1e9 / e->nr_nodes;
    
    /* Clear the repartition flag. */
    e->forcerepart = 0;
    
    /* Allocate the inds and weights. */
    if ( ( inds = (idx_t *)malloc( sizeof(idx_t) * 26*nr_cells ) ) == NULL ||
         ( weights_v = (idx_t *)malloc( sizeof(idx_t) * nr_cells ) ) == NULL ||
         ( weights_e = (idx_t *)malloc( sizeof(idx_t) * 26*nr_cells ) ) == NULL ||
         ( nodeIDs = (idx_t *)malloc( sizeof(idx_t) * nr_cells ) ) == NULL )
        error( "Failed to allocate inds and weights arrays." );
        
    /* Fill the inds array. */
    for ( cid = 0 ; cid < nr_cells ; cid++ ) {
        ind[0] = cells[cid].loc[0] / s->cells[cid].h[0] + 0.5;
        ind[1] = cells[cid].loc[1] / s->cells[cid].h[1] + 0.5;
        ind[2] = cells[cid].loc[2] / s->cells[cid].h[2] + 0.5;
        l = 0;
        for ( i = -1 ; i <= 1 ; i++ ) {
            ii = ind[0] + i;
            if ( ii < 0 ) ii += cdim[0];
            else if ( ii >= cdim[0] ) ii -= cdim[0];
            for ( j = -1 ; j <= 1 ; j++ ) {
                jj = ind[1] + j;
                if ( jj < 0 ) jj += cdim[1];
                else if ( jj >= cdim[1] ) jj -= cdim[1];
                for ( k = -1 ; k <= 1 ; k++ ) {
                    kk = ind[2] + k;
                    if ( kk < 0 ) kk += cdim[2];
                    else if ( kk >= cdim[2] ) kk -= cdim[2];
                    if ( i || j || k ) {
                        inds[ cid*26 + l ] = cell_getid( cdim , ii , jj , kk );
                        l += 1;
                        }
                    }
                }
            }
        }
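
    /* The graph is now in the CSR-like form METIS expects: cell cid's 26
       neighbours sit in inds[ 26*cid ] ... inds[ 26*cid + 25 ], so the
       offsets array built further down is simply offsets[k] = 26*k. */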
        
    /* Init the weights arrays. */
    bzero( weights_e , sizeof(idx_t) * 26*nr_cells );
    bzero( weights_v , sizeof(idx_t) * nr_cells );
    
    /* Loop over the tasks... */
    for ( j = 0 ; j < e->sched.nr_tasks ; j++ ) {
    
        /* Get a pointer to the kth task. */
        t = &tasks[j];
        
        /* Skip un-interesting tasks. */
        if ( t->type != task_type_self &&
             t->type != task_type_pair &&
             t->type != task_type_sub &&
             t->type != task_type_ghost &&
             t->type != task_type_kick1 &&
             t->type != task_type_kick2 )
            continue;
            
        /* Get the task weight. */
        idx_t w = ( t->toc - t->tic ) * wscale;
        if ( w < 0 )
            error( "Bad task weight (%i)." , w );
            
        /* Do we need to re-scale? */
        wtot += w;
        while ( wtot > wmax ) {
            wscale /= 2;
            wtot /= 2;
            w /= 2;
            for ( k = 0 ; k < 26*nr_cells ; k++ ) weights_e[k] *= 0.5;
            for ( k = 0 ; k < nr_cells ; k++ ) weights_v[k] *= 0.5;
            }
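
        /* Note: wmax = 1e9 / nr_nodes, so halving wscale and all accumulated
           weights whenever wtot overshoots keeps the totals comfortably
           inside idx_t's range (presumably an overflow guard for the 32-bit
           METIS build) while preserving the weights' relative sizes. */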
        
        /* Get the top-level cells involved. */
        for ( ci = t->ci ; ci->parent != NULL ; ci = ci->parent );
        if ( t->cj != NULL )
            for ( cj = t->cj ; cj->parent != NULL ; cj = cj->parent );
        else
            cj = NULL;
            
        /* Get the cell IDs. */
        cid = ci - cells;
            
        /* Different weights for different tasks. */
        if ( t->type == task_type_ghost ||
             t->type == task_type_kick1 || 
             t->type == task_type_kick2 ) {
             
            /* Particle updates add only to vertex weight. */
            weights_v[cid] += w;
            
            }
        
        /* Self interaction? */     
        else if ( ( t->type == task_type_self && ci->nodeID == nodeID ) ||
             ( t->type == task_type_sub && cj == NULL && ci->nodeID == nodeID ) ) {
        
            /* Self interactions add only to vertex weight. */
            weights_v[cid] += w;
            
            }
            
        /* Pair? */
        else if ( t->type == task_type_pair ||
                  ( t->type == task_type_sub && cj != NULL ) ) {
                  
            /* In-cell pair? */
            if ( ci == cj ) {
            
                /* Add weight to vertex for ci. */
                weights_v[cid] += w;
            
                }
                
            /* Distinct cells with local ci? */
            else if ( ci->nodeID == nodeID ) {
            
                /* Index of the jth cell. */
                cjd = cj - cells;
                
                /* Add half of weight to each cell. */
                if ( ci->nodeID == nodeID )
                    weights_v[cid] += 0.5 * w;
                if ( cj->nodeID == nodeID )
                    weights_v[cjd] += 0.5 * w;
                    
                /* Add weight to edge. */
                for ( k = 26*cid ; inds[k] != cjd ; k++ );
                weights_e[ k ] += w;
                for ( k = 26*cjd ; inds[k] != cid ; k++ );
                weights_e[ k ] += w;
            
                }
                  
            }
    
        }
        
    /* Get the minimum scaling and re-scale if necessary. */
    if ( ( res = MPI_Allreduce( &wscale , &wscale_buff , 1 , MPI_FLOAT , MPI_MIN , MPI_COMM_WORLD ) ) != MPI_SUCCESS ) {
        char buff[ MPI_MAX_ERROR_STRING ];
        MPI_Error_string( res , buff , &i );
        error( "Failed to allreduce the weight scales (%s)." , buff );
        }
    if ( wscale_buff != wscale ) {
        float scale = wscale_buff / wscale;
        for ( k = 0 ; k < 26*nr_cells ; k++ ) weights_e[k] *= scale;
        for ( k = 0 ; k < nr_cells ; k++ ) weights_v[k] *= scale;
        }
        
    /* Merge the weights arrays across all nodes. */
#if IDXTYPEWIDTH==32
    if ( ( res = MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_v , weights_v , nr_cells , MPI_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) ) != MPI_SUCCESS ) {
#else
    if ( ( res = MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_v , weights_v , nr_cells , MPI_LONG_LONG_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) ) != MPI_SUCCESS ) {
#endif
        char buff[ MPI_MAX_ERROR_STRING ];
        MPI_Error_string( res , buff , &i );
        error( "Failed to reduce vertex weights (%s)." , buff );
        }
#if IDXTYPEWIDTH==32
    if ( MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_e , weights_e , 26*nr_cells , MPI_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
#else
    if ( MPI_Reduce( ( nodeID == 0 ) ? MPI_IN_PLACE : weights_e , weights_e , 26*nr_cells , MPI_LONG_LONG_INT , MPI_SUM , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
#endif
       error( "Failed to reduce edge weights." );
        
    /* As of here, only one node needs to compute the partition. */
    if ( nodeID == 0 ) {
    
        /* Check that the edge weights are fully symmetric. */
        /* for ( cid = 0 ; cid < nr_cells ; cid++ )
            for ( k = 0 ; k < 26 ; k++ ) {
                cjd = inds[ cid*26 + k ];
                for ( j = 26*cjd ; inds[j] != cid ; j++ );
                if ( weights_e[ cid*26+k ] != weights_e[ j ] )
                    error( "Unsymmetric edge weights detected (%i vs %i)." , weights_e[ cid*26+k ] , weights_e[ j ] );
                } */
        /* int w_min = weights_e[0], w_max = weights_e[0], w_tot = weights_e[0];
        for ( k = 1 ; k < 26*nr_cells ; k++ ) {
            w_tot += weights_e[k];
            if ( weights_e[k] < w_min )
                w_min = weights_e[k];
            else if ( weights_e[k] > w_max )
                w_max = weights_e[k];
            }
        message( "edge weights in [ %i , %i ], tot=%i." , w_min , w_max , w_tot );
        w_min = weights_e[0], w_max = weights_e[0]; w_tot = weights_v[0];
        for ( k = 1 ; k < nr_cells ; k++ ) {
            w_tot += weights_v[k];
            if ( weights_v[k] < w_min )
                w_min = weights_v[k];
            else if ( weights_v[k] > w_max )
                w_max = weights_v[k];
            }
        message( "vertex weights in [ %i , %i ], tot=%i." , w_min , w_max , w_tot ); */
                
        /* Make sure there are no zero weights. */
        for ( k = 0 ; k < 26*nr_cells ; k++ )
            if ( weights_e[k] == 0 )
                weights_e[k] = 1;
        for ( k = 0 ; k < nr_cells ; k++ )
            if ( ( weights_v[k] *= vscale ) == 0 )
                weights_v[k] = 1;
    
        /* Allocate and fill the connection array. */
        idx_t *offsets;
        if ( ( offsets = (idx_t *)malloc( sizeof(idx_t) * (nr_cells + 1) ) ) == NULL )
            error( "Failed to allocate offsets buffer." );
        offsets[0] = 0;
        for ( k = 0 ; k < nr_cells ; k++ )
            offsets[k+1] = offsets[k] + 26;
            
        /* Set the METIS options. +1 to keep the GCC sanitizer happy. */
        idx_t options[METIS_NOPTIONS+1];
        METIS_SetDefaultOptions( options );
        options[ METIS_OPTION_OBJTYPE ] = METIS_OBJTYPE_CUT;
        options[ METIS_OPTION_NUMBERING ] = 0;
        options[ METIS_OPTION_CONTIG ] = 1;
        options[ METIS_OPTION_NCUTS ] = 10;
        options[ METIS_OPTION_NITER ] = 20;
        // options[ METIS_OPTION_UFACTOR ] = 1;
        
        /* Set the initial partition, although this is probably ignored. */
        for ( k = 0 ; k < nr_cells ; k++ )
            nodeIDs[k] = cells[k].nodeID;
            
        /* Call METIS. */
        idx_t one = 1, idx_nr_cells = nr_cells, idx_nr_nodes = nr_nodes;
        idx_t objval;
        if ( METIS_PartGraphRecursive( &idx_nr_cells , &one , offsets , inds , weights_v , NULL , weights_e , &idx_nr_nodes , NULL , NULL , options , &objval , nodeIDs ) != METIS_OK )
            error( "Call to METIS_PartGraphRecursive failed." );
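
        /* For reference, the arguments map onto METIS's signature as
           ( nvtxs , ncon , xadj , adjncy , vwgt , vsize , adjwgt , nparts ,
             tpwgts , ubvec , options , objval , part ): one balance
           constraint, no vertex sizes, and uniform target part weights. */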

        /* Dump the 3d array of cell IDs. */
        /* printf( "engine_repartition: nodeIDs = reshape( [" );
        for ( i = 0 ; i < cdim[0]*cdim[1]*cdim[2] ; i++ )
            printf( "%i " , (int)nodeIDs[ i ] );
        printf("] ,%i,%i,%i);\n",cdim[0],cdim[1],cdim[2]); */
    
        }
        
    /* Broadcast the result of the partition. */
#if IDXTYPEWIDTH==32
    if ( MPI_Bcast( nodeIDs , nr_cells , MPI_INT , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
        error( "Failed to bcast the node IDs." );
#else
    if ( MPI_Bcast( nodeIDs , nr_cells , MPI_LONG_LONG_INT , 0 , MPI_COMM_WORLD ) != MPI_SUCCESS )
        error( "Failed to bcast the node IDs." );
#endif
        
    /* Set the cell nodeIDs and clear any non-local parts. */
    for ( k = 0 ; k < nr_cells ; k++ ) {
        cells[k].nodeID = nodeIDs[k];
        if ( nodeIDs[k] == nodeID )
            my_cells += 1;
        }
        
    /* Clean up. */
    free( inds );
    free( weights_v );
    free( weights_e );
    free( nodeIDs );
        
    /* Now comes the tricky part: Exchange particles between all nodes.
       This is done in two steps, first allreducing a matrix of 
       how many particles go from where to where, then re-allocating
       the parts array, and emitting the sends and receives.
       Finally, the space, tasks, and proxies need to be rebuilt. */
       
    /* Redistribute the particles between the nodes. */
    engine_redistribute( e );
        
    /* Make the proxies. */
    engine_makeproxies( e );
        
    /* Tell the engine it should re-build whenever possible */
    e->forcerebuild = 1;
    
#else
    error( "SWIFT was not compiled with MPI and METIS support." );
#endif

    }
    
    
/**
 * @brief Add up/down gravity tasks to a cell hierarchy.
 *
 * @param e The #engine.
 * @param c The #cell
 * @param up The upward gravity #task.
 * @param down The downward gravity #task.
 */
 
void engine_addtasks_grav ( struct engine *e , struct cell *c , struct task *up , struct task *down ) {

    /* Link the tasks to this cell. */
    c->grav_up = up;
    c->grav_down = down;
    
    /* Recurse? */
    if ( c->split )
        for ( int k = 0 ; k < 8 ; k++ )
            if ( c->progeny[k] != NULL )
                engine_addtasks_grav( e , c->progeny[k] , up , down );

    }
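
/* A hypothetical usage sketch (task-type names assumed, illustration only):
   a single up/down pair is created once per hierarchy and attached to every
   cell in it. */
/* struct task *up = scheduler_addtask( &e->sched , task_type_grav_up , task_subtype_none , 0 , 0 , c , NULL , 0 );
   struct task *down = scheduler_addtask( &e->sched , task_type_grav_down , task_subtype_none , 0 , 0 , c , NULL , 0 );
   engine_addtasks_grav( e , c , up , down ); */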


/**
 * @brief Add send tasks to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj The receiving #cell.
 */

void engine_addtasks_send ( struct engine *e , struct cell *ci , struct cell *cj ) {

    int k;
    struct link *l = NULL;
    struct scheduler *s = &e->sched;

    /* Check if any of the density tasks are for the target node. */
    for ( l = ci->density ; l != NULL ; l = l->next )
        if ( l->t->ci->nodeID == cj->nodeID ||
             ( l->t->cj != NULL && l->t->cj->nodeID == cj->nodeID ) )
            break;

    /* If so, attach send tasks. */
    if ( l != NULL ) {

        /* Create the tasks. */
        struct task *t_xv = scheduler_addtask( &e->sched , task_type_send , task_subtype_none , 2*ci->tag , 0 , ci , cj , 0 );
        struct task *t_rho = scheduler_addtask( &e->sched , task_type_send , task_subtype_none , 2*ci->tag + 1 , 0 , ci , cj , 0 );

        /* The send_rho task depends on the cell's ghost task. */
        scheduler_addunlock( s , ci->super->ghost , t_rho );

        /* The send_rho task should unlock the super-cell's kick2 task. */
        scheduler_addunlock( s , t_rho , ci->super->kick2 );

        /* The send_xv task should unlock the super-cell's ghost task. */
        scheduler_addunlock( s , t_xv , ci->super->ghost );

        }
        
    /* Recurse? */
    else if ( ci->split )
        for ( k = 0 ; k < 8 ; k++ )
            if ( ci->progeny[k] != NULL )
                engine_addtasks_send( e , ci->progeny[k] , cj );

    }
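
/* On the sending side the unlocks above form the chain
   send_xv -> ghost -> send_rho -> kick2 (all on ci's super-cell):
   positions go out before the ghost runs, and the updated densities
   go out before the final kick. */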


/**
 * @brief Add recv tasks to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 */

void engine_addtasks_recv ( struct engine *e , struct cell *c , struct task *t_xv , struct task *t_rho ) {

    int k;
    struct scheduler *s = &e->sched;

    /* Do we need to construct a recv task? */
    if ( t_xv == NULL && c->nr_density > 0 ) {
    
        /* Create the tasks. */
        t_xv = c->recv_xv = scheduler_addtask( &e->sched , task_type_recv , task_subtype_none , 2*c->tag , 0 , c , NULL , 0 );
        t_rho = c->recv_rho = scheduler_addtask( &e->sched , task_type_recv , task_subtype_none , 2*c->tag + 1 , 0 , c , NULL , 0 );
        
        }
        
    /* Add dependencies. */
    for ( struct link *l = c->density ; l != NULL ; l = l->next ) {
        scheduler_addunlock( s , t_xv , l->t );
        scheduler_addunlock( s , l->t , t_rho );
        }
    for ( struct link *l = c->force ; l != NULL ; l = l->next )
        scheduler_addunlock( s , t_rho , l->t );
    if ( c->sorts != NULL )
        scheduler_addunlock( s , t_xv , c->sorts );
    
    /* Recurse? */
    if ( c->split )
        for ( k = 0 ; k < 8 ; k++ )
            if ( c->progeny[k] != NULL )
                engine_addtasks_recv( e , c->progeny[k] , t_xv , t_rho );

    }


/**
 * @brief Exchange cell structures with other nodes.
 *
 * @param e The #engine.
 */
 
void engine_exchange_cells ( struct engine *e ) {

#ifdef WITH_MPI

    int j, k, pid, count = 0;
    struct pcell *pcells;
    struct space *s = e->s;
    struct cell *cells = s->cells;
    int nr_cells = s->nr_cells;
    int nr_proxies = e->nr_proxies;
    int offset[ nr_cells ];
    MPI_Request reqs_in[ engine_maxproxies ];
    MPI_Request reqs_out[ engine_maxproxies ];
    MPI_Status status;
    struct part *parts = &s->parts[ s->nr_parts ];
    
    /* Run through the cells and get the size of the ones that will be sent off. */
    for ( k = 0 ; k < nr_cells ; k++ ) {
        offset[k] = count;
        if ( cells[k].sendto )
            count += ( cells[k].pcell_size = cell_getsize( &cells[k] ) );
        }
        
    /* Allocate the pcells. */
    if ( ( pcells = (struct pcell *)malloc( sizeof(struct pcell) * count ) ) == NULL )
        error( "Failed to allocate pcell buffer." );
        
    /* Pack the cells. */
    cell_next_tag = 0;
    for ( k = 0 ; k < nr_cells ; k++ )
        if ( cells[k].sendto ) {
            cell_pack( &cells[k] , &pcells[ offset[k] ] );
            cells[k].pcell = &pcells[ offset[k] ];
            }

    /* Launch the proxies. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        proxy_cells_exch1( &e->proxies[k] );
        reqs_in[k] = e->proxies[k].req_cells_count_in;
        reqs_out[k] = e->proxies[k].req_cells_count_out;
        }
        
    /* Wait for each count to come in and start the recv. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        if ( MPI_Waitany( nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
             pid == MPI_UNDEFINED )
            error( "MPI_Waitany failed." );
        // message( "request from proxy %i has arrived." , pid );
        proxy_cells_exch2( &e->proxies[pid] );
        }
        
    /* Wait for all the sends to have finished too. */
    if ( MPI_Waitall( nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
        error( "MPI_Waitall on sends failed." );
        
    /* Set the requests for the cells. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        reqs_in[k] = e->proxies[k].req_cells_in;
        reqs_out[k] = e->proxies[k].req_cells_out;
        }
    
    /* Wait for each pcell array to come in from the proxies. */
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        if ( MPI_Waitany( nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
             pid == MPI_UNDEFINED )
            error( "MPI_Waitany failed." );
        // message( "cell data from proxy %i has arrived." , pid );
        for ( count = 0 , j = 0 ; j < e->proxies[pid].nr_cells_in ; j++ )
            count += cell_unpack( &e->proxies[pid].pcells_in[count] , e->proxies[pid].cells_in[j] , e->s );
        }
        
    /* Wait for all the sends to have finished too. */
    if ( MPI_Waitall( nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
        error( "MPI_Waitall on sends failed." );
        
    /* Count the number of particles we need to import and re-allocate
       the buffer if needed. */
    for ( count = 0 , k = 0 ; k < nr_proxies ; k++ )
        for ( j = 0 ; j < e->proxies[k].nr_cells_in ; j++ )
            count += e->proxies[k].cells_in[j]->count;
    if ( count > s->size_parts_foreign ) {
        if ( s->parts_foreign != NULL )
            free( s->parts_foreign );
        s->size_parts_foreign = 1.1 * count;
        if ( posix_memalign( (void **)&s->parts_foreign , part_align , sizeof(struct part) * s->size_parts_foreign ) != 0 )
            error( "Failed to allocate foreign part data." );
        }
        
    /* Unpack the cells and link to the particle data. */
    parts = s->parts_foreign;
    for ( k = 0 ; k < nr_proxies ; k++ ) {
        for ( count = 0 , j = 0 ; j < e->proxies[k].nr_cells_in ; j++ ) {
            count += cell_link( e->proxies[k].cells_in[j] , parts );
            parts = &parts[ e->proxies[k].cells_in[j]->count ];
            }
        }
    s->nr_parts_foreign = parts - s->parts_foreign;
        
    /* Is the parts buffer large enough? */
    if ( s->nr_parts_foreign > s->size_parts_foreign )
        error( "Foreign parts buffer too small." );
        
    /* Free the pcell buffer. */
    free( pcells );
    
#else
    error( "SWIFT was not compiled with MPI support." );
#endif

    }
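
/* The exchange above is a two-phase handshake per proxy: the packed cell
   counts are swapped first (proxy_cells_exch1/proxy_cells_exch2), then the
   pcell payloads themselves, with MPI_Waitany servicing whichever proxy's
   message lands first. */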


/**
 * @brief Exchange straying parts with other nodes.
 *
 * @param e The #engine.
 * @param offset The index in the parts array at which the stray parts begin.
 * @param ind The cell indices of the stray parts.
 * @param N The number of stray parts.
 *
 * @return The number of arrived parts copied to parts and xparts.
 */
 
int engine_exchange_strays ( struct engine *e , int offset , int *ind , int N ) {

#ifdef WITH_MPI

    int k, pid, count = 0, nr_in = 0, nr_out = 0;
    MPI_Request reqs_in[ 2*engine_maxproxies ];
    MPI_Request reqs_out[ 2*engine_maxproxies ];
    MPI_Status status;
    struct proxy *p;
    struct space *s = e->s;

    /* Re-set the proxies. */
    for ( k = 0 ; k < e->nr_proxies ; k++ )
        e->proxies[k].nr_parts_out = 0;
    
    /* Put the parts into the corresponding proxies. */
    for ( k = 0 ; k < N ; k++ ) {
        int node_id = e->s->cells[ ind[k] ].nodeID;
        if (node_id < 0 || node_id >= e->nr_nodes)
          error("Bad node ID %i.", node_id);
        pid = e->proxy_ind[ node_id ];
        if ( pid < 0 )
            error( "Do not have a proxy for the requested nodeID %i.", node_id );
        proxy_parts_load( &e->proxies[pid] , &s->parts[offset + k] , &s->xparts[offset + k] , 1 );
        }
    
    /* Launch the proxies. */
    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
        proxy_parts_exch1( &e->proxies[k] );
        reqs_in[k] = e->proxies[k].req_parts_count_in;
        reqs_out[k] = e->proxies[k].req_parts_count_out;
        }
        
    /* Wait for each count to come in and start the recv. */
    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
        if ( MPI_Waitany( e->nr_proxies , reqs_in , &pid , &status ) != MPI_SUCCESS ||
             pid == MPI_UNDEFINED )
            error( "MPI_Waitany failed." );
        // message( "request from proxy %i has arrived." , pid );
        proxy_parts_exch2( &e->proxies[pid] );
        }
        
    /* Wait for all the sends to have finished too. */
    if ( MPI_Waitall( e->nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
        error( "MPI_Waitall on sends failed." );
        
    /* Count the total number of incoming particles and make sure we have
       enough space to accommodate them. */
    int count_in = 0;
    for ( k = 0 ; k < e->nr_proxies ; k++ )
        count_in += e->proxies[k].nr_parts_in;
    message( "sent out %i particles, got %i back." , N , count_in );
    if ( offset + count_in > s->size_parts ) {
        s->size_parts = ( offset + count_in ) * 1.05;
        struct part *parts_new;
        struct xpart *xparts_new;
        if ( posix_memalign( (void **)&parts_new , part_align , sizeof(struct part) * s->size_parts ) != 0 ||
             posix_memalign( (void **)&xparts_new , part_align , sizeof(struct xpart) * s->size_parts ) != 0 )
            error( "Failed to allocate new part data." );
        memcpy( parts_new , s->parts , sizeof(struct part) * offset );
        memcpy( xparts_new , s->xparts , sizeof(struct xpart) * offset );
        free( s->parts );
        free( s->xparts );
        s->parts = parts_new;
        s->xparts = xparts_new;
        }
        
    /* Collect the requests for the particle data from the proxies. */
    for ( k = 0 ; k < e->nr_proxies ; k++ ) {
        if ( e->proxies[k].nr_parts_in > 0 ) {
            reqs_in[2*k] = e->proxies[k].req_parts_in;
            reqs_in[2*k+1] = e->proxies[k].req_xparts_in;
            nr_in += 1;
            }
        else
            reqs_in[2*k] = reqs_in[2*k+1] = MPI_REQUEST_NULL;
        if ( e->proxies[k].nr_parts_out > 0 ) {
            reqs_out[2*k] = e->proxies[k].req_parts_out;
            reqs_out[2*k+1] = e->proxies[k].req_xparts_out;
            nr_out += 1;
            }
        else
            reqs_out[2*k] = reqs_out[2*k+1] = MPI_REQUEST_NULL;
        }
    
    /* Wait for each part array to come in and collect the new
       parts from the proxies. */
    for ( k = 0 ; k < 2*(nr_in + nr_out) ; k++ ) {
        int err;
        if ( ( err = MPI_Waitany( 2*e->nr_proxies , reqs_in , &pid , &status ) ) != MPI_SUCCESS ) {
            char buff[ MPI_MAX_ERROR_STRING ];
            int res;
            MPI_Error_string( err , buff , &res );
            error( "MPI_Waitany failed (%s)." , buff );
            }
        if ( pid == MPI_UNDEFINED )
            break;
        // message( "request from proxy %i has arrived." , pid );
        if ( reqs_in[pid & ~1] == MPI_REQUEST_NULL &&
             reqs_in[pid | 1 ] == MPI_REQUEST_NULL ) {
            p = &e->proxies[pid/2];
            memcpy( &s->parts[offset + count] , p->parts_in , sizeof(struct part) * p->nr_parts_in );
            memcpy( &s->xparts[offset + count] , p->xparts_in , sizeof(struct xpart) * p->nr_parts_in );
            count += p->nr_parts_in;
            /* for ( int k = 0 ; k < p->nr_parts_in ; k++ )
                message( "received particle %lli, x=[%.3e %.3e %.3e], h=%.3e, from node %i." ,
                    p->parts_in[k].id , p->parts_in[k].x[0] , p->parts_in[k].x[1] , p->parts_in[k].x[2] ,
                    p->parts_in[k].h , p->nodeID ); */
            }
        }
    
    /* Wait for all the sends to have finished too. */
    if ( nr_out > 0 )
        if ( MPI_Waitall( 2*e->nr_proxies , reqs_out , MPI_STATUSES_IGNORE ) != MPI_SUCCESS )
            error( "MPI_Waitall on sends failed." );
        
    /* Return the number of harvested parts. */
    return count;
    
#else
    error( "SWIFT was not compiled with MPI support." );
    return 0;
#endif

    }
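
/* A hypothetical usage sketch (names assumed, illustration only): if a
   rebuild has moved N stray parts to the tail of the parts array, with
   ind[k] holding each stray's cell index, the swap would look like: */
/* int nr_recv = engine_exchange_strays( e , s->nr_parts - N , ind , N );
   s->nr_parts = s->nr_parts - N + nr_recv; */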


/**
 * @brief Fill the #space's task list.
 *
 * @param e The #engine we are working with.
 */
 
void engine_maketasks ( struct engine *e ) {

    struct space *s = e->s;
    struct scheduler *sched = &e->sched;
    struct cell *cells = s->cells;
    int nr_cells = s->nr_cells;
    int nodeID = e->nodeID;
    int i, j, k, ii, jj, kk, iii, jjj, kkk, cid, cjd, sid;
    int *cdim = s->cdim;
    struct task *t, *t2;
    struct cell *ci, *cj;

    /* Re-set the scheduler. */
    scheduler_reset( sched , s->tot_cells * engine_maxtaskspercell );
    
    /* Run through the highest level of cells and add pairs. */
    for ( i = 0 ; i < cdim[0] ; i++ )
        for ( j = 0 ; j < cdim[1] ; j++ )
            for ( k = 0 ; k < cdim[2] ; k++ ) {
                cid = cell_getid( cdim , i , j , k );
                ci = &cells[cid];
                if ( ci->count == 0 )
                    continue;
                if ( ci->nodeID == nodeID )
                    scheduler_addtask( sched , task_type_self , task_subtype_density , 0 , 0 , ci , NULL , 0 );
                for ( ii = -1 ; ii < 2 ; ii++ ) {
                    iii = i + ii;
                    if ( !s->periodic && ( iii < 0 || iii >= cdim[0] ) )
                        continue;
                    iii = ( iii + cdim[0] ) % cdim[0];
                    for ( jj = -1 ; jj < 2 ; jj++ ) {
                        jjj = j + jj;
                        if ( !s->periodic && ( jjj < 0 || jjj >= cdim[1] ) )
                            continue;
                        jjj = ( jjj + cdim[1] ) % cdim[1];
                        for ( kk = -1 ; kk < 2 ; kk++ ) {
                            kkk = k + kk;