/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* This object's header. */
#include "space.h"

/* Local headers. */
#include "atomic.h"
#include "engine.h"
#include "error.h"
#include "kernel_hydro.h"
#include "lock.h"
#include "minmax.h"
#include "runner.h"
#include "threadpool.h"
#include "tools.h"

/* Split size. */
int space_splitsize = space_splitsize_default;
int space_subsize = space_subsize_default;
int space_maxsize = space_maxsize_default;

/* Map shift vector to sortlist. */
const int sortlistID[27] = {
    /* ( -1 , -1 , -1 ) */ 0,
    /* ( -1 , -1 ,  0 ) */ 1,
    /* ( -1 , -1 ,  1 ) */ 2,
    /* ( -1 ,  0 , -1 ) */ 3,
    /* ( -1 ,  0 ,  0 ) */ 4,
    /* ( -1 ,  0 ,  1 ) */ 5,
    /* ( -1 ,  1 , -1 ) */ 6,
    /* ( -1 ,  1 ,  0 ) */ 7,
    /* ( -1 ,  1 ,  1 ) */ 8,
    /* (  0 , -1 , -1 ) */ 9,
    /* (  0 , -1 ,  0 ) */ 10,
    /* (  0 , -1 ,  1 ) */ 11,
    /* (  0 ,  0 , -1 ) */ 12,
    /* (  0 ,  0 ,  0 ) */ 0,
    /* (  0 ,  0 ,  1 ) */ 12,
    /* (  0 ,  1 , -1 ) */ 11,
    /* (  0 ,  1 ,  0 ) */ 10,
    /* (  0 ,  1 ,  1 ) */ 9,
    /* (  1 , -1 , -1 ) */ 8,
    /* (  1 , -1 ,  0 ) */ 7,
    /* (  1 , -1 ,  1 ) */ 6,
    /* (  1 ,  0 , -1 ) */ 5,
    /* (  1 ,  0 ,  0 ) */ 4,
    /* (  1 ,  0 ,  1 ) */ 3,
    /* (  1 ,  1 , -1 ) */ 2,
    /* (  1 ,  1 ,  0 ) */ 1,
    /* (  1 ,  1 ,  1 ) */ 0};
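/* The table is indexed by 9 * (i + 1) + 3 * (j + 1) + (k + 1), where (i, j, k)
   is the relative shift between the two cells with each component in
   {-1, 0, 1}. A shift and its mirror image map to the same sort-list ID since
   both pairs share the same sorting axis; runner_flip[] records which of the
   two orderings requires the cells to be swapped. */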

/**
 * @brief Get the shift-id of the given pair of cells, swapping them
 *      if need be.
 *
 * @param s The #space.
 * @param ci Pointer to first #cell.
 * @param cj Pointer to second #cell.
 * @param shift Vector from ci to cj.
 *
 * @return The shift ID, having set shift and possibly swapped ci and cj.
 */

int space_getsid(struct space *s, struct cell **ci, struct cell **cj,
                 double *shift) {

  /* Get the relative distance between the pairs, wrapping. */
  const int periodic = s->periodic;
  double dx[3];
  for (int k = 0; k < 3; k++) {
    dx[k] = (*cj)->loc[k] - (*ci)->loc[k];
    if (periodic && dx[k] < -s->dim[k] / 2)
      shift[k] = s->dim[k];
    else if (periodic && dx[k] > s->dim[k] / 2)
      shift[k] = -s->dim[k];
    else
      shift[k] = 0.0;
    dx[k] += shift[k];
  }

  /* Get the sorting index. */
  int sid = 0;
  for (int k = 0; k < 3; k++)
    sid = 3 * sid + ((dx[k] < 0.0) ? 0 : ((dx[k] > 0.0) ? 2 : 1));

  /* Switch the cells around? */
  if (runner_flip[sid]) {
    struct cell *temp = *ci;
    *ci = *cj;
    *cj = temp;
    for (int k = 0; k < 3; k++) shift[k] = -shift[k];
  }
  sid = sortlistID[sid];

  /* Return the sort ID. */
  return sid;
}

/**
 * @brief Recursively dismantle a cell tree.
 *
 * @param s The #space.
 * @param c The #cell whose progeny will be recycled.
 */

void space_rebuild_recycle(struct space *s, struct cell *c) {

  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL) {
        space_rebuild_recycle(s, c->progeny[k]);
        space_recycle(s, c->progeny[k]);
        c->progeny[k] = NULL;
      }
}

/**
 * @brief Re-build the cell grid.
 *
 * @param s The #space.
 * @param cell_max Maximum cell edge length.
 * @param verbose Print messages to stdout or not.
 */

void space_regrid(struct space *s, double cell_max, int verbose) {

  const size_t nr_parts = s->nr_parts;
  struct cell *restrict c;
  ticks tic = getticks();

  /* Run through the parts and get the current h_max. */
  // tic = getticks();
  float h_max = s->cell_min / kernel_gamma / space_stretch;
  if (nr_parts > 0) {
    if (s->cells != NULL) {
      for (int k = 0; k < s->nr_cells; k++) {
        if (s->cells[k].h_max > h_max) h_max = s->cells[k].h_max;
      }
    } else {
      for (size_t k = 0; k < nr_parts; k++) {
        if (s->parts[k].h > h_max) h_max = s->parts[k].h;
      }
      s->h_max = h_max;
    }
  }

/* If we are running in parallel, make sure everybody agrees on
   how large the largest cell should be. */
#ifdef WITH_MPI
  {
    float buff;
    if (MPI_Allreduce(&h_max, &buff, 1, MPI_FLOAT, MPI_MAX, MPI_COMM_WORLD) !=
        MPI_SUCCESS)
      error("Failed to aggregate the largest smoothing length across nodes.");
    h_max = buff;
  }
#endif
  if (verbose) message("h_max is %.3e (cell_max=%.3e).", h_max, cell_max);

  /* Get the new putative cell dimensions. */
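  /* The new cell edge length is the larger of cell_max and the maximal
     interaction radius h_max * kernel_gamma, inflated by space_stretch. */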
  int cdim[3];
  for (int k = 0; k < 3; k++)
    cdim[k] =
        floor(s->dim[k] / fmax(h_max * kernel_gamma * space_stretch, cell_max));

  /* Check if we have enough cells for periodicity. */
  if (s->periodic && (cdim[0] < 3 || cdim[1] < 3 || cdim[2] < 3))
    error(
        "Must have at least 3 cells in each spatial dimension when periodicity "
        "is switched on.");

/* In MPI-Land, changing the top-level cell size requires that the
 * global partition is recomputed and the particles redistributed.
 * Be prepared to do that. */
#ifdef WITH_MPI
  double oldh[3];
  double oldcdim[3];
  int *oldnodeIDs = NULL;
  if (cdim[0] < s->cdim[0] || cdim[1] < s->cdim[1] || cdim[2] < s->cdim[2]) {

    /* Capture state of current space. */
    oldcdim[0] = s->cdim[0];
    oldcdim[1] = s->cdim[1];
    oldcdim[2] = s->cdim[2];
    oldh[0] = s->h[0];
    oldh[1] = s->h[1];
    oldh[2] = s->h[2];

    if ((oldnodeIDs = (int *)malloc(sizeof(int) * s->nr_cells)) == NULL)
      error("Failed to allocate temporary nodeIDs.");

    int cid = 0;
    for (int i = 0; i < s->cdim[0]; i++) {
      for (int j = 0; j < s->cdim[1]; j++) {
        for (int k = 0; k < s->cdim[2]; k++) {
          cid = cell_getid(oldcdim, i, j, k);
          oldnodeIDs[cid] = s->cells[cid].nodeID;
        }
      }
    }
  }

#endif

  /* Do we need to re-build the upper-level cells? */
  // tic = getticks();
  if (s->cells == NULL || cdim[0] < s->cdim[0] || cdim[1] < s->cdim[1] ||
      cdim[2] < s->cdim[2]) {

    /* Free the old cells, if they were allocated. */
    if (s->cells != NULL) {
      for (int k = 0; k < s->nr_cells; k++) {
        space_rebuild_recycle(s, &s->cells[k]);
        if (s->cells[k].sort != NULL) free(s->cells[k].sort);
      }
      free(s->cells);
      s->maxdepth = 0;
    }

    /* Set the new cell dimensions only if smaller. */
    for (int k = 0; k < 3; k++) {
      s->cdim[k] = cdim[k];
      s->h[k] = s->dim[k] / cdim[k];
      s->ih[k] = 1.0 / s->h[k];
    }
    const float dmin = fminf(s->h[0], fminf(s->h[1], s->h[2]));

    /* Allocate the highest level of cells. */
    s->tot_cells = s->nr_cells = cdim[0] * cdim[1] * cdim[2];
    if (posix_memalign((void *)&s->cells, 64,
                       s->nr_cells * sizeof(struct cell)) != 0)
      error("Failed to allocate cells.");
    bzero(s->cells, s->nr_cells * sizeof(struct cell));
    for (int k = 0; k < s->nr_cells; k++)
      if (lock_init(&s->cells[k].lock) != 0) error("Failed to init spinlock.");

    /* Set the cell location and sizes. */
    for (int i = 0; i < cdim[0]; i++)
      for (int j = 0; j < cdim[1]; j++)
        for (int k = 0; k < cdim[2]; k++) {
          c = &s->cells[cell_getid(cdim, i, j, k)];
          c->loc[0] = i * s->h[0];
          c->loc[1] = j * s->h[1];
          c->loc[2] = k * s->h[2];
          c->h[0] = s->h[0];
          c->h[1] = s->h[1];
          c->h[2] = s->h[2];
          c->dmin = dmin;
          c->depth = 0;
          c->count = 0;
          c->gcount = 0;
          c->super = c;
          lock_init(&c->lock);
        }

    /* Be verbose about the change. */
    if (verbose)
      message("set cell dimensions to [ %i %i %i ].", cdim[0], cdim[1],
              cdim[2]);
    fflush(stdout);

#ifdef WITH_MPI
    if (oldnodeIDs != NULL) {
      /* We have changed the top-level cell dimension, so need to redistribute
       * cells around the nodes. We repartition using the old space node
       * positions as a grid to resample. */
      if (s->e->nodeID == 0)
        message(
            "basic cell dimensions have increased - recalculating the "
            "global partition.");

      if (!partition_space_to_space(oldh, oldcdim, oldnodeIDs, s)) {

        /* Failed, try another technique that requires no settings. */
        message("Failed to get a new partition, trying less optimal method");
        struct partition initial_partition;
#ifdef HAVE_METIS
        initial_partition.type = INITPART_METIS_NOWEIGHT;
#else
        initial_partition.type = INITPART_VECTORIZE;
#endif
        partition_initial_partition(&initial_partition, s->e->nodeID,
                                    s->e->nr_nodes, s);
      }

      /* Re-distribute the particles to their new nodes. */
      engine_redistribute(s->e);

      /* Make the proxies. */
      engine_makeproxies(s->e);

      /* Finished with these. */
      free(oldnodeIDs);
    }
#endif
  } /* re-build upper-level cells? */
  // message( "rebuilding upper-level cells took %.3f %s." ,
  // clocks_from_ticks(getticks() - tic), clocks_getunit());

  /* Otherwise, just clean up the cells. */
  else {

    /* Recycle the cells' sub-trees and reset their task pointers and counters. */
    for (int k = 0; k < s->nr_cells; k++) {
      space_rebuild_recycle(s, &s->cells[k]);
      s->cells[k].sorts = NULL;
      s->cells[k].nr_tasks = 0;
      s->cells[k].nr_density = 0;
      s->cells[k].nr_force = 0;
      s->cells[k].density = NULL;
      s->cells[k].force = NULL;
      s->cells[k].dx_max = 0.0f;
      s->cells[k].sorted = 0;
      s->cells[k].count = 0;
      s->cells[k].gcount = 0;
      s->cells[k].init = NULL;
      s->cells[k].ghost = NULL;
      s->cells[k].drift = NULL;
      s->cells[k].kick = NULL;
      s->cells[k].super = &s->cells[k];
    }
    s->maxdepth = 0;
  }

  if (verbose)
    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
            clocks_getunit());
}

/**
 * @brief Re-build the cells as well as the tasks.
 *
 * @param s The #space in which to update the cells.
 * @param cell_max Maximal cell size.
 * @param verbose Print messages to stdout or not.
 *
 */

void space_rebuild(struct space *s, double cell_max, int verbose) {

  const ticks tic = getticks();

  /* Be verbose about this. */
  // message("(re)building space..."); fflush(stdout);

  /* Re-grid if necessary, or just re-set the cell data. */
  space_regrid(s, cell_max, verbose);

  size_t nr_parts = s->nr_parts;
  size_t nr_gparts = s->nr_gparts;
  struct cell *restrict cells = s->cells;

  const double ih[3] = {s->ih[0], s->ih[1], s->ih[2]};
  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
  const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};

  /* Run through the particles and get their cell index. */
  // tic = getticks();
  const size_t ind_size = s->size_parts;
  int *ind;
  if ((ind = (int *)malloc(sizeof(int) * ind_size)) == NULL)
    error("Failed to allocate temporary particle indices.");
  for (size_t k = 0; k < nr_parts; k++) {
    struct part *restrict p = &s->parts[k];
    for (int j = 0; j < 3; j++)
      if (p->x[j] < 0.0)
        p->x[j] += dim[j];
      else if (p->x[j] >= dim[j])
        p->x[j] -= dim[j];
    ind[k] =
        cell_getid(cdim, p->x[0] * ih[0], p->x[1] * ih[1], p->x[2] * ih[2]);
    cells[ind[k]].count++;
  }
  // message( "getting particle indices took %.3f %s." ,
  // clocks_from_ticks(getticks() - tic), clocks_getunit());

  /* Run through the gravity particles and get their cell index. */
  // tic = getticks();
  const size_t gind_size = s->size_gparts;
  int *gind;
  if ((gind = (int *)malloc(sizeof(int) * gind_size)) == NULL)
    error("Failed to allocate temporary g-particle indices.");
  for (size_t k = 0; k < nr_gparts; k++) {
    struct gpart *restrict gp = &s->gparts[k];
    for (int j = 0; j < 3; j++)
      if (gp->x[j] < 0.0)
        gp->x[j] += dim[j];
      else if (gp->x[j] >= dim[j])
        gp->x[j] -= dim[j];
    gind[k] =
        cell_getid(cdim, gp->x[0] * ih[0], gp->x[1] * ih[1], gp->x[2] * ih[2]);
    cells[gind[k]].gcount++;
  }
// message( "getting particle indices took %.3f %s." ,
// clocks_from_ticks(getticks() - tic), clocks_getunit());

#ifdef WITH_MPI
  /* Move non-local parts to the end of the list. */
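  /* Particles that belong to a cell owned by another rank are swapped to the
     tail of the arrays; [0, nr_parts) then only contains local particles and
     the tail is handed over to engine_exchange_strays() below. */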
  const int local_nodeID = s->e->nodeID;
  for (size_t k = 0; k < nr_parts;) {
    if (cells[ind[k]].nodeID != local_nodeID) {
      cells[ind[k]].count -= 1;
      nr_parts -= 1;
      const struct part tp = s->parts[k];
      s->parts[k] = s->parts[nr_parts];
      s->parts[nr_parts] = tp;
      if (s->parts[k].gpart != NULL) {
        s->parts[k].gpart->part = &s->parts[k];
      }
      if (s->parts[nr_parts].gpart != NULL) {
        s->parts[nr_parts].gpart->part = &s->parts[nr_parts];
      }
      const struct xpart txp = s->xparts[k];
      s->xparts[k] = s->xparts[nr_parts];
      s->xparts[nr_parts] = txp;
      const int t = ind[k];
      ind[k] = ind[nr_parts];
      ind[nr_parts] = t;
    } else {
      /* Increment when not exchanging otherwise we need to retest "k".*/
      k++;
    }
  }

  /* Check that all parts are in the correct places. */
  /*  for (size_t k = 0; k < nr_parts; k++) {
    if (cells[ind[k]].nodeID != local_nodeID) {
      error("Failed to move all non-local parts to send list");
    }
  }
  for (size_t k = nr_parts; k < s->nr_parts; k++) {
    if (cells[ind[k]].nodeID == local_nodeID) {
      error("Failed to remove local parts from send list");
    }
  }*/

  /* Move non-local gparts to the end of the list. */
  for (size_t k = 0; k < nr_gparts;) {
    if (cells[gind[k]].nodeID != local_nodeID) {
      cells[gind[k]].gcount -= 1;
      nr_gparts -= 1;
      const struct gpart tp = s->gparts[k];
      s->gparts[k] = s->gparts[nr_gparts];
      s->gparts[nr_gparts] = tp;
      if (s->gparts[k].id > 0) {
        s->gparts[k].part->gpart = &s->gparts[k];
      }
      if (s->gparts[nr_gparts].id > 0) {
        s->gparts[nr_gparts].part->gpart = &s->gparts[nr_gparts];
      }
      const int t = gind[k];
      gind[k] = gind[nr_gparts];
      gind[nr_gparts] = t;
    } else {
      /* Increment when not exchanging otherwise we need to retest "k".*/
      k++;
    }
  }

  /* Check that all gparts are in the correct place (untested). */
  /*
  for (size_t k = 0; k < nr_gparts; k++) {
    if (cells[gind[k]].nodeID != local_nodeID) {
      error("Failed to move all non-local gparts to send list");
    }
  }
  for (size_t k = nr_gparts; k < s->nr_gparts; k++) {
    if (cells[gind[k]].nodeID == local_nodeID) {
      error("Failed to remove local gparts from send list");
    }
  }*/

  /* Exchange the strays, note that this potentially re-allocates
     the parts arrays. */
  size_t nr_parts_exchanged = s->nr_parts - nr_parts;
  size_t nr_gparts_exchanged = s->nr_gparts - nr_gparts;
  engine_exchange_strays(s->e, nr_parts, &ind[nr_parts], &nr_parts_exchanged,
                         nr_gparts, &gind[nr_gparts], &nr_gparts_exchanged);

  /* Set the new particle counts. */
  s->nr_parts = nr_parts + nr_parts_exchanged;
  s->nr_gparts = nr_gparts + nr_gparts_exchanged;

  /* Re-allocate the index array if needed. */
  if (s->nr_parts > ind_size) {
    int *ind_new;
    if ((ind_new = (int *)malloc(sizeof(int) * s->nr_parts)) == NULL)
      error("Failed to allocate temporary particle indices.");
    memcpy(ind_new, ind, sizeof(int) * nr_parts);
    free(ind);
    ind = ind_new;
  }

  /* Assign each received particle to its cell. */
  for (size_t k = nr_parts; k < s->nr_parts; k++) {
    const struct part *const p = &s->parts[k];
    ind[k] =
        cell_getid(cdim, p->x[0] * ih[0], p->x[1] * ih[1], p->x[2] * ih[2]);
    cells[ind[k]].count += 1;
    /* if ( cells[ ind[k] ].nodeID != nodeID )
        error( "Received part that does not belong to me (nodeID=%i)." , cells[
       ind[k] ].nodeID ); */
  }
  nr_parts = s->nr_parts;
#endif

  /* Sort the parts according to their cells. */
  space_parts_sort(s, ind, nr_parts, 0, s->nr_cells - 1, verbose);

  /* Re-link the gparts. */
  for (size_t k = 0; k < nr_parts; k++)
    if (s->parts[k].gpart != NULL) s->parts[k].gpart->part = &s->parts[k];

  /* Verify sort_struct. */
  /* for ( k = 1 ; k < nr_parts ; k++ ) {
      if ( ind[k-1] > ind[k] ) {
          error( "Sort failed!" );
          }
      else if ( ind[k] != cell_getid( cdim , parts[k].x[0]*ih[0] ,
     parts[k].x[1]*ih[1] , parts[k].x[2]*ih[2] ) )
          error( "Incorrect indices!" );
      } */

  /* We no longer need the indices as of here. */
  free(ind);

#ifdef WITH_MPI

  /* Re-allocate the index array if needed.. */
  if (s->nr_gparts > gind_size) {
    int *gind_new;
    if ((gind_new = (int *)malloc(sizeof(int) * s->nr_gparts)) == NULL)
      error("Failed to allocate temporary g-particle indices.");
    memcpy(gind_new, gind, sizeof(int) * nr_gparts);
    free(gind);
    gind = gind_new;
  }

  /* Assign each received g-particle to its cell. */
  for (size_t k = nr_gparts; k < s->nr_gparts; k++) {
    const struct gpart *const p = &s->gparts[k];
    gind[k] =
        cell_getid(cdim, p->x[0] * ih[0], p->x[1] * ih[1], p->x[2] * ih[2]);
    cells[gind[k]].gcount += 1;
    /* if ( cells[ gind[k] ].nodeID != nodeID )
        error( "Received gpart that does not belong to me (nodeID=%i)." , cells[
       gind[k] ].nodeID ); */
  }
  nr_gparts = s->nr_gparts;

#endif

  /* Sort the gparts according to their cells. */
  space_gparts_sort(s, gind, nr_gparts, 0, s->nr_cells - 1, verbose);

  /* Re-link the parts. */
  for (size_t k = 0; k < nr_gparts; k++)
    if (s->gparts[k].id > 0) s->gparts[k].part->gpart = &s->gparts[k];

  /* We no longer need the indices as of here. */
  free(gind);

  /* Verify that the links are correct */
  /* MATTHIEU: To be commented out once we are happy */
  for (size_t k = 0; k < nr_gparts; ++k) {

    if (s->gparts[k].id > 0) {

      if (s->gparts[k].part->gpart != &s->gparts[k]) error("Linking problem !");

      if (s->gparts[k].x[0] != s->gparts[k].part->x[0] ||
          s->gparts[k].x[1] != s->gparts[k].part->x[1] ||
          s->gparts[k].x[2] != s->gparts[k].part->x[2])
        error("Linked particles are not at the same position !");
    }
  }
  for (size_t k = 0; k < nr_parts; ++k) {

    if (s->parts[k].gpart != NULL) {

      if (s->parts[k].gpart->part != &s->parts[k]) error("Linking problem !");
    }
  }

  /* Hook the cells up to the parts. */
  // tic = getticks();
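  /* The parts are now sorted by cell index, so every cell owns a contiguous
     slice of the part, xpart and gpart arrays; hand each cell its slice. */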
  struct part *finger = s->parts;
  struct xpart *xfinger = s->xparts;
  struct gpart *gfinger = s->gparts;
  for (int k = 0; k < s->nr_cells; k++) {
    struct cell *restrict c = &cells[k];
    c->parts = finger;
    c->xparts = xfinger;
    c->gparts = gfinger;
    finger = &finger[c->count];
    xfinger = &xfinger[c->count];
    gfinger = &gfinger[c->gcount];
  }
  // message( "hooking up cells took %.3f %s." ,
  // clocks_from_ticks(getticks() - tic), clocks_getunit());

  /* At this point, we have the upper-level cells, old or new. Now make
     sure that the parts in each cell are ok. */
  space_split(s, cells, verbose);

  if (verbose)
    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
            clocks_getunit());
}

/**
 * @brief Split particles between cells of a hierarchy
 *
 * @param s The #space.
 * @param cells The cell hierarchy
 * @param verbose Are we talkative ?
 */
void space_split(struct space *s, struct cell *cells, int verbose) {

  const ticks tic = getticks();
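  /* Split each top-level cell (and its progeny) in parallel over the
     threadpool; space_split_mapper() does the per-cell work. */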

  threadpool_map(&s->e->threadpool, space_split_mapper, cells, s->nr_cells,
                 sizeof(struct cell), s);

  if (verbose)
    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
            clocks_getunit());
}

/**
 * @brief Sort the particles and condensed particles according to the given
 * indices.
 *
 * @param s The #space.
 * @param ind The indices with respect to which the parts are sorted.
 * @param N The number of parts.
 * @param min Lowest index.
 * @param max Highest index.
 * @param verbose Are we talkative ?
 */

void space_parts_sort(struct space *s, int *ind, size_t N, int min, int max,
                      int verbose) {

  const ticks tic = getticks();

  /* Populate a parallel_sort structure with the input data */
  struct parallel_sort sort_struct;
  sort_struct.parts = s->parts;
  sort_struct.xparts = s->xparts;
  sort_struct.ind = ind;
  sort_struct.stack_size = 2 * (max - min + 1) + 10 + s->e->nr_threads;
  if ((sort_struct.stack = malloc(sizeof(struct qstack) *
                                        sort_struct.stack_size)) == NULL)
    error("Failed to allocate sorting stack.");
  for (int i = 0; i < sort_struct.stack_size; i++)
    sort_struct.stack[i].ready = 0;

  /* Add the first interval. */
  sort_struct.stack[0].i = 0;
  sort_struct.stack[0].j = N - 1;
  sort_struct.stack[0].min = min;
  sort_struct.stack[0].max = max;
  sort_struct.stack[0].ready = 1;
  sort_struct.first = 0;
  sort_struct.last = 1;
  sort_struct.waiting = 1;

  /* Launch the sorting tasks with a stride of zero such that the same
     map data is passed to each thread. */
  threadpool_map(&s->e->threadpool, space_parts_sort_mapper, &sort_struct,
                 s->e->threadpool.num_threads, 0, NULL);

  /* Verify sort_struct. */
  /* for (int i = 1; i < N; i++)
    if (ind[i - 1] > ind[i])
      error("Sorting failed (ind[%i]=%i,ind[%i]=%i), min=%i, max=%i.", i - 1,
  ind[i - 1], i,
            ind[i], min, max);
  message("Sorting succeeded."); */

  /* Clean up. */
  free(sort_struct.stack);

  if (verbose)
    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
            clocks_getunit());
}

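/**
 * @brief #threadpool mapper function used to sort the parts (and their
 *        xparts) in parallel.
 *
 * Each thread repeatedly grabs an index range from the shared stack, applies
 * one QuickSort-style partitioning pass to it, pushes the larger sub-interval
 * back onto the stack for other threads and continues with the smaller one.
 *
 * @param map_data Pointer to the shared #parallel_sort structure.
 * @param extra_data Unused.
 */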
void space_parts_sort_mapper(void *map_data, void *extra_data) {

  /* Unpack the mapping data. */
  struct parallel_sort *sort_struct = (struct parallel_sort *)map_data;

  /* Pointers to the sorting data. */
  int *ind = sort_struct->ind;
  struct part *parts = sort_struct->parts;
  struct xpart *xparts = sort_struct->xparts;

  /* Main loop. */
  while (sort_struct->waiting) {

    /* Grab an interval off the queue. */
    int qid =
        atomic_inc(&sort_struct->first) % sort_struct->stack_size;

    /* Wait for the entry to be ready, or for the sorting to be done. */
    while (!sort_struct->stack[qid].ready)
      if (!sort_struct->waiting) return;

    /* Get the stack entry. */
    ptrdiff_t i = sort_struct->stack[qid].i;
    ptrdiff_t j = sort_struct->stack[qid].j;
    int min = sort_struct->stack[qid].min;
    int max = sort_struct->stack[qid].max;
    sort_struct->stack[qid].ready = 0;

    /* Loop over sub-intervals. */
    while (1) {

      /* Pick the pivot in the middle of the index range. */
      const int pivot = (min + max) / 2;
      /* message("Working on interval [%i,%i] with min=%i, max=%i, pivot=%i.",
              i, j, min, max, pivot); */

      /* One pass of QuickSort's partitioning. */
      ptrdiff_t ii = i;
      ptrdiff_t jj = j;
      while (ii < jj) {
        while (ii <= j && ind[ii] <= pivot) ii++;
        while (jj >= i && ind[jj] > pivot) jj--;
        if (ii < jj) {
          size_t temp_i = ind[ii];
          ind[ii] = ind[jj];
          ind[jj] = temp_i;
          struct part temp_p = parts[ii];
          parts[ii] = parts[jj];
          parts[jj] = temp_p;
          struct xpart temp_xp = xparts[ii];
          xparts[ii] = xparts[jj];
          xparts[jj] = temp_xp;
        }
      }

      /* Verify sort_struct. */
      /* for (int k = i; k <= jj; k++)
        if (ind[k] > pivot) {
          message("sorting failed at k=%i, ind[k]=%i, pivot=%i, i=%i, j=%i.", k,
                  ind[k], pivot, i, j);
          error("Partition failed (<=pivot).");
        }
      for (int k = jj + 1; k <= j; k++)
        if (ind[k] <= pivot) {
          message("sorting failed at k=%i, ind[k]=%i, pivot=%i, i=%i, j=%i.", k,
                  ind[k], pivot, i, j);
          error("Partition failed (>pivot).");
        } */

      /* Split-off largest interval. */
      if (jj - i > j - jj + 1) {

        /* Recurse on the left? */
        if (jj > i && pivot > min) {
          qid = atomic_inc(&sort_struct->last) %
                sort_struct->stack_size;
          while (sort_struct->stack[qid].ready)
            ;
          sort_struct->stack[qid].i = i;
          sort_struct->stack[qid].j = jj;
          sort_struct->stack[qid].min = min;
          sort_struct->stack[qid].max = pivot;
          if (atomic_inc(&sort_struct->waiting) >=
              sort_struct->stack_size)
            error("Qstack overflow.");
          sort_struct->stack[qid].ready = 1;
        }

        /* Recurse on the right? */
        if (jj + 1 < j && pivot + 1 < max) {
          i = jj + 1;
          min = pivot + 1;
        } else
          break;

      } else {

        /* Recurse on the right? */
        if (pivot + 1 < max) {
          qid = atomic_inc(&sort_struct->last) %
                sort_struct->stack_size;
          while (sort_struct->stack[qid].ready)
            ;
          sort_struct->stack[qid].i = jj + 1;
          sort_struct->stack[qid].j = j;
          sort_struct->stack[qid].min = pivot + 1;
          sort_struct->stack[qid].max = max;
          if (atomic_inc(&sort_struct->waiting) >=
              sort_struct->stack_size)
            error("Qstack overflow.");
          sort_struct->stack[qid].ready = 1;
        }

        /* Recurse on the left? */
        if (jj > i && pivot > min) {
          j = jj;
          max = pivot;
        } else
          break;
      }

    } /* loop over sub-intervals. */

    atomic_dec(&sort_struct->waiting);

  } /* main loop. */
}

/**
 * @brief Sort the g-particles according to the given indices.
 *
 * @param s The #space.
 * @param ind The indices with respect to which the gparts are sorted.
 * @param N The number of gparts.
 * @param min Lowest index.
 * @param max Highest index.
 * @param verbose Are we talkative ?
 */
void space_gparts_sort(struct space *s, int *ind, size_t N, int min, int max,
                       int verbose) {

  const ticks tic = getticks();

  /* Populate a global parallel_sort structure with the input data */
  struct parallel_sort sort_struct;
  sort_struct.gparts = s->gparts;
  sort_struct.ind = ind;
  sort_struct.stack_size = 2 * (max - min + 1) + 10 + s->e->nr_threads;
  if ((sort_struct.stack = malloc(sizeof(struct qstack) *
                                        sort_struct.stack_size)) == NULL)
    error("Failed to allocate sorting stack.");
  for (int i = 0; i < sort_struct.stack_size; i++)
    sort_struct.stack[i].ready = 0;

  /* Add the first interval. */
  sort_struct.stack[0].i = 0;
  sort_struct.stack[0].j = N - 1;
  sort_struct.stack[0].min = min;
  sort_struct.stack[0].max = max;
  sort_struct.stack[0].ready = 1;
  sort_struct.first = 0;
  sort_struct.last = 1;
  sort_struct.waiting = 1;

  /* Launch the sorting tasks with a stride of zero such that the same
     map data is passed to each thread. */
  threadpool_map(&s->e->threadpool, space_gparts_sort_mapper, &sort_struct,
                 s->e->threadpool.num_threads, 0, NULL);

  /* Verify sort_struct. */
  /* for (int i = 1; i < N; i++)
    if (ind[i - 1] > ind[i])
      error("Sorting failed (ind[%i]=%i,ind[%i]=%i), min=%i, max=%i.", i - 1,
  ind[i - 1], i,
            ind[i], min, max);
  message("Sorting succeeded."); */

  /* Clean up. */
  free(sort_struct.stack);

  if (verbose)
    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
            clocks_getunit());
}

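/**
 * @brief #threadpool mapper function used to sort the gparts in parallel.
 *
 * Same scheme as space_parts_sort_mapper(), but only the gpart array is
 * moved along with the sorting indices.
 *
 * @param map_data Pointer to the shared #parallel_sort structure.
 * @param extra_data Unused.
 */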
void space_gparts_sort_mapper(void *map_data, void *extra_data) {

  /* Unpack the mapping data. */
  struct parallel_sort *sort_struct = (struct parallel_sort *)map_data;

  /* Pointers to the sorting data. */
  int *ind = sort_struct->ind;
  struct gpart *gparts = sort_struct->gparts;

  /* Main loop. */
  while (sort_struct->waiting) {

    /* Grab an interval off the queue. */
    int qid =
        atomic_inc(&sort_struct->first) % sort_struct->stack_size;

    /* Wait for the entry to be ready, or for the sorting to be done. */
    while (!sort_struct->stack[qid].ready)
      if (!sort_struct->waiting) return;

    /* Get the stack entry. */
    ptrdiff_t i = sort_struct->stack[qid].i;
    ptrdiff_t j = sort_struct->stack[qid].j;
    int min = sort_struct->stack[qid].min;
    int max = sort_struct->stack[qid].max;
    sort_struct->stack[qid].ready = 0;

    /* Loop over sub-intervals. */
    while (1) {

      /* Pick the pivot in the middle of the index range. */
      const int pivot = (min + max) / 2;
      /* message("Working on interval [%i,%i] with min=%i, max=%i, pivot=%i.",
              i, j, min, max, pivot); */

      /* One pass of QuickSort's partitioning. */
      ptrdiff_t ii = i;
      ptrdiff_t jj = j;
      while (ii < jj) {
        while (ii <= j && ind[ii] <= pivot) ii++;
        while (jj >= i && ind[jj] > pivot) jj--;
        if (ii < jj) {
          size_t temp_i = ind[ii];
          ind[ii] = ind[jj];
          ind[jj] = temp_i;
          struct gpart temp_p = gparts[ii];
          gparts[ii] = gparts[jj];
          gparts[jj] = temp_p;
        }
      }

      /* Verify sort_struct. */
      /* for (int k = i; k <= jj; k++)
        if (ind[k] > pivot) {
          message("sorting failed at k=%i, ind[k]=%i, pivot=%i, i=%i, j=%i.", k,
                  ind[k], pivot, i, j);
          error("Partition failed (<=pivot).");
        }
      for (int k = jj + 1; k <= j; k++)
        if (ind[k] <= pivot) {
          message("sorting failed at k=%i, ind[k]=%i, pivot=%i, i=%i, j=%i.", k,
                  ind[k], pivot, i, j);
          error("Partition failed (>pivot).");
        } */

      /* Split-off largest interval. */
      if (jj - i > j - jj + 1) {

        /* Recurse on the left? */
        if (jj > i && pivot > min) {
          qid = atomic_inc(&sort_struct->last) %
                sort_struct->stack_size;
          while (sort_struct->stack[qid].ready)
            ;
          sort_struct->stack[qid].i = i;
          sort_struct->stack[qid].j = jj;
          sort_struct->stack[qid].min = min;
          sort_struct->stack[qid].max = pivot;
          if (atomic_inc(&sort_struct->waiting) >=
              sort_struct->stack_size)
            error("Qstack overflow.");
          sort_struct->stack[qid].ready = 1;
        }

        /* Recurse on the right? */
        if (jj + 1 < j && pivot + 1 < max) {
          i = jj + 1;
          min = pivot + 1;
        } else
          break;

      } else {

        /* Recurse on the right? */
        if (pivot + 1 < max) {
          qid = atomic_inc(&sort_struct->last) %
                sort_struct->stack_size;
          while (sort_struct->stack[qid].ready)
            ;
          sort_struct->stack[qid].i = jj + 1;
          sort_struct->stack[qid].j = j;
          sort_struct->stack[qid].min = pivot + 1;
          sort_struct->stack[qid].max = max;
          if (atomic_inc(&sort_struct->waiting) >=
              sort_struct->stack_size)
            error("Qstack overflow.");
          sort_struct->stack[qid].ready = 1;
        }

        /* Recurse on the left? */
        if (jj > i && pivot > min) {
          j = jj;
          max = pivot;
        } else
          break;
      }

    } /* loop over sub-intervals. */

    atomic_dec(&sort_struct->waiting);

  } /* main loop. */
}

/**
 * @brief Mapping function to free the sorted indices buffers.
 *
 * @param c The #cell whose sort indices will be freed.
 * @param data Unused.
 */

void space_map_clearsort(struct cell *c, void *data) {

  if (c->sort != NULL) {
    free(c->sort);
    c->sort = NULL;
  }
}

/**
 * @brief Map a function to all particles in a cell recursively.
 *
 * @param c The #cell we are working in.
 * @param fun Function pointer to apply to each particle.
 * @param data Data passed to the function fun.
 */

static void rec_map_parts(struct cell *c,
                          void (*fun)(struct part *p, struct cell *c,
                                      void *data),
                          void *data) {