/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *                    Angus Lepper (angus.lepper@ed.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdlib.h>
#include <unistd.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* Load the profiler header, if needed. */
#ifdef WITH_PROFILER
#include <gperftools/profiler.h>
#endif

/* This object's header. */
#include "engine.h"

/* Local headers. */
#include "atomic.h"
#include "cell.h"
#include "clocks.h"
#include "cycle.h"
#include "debug.h"
#include "error.h"
#include "proxy.h"
#include "timers.h"

/**
 * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_grav The send_grav #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
                                  struct cell *cj, struct task *t_grav,
                                  struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the gravity tasks are for the target node. */
  for (l = ci->grav.grav; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_grav == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
                                 ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_gpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The sends should unlock the down pass. */
      scheduler_addunlock(s, t_grav, ci->grav.super->grav.down);

      /* Drift before you send */
      scheduler_addunlock(s, ci->grav.super->grav.drift, t_grav);

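      /* Send the new time-step only once it has been computed. */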
      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_grav);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_xv The send_xv #task, if it has already been created.
 * @param t_rho The send_rho #task, if it has already been created.
 * @param t_gradient The send_gradient #task, if already created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_xv,
                                struct task *t_rho, struct task *t_gradient,
                                struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->hydro.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_xv == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->mpi.tag,
                               0, ci, cj);
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
                                ci->mpi.tag, 0, ci, cj);

#ifdef EXTRA_HYDRO_LOOP
      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
                                     ci->mpi.tag, 0, ci, cj);
#endif

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_part,
                               ci->mpi.tag, 0, ci, cj);

#ifdef EXTRA_HYDRO_LOOP

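      /* The send_gradient task should unlock the super_hydro-cell's
       * end_force task. */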
      scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);

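      /* The send_gradient task depends on the cell's extra_ghost task. */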
      scheduler_addunlock(s, ci->hydro.super->hydro.extra_ghost, t_gradient);

      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.extra_ghost);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#else
      /* The send_rho task should unlock the super_hydro-cell's end_force
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.end_force);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#endif

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_rho);
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);

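      /* Send the new time-step only once it has been computed. */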
      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_xv);
    engine_addlink(e, &ci->mpi.send, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &ci->mpi.send, t_gradient);
#endif
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
                                   t_gradient, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_feedback,
                                struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->stars.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_feedback == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
                                     ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_spart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_feedback task should unlock the super-cell's stars_out
       * task. */
      scheduler_addunlock(s, t_feedback, ci->hydro.super->stars.stars_out);

      /* Ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

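    /* Add them to the local cell. */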
    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 * @param t_gradient The recv_gradient #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
                                struct task *t_xv, struct task *t_rho,
                                struct task *t_gradient, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any hydro tasks ? */
  if (t_xv == NULL && c->hydro.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag, 0,
                             c, NULL);
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->mpi.tag,
                              0, c, NULL);
#ifdef EXTRA_HYDRO_LOOP
    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
                                   c->mpi.tag, 0, c, NULL);
#endif

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_part,
                             c->mpi.tag, 0, c, NULL);
  }

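  /* If we have tasks, link them. */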
  if (t_xv != NULL) {
    engine_addlink(e, &c->mpi.recv, t_xv);
    engine_addlink(e, &c->mpi.recv, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &c->mpi.recv, t_gradient);
#endif
    engine_addlink(e, &c->mpi.recv, t_ti);

    /* Add dependencies. */
    if (c->hydro.sorts != NULL) {
      scheduler_addunlock(s, t_xv, c->hydro.sorts);
      scheduler_addunlock(s, c->hydro.sorts, t_rho);
    }

    for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_xv, l->t);
      scheduler_addunlock(s, l->t, t_rho);
    }
#ifdef EXTRA_HYDRO_LOOP
    for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gradient);
    }
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_gradient, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#else
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#endif

    /* Make sure the density has been computed before the stars compute theirs.
     */
    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient,
                                   t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
                                struct task *t_feedback, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any stars tasks ? */
  if (t_feedback == NULL && c->stars.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_feedback = scheduler_addtask(s, task_type_recv, task_subtype_spart,
                                   c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_spart,
                             c->mpi.tag, 0, c, NULL);
  }

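  /* Record the recv tasks on this foreign cell. */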
  c->mpi.stars.recv = t_feedback;
  c->mpi.stars.recv_ti = t_ti;

#ifdef SWIFT_DEBUG_CHECKS
  if (c->nodeID == e->nodeID) error("Local cell!");
#endif

  if (c->stars.sorts != NULL)
    scheduler_addunlock(s, t_feedback, c->stars.sorts);

  for (struct link *l = c->stars.density; l != NULL; l = l->next) {
    scheduler_addunlock(s, l->t, t_feedback);
  }

  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_feedback, l->t);
    scheduler_addunlock(s, l->t, t_ti);
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_stars(e, c->progeny[k], t_feedback, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_grav The recv_gpart #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
                                  struct task *t_grav, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any gravity tasks ? */
  if (t_grav == NULL && c->grav.grav != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
                               c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_gpart,
                             c->mpi.tag, 0, c, NULL);
  }

  /* If we have tasks, link them. */
  if (t_grav != NULL) {
    engine_addlink(e, &c->mpi.recv, t_grav);
    engine_addlink(e, &c->mpi.recv, t_ti);

    for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_grav, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }
  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Generate the common hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- timestep version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_limiter = (e->policy & engine_policy_limiter);
  const int with_star_formation = (e->policy & engine_policy_star_formation);

  /* Are we at the top-level? */
  if (c->top == c && c->nodeID == e->nodeID) {

    if (with_star_formation && c->hydro.count > 0) {
      c->hydro.star_formation = scheduler_addtask(
          s, task_type_star_formation, task_subtype_none, 0, 0, c, NULL);
    }
  }

  /* Are we in a super-cell ? */
  if (c->super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the two half kicks */
      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
                                   c, NULL);

#if defined(WITH_LOGGER)
      c->logger = scheduler_addtask(s, task_type_logger, task_subtype_none, 0,
                                    0, c, NULL);
#endif

      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
                                   c, NULL);

      /* Add the time-step calculation task and its dependency */
      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
                                      0, 0, c, NULL);

      scheduler_addunlock(s, c->kick2, c->timestep);
      scheduler_addunlock(s, c->timestep, c->kick1);

      /* Subgrid tasks: star formation */
      if (with_star_formation && c->hydro.count > 0) {
        scheduler_addunlock(s, c->kick2, c->top->hydro.star_formation);
        scheduler_addunlock(s, c->top->hydro.star_formation, c->timestep);
      }

      /* Time-step limiting */
      if (with_limiter) {
        c->timestep_limiter = scheduler_addtask(
            s, task_type_timestep_limiter, task_subtype_none, 0, 0, c, NULL);

        /* Make sure it is not run before kick2 */
        scheduler_addunlock(s, c->timestep, c->timestep_limiter);
        scheduler_addunlock(s, c->timestep_limiter, c->kick1);
      }

#if defined(WITH_LOGGER)
      scheduler_addunlock(s, c->kick1, c->logger);
#endif
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
  }
}

/**
 * @brief Generate the gravity hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- gravity version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int periodic = e->s->periodic;
  const int is_self_gravity = (e->policy & engine_policy_self_gravity);

  /* Are we in a super-cell ? */
  if (c->grav.super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      c->grav.drift = scheduler_addtask(s, task_type_drift_gpart,
                                        task_subtype_none, 0, 0, c, NULL);

      c->grav.end_force = scheduler_addtask(s, task_type_end_grav_force,
                                            task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->grav.end_force, c->super->kick2);

      if (is_self_gravity) {

        /* Initialisation of the multipoles */
        c->grav.init = scheduler_addtask(s, task_type_init_grav,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Gravity non-neighbouring pm calculations */
        c->grav.long_range = scheduler_addtask(
            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);

        /* Gravity recursive down-pass */
        c->grav.down = scheduler_addtask(s, task_type_grav_down,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Implicit tasks for the up and down passes */
        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);
        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);
        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        /* Gravity mesh force propagation */
        if (periodic)
          c->grav.mesh = scheduler_addtask(s, task_type_grav_mesh,
                                           task_subtype_none, 0, 0, c, NULL);

        if (periodic) scheduler_addunlock(s, c->grav.drift, c->grav.mesh);
        if (periodic) scheduler_addunlock(s, c->grav.mesh, c->grav.down);
        scheduler_addunlock(s, c->grav.init, c->grav.long_range);
        scheduler_addunlock(s, c->grav.long_range, c->grav.down);
        scheduler_addunlock(s, c->grav.down, c->grav.super->grav.end_force);

        /* Link in the implicit tasks */
        scheduler_addunlock(s, c->grav.init, c->grav.init_out);
        scheduler_addunlock(s, c->grav.drift, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->grav.down);
      }
    }
  }

  /* We are below the super-cell but not below the maximal splitting depth */
  else if ((c->grav.super != NULL) &&
           ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      if (is_self_gravity) {

        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);

        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);

        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        scheduler_addunlock(s, c->parent->grav.init_out, c->grav.init_out);
        scheduler_addunlock(s, c->parent->grav.drift_out, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->parent->grav.down_in);
      }
    }
  }

  /* Recurse but not below the maximal splitting depth */
  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav))
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
}

/**
 * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
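 *
 * @param e The #engine.
 * @param c The #cell.
 * @param ghost_in The implicit #task all ghosts in this hierarchy depend on.
 * @param ghost_out The implicit #task all ghosts in this hierarchy unlock.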
 */
void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
                       struct task *ghost_out) {

  /* Abort as there are no hydro particles here? */
  if (c->hydro.count_total == 0) return;

  /* If we have reached the leaf OR have too few particles to play with */
  if (!c->split || c->hydro.count_total < engine_max_parts_per_ghost) {

    /* Add the ghost task and its dependencies */
    struct scheduler *s = &e->sched;
    c->hydro.ghost =
        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, ghost_in, c->hydro.ghost);
    scheduler_addunlock(s, c->hydro.ghost, ghost_out);

  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- hydro version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_stars = (e->policy & engine_policy_stars);
  const int with_feedback = (e->policy & engine_policy_feedback);
  const int with_cooling = (e->policy & engine_policy_cooling);
  const int with_star_formation = (e->policy & engine_policy_star_formation);

  /* Are we in a super-cell ? */
  if (c->hydro.super == c) {

    /* Add the sort task. */
    c->hydro.sorts =
        scheduler_addtask(s, task_type_sort, task_subtype_none, 0, 0, c, NULL);

    if (with_feedback) {
      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
                                         task_subtype_none, 0, 0, c, NULL);
    }

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the drift task. */
      c->hydro.drift = scheduler_addtask(s, task_type_drift_part,
                                         task_subtype_none, 0, 0, c, NULL);

      /* Add the task finishing the force calculation */
      c->hydro.end_force = scheduler_addtask(s, task_type_end_hydro_force,
                                             task_subtype_none, 0, 0, c, NULL);

      /* Generate the ghost tasks. */
      c->hydro.ghost_in =
          scheduler_addtask(s, task_type_ghost_in, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      c->hydro.ghost_out =
          scheduler_addtask(s, task_type_ghost_out, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      engine_add_ghosts(e, c, c->hydro.ghost_in, c->hydro.ghost_out);

      /* Generate the extra ghost task. */
#ifdef EXTRA_HYDRO_LOOP
      c->hydro.extra_ghost = scheduler_addtask(
          s, task_type_extra_ghost, task_subtype_none, 0, 0, c, NULL);
#endif

      /* Stars */
      if (with_stars) {
        c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
                                           task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->stars.drift, c->super->kick2);
      }

      /* Subgrid tasks: cooling */
      if (with_cooling) {

        c->hydro.cooling = scheduler_addtask(s, task_type_cooling,
                                             task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->hydro.end_force, c->hydro.cooling);
        scheduler_addunlock(s, c->hydro.cooling, c->super->kick2);

      } else {
        scheduler_addunlock(s, c->hydro.end_force, c->super->kick2);
      }

      /* Subgrid tasks: feedback */
      if (with_feedback) {

        c->stars.stars_in =
            scheduler_addtask(s, task_type_stars_in, task_subtype_none, 0,
                              /* implicit = */ 1, c, NULL);

        c->stars.stars_out =
            scheduler_addtask(s, task_type_stars_out, task_subtype_none, 0,
                              /* implicit = */ 1, c, NULL);

        c->stars.ghost = scheduler_addtask(s, task_type_stars_ghost,
                                           task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->super->kick2, c->stars.stars_in);
        scheduler_addunlock(s, c->stars.stars_out, c->super->timestep);

        if (with_star_formation && c->hydro.count > 0) {
          scheduler_addunlock(s, c->top->hydro.star_formation,
                              c->stars.stars_in);
        }
      }
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_hydro(e, c->progeny[k]);
  }
}

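/**
 * @brief Mapper function to construct the hierarchical tasks for a list of
 * top-level cells.
 *
 * @param map_data The array of top-level #cell's.
 * @param num_elements The number of cells in the array.
 * @param extra_data Pointer to the #engine.
 */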
void engine_make_hierarchical_tasks_mapper(void *map_data, int num_elements,
                                           void *extra_data) {

  struct engine *e = (struct engine *)extra_data;
  const int with_hydro = (e->policy & engine_policy_hydro);
  const int with_self_gravity = (e->policy & engine_policy_self_gravity);
  const int with_ext_gravity = (e->policy & engine_policy_external_gravity);

  for (int ind = 0; ind < num_elements; ind++) {
    struct cell *c = &((struct cell *)map_data)[ind];
    /* Make the common tasks (time integration) */
    engine_make_hierarchical_tasks_common(e, c);
    /* Add the hydro stuff */
    if (with_hydro) engine_make_hierarchical_tasks_hydro(e, c);
    /* And the gravity stuff */
    if (with_self_gravity || with_ext_gravity)
      engine_make_hierarchical_tasks_gravity(e, c);
  }
}

/**
 * @brief Constructs the top-level tasks for the short-range gravity
 * and long-range gravity interactions.
 *
 * - All top-cells get a self task.
 * - All pairs within range according to the multipole acceptance
 *   criterion get a pair task.
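 *
 * @param map_data Offset of the first top-level cell index to treat.
 * @param num_elements The number of cell indices to process.
 * @param extra_data Pointer to the #engine.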
 */
void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
                                           void *extra_data) {

  struct engine *e = (struct engine *)extra_data;
  struct space *s = e->s;
  struct scheduler *sched = &e->sched;
  const int nodeID = e->nodeID;
  const int periodic = s->periodic;
  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
  const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};
  struct cell *cells = s->cells_top;
  const double theta_crit = e->gravity_properties->theta_crit;
  const double max_distance = e->mesh->r_cut_max;
  const double max_distance2 = max_distance * max_distance;

  /* Compute how many cells away we need to walk */
  const double distance = 2.5 * cells[0].width[0] / theta_crit;
  int delta = (int)(distance / cells[0].width[0]) + 1;
  int delta_m = delta;
  int delta_p = delta;

  /* Special case where every cell is in range of every other one */
  if (delta >= cdim[0] / 2) {
    if (cdim[0] % 2 == 0) {
      delta_m = cdim[0] / 2;
      delta_p = cdim[0] / 2 - 1;
    } else {
      delta_m = cdim[0] / 2;
      delta_p = cdim[0] / 2;
    }
  }

  /* Loop through the elements, which are just byte offsets from NULL. */
  for (int ind = 0; ind < num_elements; ind++) {

    /* Get the cell index. */
    const int cid = (size_t)(map_data) + ind;

    /* Integer indices of the cell in the top-level grid */
    const int i = cid / (cdim[1] * cdim[2]);
    const int j = (cid / cdim[2]) % cdim[1];
    const int k = cid % cdim[2];

    /* Get the cell */
    struct cell *ci = &cells[cid];

    /* Skip cells without gravity particles */
    if (ci->grav.count == 0) continue;

    /* If the cell is local build a self-interaction */
    if (ci->nodeID == nodeID) {
      scheduler_addtask(sched, task_type_self, task_subtype_grav, 0, 0, ci,
                        NULL);
    }

    /* Loop over every other cell within (Manhattan) range delta */
    for (int ii = -delta_m; ii <= delta_p; ii++) {
      int iii = i + ii;
      if (!periodic && (iii < 0 || iii >= cdim[0])) continue;
      iii = (iii + cdim[0]) % cdim[0];
      for (int jj = -delta_m; jj <= delta_p; jj++) {
        int jjj = j + jj;
        if (!periodic && (jjj < 0 || jjj >= cdim[1])) continue;
        jjj = (jjj + cdim[1]) % cdim[1];
        for (int kk = -delta_m; kk <= delta_p; kk++) {
          int kkk = k + kk;
          if (!periodic && (kkk < 0 || kkk >= cdim[2])) continue;
          kkk = (kkk + cdim[2]) % cdim[2];

          /* Get the cell */
          const int cjd = cell_getid(cdim, iii, jjj, kkk);
          struct cell *cj = &cells[cjd];

          /* Avoid duplicates, empty cells and completely foreign pairs */
          if (cid >= cjd || cj->grav.count == 0 ||
              (ci->nodeID != nodeID && cj->nodeID != nodeID))
            continue;

          /* Recover the multipole information */
          const struct gravity_tensors *multi_i = ci->grav.multipole;
          const struct gravity_tensors *multi_j = cj->grav.multipole;

          if (multi_i == NULL && ci->nodeID != nodeID)
            error("Multipole of ci was not exchanged properly via the proxies");
          if (multi_j == NULL && cj->nodeID != nodeID)
            error("Multipole of cj was not exchanged properly via the proxies");

          /* Minimal distance between any pair of particles */
          const double min_radius2 =
              cell_min_dist2_same_size(ci, cj, periodic, dim);

          /* Are we beyond the distance where the truncated forces are 0? */
          if (periodic && min_radius2 > max_distance2) continue;

          /* Are the cells too close for a MM interaction ? */
          if (!cell_can_use_pair_mm_rebuild(ci, cj, e, s)) {

            /* Ok, we need to add a direct pair calculation */
            scheduler_addtask(sched, task_type_pair, task_subtype_grav, 0, 0,
                              ci, cj);

#ifdef SWIFT_DEBUG_CHECKS
#ifdef WITH_MPI

            /* Let's cross-check that we had a proxy for that cell */
            if (ci->nodeID == nodeID && cj->nodeID != engine_rank) {

              /* Find the proxy for this node */
              const int proxy_id = e->proxy_ind[cj->nodeID];
              if (proxy_id < 0)
                error("No proxy exists for that foreign node %d!", cj->nodeID);

              const struct proxy *p = &e->proxies[proxy_id];

              /* Check whether the cell exists in the proxy */
              int n = 0;
              for (; n < p->nr_cells_in; n++)
                if (p->cells_in[n] == cj) {
                  break;
                }
              if (n == p->nr_cells_in)
                error(
                    "Cell %d not found in the proxy but trying to construct "
                    "grav task!",
                    cjd);
            } else if (cj->nodeID == nodeID && ci->nodeID != engine_rank) {

              /* Find the proxy for this node */
              const int proxy_id = e->proxy_ind[ci->nodeID];
              if (proxy_id < 0)
                error("No proxy exists for that foreign node %d!", ci->nodeID);

              const struct proxy *p = &e->proxies[proxy_id];

              /* Check whether the cell exists in the proxy */
              int n = 0;
              for (; n < p->nr_cells_in; n++)
                if (p->cells_in[n] == ci) {
                  break;
                }
              if (n == p->nr_cells_in)
                error(
                    "Cell %d not found in the proxy but trying to construct "
                    "grav task!",
                    cid);
            }
#endif /* WITH_MPI */
#endif /* SWIFT_DEBUG_CHECKS */
          }
        }
      }
    }
  }
}