/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *                    Angus Lepper (angus.lepper@ed.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdlib.h>
#include <unistd.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* Load the profiler header, if needed. */
#ifdef WITH_PROFILER
#include <gperftools/profiler.h>
#endif

/* This object's header. */
#include "engine.h"

/* Local headers. */
#include "atomic.h"
#include "cell.h"
#include "clocks.h"
#include "cycle.h"
#include "debug.h"
#include "error.h"
#include "proxy.h"
#include "task_order.h"
#include "timers.h"

extern int engine_max_parts_per_ghost;
extern int engine_max_sparts_per_ghost;
extern int engine_star_resort_task_depth;

/**
 * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_grav The send_grav #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
                                  struct cell *cj, struct task *t_grav,
                                  struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  /* Check if any of the gravity tasks are for the target node. */
  for (l = ci->grav.grav; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_grav == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
                                 ci->mpi.tag, 0, ci, cj);

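      /* The task sending the gparts' updated time-step information. */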
      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_gpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The sends should unlock the down pass. */
      scheduler_addunlock(s, t_grav, ci->grav.super->grav.down);

      /* Drift before you send */
      scheduler_addunlock(s, ci->grav.super->grav.drift, t_grav);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_grav);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_xv The send_xv #task, if it has already been created.
 * @param t_rho The send_rho #task, if it has already been created.
 * @param t_gradient The send_gradient #task, if already created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 * @param t_limiter The send_limiter #task, if it has already been created.
 * @param with_limiter Are we running with the time-step limiter?
 * @param with_sync Are we running with time-step synchronization?
 */
void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_xv,
                                struct task *t_rho, struct task *t_gradient,
                                struct task *t_ti, struct task *t_limiter,
                                const int with_limiter, const int with_sync) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->hydro.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_xv == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->mpi.tag,
                               0, ci, cj);
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
                                ci->mpi.tag, 0, ci, cj);

#ifdef EXTRA_HYDRO_LOOP
      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
                                     ci->mpi.tag, 0, ci, cj);
#endif

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_part,
                               ci->mpi.tag, 0, ci, cj);

      if (with_limiter) {
        t_limiter = scheduler_addtask(s, task_type_send, task_subtype_limiter,
                                      ci->mpi.tag, 0, ci, cj);
      }

#ifdef EXTRA_HYDRO_LOOP

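      /* The send_gradient task should unlock the super_hydro-cell's
       * end_force task. */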
      scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);

      scheduler_addunlock(s, ci->hydro.super->hydro.extra_ghost, t_gradient);

      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.extra_ghost);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#else
      /* The send_rho task should unlock the super_hydro-cell's end_force
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.end_force);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#endif

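      /* Drift before you send */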
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_rho);
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
      if (with_limiter) scheduler_addunlock(s, ci->super->timestep, t_limiter);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_xv);
    engine_addlink(e, &ci->mpi.send, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &ci->mpi.send, t_gradient);
#endif
    engine_addlink(e, &ci->mpi.send, t_ti);
    if (with_limiter) engine_addlink(e, &ci->mpi.send, t_limiter);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
                                   t_gradient, t_ti, t_limiter, with_limiter,
                                   with_sync);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_sf_counts The send_sf_counts, if it has been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 * @param with_star_formation Are we running with star formation on?
 */
void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_feedback,
                                struct task *t_sf_counts, struct task *t_ti,
                                const int with_star_formation) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  if (task_order_star_formation_before_feedback && t_sf_counts == NULL &&
      with_star_formation && ci->hydro.count > 0) {
#ifdef SWIFT_DEBUG_CHECKS
    if (ci->depth != 0)
      error(
          "Attaching a sf_count task at a non-top level c->depth=%d "
          "c->count=%d",
          ci->depth, ci->hydro.count);
#endif
    t_sf_counts = scheduler_addtask(s, task_type_send, task_subtype_sf_counts,
                                    ci->mpi.tag, 0, ci, cj);
    scheduler_addunlock(s, ci->hydro.star_formation, t_sf_counts);
  }

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->stars.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_feedback == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
                                     ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_spart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_stars task should unlock the super-cell's stars_out task. */
      scheduler_addunlock(s, t_feedback, ci->hydro.super->stars.stars_out);

      /* Ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
    if (task_order_star_formation_before_feedback && with_star_formation &&
        ci->hydro.count > 0) {
      engine_addlink(e, &ci->mpi.send, t_sf_counts);
    }
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback,
                                   t_sf_counts, t_ti, with_star_formation);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the black holes pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_rho The density comm. task, if it has already been created.
 * @param t_bh_merger The BH swallow comm. task, if it has already been created.
 * @param t_gas_swallow The gas swallow comm. task, if it has already been
 * created.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_black_holes(struct engine *e, struct cell *ci,
                                      struct cell *cj, struct task *t_rho,
                                      struct task *t_bh_merger,
                                      struct task *t_gas_swallow,
                                      struct task *t_feedback,
                                      struct task *t_ti) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->black_holes.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_rho == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_bpart_rho,
                                ci->mpi.tag, 0, ci, cj);

      t_bh_merger = scheduler_addtask(
          s, task_type_send, task_subtype_bpart_merger, ci->mpi.tag, 0, ci, cj);

      t_gas_swallow = scheduler_addtask(
          s, task_type_send, task_subtype_part_swallow, ci->mpi.tag, 0, ci, cj);

      t_feedback =
          scheduler_addtask(s, task_type_send, task_subtype_bpart_feedback,
                            ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_bpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_black_holes task should unlock the super_cell's BH exit point
       * task. */
      scheduler_addunlock(s, t_feedback,
                          ci->hydro.super->black_holes.black_holes_out);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[2],
                          t_feedback);

      /* Ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->black_holes.drift, t_rho);
      scheduler_addunlock(s, ci->hydro.super->black_holes.density_ghost, t_rho);
      scheduler_addunlock(s, t_rho,
                          ci->hydro.super->black_holes.swallow_ghost[0]);

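      /* The BH merger information is exchanged between the first and last
       * swallow ghosts. */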
      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[0],
                          t_bh_merger);
      scheduler_addunlock(s, t_bh_merger,
                          ci->hydro.super->black_holes.swallow_ghost[2]);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[0],
                          t_gas_swallow);
      scheduler_addunlock(s, t_gas_swallow,
                          ci->hydro.super->black_holes.swallow_ghost[1]);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    engine_addlink(e, &ci->mpi.send, t_rho);
    engine_addlink(e, &ci->mpi.send, t_bh_merger);
    engine_addlink(e, &ci->mpi.send, t_gas_swallow);
    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_black_holes(e, ci->progeny[k], cj, t_rho,
                                         t_bh_merger, t_gas_swallow, t_feedback,
                                         t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 * @param t_gradient The recv_gradient #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 * @param t_limiter The recv_limiter #task, if it has already been created.
 * @param with_limiter Are we running with the time-step limiter?
 * @param with_sync Are we running with time-step synchronization?
 */
void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
                                struct task *t_xv, struct task *t_rho,
                                struct task *t_gradient, struct task *t_ti,
                                struct task *t_limiter, const int with_limiter,
                                const int with_sync) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  /* Have we reached a level where there are any hydro tasks ? */
  if (t_xv == NULL && c->hydro.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif /* SWIFT_DEBUG_CHECKS */

    /* Create the tasks. */
    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag, 0,
                             c, NULL);
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->mpi.tag,
                              0, c, NULL);
#ifdef EXTRA_HYDRO_LOOP
    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
                                   c->mpi.tag, 0, c, NULL);
#endif

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_part,
                             c->mpi.tag, 0, c, NULL);

    if (with_limiter) {
      t_limiter = scheduler_addtask(s, task_type_recv, task_subtype_limiter,
                                    c->mpi.tag, 0, c, NULL);
    }
  }

  if (t_xv != NULL) {
    engine_addlink(e, &c->mpi.recv, t_xv);
    engine_addlink(e, &c->mpi.recv, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &c->mpi.recv, t_gradient);
#endif
    engine_addlink(e, &c->mpi.recv, t_ti);
    if (with_limiter) engine_addlink(e, &c->mpi.recv, t_limiter);

    /* Add dependencies. */
    if (c->hydro.sorts != NULL) {
      scheduler_addunlock(s, t_xv, c->hydro.sorts);
      scheduler_addunlock(s, c->hydro.sorts, t_rho);
    }
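
    /* The received xv unlocks the density loops, which in turn must complete
     * before the updated rho arrives. */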
    for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_xv, l->t);
      scheduler_addunlock(s, l->t, t_rho);
    }
#ifdef EXTRA_HYDRO_LOOP
    for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gradient);
    }
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_gradient, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#else
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#endif

    if (with_limiter) {
      for (struct link *l = c->hydro.limiter; l != NULL; l = l->next) {
        scheduler_addunlock(s, t_ti, l->t);
        scheduler_addunlock(s, t_limiter, l->t);
      }
    }

    /* Make sure the gas density has been computed before the
     * stars compute theirs. */
    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }

    /* Make sure the parts have been received before the BHs compute their
     * accretion rates (depends on particles' rho). */
    for (struct link *l = c->black_holes.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient,
                                   t_ti, t_limiter, with_limiter, with_sync);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_sf_counts The recv_sf_counts, if it has been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 * @param with_star_formation Are we running with star formation on?
 */
void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
                                struct task *t_feedback,
                                struct task *t_sf_counts, struct task *t_ti,
                                const int with_star_formation) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  if (task_order_star_formation_before_feedback && t_sf_counts == NULL &&
      with_star_formation && c->hydro.count > 0) {
#ifdef SWIFT_DEBUG_CHECKS
    if (c->depth != 0)
      error(
          "Attaching a sf_count task at a non-top level c->depth=%d "
          "c->count=%d",
          c->depth, c->hydro.count);
#endif
    t_sf_counts = scheduler_addtask(s, task_type_recv, task_subtype_sf_counts,
                                    c->mpi.tag, 0, c, NULL);
  }

  /* Have we reached a level where there are any stars tasks ? */
  if (t_feedback == NULL && c->stars.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_feedback = scheduler_addtask(s, task_type_recv, task_subtype_spart,
                                   c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_spart,
                             c->mpi.tag, 0, c, NULL);

    if (task_order_star_formation_before_feedback && with_star_formation &&
        c->hydro.count > 0) {

      /* Receive the stars only once the counts have been received */
      scheduler_addunlock(s, t_sf_counts, t_feedback);
    }
  }

  if (t_feedback != NULL) {
    engine_addlink(e, &c->mpi.recv, t_feedback);
    engine_addlink(e, &c->mpi.recv, t_ti);
    if (task_order_star_formation_before_feedback && with_star_formation &&
        c->hydro.count > 0) {
      engine_addlink(e, &c->mpi.recv, t_sf_counts);
    }

#ifdef SWIFT_DEBUG_CHECKS
    if (c->nodeID == e->nodeID) error("Local cell!");
#endif

    if (c->stars.sorts != NULL)
      scheduler_addunlock(s, t_feedback, c->stars.sorts);

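    /* The local stars density loops must have run before the feedback
     * information arrives. */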
    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_feedback);
    }

    for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_feedback, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_stars(e, c->progeny[k], t_feedback, t_sf_counts,
                                   t_ti, with_star_formation);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for black_holes pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_rho The density comm. task, if it has already been created.
 * @param t_bh_merger The BH swallow comm. task, if it has already been created.
 * @param t_gas_swallow The gas swallow comm. task, if it has already been
 * created.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_black_holes(struct engine *e, struct cell *c,
                                      struct task *t_rho,
                                      struct task *t_bh_merger,
                                      struct task *t_gas_swallow,
                                      struct task *t_feedback,
                                      struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  /* Have we reached a level where there are any black_holes tasks ? */
  if (t_rho == NULL && c->black_holes.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_bpart_rho,
                              c->mpi.tag, 0, c, NULL);

    t_bh_merger = scheduler_addtask(
        s, task_type_recv, task_subtype_bpart_merger, c->mpi.tag, 0, c, NULL);

    t_gas_swallow = scheduler_addtask(
        s, task_type_recv, task_subtype_part_swallow, c->mpi.tag, 0, c, NULL);

    t_feedback = scheduler_addtask(
        s, task_type_recv, task_subtype_bpart_feedback, c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_bpart,
                             c->mpi.tag, 0, c, NULL);
  }

  if (t_rho != NULL) {
    engine_addlink(e, &c->mpi.recv, t_rho);
    engine_addlink(e, &c->mpi.recv, t_bh_merger);
    engine_addlink(e, &c->mpi.recv, t_gas_swallow);
    engine_addlink(e, &c->mpi.recv, t_feedback);
    engine_addlink(e, &c->mpi.recv, t_ti);

#ifdef SWIFT_DEBUG_CHECKS
    if (c->nodeID == e->nodeID) error("Local cell!");
#endif

    for (struct link *l = c->black_holes.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_rho);
    }

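    /* The gas particles must have been updated by the local force tasks
     * before their swallowing information is received. */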
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_gas_swallow);
    }

    for (struct link *l = c->black_holes.swallow; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gas_swallow);
      scheduler_addunlock(s, l->t, t_bh_merger);
    }
    for (struct link *l = c->black_holes.do_gas_swallow; l != NULL;
         l = l->next) {
      scheduler_addunlock(s, t_gas_swallow, l->t);
    }
    for (struct link *l = c->black_holes.do_bh_swallow; l != NULL;
         l = l->next) {
      scheduler_addunlock(s, t_bh_merger, l->t);
      scheduler_addunlock(s, l->t, t_feedback);
    }
    for (struct link *l = c->black_holes.feedback; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_feedback, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_black_holes(e, c->progeny[k], t_rho, t_bh_merger,
                                         t_gas_swallow, t_feedback, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_grav The recv_gpart #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
                                  struct task *t_grav, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  /* Have we reached a level where there are any gravity tasks ? */
  if (t_grav == NULL && c->grav.grav != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
                               c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_gpart,
                             c->mpi.tag, 0, c, NULL);
  }

  /* If we have tasks, link them. */
  if (t_grav != NULL) {
    engine_addlink(e, &c->mpi.recv, t_grav);
    engine_addlink(e, &c->mpi.recv, t_ti);

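    /* The gravity tasks use the received gparts and must complete before the
     * new time-step information arrives. */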
    for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_grav, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- timestep version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_star_formation = (e->policy & engine_policy_star_formation);
  const int with_timestep_limiter =
      (e->policy & engine_policy_timestep_limiter);
  const int with_timestep_sync = (e->policy & engine_policy_timestep_sync);
  const int with_logger = e->policy & engine_policy_logger;

  /* Are we at the top-level? */
  if (c->top == c && c->nodeID == e->nodeID) {

    if (with_star_formation && c->hydro.count > 0) {
      c->hydro.star_formation = scheduler_addtask(
          s, task_type_star_formation, task_subtype_none, 0, 0, c, NULL);
    }
  }

  /* Are we in a super-cell ? */
  if (c->super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the two half kicks */
      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
                                   c, NULL);

      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
                                   c, NULL);

#if defined(WITH_LOGGER)
      struct task *kick2_or_logger;
      if (with_logger) {
        /* Add the hydro logger task. */
        c->logger = scheduler_addtask(s, task_type_logger, task_subtype_none, 0,
                                      0, c, NULL);

        /* Add the kick2 dependency */
        scheduler_addunlock(s, c->kick2, c->logger);

        /* Create a variable in order to avoid too many #ifdefs */
        kick2_or_logger = c->logger;
      } else {
        kick2_or_logger = c->kick2;
      }
#else
      struct task *kick2_or_logger = c->kick2;
#endif

      /* Add the time-step calculation task and its dependency */
      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
                                      0, 0, c, NULL);

      scheduler_addunlock(s, kick2_or_logger, c->timestep);
      scheduler_addunlock(s, c->timestep, c->kick1);

      /* Subgrid tasks: star formation */
      if (with_star_formation && c->hydro.count > 0) {
        scheduler_addunlock(s, kick2_or_logger, c->top->hydro.star_formation);
        scheduler_addunlock(s, c->top->hydro.star_formation, c->timestep);
      }

      /* Time-step limiter */
      if (with_timestep_limiter) {

        c->timestep_limiter = scheduler_addtask(
            s, task_type_timestep_limiter, task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->timestep, c->timestep_limiter);
        scheduler_addunlock(s, c->timestep_limiter, c->kick1);
      }

      /* Time-step synchronization */
      if (with_timestep_sync) {

        c->timestep_sync = scheduler_addtask(s, task_type_timestep_sync,
                                             task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->timestep, c->timestep_sync);
        scheduler_addunlock(s, c->timestep_sync, c->kick1);
      }

      if (with_timestep_limiter && with_timestep_sync) {
        scheduler_addunlock(s, c->timestep_limiter, c->timestep_sync);
      }
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- gravity version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int periodic = e->s->periodic;
  const int is_self_gravity = (e->policy & engine_policy_self_gravity);

  /* Are we in a super-cell ? */
  if (c->grav.super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      c->grav.drift = scheduler_addtask(s, task_type_drift_gpart,
                                        task_subtype_none, 0, 0, c, NULL);

      c->grav.end_force = scheduler_addtask(s, task_type_end_grav_force,
                                            task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->grav.end_force, c->super->kick2);

      if (is_self_gravity) {

        /* Initialisation of the multipoles */
        c->grav.init = scheduler_addtask(s, task_type_init_grav,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Gravity non-neighbouring pm calculations */
        c->grav.long_range = scheduler_addtask(
            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);

        /* Gravity recursive down-pass */
        c->grav.down = scheduler_addtask(s, task_type_grav_down,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Implicit tasks for the up and down passes */
        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);
        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);
        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        /* Gravity mesh force propagation */
        if (periodic)
          c->grav.mesh = scheduler_addtask(s, task_type_grav_mesh,
                                           task_subtype_none, 0, 0, c, NULL);

        if (periodic) scheduler_addunlock(s, c->grav.drift, c->grav.mesh);
        if (periodic) scheduler_addunlock(s, c->grav.mesh, c->grav.down);
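        /* The multipoles must be initialised before the long-range
         * interactions run, which in turn precede the down-pass. */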
        scheduler_addunlock(s, c->grav.init, c->grav.long_range);
        scheduler_addunlock(s, c->grav.long_range, c->grav.down);
        scheduler_addunlock(s, c->grav.down, c->grav.super->grav.end_force);

        /* Link in the implicit tasks */
        scheduler_addunlock(s, c->grav.init, c->grav.init_out);
        scheduler_addunlock(s, c->grav.drift, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->grav.down);
      }
    }
  }

  /* We are below the super-cell but not below the maximal splitting depth */
  else if ((c->grav.super != NULL) &&
           ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      if (is_self_gravity) {

        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);

        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);

        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        scheduler_addunlock(s, c->parent->grav.init_out, c->grav.init_out);
        scheduler_addunlock(s, c->parent->grav.drift_out, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->parent->grav.down_in);
      }
    }
  }

  /* Recurse but not below the maximal splitting depth */
  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav))
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
}

/**
 * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
 */
void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
                       struct task *ghost_out) {

  /* Abort as there are no hydro particles here? */
  if (c->hydro.count_total == 0) return;

  /* If we have reached the leaf OR have too few particles to play with */
  if (!c->split || c->hydro.count_total < engine_max_parts_per_ghost) {

    /* Add the ghost task and its dependencies */
    struct scheduler *s = &e->sched;
    c->hydro.ghost =
        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, ghost_in, c->hydro.ghost);
    scheduler_addunlock(s, c->hydro.ghost, ghost_out);

  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- hydro version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 * @param star_resort_cell Pointer to the cell where the star_resort task has
 * been created. NULL above that level or if not running with star formation.
 */
void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c,
                                          struct cell *star_resort_cell) {

  struct scheduler *s = &e->sched;
  const int with_stars = (e->policy & engine_policy_stars);
  const int with_feedback = (e->policy & engine_policy_feedback);
  const int with_cooling = (e->policy & engine_policy_cooling);
  const int with_star_formation = (e->policy & engine_policy_star_formation);
  const int with_black_holes = (e->policy & engine_policy_black_holes);
  const int with_logger = (e->policy & engine_policy_logger);

  /* Are we at the level where we create the stars' resort tasks?
   * If the tree is shallow, we need to do this at the super-level if the
   * super-level is above the level we want */
  if (task_order_star_formation_before_feedback && (c->nodeID == e->nodeID) &&
      (star_resort_cell == NULL) &&
      (c->depth == engine_star_resort_task_depth || c->hydro.super == c)) {

    if (with_feedback && with_star_formation && c->hydro.count > 0) {

      /* Record this is the level where we re-sort */
      star_resort_cell = c;

      c->hydro.stars_resort = scheduler_addtask(
          s, task_type_stars_resort, task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->top->hydro.star_formation,
                          c->hydro.stars_resort);
    }
  }

  /* Are we in a super-cell ? */
  if (c->hydro.super == c) {

    /* Add the sort task. */
    c->hydro.sorts =
        scheduler_addtask(s, task_type_sort, task_subtype_none, 0, 0, c, NULL);

    if (with_feedback) {
      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
                                         task_subtype_none, 0, 0, c, NULL);
    }

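    /* The first BH swallow ghost is implicit and is also needed on foreign
     * cells, so it is created outside of the local-only block below. */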
    if (with_black_holes) {
      c->black_holes.swallow_ghost[0] =
          scheduler_addtask(s, task_type_bh_swallow_ghost1, task_subtype_none,
                            0, /* implicit =*/1, c, NULL);
    }

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the drift task. */
      c->hydro.drift = scheduler_addtask(s, task_type_drift_part,
                                         task_subtype_none, 0, 0, c, NULL);

      /* Add the task finishing the force calculation */
      c->hydro.end_force = scheduler_addtask(s, task_type_end_hydro_force,
                                             task_subtype_none, 0, 0, c, NULL);

      /* Generate the ghost tasks. */
      c->hydro.ghost_in =
          scheduler_addtask(s, task_type_ghost_in, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      c->hydro.ghost_out =
          scheduler_addtask(s, task_type_ghost_out, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      engine_add_ghosts(e, c, c->hydro.ghost_in, c->hydro.ghost_out);

      /* Generate the extra ghost task. */
#ifdef EXTRA_HYDRO_LOOP
      c->hydro.extra_ghost = scheduler_addtask(
          s, task_type_extra_ghost, task_subtype_none, 0, 0, c, NULL);
#endif

      /* Stars */
      if (with_stars) {
        c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
                                           task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->stars.drift, c->super->kick2);
      }

      /* Black holes */
      if (with_black_holes) {
        c->black_holes.drift = scheduler_addtask(
            s, task_type_drift_bpart, task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->black_holes.drift, c->super->kick2);
      }

      /* Subgrid tasks: cooling */
      if (with_cooling) {

        c->hydro.cooling = scheduler_addtask(s, task_type_cooling,
                                             task_subtype_none, 0, 0, c, NULL);

        task_order_addunlock_cooling(s, c);

      } else {
        scheduler_addunlock(s, c->hydro.end_force, c->super->kick2);
      }