/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *                    Angus Lepper (angus.lepper@ed.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdlib.h>
#include <unistd.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* Load the profiler header, if needed. */
#ifdef WITH_PROFILER
#include <gperftools/profiler.h>
#endif

/* This object's header. */
#include "engine.h"

/* Local headers. */
#include "atomic.h"
#include "cell.h"
#include "clocks.h"
#include "cycle.h"
#include "debug.h"
#include "error.h"
#include "proxy.h"
#include "timers.h"

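
/* Run-time tuning parameters, defined elsewhere in the engine code: they
 * bound the number of particles handled by a single ghost/cooling task and
 * set the tree depth at which the star re-sorting task is attached. */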
extern int engine_max_parts_per_ghost;
extern int engine_max_sparts_per_ghost;
extern int engine_star_resort_task_depth;
extern int engine_max_parts_per_cooling;

/**
 * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_grav The send_grav #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
                                  struct cell *cj, struct task *t_grav,
                                  struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  /* Check if any of the gravity tasks are for the target node. */
  for (l = ci->grav.grav; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_grav == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
                                 ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_gpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The sends should unlock the down pass. */
      scheduler_addunlock(s, t_grav, ci->grav.super->grav.down);

      /* Drift before you send */
      scheduler_addunlock(s, ci->grav.super->grav.drift, t_grav);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_grav);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}
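
/* A sketch of how the send helpers in this file are typically driven
 * (hypothetical pseudo-code; the actual call sites live further down in the
 * task-making machinery):
 *
 *   for (each proxy p of this rank)
 *     for (each local cell ci paired with a foreign cell cj in p)
 *       engine_addtasks_send_gravity(e, ci, cj, NULL, NULL);
 *
 * Passing NULL task pointers makes the first level with matching tasks create
 * the send tasks; the recursion then re-uses them for all deeper progenies. */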

/**
 * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_xv The send_xv #task, if it has already been created.
 * @param t_rho The send_rho #task, if it has already been created.
 * @param t_gradient The send_gradient #task, if already created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 * @param t_limiter The send_limiter #task, if it has already been created.
 * @param with_limiter Are we running with the time-step limiter?
 * @param with_sync Are we running with time-step synchronization?
 */
void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_xv,
                                struct task *t_rho, struct task *t_gradient,
                                struct task *t_ti, struct task *t_limiter,
                                const int with_limiter, const int with_sync) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->hydro.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_xv == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->mpi.tag,
                               0, ci, cj);
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
                                ci->mpi.tag, 0, ci, cj);

#ifdef EXTRA_HYDRO_LOOP
      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
                                     ci->mpi.tag, 0, ci, cj);
#endif

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_part,
                               ci->mpi.tag, 0, ci, cj);

      if (with_limiter) {
        t_limiter = scheduler_addtask(s, task_type_send, task_subtype_limiter,
                                      ci->mpi.tag, 0, ci, cj);
      }

#ifdef EXTRA_HYDRO_LOOP

      scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);

      scheduler_addunlock(s, ci->hydro.super->hydro.extra_ghost, t_gradient);

      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.extra_ghost);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#else
      /* The send_rho task should unlock the super_hydro-cell's end_force
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.end_force);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#endif
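
      /* Either way, the overall send-side ordering encoded by these unlocks
       * is: drift -> send_xv -> ghost_in ... ghost_out -> send_rho
       * [-> extra_ghost -> send_gradient] -> end_force, with
       * timestep -> send_tend (and timestep -> send_limiter if active). */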

      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_rho);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
      if (with_limiter) scheduler_addunlock(s, ci->super->timestep, t_limiter);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_xv);
    engine_addlink(e, &ci->mpi.send, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &ci->mpi.send, t_gradient);
#endif
    engine_addlink(e, &ci->mpi.send, t_ti);
    if (with_limiter) engine_addlink(e, &ci->mpi.send, t_limiter);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
                                   t_gradient, t_ti, t_limiter, with_limiter,
                                   with_sync);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_sf_counts The send_sf_counts, if it has been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 * @param with_star_formation Are we running with star formation on?
 */
void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_feedback,
                                struct task *t_sf_counts, struct task *t_ti,
                                const int with_star_formation) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  if (t_sf_counts == NULL && with_star_formation && ci->hydro.count > 0) {
#ifdef SWIFT_DEBUG_CHECKS
    if (ci->depth != 0)
      error(
          "Attaching a sf_count task at a non-top level c->depth=%d "
          "c->count=%d",
          ci->depth, ci->hydro.count);
#endif
    t_sf_counts = scheduler_addtask(s, task_type_send, task_subtype_sf_counts,
                                    ci->mpi.tag, 0, ci, cj);
    scheduler_addunlock(s, ci->hydro.star_formation, t_sf_counts);
  }
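
  /* The counts are needed because star formation changes the number of sparts
   * in a cell: the foreign node must learn the new counts before the matching
   * spart data can be received (see engine_addtasks_recv_stars()). */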

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->stars.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_feedback == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
                                     ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_spart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_feedback task should unlock the super-cell's stars_out
       * task. */
      scheduler_addunlock(s, t_feedback, ci->hydro.super->stars.stars_out);

      /* Ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
    if (with_star_formation && ci->hydro.count > 0) {
      engine_addlink(e, &ci->mpi.send, t_sf_counts);
    }
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback,
                                   t_sf_counts, t_ti, with_star_formation);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the black holes pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_rho The density comm. task, if it has already been created.
 * @param t_bh_merger The BH swallow comm. task, if it has already been created.
 * @param t_gas_swallow The gas swallow comm. task, if it has already been
 * created.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_black_holes(struct engine *e, struct cell *ci,
                                      struct cell *cj, struct task *t_rho,
                                      struct task *t_bh_merger,
                                      struct task *t_gas_swallow,
                                      struct task *t_feedback,
                                      struct task *t_ti) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(ci, cell_flag_has_tasks)) return;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->black_holes.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_rho == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_bpart_rho,
                                ci->mpi.tag, 0, ci, cj);

      t_bh_merger = scheduler_addtask(
          s, task_type_send, task_subtype_bpart_merger, ci->mpi.tag, 0, ci, cj);

      t_gas_swallow = scheduler_addtask(
          s, task_type_send, task_subtype_part_swallow, ci->mpi.tag, 0, ci, cj);

      t_feedback =
          scheduler_addtask(s, task_type_send, task_subtype_bpart_feedback,
                            ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_bpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_black_holes task should unlock the super_cell's BH exit point
       * task. */
      scheduler_addunlock(s, t_feedback,
                          ci->hydro.super->black_holes.black_holes_out);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[2],
                          t_feedback);

      /* Drift and ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->black_holes.drift, t_rho);
      scheduler_addunlock(s, ci->hydro.super->black_holes.density_ghost, t_rho);
      scheduler_addunlock(s, t_rho,
                          ci->hydro.super->black_holes.swallow_ghost[0]);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[0],
                          t_bh_merger);
      scheduler_addunlock(s, t_bh_merger,
                          ci->hydro.super->black_holes.swallow_ghost[2]);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[0],
                          t_gas_swallow);
      scheduler_addunlock(s, t_gas_swallow,
                          ci->hydro.super->black_holes.swallow_ghost[1]);
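
      /* The implicit swallow ghosts act as barriers between the successive
       * BH phases: [0] closes the swallow interactions, [1] the gas
       * swallowing, and [2] the BH-BH mergers, before feedback runs. */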

      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    engine_addlink(e, &ci->mpi.send, t_rho);
    engine_addlink(e, &ci->mpi.send, t_bh_merger);
    engine_addlink(e, &ci->mpi.send, t_gas_swallow);
    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_black_holes(e, ci->progeny[k], cj, t_rho,
                                         t_bh_merger, t_gas_swallow, t_feedback,
                                         t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 * @param t_gradient The recv_gradient #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 * @param t_limiter The recv_limiter #task, if it has already been created.
 * @param with_limiter Are we running with the time-step limiter?
 * @param with_sync Are we running with time-step synchronization?
 */
void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
                                struct task *t_xv, struct task *t_rho,
                                struct task *t_gradient, struct task *t_ti,
                                struct task *t_limiter, const int with_limiter,
                                const int with_sync) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  /* Have we reached a level where there are any hydro tasks ? */
  if (t_xv == NULL && c->hydro.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif /* SWIFT_DEBUG_CHECKS */

    /* Create the tasks. */
    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag, 0,
                             c, NULL);
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->mpi.tag,
                              0, c, NULL);
#ifdef EXTRA_HYDRO_LOOP
    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
                                   c->mpi.tag, 0, c, NULL);
#endif

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_part,
                             c->mpi.tag, 0, c, NULL);

    if (with_limiter) {
      t_limiter = scheduler_addtask(s, task_type_recv, task_subtype_limiter,
                                    c->mpi.tag, 0, c, NULL);
    }
  }

  if (t_xv != NULL) {
    engine_addlink(e, &c->mpi.recv, t_xv);
    engine_addlink(e, &c->mpi.recv, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &c->mpi.recv, t_gradient);
#endif
    engine_addlink(e, &c->mpi.recv, t_ti);
    if (with_limiter) engine_addlink(e, &c->mpi.recv, t_limiter);

    /* Add dependencies. */
    if (c->hydro.sorts != NULL) {
      scheduler_addunlock(s, t_xv, c->hydro.sorts);
      scheduler_addunlock(s, c->hydro.sorts, t_rho);
    }

    for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_xv, l->t);
      scheduler_addunlock(s, l->t, t_rho);
    }
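
    /* Note the direction of the unlocks above: the recv of the updated (rho)
     * data must wait until every local density task has finished reading the
     * current particle buffer, hence the density loop tasks unlock t_rho
     * rather than the other way around. */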
#ifdef EXTRA_HYDRO_LOOP
    for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gradient);
    }
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_gradient, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#else
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#endif

    if (with_limiter) {
      for (struct link *l = c->hydro.limiter; l != NULL; l = l->next) {
        scheduler_addunlock(s, t_ti, l->t);
        scheduler_addunlock(s, t_limiter, l->t);
      }
    }

    /* Make sure the gas density has been computed before the
     * stars compute theirs. */
    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }

    /* Make sure the part have been received before the BHs compute their
     * accretion rates (depends on particles' rho). */
    for (struct link *l = c->black_holes.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient,
                                   t_ti, t_limiter, with_limiter, with_sync);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_sf_counts The recv_sf_counts, if it has been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 * @param with_star_formation Are we running with star formation on?
 */
void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
                                struct task *t_feedback,
                                struct task *t_sf_counts, struct task *t_ti,
                                const int with_star_formation) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  if (t_sf_counts == NULL && with_star_formation && c->hydro.count > 0) {
#ifdef SWIFT_DEBUG_CHECKS
    if (c->depth != 0)
      error(
          "Attaching a sf_count task at a non-top level c->depth=%d "
          "c->count=%d",
          c->depth, c->hydro.count);
#endif
    t_sf_counts = scheduler_addtask(s, task_type_recv, task_subtype_sf_counts,
                                    c->mpi.tag, 0, c, NULL);
  }

  /* Have we reached a level where there are any stars tasks ? */
  if (t_feedback == NULL && c->stars.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_feedback = scheduler_addtask(s, task_type_recv, task_subtype_spart,
                                   c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_spart,
                             c->mpi.tag, 0, c, NULL);

    if (with_star_formation && c->hydro.count > 0) {

      /* Receive the stars only once the counts have been received */
      scheduler_addunlock(s, t_sf_counts, t_feedback);
    }
  }

  if (t_feedback != NULL) {
    engine_addlink(e, &c->mpi.recv, t_feedback);
    engine_addlink(e, &c->mpi.recv, t_ti);
    if (with_star_formation && c->hydro.count > 0) {
      engine_addlink(e, &c->mpi.recv, t_sf_counts);
    }

#ifdef SWIFT_DEBUG_CHECKS
    if (c->nodeID == e->nodeID) error("Local cell!");
#endif
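
    /* Newly received sparts invalidate the existing sort order, so the stars
     * sort task must run again once the data has arrived. */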
    if (c->stars.sorts != NULL)
      scheduler_addunlock(s, t_feedback, c->stars.sorts);

    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_feedback);
    }

    for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_feedback, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_stars(e, c->progeny[k], t_feedback, t_sf_counts,
                                   t_ti, with_star_formation);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for black_holes pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_rho The density comm. task, if it has already been created.
 * @param t_bh_merger The BH swallow comm. task, if it has already been created.
 * @param t_gas_swallow The gas swallow comm. task, if it has already been
 * created.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_black_holes(struct engine *e, struct cell *c,
                                      struct task *t_rho,
                                      struct task *t_bh_merger,
                                      struct task *t_gas_swallow,
                                      struct task *t_feedback,
                                      struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  /* Have we reached a level where there are any black_holes tasks ? */
  if (t_rho == NULL && c->black_holes.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_bpart_rho,
                              c->mpi.tag, 0, c, NULL);

    t_bh_merger = scheduler_addtask(
        s, task_type_recv, task_subtype_bpart_merger, c->mpi.tag, 0, c, NULL);

    t_gas_swallow = scheduler_addtask(
        s, task_type_recv, task_subtype_part_swallow, c->mpi.tag, 0, c, NULL);

    t_feedback = scheduler_addtask(
        s, task_type_recv, task_subtype_bpart_feedback, c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_bpart,
                             c->mpi.tag, 0, c, NULL);
  }

  if (t_rho != NULL) {
    engine_addlink(e, &c->mpi.recv, t_rho);
    engine_addlink(e, &c->mpi.recv, t_bh_merger);
    engine_addlink(e, &c->mpi.recv, t_gas_swallow);
    engine_addlink(e, &c->mpi.recv, t_feedback);
    engine_addlink(e, &c->mpi.recv, t_ti);

#ifdef SWIFT_DEBUG_CHECKS
    if (c->nodeID == e->nodeID) error("Local cell!");
#endif

    for (struct link *l = c->black_holes.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_rho);
    }

    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_gas_swallow);
    }

    for (struct link *l = c->black_holes.swallow; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gas_swallow);
      scheduler_addunlock(s, l->t, t_bh_merger);
    }
    for (struct link *l = c->black_holes.do_gas_swallow; l != NULL;
         l = l->next) {
      scheduler_addunlock(s, t_gas_swallow, l->t);
    }
    for (struct link *l = c->black_holes.do_bh_swallow; l != NULL;
         l = l->next) {
      scheduler_addunlock(s, t_bh_merger, l->t);
      scheduler_addunlock(s, l->t, t_feedback);
    }
    for (struct link *l = c->black_holes.feedback; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_feedback, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_black_holes(e, c->progeny[k], t_rho, t_bh_merger,
                                         t_gas_swallow, t_feedback, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_grav The recv_gpart #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
                                  struct task *t_grav, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Early abort (are we below the level where tasks are)? */
  if (!cell_get_flag(c, cell_flag_has_tasks)) return;

  /* Have we reached a level where there are any gravity tasks ? */
  if (t_grav == NULL && c->grav.grav != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
                               c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_gpart,
                             c->mpi.tag, 0, c, NULL);
  }

  /* If we have tasks, link them. */
  if (t_grav != NULL) {
    engine_addlink(e, &c->mpi.recv, t_grav);
    engine_addlink(e, &c->mpi.recv, t_ti);

    for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_grav, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}
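
/* Note that matching send and recv tasks are created with the same cell tag
 * (ci->mpi.tag on the sender, c->mpi.tag on the receiver); this is what pairs
 * up the underlying MPI messages across ranks. */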

/**
 * @brief Generate the hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- timestep version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_sinks = (e->policy & engine_policy_sinks);
  const int with_star_formation = (e->policy & engine_policy_star_formation);
  const int with_timestep_limiter =
      (e->policy & engine_policy_timestep_limiter);
  const int with_timestep_sync = (e->policy & engine_policy_timestep_sync);
#ifdef WITH_LOGGER
  const int with_logger = e->policy & engine_policy_logger;
#endif

  /* Are we at the top-level? */
  if (c->top == c && c->nodeID == e->nodeID) {

    if (with_star_formation && c->hydro.count > 0) {
      c->hydro.star_formation = scheduler_addtask(
          s, task_type_star_formation, task_subtype_none, 0, 0, c, NULL);
    }

    if (with_sinks && c->hydro.count > 0) {
      c->hydro.sink_formation = scheduler_addtask(
          s, task_type_sink_formation, task_subtype_none, 0, 0, c, NULL);
    }
  }

  /* Are we in a super-cell ? */
  if (c->super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the two half kicks */
      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
                                   c, NULL);

      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
                                   c, NULL);

#if defined(WITH_LOGGER)
      struct task *kick2_or_logger;
      if (with_logger) {
        /* Add the hydro logger task. */
        c->logger = scheduler_addtask(s, task_type_logger, task_subtype_none, 0,
                                      0, c, NULL);

        /* Add the kick2 dependency */
        scheduler_addunlock(s, c->kick2, c->logger);

        /* Use a variable to avoid too many #ifdefs */
        kick2_or_logger = c->logger;
      } else {
        kick2_or_logger = c->kick2;
      }
#else
      struct task *kick2_or_logger = c->kick2;
#endif

      /* Add the time-step calculation task and its dependency */
      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
                                      0, 0, c, NULL);

      scheduler_addunlock(s, kick2_or_logger, c->timestep);
      scheduler_addunlock(s, c->timestep, c->kick1);
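
      /* This closes the time-integration cycle: kick2 ends the current step,
       * the (logger and) timestep tasks pick the new time-step sizes, and
       * kick1 opens the next step. */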

      /* Subgrid tasks: star formation */
      if (with_star_formation && c->hydro.count > 0) {
        scheduler_addunlock(s, kick2_or_logger, c->top->hydro.star_formation);
        scheduler_addunlock(s, c->top->hydro.star_formation, c->timestep);
      }

      /* Time-step limiter */
      if (with_timestep_limiter) {

        c->timestep_limiter = scheduler_addtask(
            s, task_type_timestep_limiter, task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->timestep, c->timestep_limiter);
        scheduler_addunlock(s, c->timestep_limiter, c->kick1);
      }

      /* Time-step synchronization */
      if (with_timestep_sync) {

        c->timestep_sync = scheduler_addtask(s, task_type_timestep_sync,
                                             task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->timestep, c->timestep_sync);
        scheduler_addunlock(s, c->timestep_sync, c->kick1);
      }

      if (with_timestep_limiter && with_timestep_sync) {
        scheduler_addunlock(s, c->timestep_limiter, c->timestep_sync);
      }
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
  }
}

/**
 * @brief Generate the hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- gravity version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int is_self_gravity = (e->policy & engine_policy_self_gravity);
  const int stars_only_gravity =
      (e->policy & engine_policy_stars) && !(e->policy & engine_policy_hydro);

  /* Are we in a super-cell ? */
  if (c->grav.super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      if (stars_only_gravity) {

        /* In the special case where we have stars that just act under gravity
         * we must create their drift task here and not just copy over the hydro
         * behaviour. */
        c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
                                           task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->stars.drift, c->super->kick2);
      }

      c->grav.drift = scheduler_addtask(s, task_type_drift_gpart,
                                        task_subtype_none, 0, 0, c, NULL);

      c->grav.end_force = scheduler_addtask(s, task_type_end_grav_force,
                                            task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->grav.end_force, c->super->kick2);

      if (is_self_gravity) {

        /* Initialisation of the multipoles */
        c->grav.init = scheduler_addtask(s, task_type_init_grav,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Gravity non-neighbouring pm calculations */
        c->grav.long_range = scheduler_addtask(
            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);

        /* Gravity recursive down-pass */
        c->grav.down = scheduler_addtask(s, task_type_grav_down,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Implicit tasks for the up and down passes */
        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);
        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);
        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);
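
        /* init_out fans the effect of the init task out from this level to
         * the cells below, while down_in gathers the progenies' contributions
         * before the down pass is allowed to run. */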

        /* Long-range gravity forces (not the mesh ones!) */
        scheduler_addunlock(s, c->grav.init, c->grav.long_range);
        scheduler_addunlock(s, c->grav.long_range, c->grav.down);
        scheduler_addunlock(s, c->grav.down, c->grav.super->grav.end_force);

        /* Link in the implicit tasks */
        scheduler_addunlock(s, c->grav.init, c->grav.init_out);
        scheduler_addunlock(s, c->grav.drift, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->grav.down);
      }
    }
  }

  /* We are below the super-cell but not below the maximal splitting depth */
  else if ((c->grav.super != NULL) &&
           ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      if (is_self_gravity) {

        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);

        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);

        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        scheduler_addunlock(s, c->parent->grav.init_out, c->grav.init_out);
        scheduler_addunlock(s, c->parent->grav.drift_out, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->parent->grav.down_in);
      }
    }
  }

  /* Recurse but not below the maximal splitting depth */
  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav))
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
}

/**
 * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
 */
void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
                       struct task *ghost_out) {

  /* Abort as there are no hydro particles here? */
  if (c->hydro.count_total == 0) return;

  /* If we have reached the leaf OR have too few particles to play with */
  if (!c->split || c->hydro.count_total < engine_max_parts_per_ghost) {

    /* Add the ghost task and its dependencies */
    struct scheduler *s = &e->sched;
    c->hydro.ghost =
        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, ghost_in, c->hydro.ghost);
    scheduler_addunlock(s, c->hydro.ghost, ghost_out);

  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
  }
}
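
/* As a rough illustration (assuming engine_max_parts_per_ghost of order a
 * thousand): a super-cell holding ~10^5 parts ends up with many small ghost
 * tasks attached to its sub-cells, all sandwiched between the single implicit
 * ghost_in and ghost_out tasks, keeping the per-task cost bounded while
 * preserving the dependency structure. */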

/**
 * @brief Recursively add non-implicit cooling tasks to a cell hierarchy.
 */
void engine_add_cooling(struct engine *e, struct cell *c,
                        struct task *cooling_in, struct task *cooling_out) {

  /* Abort as there are no hydro particles here? */
  if (c->hydro.count_total == 0) return;

  /* If we have reached the leaf OR have too few particles to play with */
  if (!c->split || c->hydro.count_total < engine_max_parts_per_cooling) {

    /* Add the cooling task and its dependencies */
    struct scheduler *s = &e->sched;
    c->hydro.cooling = scheduler_addtask(s, task_type_cooling,
                                         task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, cooling_in, c->hydro.cooling);
    scheduler_addunlock(s, c->hydro.cooling, cooling_out);

  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_cooling(e, c->progeny[k], cooling_in, cooling_out);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- hydro version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 * @param star_resort_cell Pointer to the cell where the star_resort task has
 * been created. NULL above that level or if not running with star formation.
 */
void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c,
                                          struct cell *star_resort_cell) {

  struct scheduler *s = &e->sched;
  const int with_stars = (e->policy & engine_policy_stars);
  const int with_sinks = (e->policy & engine_policy_sinks);
  const int with_feedback = (e->policy & engine_policy_feedback);
  const int with_cooling = (e->policy & engine_policy_cooling);
  const int with_star_formation = (e->policy & engine_policy_star_formation);
  const int with_black_holes = (e->policy & engine_policy_black_holes);
  const int with_rt = (e->policy & engine_policy_rt);
#ifdef WITH_LOGGER
  const int with_logger = (e->policy & engine_policy_logger);
#endif

  /* Are we at the level where we create the stars' resort tasks?
   * If the tree is shallow, we need to do this at the super-level if the
   * super-level is above the level we want */
  if ((c->nodeID == e->nodeID) && (star_resort_cell == NULL) &&
      (c->depth == engine_star_resort_task_depth || c->hydro.super == c)) {

    if (with_feedback && with_star_formation && c->hydro.count > 0) {

      /* Record this is the level where we re-sort */
      star_resort_cell = c;

      c->hydro.stars_resort = scheduler_addtask(
          s, task_type_stars_resort, task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->top->hydro.star_formation,
                          c->hydro.stars_resort);
    }
  }
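
  /* The resort task exists because star formation converts parts into sparts:
   * the spart buffers below this level must be re-sorted before any feedback
   * loop uses the newly created particles. */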

  /* Are we in a super-cell ? */
  if (c->hydro.super == c) {

    /* Add the sort task. */
    c->hydro.sorts =
        scheduler_addtask(s, task_type_sort, task_subtype_none, 0, 0, c, NULL);

    if (with_feedback) {
      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
                                         task_subtype_none, 0, 0, c, NULL);
    }

    if (with_black_holes) {
      c->black_holes.swallow_ghost[0] =
          scheduler_addtask(s, task_type_bh_swallow_ghost1, task_subtype_none,
                            0, /* implicit =*/1, c, NULL);
    }

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the drift task. */
      c->hydro.drift = scheduler_addtask(s, task_type_drift_part,
                                         task_subtype_none, 0, 0, c, NULL);

      /* Add the task finishing the force calculation */
      c->hydro.end_force = scheduler_addtask(s, task_type_end_hydro_force,
                                             task_subtype_none, 0, 0, c, NULL);

      /* Generate the ghost tasks. */
      c->hydro.ghost_in =
          scheduler_addtask(s, task_type_ghost_in, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      c->hydro.ghost_out =
          scheduler_addtask(s, task_type_ghost_out, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      engine_add_ghosts(e, c, c->hydro.ghost_in, c->hydro.ghost_out);

      /* Generate the extra ghost task. */
#ifdef EXTRA_HYDRO_LOOP
      c->hydro.extra_ghost = scheduler_addtask(
          s, task_type_extra_ghost, task_subtype_none, 0, 0, c, NULL);
#endif

      /* Stars */
      if (with_stars) {
        c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
                                           task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->stars.drift, c->super->kick2);
      }

      /* Sinks */
      if (with_sinks) {
        c->sinks.drift = scheduler_addtask(s, task_type_drift_sink,
                                           task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->sinks.drift, c->super->kick2);
      }

      /* Black holes */
      if (with_black_holes) {
        c->black_holes.drift = scheduler_addtask(
            s, task_type_drift_bpart, task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->black_holes.drift, c->super->kick2);
      }

      /* Subgrid tasks: cooling */
      if (with_cooling) {

        c->hydro.cooling_in =
            scheduler_addtask(s, task_type_cooling_in, task_subtype_none, 0,
                              /*implicit=*/1, c, NULL);
        c->hydro.cooling_out =
            scheduler_addtask(s, task_type_cooling_out, task_subtype_none, 0,
                              /*implicit=*/1, c, NULL);

        engine_add_cooling(e, c, c->hydro.cooling_in, c->hydro.cooling_out);

        scheduler_addunlock(s, c->hydro.end_force, c->hydro.cooling_in);
        scheduler_addunlock(s, c->hydro.cooling_out, c->super->kick2);

      } else {
        scheduler_addunlock(s, c->hydro.end_force, c->super->kick2);
      }
