/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *                    Angus Lepper (angus.lepper@ed.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdlib.h>
#include <unistd.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* Load the profiler header, if needed. */
#ifdef WITH_PROFILER
#include <gperftools/profiler.h>
#endif

/* This object's header. */
#include "engine.h"

/* Local headers. */
#include "atomic.h"
#include "cell.h"
#include "clocks.h"
#include "cycle.h"
#include "debug.h"
#include "error.h"
#include "proxy.h"
#include "timers.h"

extern int engine_max_parts_per_ghost;
extern int engine_max_sparts_per_ghost;
extern int engine_star_resort_task_depth;

/**
 * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_grav The send_grav #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
                                  struct cell *cj, struct task *t_grav,
                                  struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the gravity tasks are for the target node. */
  for (l = ci->grav.grav; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_grav == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
                                 ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_gpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The sends should unlock the down pass. */
      scheduler_addunlock(s, t_grav, ci->grav.super->grav.down);

      /* Drift before you send */
      scheduler_addunlock(s, ci->grav.super->grav.drift, t_grav);

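      /* Send the time-step information only once it has been computed. */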
      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_grav);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_xv The send_xv #task, if it has already been created.
 * @param t_rho The send_rho #task, if it has already been created.
 * @param t_gradient The send_gradient #task, if already created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_xv,
                                struct task *t_rho, struct task *t_gradient,
                                struct task *t_ti) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->hydro.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_xv == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->mpi.tag,
                               0, ci, cj);
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
                                ci->mpi.tag, 0, ci, cj);

#ifdef EXTRA_HYDRO_LOOP
      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
                                     ci->mpi.tag, 0, ci, cj);
#endif

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_part,
                               ci->mpi.tag, 0, ci, cj);

#ifdef EXTRA_HYDRO_LOOP

      scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);

      scheduler_addunlock(s, ci->hydro.super->hydro.extra_ghost, t_gradient);

      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.extra_ghost);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#else
      /* The send_rho task should unlock the super_hydro-cell's end_force
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.end_force);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#endif

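      /* Drift before you send */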
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_rho);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);

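      /* Send the time-step information only once it has been computed. */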
      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send, t_xv);
    engine_addlink(e, &ci->mpi.send, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &ci->mpi.send, t_gradient);
#endif
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
                                   t_gradient, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_sf_counts The send_sf_counts, if it has been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 * @param with_star_formation Are we running with star formation on?
 */
void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_feedback,
                                struct task *t_sf_counts, struct task *t_ti,
                                const int with_star_formation) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  if (t_sf_counts == NULL && with_star_formation && ci->hydro.count > 0) {
#ifdef SWIFT_DEBUG_CHECKS
    if (ci->depth != 0)
      error(
          "Attaching a sf_count task at a non-top level c->depth=%d "
          "c->count=%d",
          ci->depth, ci->hydro.count);
#endif
    t_sf_counts = scheduler_addtask(s, task_type_send, task_subtype_sf_counts,
                                    ci->mpi.tag, 0, ci, cj);
    scheduler_addunlock(s, ci->hydro.star_formation, t_sf_counts);
  }

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->stars.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_feedback == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
                                     ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_spart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_stars task should unlock the super_cell's stars_out task. */
      scheduler_addunlock(s, t_feedback, ci->hydro.super->stars.stars_out);

      /* Ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);

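      /* Send the time-step information only once it has been computed. */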
      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
    if (with_star_formation && ci->hydro.count > 0) {
      engine_addlink(e, &ci->mpi.send, t_sf_counts);
    }
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback,
                                   t_sf_counts, t_ti, with_star_formation);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the black holes pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_rho The density comm. task, if it has already been created.
 * @param t_bh_merger The BH swallow comm. task, if it has already been created.
 * @param t_gas_swallow The gas swallow comm. task, if it has already been
 * created.
 * @param t_feedback The send_feed #task, if it has already been created.
 * @param t_ti The send_ti_end #task, if it has already been created.
 */
void engine_addtasks_send_black_holes(struct engine *e, struct cell *ci,
                                      struct cell *cj, struct task *t_rho,
                                      struct task *t_bh_merger,
                                      struct task *t_gas_swallow,
                                      struct task *t_feedback,
                                      struct task *t_ti) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->black_holes.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_rho == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_bpart_rho,
                                ci->mpi.tag, 0, ci, cj);

      t_bh_merger = scheduler_addtask(
          s, task_type_send, task_subtype_bpart_merger, ci->mpi.tag, 0, ci, cj);

      t_gas_swallow = scheduler_addtask(
          s, task_type_send, task_subtype_part_swallow, ci->mpi.tag, 0, ci, cj);

      t_feedback =
          scheduler_addtask(s, task_type_send, task_subtype_bpart_feedback,
                            ci->mpi.tag, 0, ci, cj);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_bpart,
                               ci->mpi.tag, 0, ci, cj);

      /* The send_black_holes task should unlock the super_cell's BH exit point
       * task. */
      scheduler_addunlock(s, t_feedback,
                          ci->hydro.super->black_holes.black_holes_out);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[2],
                          t_feedback);

      /* Drift and density ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->black_holes.drift, t_rho);
      scheduler_addunlock(s, ci->hydro.super->black_holes.density_ghost, t_rho);
      scheduler_addunlock(s, t_rho,
                          ci->hydro.super->black_holes.swallow_ghost[0]);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[0],
                          t_bh_merger);
      scheduler_addunlock(s, t_bh_merger,
                          ci->hydro.super->black_holes.swallow_ghost[2]);

      scheduler_addunlock(s, ci->hydro.super->black_holes.swallow_ghost[0],
                          t_gas_swallow);
      scheduler_addunlock(s, t_gas_swallow,
                          ci->hydro.super->black_holes.swallow_ghost[1]);

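      /* Send the time-step information only once it has been computed. */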
      scheduler_addunlock(s, ci->super->timestep, t_ti);
    }

    engine_addlink(e, &ci->mpi.send, t_rho);
    engine_addlink(e, &ci->mpi.send, t_bh_merger);
    engine_addlink(e, &ci->mpi.send, t_gas_swallow);
    engine_addlink(e, &ci->mpi.send, t_feedback);
    engine_addlink(e, &ci->mpi.send, t_ti);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_black_holes(e, ci->progeny[k], cj, t_rho,
                                         t_bh_merger, t_gas_swallow, t_feedback,
                                         t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 * @param t_gradient The recv_gradient #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
                                struct task *t_xv, struct task *t_rho,
                                struct task *t_gradient, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any hydro tasks ? */
  if (t_xv == NULL && c->hydro.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag, 0,
                             c, NULL);
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->mpi.tag,
                              0, c, NULL);
#ifdef EXTRA_HYDRO_LOOP
    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
                                   c->mpi.tag, 0, c, NULL);
#endif

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_part,
                             c->mpi.tag, 0, c, NULL);
  }

  if (t_xv != NULL) {
    engine_addlink(e, &c->mpi.recv, t_xv);
    engine_addlink(e, &c->mpi.recv, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &c->mpi.recv, t_gradient);
#endif
    engine_addlink(e, &c->mpi.recv, t_ti);

    /* Add dependencies. */
    if (c->hydro.sorts != NULL) {
      scheduler_addunlock(s, t_xv, c->hydro.sorts);
      scheduler_addunlock(s, c->hydro.sorts, t_rho);
    }

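    /* The density loop runs once the foreign parts have arrived; the
     * updated rho are only received once that loop is done. */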
    for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_xv, l->t);
      scheduler_addunlock(s, l->t, t_rho);
    }
#ifdef EXTRA_HYDRO_LOOP
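    /* Gradient loop after the rho arrive; force loop after the gradients
     * arrive. */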
    for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gradient);
    }
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_gradient, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#else
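    /* Without an extra hydro loop, the force tasks depend directly on the
     * arrival of the rho. */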
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
#endif

    /* Make sure the density has been computed before the stars compute theirs.
     */
    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }

    /* Make sure the parts have been received before the BHs compute their
     * accretion rates (depends on particles' rho). */
    for (struct link *l = c->black_holes.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient,
                                   t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_sf_counts The recv_sf_counts, if it has been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 * @param with_star_formation Are we running with star formation on?
 */
void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
                                struct task *t_feedback,
                                struct task *t_sf_counts, struct task *t_ti,
                                const int with_star_formation) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  if (t_sf_counts == NULL && with_star_formation && c->hydro.count > 0) {
#ifdef SWIFT_DEBUG_CHECKS
    if (c->depth != 0)
      error(
          "Attaching a sf_count task at a non-top level c->depth=%d "
          "c->count=%d",
          c->depth, c->hydro.count);
#endif
    t_sf_counts = scheduler_addtask(s, task_type_recv, task_subtype_sf_counts,
                                    c->mpi.tag, 0, c, NULL);
  }

  /* Have we reached a level where there are any stars tasks ? */
  if (t_feedback == NULL && c->stars.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_feedback = scheduler_addtask(s, task_type_recv, task_subtype_spart,
                                   c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_spart,
                             c->mpi.tag, 0, c, NULL);

    if (with_star_formation && c->hydro.count > 0) {

      /* Receive the stars only once the counts have been received */
      scheduler_addunlock(s, t_sf_counts, t_feedback);
    }
  }

  if (t_feedback != NULL) {
    engine_addlink(e, &c->mpi.recv, t_feedback);
    engine_addlink(e, &c->mpi.recv, t_ti);
    if (with_star_formation && c->hydro.count > 0) {
      engine_addlink(e, &c->mpi.recv, t_sf_counts);
    }

#ifdef SWIFT_DEBUG_CHECKS
    if (c->nodeID == e->nodeID) error("Local cell!");
#endif
    if (c->stars.sorts != NULL)
      scheduler_addunlock(s, t_feedback, c->stars.sorts);

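    /* The updated sparts are only received once the local density loop is
     * done. */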
    for (struct link *l = c->stars.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_feedback);
    }

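    /* The feedback loop runs once the sparts have arrived; the time-step
     * information is received once the loop is done. */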
    for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_feedback, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_stars(e, c->progeny[k], t_feedback, t_sf_counts,
                                   t_ti, with_star_formation);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for black_holes pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_rho The density comm. task, if it has already been created.
 * @param t_bh_merger The BH swallow comm. task, if it has already been created.
 * @param t_gas_swallow The gas swallow comm. task, if it has already been
 * created.
 * @param t_feedback The recv_feed #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_black_holes(struct engine *e, struct cell *c,
                                      struct task *t_rho,
                                      struct task *t_bh_merger,
                                      struct task *t_gas_swallow,
                                      struct task *t_feedback,
                                      struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any black_holes tasks ? */
  if (t_rho == NULL && c->black_holes.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_bpart_rho,
                              c->mpi.tag, 0, c, NULL);

    t_bh_merger = scheduler_addtask(
        s, task_type_recv, task_subtype_bpart_merger, c->mpi.tag, 0, c, NULL);

    t_gas_swallow = scheduler_addtask(
        s, task_type_recv, task_subtype_part_swallow, c->mpi.tag, 0, c, NULL);

    t_feedback = scheduler_addtask(
        s, task_type_recv, task_subtype_bpart_feedback, c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_bpart,
                             c->mpi.tag, 0, c, NULL);
  }

  if (t_rho != NULL) {
    engine_addlink(e, &c->mpi.recv, t_rho);
    engine_addlink(e, &c->mpi.recv, t_bh_merger);
    engine_addlink(e, &c->mpi.recv, t_gas_swallow);
    engine_addlink(e, &c->mpi.recv, t_feedback);
    engine_addlink(e, &c->mpi.recv, t_ti);

#ifdef SWIFT_DEBUG_CHECKS
    if (c->nodeID == e->nodeID) error("Local cell!");
#endif

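    /* The updated bparts are only received once the local density loop is
     * done. */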
    for (struct link *l = c->black_holes.density; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_rho);
    }

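    /* The gas swallow data can only arrive once the local force tasks have
     * run. */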
    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_gas_swallow);
    }

    for (struct link *l = c->black_holes.swallow; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_rho, l->t);
      scheduler_addunlock(s, l->t, t_gas_swallow);
      scheduler_addunlock(s, l->t, t_bh_merger);
    }
    for (struct link *l = c->black_holes.do_gas_swallow; l != NULL;
         l = l->next) {
      scheduler_addunlock(s, t_gas_swallow, l->t);
    }
    for (struct link *l = c->black_holes.do_bh_swallow; l != NULL;
         l = l->next) {
      scheduler_addunlock(s, t_bh_merger, l->t);
      scheduler_addunlock(s, l->t, t_feedback);
    }
    for (struct link *l = c->black_holes.feedback; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_feedback, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_black_holes(e, c->progeny[k], t_rho, t_bh_merger,
                                         t_gas_swallow, t_feedback, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_grav The recv_gpart #task, if it has already been created.
 * @param t_ti The recv_ti_end #task, if it has already been created.
 */
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
                                  struct task *t_grav, struct task *t_ti) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any gravity tasks ? */
  if (t_grav == NULL && c->grav.grav != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
                               c->mpi.tag, 0, c, NULL);

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_gpart,
                             c->mpi.tag, 0, c, NULL);
  }

  /* If we have tasks, link them. */
  if (t_grav != NULL) {
    engine_addlink(e, &c->mpi.recv, t_grav);
    engine_addlink(e, &c->mpi.recv, t_ti);

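    /* The gravity tasks can only run once the foreign gparts have arrived;
     * the time-step information is received once they are done. */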
    for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_grav, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_ti);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Generate the common hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- timestep version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_limiter = (e->policy & engine_policy_limiter);
  const int with_star_formation = (e->policy & engine_policy_star_formation);

  /* Are we at the top-level? */
  if (c->top == c && c->nodeID == e->nodeID) {

    if (with_star_formation && c->hydro.count > 0) {
      c->hydro.star_formation = scheduler_addtask(
          s, task_type_star_formation, task_subtype_none, 0, 0, c, NULL);
    }
  }

  /* Are we in a super-cell ? */
  if (c->super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the two half kicks */
      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
                                   c, NULL);

#if defined(WITH_LOGGER)
      c->logger = scheduler_addtask(s, task_type_logger, task_subtype_none, 0,
                                    0, c, NULL);
#endif

      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
                                   c, NULL);

      /* Add the time-step calculation task and its dependency */
      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
                                      0, 0, c, NULL);

      scheduler_addunlock(s, c->kick2, c->timestep);
      scheduler_addunlock(s, c->timestep, c->kick1);

      /* Subgrid tasks: star formation */
      if (with_star_formation && c->hydro.count > 0) {
        scheduler_addunlock(s, c->kick2, c->top->hydro.star_formation);
        scheduler_addunlock(s, c->top->hydro.star_formation, c->timestep);
      }

      /* Time-step limiting */
      if (with_limiter) {
        c->timestep_limiter = scheduler_addtask(
            s, task_type_timestep_limiter, task_subtype_none, 0, 0, c, NULL);

        /* Make sure it is not run before kick2 */
        scheduler_addunlock(s, c->timestep, c->timestep_limiter);
        scheduler_addunlock(s, c->timestep_limiter, c->kick1);
      }

#if defined(WITH_LOGGER)
      scheduler_addunlock(s, c->kick1, c->logger);
#endif
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- gravity version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int periodic = e->s->periodic;
  const int is_self_gravity = (e->policy & engine_policy_self_gravity);

  /* Are we in a super-cell ? */
  if (c->grav.super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      c->grav.drift = scheduler_addtask(s, task_type_drift_gpart,
                                        task_subtype_none, 0, 0, c, NULL);

      c->grav.end_force = scheduler_addtask(s, task_type_end_grav_force,
                                            task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->grav.end_force, c->super->kick2);

      if (is_self_gravity) {

        /* Initialisation of the multipoles */
        c->grav.init = scheduler_addtask(s, task_type_init_grav,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Gravity non-neighbouring pm calculations */
        c->grav.long_range = scheduler_addtask(
            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);

        /* Gravity recursive down-pass */
        c->grav.down = scheduler_addtask(s, task_type_grav_down,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Implicit tasks for the up and down passes */
        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);
        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);
        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        /* Gravity mesh force propagation */
        if (periodic)
          c->grav.mesh = scheduler_addtask(s, task_type_grav_mesh,
                                           task_subtype_none, 0, 0, c, NULL);

        if (periodic) scheduler_addunlock(s, c->grav.drift, c->grav.mesh);
        if (periodic) scheduler_addunlock(s, c->grav.mesh, c->grav.down);
        scheduler_addunlock(s, c->grav.init, c->grav.long_range);
        scheduler_addunlock(s, c->grav.long_range, c->grav.down);
        scheduler_addunlock(s, c->grav.down, c->grav.super->grav.end_force);

        /* Link in the implicit tasks */
        scheduler_addunlock(s, c->grav.init, c->grav.init_out);
        scheduler_addunlock(s, c->grav.drift, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->grav.down);
      }
    }
  }

  /* We are below the super-cell but not below the maximal splitting depth */
  else if ((c->grav.super != NULL) &&
           ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      if (is_self_gravity) {

        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);

        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);

        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

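        /* Link to the parent cell's implicit up- and down-pass tasks. */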
        scheduler_addunlock(s, c->parent->grav.init_out, c->grav.init_out);
        scheduler_addunlock(s, c->parent->grav.drift_out, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->parent->grav.down_in);
      }
    }
  }

  /* Recurse but not below the maximal splitting depth */
  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav))
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
}

/**
 * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
 */
void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
                       struct task *ghost_out) {

  /* Abort as there are no hydro particles here? */
  if (c->hydro.count_total == 0) return;

  /* If we have reached the leaf OR have too few particles to play with */
  if (!c->split || c->hydro.count_total < engine_max_parts_per_ghost) {

    /* Add the ghost task and its dependencies */
    struct scheduler *s = &e->sched;
    c->hydro.ghost =
        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, ghost_in, c->hydro.ghost);
    scheduler_addunlock(s, c->hydro.ghost, ghost_out);

  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- hydro version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c,
                                          struct cell *star_resort_cell) {

  struct scheduler *s = &e->sched;
  const int with_stars = (e->policy & engine_policy_stars);
  const int with_feedback = (e->policy & engine_policy_feedback);
  const int with_cooling = (e->policy & engine_policy_cooling);
  const int with_star_formation = (e->policy & engine_policy_star_formation);
  const int with_black_holes = (e->policy & engine_policy_black_holes);

  /* Are we at the level where we create the stars' resort tasks?
   * If the tree is shallow, we need to do this at the super-level if the
   * super-level is above the level we want */
  if ((c->nodeID == e->nodeID) && (star_resort_cell == NULL) &&
      (c->depth == engine_star_resort_task_depth || c->hydro.super == c)) {

    if (with_star_formation && c->hydro.count > 0) {

      /* Record this is the level where we re-sort */
      star_resort_cell = c;