/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *                    Angus Lepper (angus.lepper@ed.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <stdlib.h>
#include <unistd.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* Load the profiler header, if needed. */
#ifdef WITH_PROFILER
#include <gperftools/profiler.h>
#endif

/* This object's header. */
#include "engine.h"

/* Local headers. */
#include "atomic.h"
#include "cell.h"
#include "clocks.h"
#include "cycle.h"
#include "debug.h"
#include "error.h"
#include "proxy.h"
#include "timers.h"

/**
 * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_grav The send_grav #task, if it has already been created.
 */
void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
                                  struct cell *cj, struct task *t_grav) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the gravity tasks are for the target node. */
  for (l = ci->grav.grav; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_grav == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
                                 ci->mpi.tag, 0, ci, cj);

      /* The sends should unlock the down pass. */
      scheduler_addunlock(s, t_grav, ci->grav.super->grav.down);

      /* Drift before you send */
      scheduler_addunlock(s, ci->grav.super->grav.drift, t_grav);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.grav.send, t_grav);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}
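
/* A rough sketch of the ordering built above for a cell whose gravity pairs
 * involve the target node (task names are the existing ones):
 *
 *   grav.super->grav.drift --> send/gpart --> grav.super->grav.down
 *
 * The recursion passes t_grav down unchanged, so the whole sub-tree below the
 * first tagged cell shares a single send task. */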

/**
 * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_xv The send_xv #task, if it has already been created.
 * @param t_rho The send_rho #task, if it has already been created.
 * @param t_gradient The send_gradient #task, if already created.
 */
void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_xv,
                                struct task *t_rho, struct task *t_gradient) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->hydro.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_xv == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->mpi.tag,
                               0, ci, cj);
      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
                                ci->mpi.tag, 0, ci, cj);
#ifdef EXTRA_HYDRO_LOOP
      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
                                     ci->mpi.tag, 0, ci, cj);
#endif

#ifdef EXTRA_HYDRO_LOOP

      scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);

      scheduler_addunlock(s, ci->hydro.super->hydro.extra_ghost, t_gradient);

      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
       * task. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.extra_ghost);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#else
      /* The send_rho task should unlock the super_hydro-cell's end_force. */
      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.end_force);

      /* The send_rho task depends on the cell's ghost task. */
      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);

      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);

#endif

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.hydro.send_xv, t_xv);
    engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho);
#ifdef EXTRA_HYDRO_LOOP
    engine_addlink(e, &ci->mpi.hydro.send_gradient, t_gradient);
#endif
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
                                   t_gradient);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}
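
/* A rough sketch of the send-side ordering built above (the bracketed part
 * only exists with EXTRA_HYDRO_LOOP):
 *
 *   drift --> send/xv --> ghost_in ... ghost_out --> send/rho
 *         [--> extra_ghost --> send/gradient] --> end_force
 *
 * As for gravity, the recursion re-uses the tasks created at the first tagged
 * level for the whole sub-tree. */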

/**
 * @brief Add send tasks for the stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_xv The send_xv #task, if it has already been created.
 * @param t_feedback The send_feed #task, if it has already been created.
 */
void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                struct cell *cj, struct task *t_xv,
                                struct task *t_feedback) {

#ifdef WITH_MPI

  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the density tasks are for the target node. */
  for (l = ci->stars.density; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* If so, attach send tasks. */
  if (l != NULL) {

    if (t_feedback == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      /* Create the tasks and their dependencies? */
      t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
                                     ci->mpi.tag, 0, ci, cj);

      /* The send_stars task should unlock the super-cell's stars_out. */
      scheduler_addunlock(s, t_feedback, ci->hydro.super->stars.stars_out);

      /* Ghost before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);

      /* Drift before you send */
      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);
    }

    engine_addlink(e, &ci->mpi.stars.send, t_feedback);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_feedback);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add send tasks for the time-step to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param ci The sending #cell.
 * @param cj Dummy cell containing the nodeID of the receiving node.
 * @param t_ti The send_ti #task, if it has already been created.
 * @param t_limiter The send_limiter #task, if already created.
 * @param with_limiter Are we running with the time-step limiter?
 */
void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
                                   struct cell *cj, struct task *t_ti,
                                   struct task *t_limiter,
                                   const int with_limiter) {

#ifdef WITH_MPI
  struct link *l = NULL;
  struct scheduler *s = &e->sched;
  const int nodeID = cj->nodeID;

  /* Check if any of the gravity tasks are for the target node. */
  for (l = ci->grav.grav; l != NULL; l = l->next)
    if (l->t->ci->nodeID == nodeID ||
        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
      break;

  /* Check whether instead any of the hydro tasks are for the target node. */
  if (l == NULL)
    for (l = ci->hydro.density; l != NULL; l = l->next)
      if (l->t->ci->nodeID == nodeID ||
          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
        break;

  /* Check whether instead any of the stars tasks are for the target node. */
  if (l == NULL)
    for (l = ci->stars.density; l != NULL; l = l->next)
      if (l->t->ci->nodeID == nodeID ||
          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
        break;

  /* If found anything, attach send tasks. */
  if (l != NULL) {

    /* Create the tasks and their dependencies? */
    if (t_ti == NULL) {

      /* Make sure this cell is tagged. */
      cell_ensure_tagged(ci);

      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend,
                               ci->mpi.tag, 0, ci, cj);

      if (with_limiter)
        t_limiter = scheduler_addtask(s, task_type_send, task_subtype_limiter,
                                      ci->mpi.tag, 0, ci, cj);

      /* The super-cell's timestep task should unlock the send_ti task. */
      scheduler_addunlock(s, ci->super->timestep, t_ti);
      if (with_limiter) scheduler_addunlock(s, t_limiter, ci->super->timestep);
      if (with_limiter)
        scheduler_addunlock(s, t_limiter, ci->super->timestep_limiter);
      if (with_limiter) scheduler_addunlock(s, ci->super->kick2, t_limiter);
      if (with_limiter)
        scheduler_addunlock(s, ci->super->timestep_limiter, t_ti);
    }

    /* Add them to the local cell. */
    engine_addlink(e, &ci->mpi.send_ti, t_ti);
    if (with_limiter) engine_addlink(e, &ci->mpi.limiter.send, t_limiter);
  }

  /* Recurse? */
  if (ci->split)
    for (int k = 0; k < 8; k++)
      if (ci->progeny[k] != NULL)
        engine_addtasks_send_timestep(e, ci->progeny[k], cj, t_ti, t_limiter,
                                      with_limiter);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}
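
/* With the limiter enabled, the ordering built above is roughly
 *
 *   kick2 --> send/limiter --> {timestep, timestep_limiter} --> send/tend
 *
 * and without it simply timestep --> send/tend. */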

/**
 * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_xv The recv_xv #task, if it has already been created.
 * @param t_rho The recv_rho #task, if it has already been created.
 * @param t_gradient The recv_gradient #task, if it has already been created.
 */
void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
                                struct task *t_xv, struct task *t_rho,
                                struct task *t_gradient) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any hydro tasks ? */
  if (t_xv == NULL && c->hydro.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag, 0,
                             c, NULL);
    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->mpi.tag,
                              0, c, NULL);
#ifdef EXTRA_HYDRO_LOOP
    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
                                   c->mpi.tag, 0, c, NULL);
#endif
  }

  c->mpi.hydro.recv_xv = t_xv;
  c->mpi.hydro.recv_rho = t_rho;
  c->mpi.hydro.recv_gradient = t_gradient;

  /* Add dependencies. */
  if (c->hydro.sorts != NULL) scheduler_addunlock(s, t_xv, c->hydro.sorts);

  for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_xv, l->t);
    scheduler_addunlock(s, l->t, t_rho);
  }
#ifdef EXTRA_HYDRO_LOOP
  for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_rho, l->t);
    scheduler_addunlock(s, l->t, t_gradient);
  }
  for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_gradient, l->t);
  }
#else
  for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_rho, l->t);
  }
#endif

  for (struct link *l = c->stars.density; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_xv, l->t);
    scheduler_addunlock(s, t_rho, l->t);
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}
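
/* Receive-side ordering mirroring the sends: recv/xv unlocks the sort and the
 * density tasks, which in turn unlock recv/rho; with EXTRA_HYDRO_LOOP the
 * gradient tasks sit between recv/rho and recv/gradient, which then unlocks
 * the force tasks (otherwise recv/rho unlocks them directly). The stars
 * density tasks wait for both recv/xv and recv/rho. */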

/**
 * @brief Add recv tasks for stars pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_feedback The recv_feed #task, if it has already been created.
 */
void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
                                struct task *t_feedback) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any stars tasks ? */
  if (t_feedback == NULL && c->stars.density != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_feedback = scheduler_addtask(s, task_type_recv, task_subtype_spart,
                                   c->mpi.tag, 0, c, NULL);
  }

  c->mpi.stars.recv = t_feedback;

#ifdef SWIFT_DEBUG_CHECKS
  if (c->nodeID == e->nodeID) error("Local cell!");
#endif

  if (c->stars.sorts != NULL)
    scheduler_addunlock(s, t_feedback, c->stars.sorts);

  for (struct link *l = c->stars.density; l != NULL; l = l->next) {
    scheduler_addunlock(s, l->t, t_feedback);
  }

  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
    scheduler_addunlock(s, t_feedback, l->t);
  }

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_stars(e, c->progeny[k], t_feedback);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_grav The recv_gpart #task, if it has already been created.
 */
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
                                  struct task *t_grav) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any gravity tasks ? */
  if (t_grav == NULL && c->grav.grav != NULL) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    /* Create the tasks. */
    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
                               c->mpi.tag, 0, c, NULL);
  }

  c->mpi.grav.recv = t_grav;

  for (struct link *l = c->grav.grav; l != NULL; l = l->next)
    scheduler_addunlock(s, t_grav, l->t);

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add recv tasks for the time-step to a hierarchy of cells.
 *
 * @param e The #engine.
 * @param c The foreign #cell.
 * @param t_ti The recv_ti #task, if it has already been created.
 * @param t_limiter The recv_limiter #task, if already created.
 * @param with_limiter Are we running with the time-step limiter?
 */
void engine_addtasks_recv_timestep(struct engine *e, struct cell *c,
                                   struct task *t_ti, struct task *t_limiter,
                                   const int with_limiter) {

#ifdef WITH_MPI
  struct scheduler *s = &e->sched;

  /* Have we reached a level where there are any self/pair tasks ? */
  if (t_ti == NULL && (c->grav.grav != NULL || c->hydro.density != NULL)) {

#ifdef SWIFT_DEBUG_CHECKS
    /* Make sure this cell has a valid tag. */
    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
#endif  // SWIFT_DEBUG_CHECKS

    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend, c->mpi.tag,
                             0, c, NULL);

    if (with_limiter)
      t_limiter = scheduler_addtask(s, task_type_recv, task_subtype_limiter,
                                    c->mpi.tag, 0, c, NULL);
  }

  c->mpi.recv_ti = t_ti;

  for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
    scheduler_addunlock(s, l->t, t_ti);
  }

  if (with_limiter) {

    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_limiter);
    }

    for (struct link *l = c->hydro.limiter; l != NULL; l = l->next) {
      scheduler_addunlock(s, t_limiter, l->t);
      scheduler_addunlock(s, l->t, t_ti);
    }

  } else {

    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
      scheduler_addunlock(s, l->t, t_ti);
    }
  }

  for (struct link *l = c->stars.feedback; l != NULL; l = l->next)
    scheduler_addunlock(s, l->t, t_ti);

  /* Recurse? */
  if (c->split)
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_addtasks_recv_timestep(e, c->progeny[k], t_ti, t_limiter,
                                      with_limiter);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Generate the common hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- timestep version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_limiter = (e->policy & engine_policy_limiter);

  /* Are we in a super-cell ? */
  if (c->super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the two half kicks */
      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
                                   c, NULL);

#if defined(WITH_LOGGER)
      c->logger = scheduler_addtask(s, task_type_logger, task_subtype_none, 0,
                                    0, c, NULL);
#endif

      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
                                   c, NULL);

      /* Add the time-step calculation task and its dependency */
      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
                                      0, 0, c, NULL);

      scheduler_addunlock(s, c->kick2, c->timestep);
      scheduler_addunlock(s, c->timestep, c->kick1);

      /* Time-step limiting */
      if (with_limiter) {
        c->timestep_limiter = scheduler_addtask(
            s, task_type_timestep_limiter, task_subtype_none, 0, 0, c, NULL);

        /* Make sure it is not run before kick2 */
        scheduler_addunlock(s, c->timestep, c->timestep_limiter);
        scheduler_addunlock(s, c->timestep_limiter, c->kick1);
      }

#if defined(WITH_LOGGER)
      scheduler_addunlock(s, c->kick1, c->logger);
#endif
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
  }
}
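
/* The time-integration cycle created above for a local super-cell is
 *
 *   kick2 --> timestep --> kick1
 *
 * with timestep_limiter slotted between timestep and kick1 when the limiter
 * policy is on, and kick1 --> logger when the logger is compiled in. */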

/**
 * @brief Generate the gravity hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- gravity version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int periodic = e->s->periodic;
  const int is_self_gravity = (e->policy & engine_policy_self_gravity);

  /* Are we in a super-cell ? */
  if (c->grav.super == c) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      c->grav.drift = scheduler_addtask(s, task_type_drift_gpart,
                                        task_subtype_none, 0, 0, c, NULL);

      c->grav.end_force = scheduler_addtask(s, task_type_end_grav_force,
                                            task_subtype_none, 0, 0, c, NULL);

      scheduler_addunlock(s, c->grav.end_force, c->super->kick2);

      if (is_self_gravity) {

        /* Initialisation of the multipoles */
        c->grav.init = scheduler_addtask(s, task_type_init_grav,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Gravity non-neighbouring pm calculations */
        c->grav.long_range = scheduler_addtask(
            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);

        /* Gravity recursive down-pass */
        c->grav.down = scheduler_addtask(s, task_type_grav_down,
                                         task_subtype_none, 0, 0, c, NULL);

        /* Implicit tasks for the up and down passes */
        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);
        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);
        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        /* Gravity mesh force propagation */
        if (periodic)
          c->grav.mesh = scheduler_addtask(s, task_type_grav_mesh,
                                           task_subtype_none, 0, 0, c, NULL);

        if (periodic) scheduler_addunlock(s, c->grav.drift, c->grav.mesh);
        if (periodic) scheduler_addunlock(s, c->grav.mesh, c->grav.down);
        scheduler_addunlock(s, c->grav.init, c->grav.long_range);
        scheduler_addunlock(s, c->grav.long_range, c->grav.down);
        scheduler_addunlock(s, c->grav.down, c->grav.super->grav.end_force);

        /* Link in the implicit tasks */
        scheduler_addunlock(s, c->grav.init, c->grav.init_out);
        scheduler_addunlock(s, c->grav.drift, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->grav.down);
      }
    }
  }

  /* We are below the super-cell but not below the maximal splitting depth */
  else if ((c->grav.super != NULL) &&
           ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      if (is_self_gravity) {

        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
                                              task_subtype_none, 0, 1, c, NULL);

        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
                                             task_subtype_none, 0, 1, c, NULL);

        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
                                            task_subtype_none, 0, 1, c, NULL);

        scheduler_addunlock(s, c->parent->grav.init_out, c->grav.init_out);
        scheduler_addunlock(s, c->parent->grav.drift_out, c->grav.drift_out);
        scheduler_addunlock(s, c->grav.down_in, c->parent->grav.down_in);
      }
    }
  }

  /* Recurse but not below the maximal splitting depth */
  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav))
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
}
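
/* For a local gravity super-cell the chain built above is roughly
 *
 *   init --> long_range --> down --> end_force --> kick2
 *   drift --> mesh --> down            (mesh only when periodic)
 *
 * The implicit init_out / drift_out / down_in tasks then propagate these
 * dependencies down the tree, but not below space_subdepth_diff_grav levels
 * above the deepest leaves. */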

/**
 * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
 */
void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
                       struct task *ghost_out) {

  /* Abort as there are no hydro particles here? */
  if (c->hydro.count_total == 0) return;

  /* If we have reached the leaf OR have too few particles to play with */
  if (!c->split || c->hydro.count_total < engine_max_parts_per_ghost) {

    /* Add the ghost task and its dependencies */
    struct scheduler *s = &e->sched;
    c->hydro.ghost =
        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, ghost_in, c->hydro.ghost);
    scheduler_addunlock(s, c->hydro.ghost, ghost_out);

  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
  }
}

/**
 * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
 * i.e. all the O(Npart) tasks -- hydro version
 *
 * Tasks are only created here. The dependencies will be added later on.
 *
 * Note that there is no need to recurse below the super-cell. Note also
 * that we only add tasks if the relevant particles are present in the cell.
 *
 * @param e The #engine.
 * @param c The #cell.
 */
void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c) {

  struct scheduler *s = &e->sched;
  const int with_stars = (e->policy & engine_policy_stars);
  const int with_feedback = (e->policy & engine_policy_feedback);
  const int with_cooling = (e->policy & engine_policy_cooling);
  const int with_star_formation = (e->policy & engine_policy_star_formation);

  /* Are we in a super-cell ? */
  if (c->hydro.super == c) {

    /* Add the sort task. */
    c->hydro.sorts =
        scheduler_addtask(s, task_type_sort, task_subtype_none, 0, 0, c, NULL);

    if (with_feedback) {
      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
                                         task_subtype_none, 0, 0, c, NULL);
    }

    /* Local tasks only... */
    if (c->nodeID == e->nodeID) {

      /* Add the drift task. */
      c->hydro.drift = scheduler_addtask(s, task_type_drift_part,
                                         task_subtype_none, 0, 0, c, NULL);

      /* Add the task finishing the force calculation */
      c->hydro.end_force = scheduler_addtask(s, task_type_end_hydro_force,
                                             task_subtype_none, 0, 0, c, NULL);

      /* Generate the ghost tasks. */
      c->hydro.ghost_in =
          scheduler_addtask(s, task_type_ghost_in, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      c->hydro.ghost_out =
          scheduler_addtask(s, task_type_ghost_out, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);
      engine_add_ghosts(e, c, c->hydro.ghost_in, c->hydro.ghost_out);

      /* Generate the extra ghost task. */
#ifdef EXTRA_HYDRO_LOOP
      c->hydro.extra_ghost = scheduler_addtask(
          s, task_type_extra_ghost, task_subtype_none, 0, 0, c, NULL);
#endif

      /* Stars */
      if (with_stars) {
        c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
                                           task_subtype_none, 0, 0, c, NULL);
        scheduler_addunlock(s, c->stars.drift, c->super->kick2);
      }

      /* Subgrid tasks: cooling */
      if (with_cooling) {

        c->hydro.cooling = scheduler_addtask(s, task_type_cooling,
                                             task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->hydro.end_force, c->hydro.cooling);
        scheduler_addunlock(s, c->hydro.cooling, c->super->kick2);

      } else {
        scheduler_addunlock(s, c->hydro.end_force, c->super->kick2);
      }

      /* Subgrid tasks: star formation */
      if (with_star_formation) {

        c->hydro.star_formation = scheduler_addtask(
            s, task_type_star_formation, task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->super->kick2, c->hydro.star_formation);
        scheduler_addunlock(s, c->hydro.star_formation, c->super->timestep);
      }

      /* Subgrid tasks: feedback */
      if (with_feedback) {

        c->stars.stars_in =
            scheduler_addtask(s, task_type_stars_in, task_subtype_none, 0,
                              /* implicit = */ 1, c, NULL);

        c->stars.stars_out =
            scheduler_addtask(s, task_type_stars_out, task_subtype_none, 0,
                              /* implicit = */ 1, c, NULL);

        c->stars.ghost = scheduler_addtask(s, task_type_stars_ghost,
                                           task_subtype_none, 0, 0, c, NULL);

        scheduler_addunlock(s, c->super->kick2, c->stars.stars_in);
        scheduler_addunlock(s, c->stars.stars_out, c->super->timestep);

        if (with_star_formation)
          scheduler_addunlock(s, c->hydro.star_formation, c->stars.stars_in);
      }
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)
      for (int k = 0; k < 8; k++)
        if (c->progeny[k] != NULL)
          engine_make_hierarchical_tasks_hydro(e, c->progeny[k]);
  }
}
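
/* A rough summary of the unlocks added above around kick2 for a local hydro
 * super-cell:
 *
 *   end_force --> [cooling -->] kick2            (cooling only if enabled)
 *   kick2 --> star_formation --> timestep        (with star formation)
 *   kick2 --> stars_in, stars_out --> timestep   (with feedback)
 *
 * and star_formation additionally unlocks stars_in when both star formation
 * and feedback are active. */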

void engine_make_hierarchical_tasks_mapper(void *map_data, int num_elements,
                                           void *extra_data) {
  struct engine *e = (struct engine *)extra_data;
  const int is_with_hydro = (e->policy & engine_policy_hydro);
  const int is_with_self_gravity = (e->policy & engine_policy_self_gravity);
  const int is_with_external_gravity =
      (e->policy & engine_policy_external_gravity);
  /* const int is_with_feedback = (e->policy & engine_policy_feedback); */

  for (int ind = 0; ind < num_elements; ind++) {
    struct cell *c = &((struct cell *)map_data)[ind];
    /* Make the common tasks (time integration) */
    engine_make_hierarchical_tasks_common(e, c);
    /* Add the hydro stuff */
    if (is_with_hydro) engine_make_hierarchical_tasks_hydro(e, c);
    /* And the gravity stuff */
    if (is_with_self_gravity || is_with_external_gravity)
      engine_make_hierarchical_tasks_gravity(e, c);
    /* if (is_with_feedback) engine_make_hierarchical_tasks_stars(e, c); */
  }
}

/**
 * @brief Constructs the top-level tasks for the short-range gravity
 * and long-range gravity interactions.
 *
 * - All top-cells get a self task.
 * - All pairs within range according to the multipole acceptance
 *   criterion get a pair task.
 */
void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
                                           void *extra_data) {

  struct engine *e = (struct engine *)extra_data;
  struct space *s = e->s;
  struct scheduler *sched = &e->sched;
  const int nodeID = e->nodeID;
  const int periodic = s->periodic;
  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
  const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};
  struct cell *cells = s->cells_top;
  const double theta_crit = e->gravity_properties->theta_crit;
  const double max_distance = e->mesh->r_cut_max;
  const double max_distance2 = max_distance * max_distance;

  /* Compute how many cells away we need to walk */
  const double distance = 2.5 * cells[0].width[0] / theta_crit;
  int delta = (int)(distance / cells[0].width[0]) + 1;
  int delta_m = delta;
  int delta_p = delta;

  /* Special case where every cell is in range of every other one */
  if (delta >= cdim[0] / 2) {
    if (cdim[0] % 2 == 0) {
      delta_m = cdim[0] / 2;
      delta_p = cdim[0] / 2 - 1;
    } else {
      delta_m = cdim[0] / 2;
      delta_p = cdim[0] / 2;
    }
  }
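
  /* Worked example (illustrative numbers only): with theta_crit = 0.5 the
   * search radius is distance = 2.5 * width / 0.5 = 5 cell widths, so
   * delta = 6. On a cdim[0] = 12 grid this triggers the clipping above and
   * gives delta_m = 6, delta_p = 5, i.e. the periodic walk visits every
   * other cell exactly once. */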

  /* Loop through the elements, which are just byte offsets from NULL. */
  for (int ind = 0; ind < num_elements; ind++) {

    /* Get the cell index. */
    const int cid = (size_t)(map_data) + ind;

    /* Integer indices of the cell in the top-level grid */
    const int i = cid / (cdim[1] * cdim[2]);
    const int j = (cid / cdim[2]) % cdim[1];
    const int k = cid % cdim[2];

    /* Get the cell */
    struct cell *ci = &cells[cid];

    /* Skip cells without gravity particles */
    if (ci->grav.count == 0) continue;

    /* If the cell is local build a self-interaction */
    if (ci->nodeID == nodeID) {
      scheduler_addtask(sched, task_type_self, task_subtype_grav, 0, 0, ci,
                        NULL);
    }

    /* Loop over every other cell within (Manhattan) range delta */
    for (int ii = -delta_m; ii <= delta_p; ii++) {
      int iii = i + ii;
      if (!periodic && (iii < 0 || iii >= cdim[0])) continue;
      iii = (iii + cdim[0]) % cdim[0];
      for (int jj = -delta_m; jj <= delta_p; jj++) {
        int jjj = j + jj;
        if (!periodic && (jjj < 0 || jjj >= cdim[1])) continue;