/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
 *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
 *               2016 John A. Regan (john.a.regan@durham.ac.uk)
 *                    Tom Theuns (tom.theuns@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <float.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* This object's header. */
#include "task.h"

/* Local headers. */
#include "atomic.h"
#include "engine.h"
#include "error.h"
#include "inline.h"
#include "lock.h"

/* Task type names. */
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
const char *taskID_names[task_type_count] = {"none",
                                             "sort",
                                             "self",
                                             "pair",
                                             "sub_self",
                                             "sub_pair",
                                             "init_grav",
                                             "init_grav_out",
                                             "ghost_in",
                                             "ghost",
                                             "ghost_out",
                                             "extra_ghost",
                                             "drift_part",
                                             "drift_gpart",
                                             "drift_gpart_out",
                                             "end_force",
                                             "kick1",
                                             "kick2",
                                             "timestep",
                                             "send",
                                             "recv",
                                             "grav_long_range",
                                             "grav_mm",
                                             "grav_down_in",
                                             "grav_down",
                                             "grav_mesh",
                                             "cooling",
                                             "star_formation",
                                             "sourceterms",
                                             "logger",
                                             "stars_ghost_in",
                                             "stars_ghost",
Loic Hausammann's avatar
Loic Hausammann committed
83
84
                                             "stars_ghost_out",
                                             "stars_sort"};
85

86
/* Sub-task type names. */
87
const char *subtaskID_names[task_subtype_count] = {
88
89
90
    "none",          "density", "gradient",     "force", "grav",
    "external_grav", "tend",    "xv",           "rho",   "gpart",
    "multipole",     "spart",   "stars_density"};
91

92
93
94
95
96
#ifdef WITH_MPI
/* MPI communicators for the subtypes. One communicator per sub-task type
 * (filled in by task_create_mpi_comms()) so that messages belonging to
 * different sub-task types can never be matched against each other. */
MPI_Comm subtaskMPI_comms[task_subtype_count];
#endif

97
98
/**
 * @brief Computes the overlap between the parts array of two given cells.
 *
 * Returns a non-zero overlap only when one cell's particle range fully
 * contains the other's (i.e. one cell is a sub-cell of the other, or they
 * are the same cell); partial or disjoint ranges count as no overlap.
 *
 * @param TYPE is the type of parts (e.g. #part, #gpart, #spart)
 * @param ARRAY is the array of this specific type.
 * @param COUNT is the number of elements in the array.
 */
#define TASK_CELL_OVERLAP(TYPE, ARRAY, COUNT)                                 \
  __attribute__((always_inline))                                              \
      INLINE static size_t task_cell_overlap_##TYPE(                         \
          const struct cell *restrict ci, const struct cell *restrict cj) {  \
                                                                              \
    if (ci == NULL || cj == NULL) return 0;                                  \
                                                                              \
    /* Is cj's particle range fully contained within ci's? */                \
    const int ci_holds_cj =                                                  \
        (ci->ARRAY <= cj->ARRAY) &&                                          \
        (ci->ARRAY + ci->COUNT >= cj->ARRAY + cj->COUNT);                    \
    if (ci_holds_cj) return cj->COUNT;                                       \
                                                                              \
    /* Or is ci's range fully contained within cj's? */                      \
    const int cj_holds_ci =                                                  \
        (cj->ARRAY <= ci->ARRAY) &&                                          \
        (cj->ARRAY + cj->COUNT >= ci->ARRAY + ci->COUNT);                    \
    if (cj_holds_ci) return ci->COUNT;                                       \
                                                                              \
    /* Disjoint (or only partially overlapping) ranges. */                   \
    return 0;                                                                \
  }

/* Instantiate the overlap helper for each particle type. */
TASK_CELL_OVERLAP(part, hydro.parts, hydro.count);
TASK_CELL_OVERLAP(gpart, grav.parts, grav.count);
TASK_CELL_OVERLAP(spart, stars.parts, stars.count);
Loic Hausammann's avatar
Loic Hausammann committed
125

126
127
128
129
130
/**
 * @brief Returns the #task_actions for a given task.
 *
 * @param t The #task.
 */
131
132
__attribute__((always_inline)) INLINE static enum task_actions task_acts_on(
    const struct task *t) {
133
134
135
136
137
138
139

  switch (t->type) {

    case task_type_none:
      return task_action_none;
      break;

140
    case task_type_drift_part:
141
142
    case task_type_sort:
    case task_type_ghost:
143
    case task_type_extra_ghost:
Stefan Arridge's avatar
Stefan Arridge committed
144
    case task_type_cooling:
145
    case task_type_sourceterms:
146
147
148
      return task_action_part;
      break;

149
150
151
    case task_type_star_formation:
      return task_action_all;

152
    case task_type_stars_ghost:
Loic Hausammann's avatar
Loic Hausammann committed
153
    case task_type_stars_sort:
Loic Hausammann's avatar
Loic Hausammann committed
154
155
156
      return task_action_spart;
      break;

157
158
159
160
161
162
163
    case task_type_self:
    case task_type_pair:
    case task_type_sub_self:
    case task_type_sub_pair:
      switch (t->subtype) {

        case task_subtype_density:
164
        case task_subtype_gradient:
165
166
167
168
        case task_subtype_force:
          return task_action_part;
          break;

169
        case task_subtype_stars_density:
170
171
          return task_action_all;
          break;
172

173
        case task_subtype_grav:
174
        case task_subtype_external_grav:
175
176
177
178
179
180
181
182
183
184
          return task_action_gpart;
          break;

        default:
          error("Unknow task_action for task");
          return task_action_none;
          break;
      }
      break;

185
    case task_type_end_force:
186
187
    case task_type_kick1:
    case task_type_kick2:
Loikki's avatar
Loikki committed
188
    case task_type_logger:
189
    case task_type_timestep:
190
191
    case task_type_send:
    case task_type_recv:
192
      if (t->ci->hydro.count > 0 && t->ci->grav.count > 0)
193
        return task_action_all;
194
      else if (t->ci->hydro.count > 0)
195
        return task_action_part;
196
      else if (t->ci->grav.count > 0)
197
198
199
        return task_action_gpart;
      else
        error("Task without particles");
200
201
      break;

202
    case task_type_init_grav:
203
    case task_type_grav_mm:
204
    case task_type_grav_long_range:
205
206
207
      return task_action_multipole;
      break;

208
    case task_type_drift_gpart:
209
    case task_type_grav_down:
210
    case task_type_grav_mesh:
211
      return task_action_gpart;
212
      break;
213

214
    default:
215
      error("Unknown task_action for task");
216
217
218
      return task_action_none;
      break;
  }
219

220
  /* Silence compiler warnings */
221
222
  error("Unknown task_action for task");
  return task_action_none;
223
224
}

225
226
227
228
229
230
231
/**
 * @brief Compute the Jaccard similarity of the data used by two
 *        different tasks.
 *
 * @param ta The first #task.
 * @param tb The second #task.
 */
232
233
float task_overlap(const struct task *restrict ta,
                   const struct task *restrict tb) {
234
235
236
237
238
239

  if (ta == NULL || tb == NULL) return 0.f;

  const enum task_actions ta_act = task_acts_on(ta);
  const enum task_actions tb_act = task_acts_on(tb);

240
241
  /* First check if any of the two tasks are of a type that don't
     use cells. */
242
243
244
245
246
  if (ta_act == task_action_none || tb_act == task_action_none) return 0.f;

  const int ta_part = (ta_act == task_action_part || ta_act == task_action_all);
  const int ta_gpart =
      (ta_act == task_action_gpart || ta_act == task_action_all);
247
248
  const int ta_spart =
      (ta_act == task_action_spart || ta_act == task_action_all);
249
250
251
  const int tb_part = (tb_act == task_action_part || tb_act == task_action_all);
  const int tb_gpart =
      (tb_act == task_action_gpart || tb_act == task_action_all);
252
253
  const int tb_spart =
      (tb_act == task_action_spart || tb_act == task_action_all);
254
255
256
257
258
259

  /* In the case where both tasks act on parts */
  if (ta_part && tb_part) {

    /* Compute the union of the cell data. */
    size_t size_union = 0;
260
261
262
263
    if (ta->ci != NULL) size_union += ta->ci->hydro.count;
    if (ta->cj != NULL) size_union += ta->cj->hydro.count;
    if (tb->ci != NULL) size_union += tb->ci->hydro.count;
    if (tb->cj != NULL) size_union += tb->cj->hydro.count;
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278

    /* Compute the intersection of the cell data. */
    const size_t size_intersect = task_cell_overlap_part(ta->ci, tb->ci) +
                                  task_cell_overlap_part(ta->ci, tb->cj) +
                                  task_cell_overlap_part(ta->cj, tb->ci) +
                                  task_cell_overlap_part(ta->cj, tb->cj);

    return ((float)size_intersect) / (size_union - size_intersect);
  }

  /* In the case where both tasks act on gparts */
  else if (ta_gpart && tb_gpart) {

    /* Compute the union of the cell data. */
    size_t size_union = 0;
279
280
281
282
    if (ta->ci != NULL) size_union += ta->ci->grav.count;
    if (ta->cj != NULL) size_union += ta->cj->grav.count;
    if (tb->ci != NULL) size_union += tb->ci->grav.count;
    if (tb->cj != NULL) size_union += tb->cj->grav.count;
283
284
285
286
287
288
289
290
291

    /* Compute the intersection of the cell data. */
    const size_t size_intersect = task_cell_overlap_gpart(ta->ci, tb->ci) +
                                  task_cell_overlap_gpart(ta->ci, tb->cj) +
                                  task_cell_overlap_gpart(ta->cj, tb->ci) +
                                  task_cell_overlap_gpart(ta->cj, tb->cj);

    return ((float)size_intersect) / (size_union - size_intersect);
  }
292

Loic Hausammann's avatar
Loic Hausammann committed
293
294
295
296
297
  /* In the case where both tasks act on sparts */
  else if (ta_spart && tb_spart) {

    /* Compute the union of the cell data. */
    size_t size_union = 0;
298
299
300
301
    if (ta->ci != NULL) size_union += ta->ci->stars.count;
    if (ta->cj != NULL) size_union += ta->cj->stars.count;
    if (tb->ci != NULL) size_union += tb->ci->stars.count;
    if (tb->cj != NULL) size_union += tb->cj->stars.count;
Loic Hausammann's avatar
Loic Hausammann committed
302
303
304
305
306
307
308
309
310
311

    /* Compute the intersection of the cell data. */
    const size_t size_intersect = task_cell_overlap_spart(ta->ci, tb->ci) +
                                  task_cell_overlap_spart(ta->ci, tb->cj) +
                                  task_cell_overlap_spart(ta->cj, tb->ci) +
                                  task_cell_overlap_spart(ta->cj, tb->cj);

    return ((float)size_intersect) / (size_union - size_intersect);
  }

312
313
  /* Else, no overlap */
  return 0.f;
314
}
315

316
317
/**
 * @brief Unlock the cell held by this task.
 *
 * Releases exactly the set of locks that task_lock() acquired for the same
 * task type/sub-type: part, gpart, spart and/or multipole tree-locks on
 * t->ci (and t->cj for pair-type tasks).
 *
 * @param t The #task.
 */
void task_unlock(struct task *t) {

  const enum task_types type = t->type;
  const enum task_subtypes subtype = t->subtype;
  struct cell *ci = t->ci, *cj = t->cj;

  /* Act based on task type. */
  switch (type) {

    /* These hold both the part and gpart locks on ci. */
    case task_type_end_force:
    case task_type_kick1:
    case task_type_kick2:
    case task_type_logger:
    case task_type_timestep:
      cell_unlocktree(ci);
      cell_gunlocktree(ci);
      break;

    /* Part lock only. */
    case task_type_drift_part:
    case task_type_sort:
      cell_unlocktree(ci);
      break;

    /* Gpart lock only. */
    case task_type_drift_gpart:
    case task_type_grav_mesh:
      cell_gunlocktree(ci);
      break;

    /* Spart lock only. */
    case task_type_stars_sort:
      cell_sunlocktree(ci);
      break;

    /* Single-cell interactions: lock kind depends on the sub-type. */
    case task_type_self:
    case task_type_sub_self:
      if (subtype == task_subtype_grav) {
        cell_gunlocktree(ci);
        cell_munlocktree(ci);
      } else if (subtype == task_subtype_stars_density) {
        cell_sunlocktree(ci);
      } else {
        cell_unlocktree(ci);
      }
      break;

    /* Pair interactions: same as above but on both cells. */
    case task_type_pair:
    case task_type_sub_pair:
      if (subtype == task_subtype_grav) {
        cell_gunlocktree(ci);
        cell_gunlocktree(cj);
        cell_munlocktree(ci);
        cell_munlocktree(cj);
      } else if (subtype == task_subtype_stars_density) {
        cell_sunlocktree(ci);
        cell_sunlocktree(cj);
      } else {
        cell_unlocktree(ci);
        cell_unlocktree(cj);
      }
      break;

    /* Gpart + multipole locks. */
    case task_type_grav_down:
      cell_gunlocktree(ci);
      cell_munlocktree(ci);
      break;

    /* Multipole lock only. */
    case task_type_grav_long_range:
      cell_munlocktree(ci);
      break;

    /* Multipole locks on both cells. */
    case task_type_grav_mm:
      cell_munlocktree(ci);
      cell_munlocktree(cj);
      break;

    /* All other task types hold no cell locks. */
    default:
      break;
  }
}
399
400
401
402
403
404

/**
 * @brief Try to lock the cells associated with this task.
 *
 * @param t the #task.
 */
405
406
int task_lock(struct task *t) {

407
408
  const enum task_types type = t->type;
  const enum task_subtypes subtype = t->subtype;
409
  struct cell *ci = t->ci, *cj = t->cj;
410
411
412
413
#ifdef WITH_MPI
  int res = 0, err = 0;
  MPI_Status stat;
#endif
414

415
  switch (type) {
416

417
418
419
    /* Communication task? */
    case task_type_recv:
    case task_type_send:
420
#ifdef WITH_MPI
421
422
423
424
425
      /* Check the status of the MPI request. */
      if ((err = MPI_Test(&t->req, &res, &stat)) != MPI_SUCCESS) {
        char buff[MPI_MAX_ERROR_STRING];
        int len;
        MPI_Error_string(err, buff, &len);
426
427
428
429
        error(
            "Failed to test request on send/recv task (type=%s/%s tag=%lld, "
            "%s).",
            taskID_names[t->type], subtaskID_names[t->subtype], t->flags, buff);
430
431
      }
      return res;
432
#else
433
      error("SWIFT was not compiled with MPI support.");
434
#endif
435
      break;
436

437
    case task_type_end_force:
438
439
    case task_type_kick1:
    case task_type_kick2:
Loikki's avatar
Loikki committed
440
    case task_type_logger:
441
    case task_type_timestep:
442
      if (ci->hydro.hold || ci->grav.phold) return 0;
443
444
      if (cell_locktree(ci) != 0) return 0;
      if (cell_glocktree(ci) != 0) {
Matthieu Schaller's avatar
Matthieu Schaller committed
445
446
        cell_unlocktree(ci);
        return 0;
447
448
449
      }
      break;

450
    case task_type_drift_part:
451
    case task_type_sort:
452
      if (ci->hydro.hold) return 0;
453
454
      if (cell_locktree(ci) != 0) return 0;
      break;
455

Loic Hausammann's avatar
Loic Hausammann committed
456
457
458
459
460
    case task_type_stars_sort:
      if (ci->stars.hold) return 0;
      if (cell_slocktree(ci) != 0) return 0;
      break;

461
    case task_type_drift_gpart:
462
    case task_type_grav_mesh:
463
      if (ci->grav.phold) return 0;
464
465
466
      if (cell_glocktree(ci) != 0) return 0;
      break;

467
    case task_type_self:
468
    case task_type_sub_self:
469
      if (subtype == task_subtype_grav) {
470
        /* Lock the gparts and the m-pole */
471
        if (ci->grav.phold || ci->grav.mhold) return 0;
472
473
474
475
476
477
        if (cell_glocktree(ci) != 0)
          return 0;
        else if (cell_mlocktree(ci) != 0) {
          cell_gunlocktree(ci);
          return 0;
        }
Loic Hausammann's avatar
Loic Hausammann committed
478
479
480
      } else if (subtype == task_subtype_stars_density) {
        if (ci->stars.hold) return 0;
        if (cell_slocktree(ci) != 0) return 0;
481
      } else {
Loic Hausammann's avatar
Loic Hausammann committed
482
        if (ci->hydro.hold) return 0;
483
484
485
        if (cell_locktree(ci) != 0) return 0;
      }
      break;
486

487
    case task_type_pair:
488
    case task_type_sub_pair:
489
      if (subtype == task_subtype_grav) {
490
        /* Lock the gparts and the m-pole in both cells */
491
        if (ci->grav.phold || cj->grav.phold) return 0;
492
493
494
495
        if (cell_glocktree(ci) != 0) return 0;
        if (cell_glocktree(cj) != 0) {
          cell_gunlocktree(ci);
          return 0;
496
497
498
499
500
501
502
503
504
        } else if (cell_mlocktree(ci) != 0) {
          cell_gunlocktree(ci);
          cell_gunlocktree(cj);
          return 0;
        } else if (cell_mlocktree(cj) != 0) {
          cell_gunlocktree(ci);
          cell_gunlocktree(cj);
          cell_munlocktree(ci);
          return 0;
505
        }
Loic Hausammann's avatar
Loic Hausammann committed
506
507
508
509
510
511
512
      } else if (subtype == task_subtype_stars_density) {
        if (ci->stars.hold || cj->stars.hold) return 0;
        if (cell_slocktree(ci) != 0) return 0;
        if (cell_slocktree(cj) != 0) {
          cell_sunlocktree(ci);
          return 0;
        }
513
      } else {
514
        /* Lock the parts in both cells */
515
        if (ci->hydro.hold || cj->hydro.hold) return 0;
516
517
518
519
520
521
522
        if (cell_locktree(ci) != 0) return 0;
        if (cell_locktree(cj) != 0) {
          cell_unlocktree(ci);
          return 0;
        }
      }
      break;
523

524
525
    case task_type_grav_down:
      /* Lock the gparts and the m-poles */
526
      if (ci->grav.phold || ci->grav.mhold) return 0;
527
528
529
530
531
532
533
534
      if (cell_glocktree(ci) != 0)
        return 0;
      else if (cell_mlocktree(ci) != 0) {
        cell_gunlocktree(ci);
        return 0;
      }
      break;

535
    case task_type_grav_long_range:
536
      /* Lock the m-poles */
537
      if (ci->grav.mhold) return 0;
538
      if (cell_mlocktree(ci) != 0) return 0;
Matthieu Schaller's avatar
Matthieu Schaller committed
539
540
      break;

541
542
    case task_type_grav_mm:
      /* Lock both m-poles */
543
      if (ci->grav.mhold || cj->grav.mhold) return 0;
544
545
546
547
548
549
      if (cell_mlocktree(ci) != 0) return 0;
      if (cell_mlocktree(cj) != 0) {
        cell_munlocktree(ci);
        return 0;
      }

550
551
    default:
      break;
552
553
554
555
556
  }

  /* If we made it this far, we've got a lock. */
  return 1;
}
557

558
559
560
561
562
563
564
565
566
567
568
/**
 * @brief Print basic information about a task.
 *
 * Emits the type/sub-type names plus the scheduler state (wait counter,
 * number of tasks unlocked by this one, skip flag) via message().
 *
 * @param t The #task.
 */
void task_print(const struct task *t) {

  message("Type:'%s' sub_type:'%s' wait=%d nr_unlocks=%d skip=%d",
          taskID_names[t->type], subtaskID_names[t->subtype], t->wait,
          t->nr_unlock_tasks, t->skip);
}
569
570
571
572
573
574
575
576
577
578
579

#ifdef WITH_MPI
/**
 * @brief Create global communicators for each of the subtasks.
 *
 * Each sub-task type gets its own duplicate of MPI_COMM_WORLD, stored in
 * subtaskMPI_comms, so messages of different sub-task types cannot match.
 */
void task_create_mpi_comms(void) {
  for (int stype = 0; stype < task_subtype_count; stype++)
    MPI_Comm_dup(MPI_COMM_WORLD, &subtaskMPI_comms[stype]);
}
#endif
580
581

/**
582
583
 * @brief dump all the tasks of all the known engines into a file for
 * postprocessing.
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
 *
 * Dumps the information to a file "thread_info-stepn.dat" where n is the
 * given step value, or "thread_info_MPI-stepn.dat", if we are running
 * under MPI. Note if running under MPIU all the ranks are dumped into this
 * one file, which has an additional field to identify the rank.
 *
 * @param e the #engine
 * @param step the current step.
 */
void task_dump_all(struct engine *e, int step) {

#ifdef SWIFT_DEBUG_TASKS

  /* Need this to convert ticks to seconds. */
  unsigned long long cpufreq = clocks_get_cpufreq();

#ifdef WITH_MPI
  /* Make sure output file is empty, only on one rank. */
  char dumpfile[35];
  snprintf(dumpfile, sizeof(dumpfile), "thread_info_MPI-step%d.dat", step);
  FILE *file_thread;
  if (engine_rank == 0) {
    file_thread = fopen(dumpfile, "w");
    fclose(file_thread);
  }
  MPI_Barrier(MPI_COMM_WORLD);

  for (int i = 0; i < e->nr_nodes; i++) {

    /* Rank 0 decides the index of the writing node, this happens
     * one-by-one. */
    int kk = i;
    MPI_Bcast(&kk, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (i == engine_rank) {

      /* Open file and position at end. */
      file_thread = fopen(dumpfile, "a");

      /* Add some information to help with the plots and conversion of ticks to
       * seconds. */
      fprintf(file_thread, " %03d 0 0 0 0 %lld %lld %lld %lld %lld 0 0 %lld\n",
              engine_rank, e->tic_step, e->toc_step, e->updates, e->g_updates,
              e->s_updates, cpufreq);
      int count = 0;
      for (int l = 0; l < e->sched.nr_tasks; l++) {
        if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0) {
          fprintf(
              file_thread, " %03i %i %i %i %i %lli %lli %i %i %i %i %lli %i\n",
              engine_rank, e->sched.tasks[l].rid, e->sched.tasks[l].type,
              e->sched.tasks[l].subtype, (e->sched.tasks[l].cj == NULL),
              e->sched.tasks[l].tic, e->sched.tasks[l].toc,
              (e->sched.tasks[l].ci != NULL) ? e->sched.tasks[l].ci->hydro.count
                                             : 0,
              (e->sched.tasks[l].cj != NULL) ? e->sched.tasks[l].cj->hydro.count
                                             : 0,
              (e->sched.tasks[l].ci != NULL) ? e->sched.tasks[l].ci->grav.count
                                             : 0,
              (e->sched.tasks[l].cj != NULL) ? e->sched.tasks[l].cj->grav.count
                                             : 0,
              e->sched.tasks[l].flags, e->sched.tasks[l].sid);
        }
        count++;
      }
      fclose(file_thread);
    }

    /* And we wait for all to synchronize. */
    MPI_Barrier(MPI_COMM_WORLD);
  }

#else
  /* Non-MPI, so just a single engine's worth of tasks to dump. */
  char dumpfile[32];
  snprintf(dumpfile, sizeof(dumpfile), "thread_info-step%d.dat", step);
  FILE *file_thread;
  file_thread = fopen(dumpfile, "w");

  /* Add some information to help with the plots and conversion of ticks to
   * seconds. */
  fprintf(file_thread, " %d %d %d %d %lld %lld %lld %lld %lld %d %lld\n", -2,
          -1, -1, 1, e->tic_step, e->toc_step, e->updates, e->g_updates,
          e->s_updates, 0, cpufreq);
  for (int l = 0; l < e->sched.nr_tasks; l++) {
    if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0) {
      fprintf(
          file_thread, " %i %i %i %i %lli %lli %i %i %i %i %i\n",
          e->sched.tasks[l].rid, e->sched.tasks[l].type,
          e->sched.tasks[l].subtype, (e->sched.tasks[l].cj == NULL),
          e->sched.tasks[l].tic, e->sched.tasks[l].toc,
          (e->sched.tasks[l].ci == NULL) ? 0
                                         : e->sched.tasks[l].ci->hydro.count,
          (e->sched.tasks[l].cj == NULL) ? 0
                                         : e->sched.tasks[l].cj->hydro.count,
          (e->sched.tasks[l].ci == NULL) ? 0 : e->sched.tasks[l].ci->grav.count,
          (e->sched.tasks[l].cj == NULL) ? 0 : e->sched.tasks[l].cj->grav.count,
          e->sched.tasks[l].sid);
    }
  }
  fclose(file_thread);
#endif  // WITH_MPI
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
#endif  // SWIFT_DEBUG_TASKS
}

/**
 * @brief Generate simple statistics about the times used by the tasks of
 *        all the engines and write these into two files, a human readable
 *        version and one intented for inclusion as the fixed costs for
 *        repartitioning.
 *
 * Dumps the human readable information to a file "thread_stats-stepn.dat"
 * where n is the given step value. When running under MPI all the tasks are
 * summed into this single file.
 *
 * The fixed costs file will be called "thread_stats-stepn.h".
 *
 * Fix: the fixed cost was computed as "(int)mean / minmin[0]" — the cast
 * binds to mean only, truncating it *before* the division, so the cost in
 * units of the global minimum task time was systematically wrong. The
 * intended expression is "(int)(mean / minmin[0])".
 *
 * Only active when compiled with SWIFT_DEBUG_TASKS; otherwise a no-op.
 *
 * @param e the #engine
 * @param step the current step.
 */
void task_dump_stats(struct engine *e, int step) {

#ifdef SWIFT_DEBUG_TASKS
  char dumpfile[40];
  snprintf(dumpfile, 40, "thread_stats-step%d.dat", step);

  char costsfile[40];
  snprintf(costsfile, 40, "thread_stats-step%d.h", step);

  /* Need arrays for sum, min and max across all types and subtypes. */
  double sum[task_type_count][task_subtype_count];
  double min[task_type_count][task_subtype_count];
  double max[task_type_count][task_subtype_count];
  int count[task_type_count][task_subtype_count];

  for (int j = 0; j < task_type_count; j++) {
    for (int k = 0; k < task_subtype_count; k++) {
      sum[j][k] = 0.0;
      count[j][k] = 0;
      min[j][k] = DBL_MAX;
      max[j][k] = 0.0;
    }
  }

  /* Single-element arrays so the MPI reductions below can use them
   * directly as buffers. */
  double total[1] = {0.0};
  double minmin[1] = {DBL_MAX};
  for (int l = 0; l < e->sched.nr_tasks; l++) {
    int type = e->sched.tasks[l].type;

    /* Skip implicit tasks, tasks that didn't run and MPI send/recv as these
     * are not interesting (or meaningfully measured). */
    if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0 &&
        type != task_type_send && type != task_type_recv) {
      int subtype = e->sched.tasks[l].subtype;

      double dt = e->sched.tasks[l].toc - e->sched.tasks[l].tic;
      sum[type][subtype] += dt;
      count[type][subtype] += 1;
      if (dt < min[type][subtype]) {
        min[type][subtype] = dt;
      }
      if (dt > max[type][subtype]) {
        max[type][subtype] = dt;
      }
      total[0] += dt;
      if (dt < minmin[0]) minmin[0] = dt;
    }
  }

#ifdef WITH_MPI
  /* Get these from all ranks for output from rank 0. Could wrap these into a
   * single operation. */
  size_t size = task_type_count * task_subtype_count;
  int res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : sum), sum, size,
                       MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task sums");

  res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : count), count, size,
                   MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
  if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task counts");

  res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : min), min, size,
                   MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task minima");

  res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : max), max, size,
                   MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task maxima");

  res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : total), total, 1,
                   MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task total time");

  res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : minmin), minmin, 1,
                   MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task global min");

  if (engine_rank == 0) {
#endif

    FILE *dfile = fopen(dumpfile, "w");
    FILE *cfile = fopen(costsfile, "w");

    fprintf(dfile, "# task ntasks min max sum mean percent fixed_cost\n");
    for (int j = 0; j < task_type_count; j++) {
      const char *taskID = taskID_names[j];
      for (int k = 0; k < task_subtype_count; k++) {
        if (sum[j][k] > 0.0) {
          double mean = sum[j][k] / (double)count[j][k];
          double perc = 100.0 * sum[j][k] / total[0];
          /* Fixed cost in units of the smallest measured task time. */
          int fixed_cost = (int)(mean / minmin[0]);
          fprintf(dfile,
                  "%15s/%-10s %10d %14.2f %14.2f %14.2f %14.2f %14.2f %10d\n",
                  taskID, subtaskID_names[k], count[j][k], min[j][k], max[j][k],
                  sum[j][k], mean, perc, fixed_cost);
          fprintf(cfile, "repartition_costs[%d][%d] = %10d; /* %s/%s */\n", j,
                  k, fixed_cost, taskID, subtaskID_names[k]);
        }
      }
    }
    fclose(dfile);
    fclose(cfile);
#ifdef WITH_MPI
  }
#endif

#endif  // SWIFT_DEBUG_TASKS
}