Commit 2f9ebf13 authored by Stefan Arridge
Browse files

Added external potential energy calculation to statistics.c

parents 83b901b1 4128aff0
......@@ -120,6 +120,10 @@ int main(int argc, char *argv[]) {
error("MPI_Comm_size failed with error %i.", res);
if ((res = MPI_Comm_rank(MPI_COMM_WORLD, &myrank)) != MPI_SUCCESS)
error("Call to MPI_Comm_rank failed with error %i.", res);
/* Make sure messages are stamped with the correct rank. */
engine_rank = myrank;
if ((res = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)) !=
MPI_SUCCESS)
error("Call to MPI_Comm_set_errhandler failed with error %i.", res);
......@@ -131,6 +135,7 @@ int main(int argc, char *argv[]) {
message("WARNING: you should use the non-MPI version of this program.");
}
fflush(stdout);
#endif
/* Let's pin the main thread */
......@@ -572,8 +577,8 @@ int main(int argc, char *argv[]) {
fprintf(file_thread, " %03i 0 0 0 0 %lli %lli 0 0 0 0 %lli\n", myrank,
e.tic_step, e.toc_step, cpufreq);
int count = 0;
for (int l = 0; l < e.sched.nr_tasks; l++)
if (!e.sched.tasks[l].skip && !e.sched.tasks[l].implicit) {
for (int l = 0; l < e.sched.nr_tasks; l++) {
if (!e.sched.tasks[l].implicit && e.sched.tasks[l].toc != 0) {
fprintf(
file_thread, " %03i %i %i %i %i %lli %lli %i %i %i %i %i\n",
myrank, e.sched.tasks[l].rid, e.sched.tasks[l].type,
......@@ -588,11 +593,10 @@ int main(int argc, char *argv[]) {
(e.sched.tasks[l].cj != NULL) ? e.sched.tasks[l].cj->gcount
: 0,
e.sched.tasks[l].flags);
fflush(stdout);
count++;
}
message("rank %d counted %d tasks", myrank, count);
fflush(stdout);
count++;
}
fclose(file_thread);
}
......@@ -608,8 +612,8 @@ int main(int argc, char *argv[]) {
/* Add some information to help with the plots */
fprintf(file_thread, " %i %i %i %i %lli %lli %i %i %i %lli\n", -2, -1, -1,
1, e.tic_step, e.toc_step, 0, 0, 0, cpufreq);
for (int l = 0; l < e.sched.nr_tasks; l++)
if (!e.sched.tasks[l].skip && !e.sched.tasks[l].implicit)
for (int l = 0; l < e.sched.nr_tasks; l++) {
if (!e.sched.tasks[l].implicit && e.sched.tasks[l].toc != 0) {
fprintf(
file_thread, " %i %i %i %i %lli %lli %i %i %i %i\n",
e.sched.tasks[l].rid, e.sched.tasks[l].type,
......@@ -619,6 +623,8 @@ int main(int argc, char *argv[]) {
(e.sched.tasks[l].cj == NULL) ? 0 : e.sched.tasks[l].cj->count,
(e.sched.tasks[l].ci == NULL) ? 0 : e.sched.tasks[l].ci->gcount,
(e.sched.tasks[l].cj == NULL) ? 0 : e.sched.tasks[l].cj->gcount);
}
}
fclose(file_thread);
#endif
}
......
......@@ -44,7 +44,7 @@ include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \
common_io.h single_io.h multipole.h map.h tools.h partition.h clocks.h parser.h \
physical_constants.h physical_constants_cgs.h potential.h version.h \
hydro_properties.h riemann.h threadpool.h cooling.h cooling_struct.h sourceterms.h \
sourceterms_struct.h
sourceterms_struct.h statistics.h
# Common source files
......@@ -53,7 +53,8 @@ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
units.c common_io.c single_io.c multipole.c version.c map.c \
kernel_hydro.c tools.c part.c partition.c clocks.c parser.c \
physical_constants.c potential.c hydro_properties.c \
runner_doiact_fft.c threadpool.c cooling.c sourceterms.c
runner_doiact_fft.c threadpool.c cooling.c sourceterms.c \
statistics.c
# Include files for distribution, not installation.
nobase_noinst_HEADERS = align.h approx_math.h atomic.h cycle.h error.h inline.h kernel_hydro.h kernel_gravity.h \
......
......@@ -52,6 +52,7 @@
#include "gravity.h"
#include "hydro.h"
#include "hydro_properties.h"
#include "scheduler.h"
#include "space.h"
#include "timers.h"
......@@ -763,6 +764,9 @@ void cell_clean_links(struct cell *c, void *data) {
c->force = NULL;
c->nr_force = 0;
c->grav = NULL;
c->nr_grav = 0;
}
/**
......@@ -893,6 +897,120 @@ int cell_is_drift_needed(struct cell *c, int ti_current) {
return 0;
}
/**
 * @brief Un-skips all the tasks associated with a given cell and checks
 * if the space needs to be rebuilt.
 *
 * Activates the cell's density tasks (plus the sort tasks and, over MPI,
 * the matching send/recv tasks they depend on), then the gradient, force,
 * gravity and per-cell single tasks (ghosts, init, kick, cooling,
 * source terms).
 *
 * @param c the #cell.
 * @param s the #scheduler.
 *
 * @return 1 If the space needs rebuilding. 0 otherwise.
 */
int cell_unskip_tasks(struct cell *c, struct scheduler *s) {

  /* Un-skip the density tasks involved with this cell. */
  for (struct link *l = c->density; l != NULL; l = l->next) {
    struct task *t = l->t;
    const struct cell *ci = t->ci;
    const struct cell *cj = t->cj;
    scheduler_activate(s, t);

    /* Set the correct sorting flags: a pair task needs both cells sorted
       along the axis encoded in t->flags; activate the sort tasks for any
       cell not yet sorted along that axis. */
    if (t->type == task_type_pair) {
      if (!(ci->sorted & (1 << t->flags))) {
        atomic_or(&ci->sorts->flags, (1 << t->flags));
        scheduler_activate(s, ci->sorts);
      }
      if (!(cj->sorted & (1 << t->flags))) {
        atomic_or(&cj->sorts->flags, (1 << t->flags));
        scheduler_activate(s, cj->sorts);
      }
    }

    /* Check whether there was too much particle motion */
    if (t->type == task_type_pair || t->type == task_type_sub_pair) {
      /* Early exit: particles moved too far relative to the cell size, so
         the whole space must be rebuilt; no point activating more tasks. */
      if (t->tight &&
          (max(ci->h_max, cj->h_max) + ci->dx_max + cj->dx_max > cj->dmin ||
           ci->dx_max > space_maxreldx * ci->h_max ||
           cj->dx_max > space_maxreldx * cj->h_max))
        return 1;

#ifdef WITH_MPI
      /* Activate the send/recv flags. Exactly one of ci/cj can be foreign:
         activate the recvs on the foreign cell and find the send tasks on
         the local cell that target the foreign cell's node. */
      if (ci->nodeID != engine_rank) {

        /* Activate the tasks to recv foreign cell ci's data. */
        scheduler_activate(s, ci->recv_xv);
        scheduler_activate(s, ci->recv_rho);
        scheduler_activate(s, ci->recv_ti);

        /* Look for the local cell cj's send tasks.
           NOTE(review): this `l` shadows the outer loop variable of the
           same name; harmless here but worth renaming. */
        struct link *l = NULL;
        for (l = cj->send_xv; l != NULL && l->t->cj->nodeID != ci->nodeID;
             l = l->next)
          ;
        if (l == NULL) error("Missing link to send_xv task.");
        scheduler_activate(s, l->t);

        for (l = cj->send_rho; l != NULL && l->t->cj->nodeID != ci->nodeID;
             l = l->next)
          ;
        if (l == NULL) error("Missing link to send_rho task.");
        scheduler_activate(s, l->t);

        for (l = cj->send_ti; l != NULL && l->t->cj->nodeID != ci->nodeID;
             l = l->next)
          ;
        if (l == NULL) error("Missing link to send_ti task.");
        scheduler_activate(s, l->t);

      } else if (cj->nodeID != engine_rank) {

        /* Activate the tasks to recv foreign cell cj's data. */
        scheduler_activate(s, cj->recv_xv);
        scheduler_activate(s, cj->recv_rho);
        scheduler_activate(s, cj->recv_ti);

        /* Look for the local cell ci's send tasks (same shadowing note as
           above). */
        struct link *l = NULL;
        for (l = ci->send_xv; l != NULL && l->t->cj->nodeID != cj->nodeID;
             l = l->next)
          ;
        if (l == NULL) error("Missing link to send_xv task.");
        scheduler_activate(s, l->t);

        for (l = ci->send_rho; l != NULL && l->t->cj->nodeID != cj->nodeID;
             l = l->next)
          ;
        if (l == NULL) error("Missing link to send_rho task.");
        scheduler_activate(s, l->t);

        for (l = ci->send_ti; l != NULL && l->t->cj->nodeID != cj->nodeID;
             l = l->next)
          ;
        if (l == NULL) error("Missing link to send_ti task.");
        scheduler_activate(s, l->t);
      }
#endif
    }
  }

  /* Unskip all the other task types. */
  for (struct link *l = c->gradient; l != NULL; l = l->next)
    scheduler_activate(s, l->t);
  for (struct link *l = c->force; l != NULL; l = l->next)
    scheduler_activate(s, l->t);
  for (struct link *l = c->grav; l != NULL; l = l->next)
    scheduler_activate(s, l->t);
  if (c->extra_ghost != NULL) scheduler_activate(s, c->extra_ghost);
  if (c->ghost != NULL) scheduler_activate(s, c->ghost);
  if (c->init != NULL) scheduler_activate(s, c->init);
  if (c->kick != NULL) scheduler_activate(s, c->kick);
  if (c->cooling != NULL) scheduler_activate(s, c->cooling);
  if (c->sourceterms != NULL) scheduler_activate(s, c->sourceterms);

  return 0;
}
/**
* @brief Set the super-cell pointers for all cells in a hierarchy.
*
......
......@@ -39,6 +39,7 @@
/* Avoid cyclic inclusions */
struct space;
struct scheduler;
/* Max tag size set to 2^29 to take into account some MPI implementations
* that use 2^31 as the upper bound on MPI tags and the fact that
......@@ -158,9 +159,6 @@ struct cell {
/* Tasks for gravity tree. */
struct task *grav_up, *grav_down;
/* Task for external gravity */
struct task *grav_external;
/* Task for cooling */
struct task *cooling;
......@@ -179,12 +177,6 @@ struct cell {
/* ID of the previous owner, e.g. runner. */
int owner;
/* Momentum of particles in cell. */
double mom[3], ang_mom[3];
/* Mass, potential, internal and kinetic energy of particles in this cell. */
double mass, e_pot_self, e_pot_ext, e_pot, e_int, e_kin, e_rad, entropy;
/* Number of particles updated in this cell. */
int updated, g_updated;
......@@ -236,6 +228,7 @@ int cell_are_neighbours(const struct cell *restrict ci,
void cell_check_multipole(struct cell *c, void *data);
void cell_clean(struct cell *c);
int cell_is_drift_needed(struct cell *c, int ti_current);
int cell_unskip_tasks(struct cell *c, struct scheduler *s);
void cell_set_super(struct cell *c, struct cell *super);
#endif /* SWIFT_CELL_H */
......@@ -63,6 +63,7 @@
#include "runner.h"
#include "serial_io.h"
#include "single_io.h"
#include "statistics.h"
#include "timers.h"
#include "tools.h"
#include "units.h"
......@@ -1249,7 +1250,7 @@ void engine_make_external_gravity_tasks(struct engine *e) {
/* Is that neighbour local ? */
if (ci->nodeID != nodeID) continue;
/* If the cells is local build a self-interaction */
/* If the cell is local, build a self-interaction */
scheduler_addtask(sched, task_type_self, task_subtype_external_grav, 0, 0,
ci, NULL, 0);
}
......@@ -1330,59 +1331,101 @@ void engine_make_hydroloop_tasks(struct engine *e) {
/**
* @brief Counts the tasks associated with one cell and constructs the links
*
* For each hydrodynamic task, construct the links with the corresponding cell.
* Similarly, construct the dependencies for all the sorting tasks.
* For each hydrodynamic and gravity task, construct the links with
* the corresponding cell. Similarly, construct the dependencies for
* all the sorting tasks.
*
* @param e The #engine.
*/
void engine_count_and_link_tasks(struct engine *e) {
struct scheduler *sched = &e->sched;
for (int ind = 0; ind < sched->nr_tasks; ind++) {
struct scheduler *const sched = &e->sched;
const int nr_tasks = sched->nr_tasks;
struct task *t = &sched->tasks[ind];
for (int ind = 0; ind < nr_tasks; ind++) {
if (t->skip) continue;
struct task *const t = &sched->tasks[ind];
struct cell *const ci = t->ci;
struct cell *const cj = t->cj;
/* Link sort tasks together. */
if (t->type == task_type_sort && t->ci->split)
if (t->type == task_type_sort && ci->split)
for (int j = 0; j < 8; j++)
if (t->ci->progeny[j] != NULL && t->ci->progeny[j]->sorts != NULL) {
t->ci->progeny[j]->sorts->skip = 0;
scheduler_addunlock(sched, t->ci->progeny[j]->sorts, t);
if (ci->progeny[j] != NULL && ci->progeny[j]->sorts != NULL) {
scheduler_addunlock(sched, ci->progeny[j]->sorts, t);
}
/* Link density tasks to cells. */
/* Link self tasks to cells. */
if (t->type == task_type_self) {
atomic_inc(&t->ci->nr_tasks);
atomic_inc(&ci->nr_tasks);
if (t->subtype == task_subtype_density) {
engine_addlink(e, &t->ci->density, t);
atomic_inc(&t->ci->nr_density);
engine_addlink(e, &ci->density, t);
atomic_inc(&ci->nr_density);
}
if (t->subtype == task_subtype_grav) {
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
}
if (t->subtype == task_subtype_external_grav) {
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
}
/* Link pair tasks to cells. */
} else if (t->type == task_type_pair) {
atomic_inc(&t->ci->nr_tasks);
atomic_inc(&t->cj->nr_tasks);
atomic_inc(&ci->nr_tasks);
atomic_inc(&cj->nr_tasks);
if (t->subtype == task_subtype_density) {
engine_addlink(e, &t->ci->density, t);
atomic_inc(&t->ci->nr_density);
engine_addlink(e, &t->cj->density, t);
atomic_inc(&t->cj->nr_density);
engine_addlink(e, &ci->density, t);
atomic_inc(&ci->nr_density);
engine_addlink(e, &cj->density, t);
atomic_inc(&cj->nr_density);
}
if (t->subtype == task_subtype_grav) {
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
engine_addlink(e, &cj->grav, t);
atomic_inc(&cj->nr_grav);
}
/* Link sub-self tasks to cells. */
} else if (t->type == task_type_sub_self) {
atomic_inc(&t->ci->nr_tasks);
atomic_inc(&ci->nr_tasks);
if (t->subtype == task_subtype_density) {
engine_addlink(e, &t->ci->density, t);
atomic_inc(&t->ci->nr_density);
engine_addlink(e, &ci->density, t);
atomic_inc(&ci->nr_density);
}
if (t->subtype == task_subtype_grav) {
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
}
if (t->subtype == task_subtype_external_grav) {
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
}
/* Link sub-pair tasks to cells. */
} else if (t->type == task_type_sub_pair) {
atomic_inc(&t->ci->nr_tasks);
atomic_inc(&t->cj->nr_tasks);
atomic_inc(&ci->nr_tasks);
atomic_inc(&cj->nr_tasks);
if (t->subtype == task_subtype_density) {
engine_addlink(e, &t->ci->density, t);
atomic_inc(&t->ci->nr_density);
engine_addlink(e, &t->cj->density, t);
atomic_inc(&t->cj->nr_density);
engine_addlink(e, &ci->density, t);
atomic_inc(&ci->nr_density);
engine_addlink(e, &cj->density, t);
atomic_inc(&cj->nr_density);
}
if (t->subtype == task_subtype_grav) {
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
engine_addlink(e, &cj->grav, t);
atomic_inc(&cj->nr_grav);
}
if (t->subtype == task_subtype_external_grav) {
error("Found a sub-pair/external-gravity task...");
engine_addlink(e, &ci->grav, t);
atomic_inc(&ci->nr_grav);
engine_addlink(e, &cj->grav, t);
atomic_inc(&cj->nr_grav);
}
}
}
......@@ -1449,9 +1492,6 @@ void engine_link_gravity_tasks(struct engine *e) {
/* Get a pointer to the task. */
struct task *t = &sched->tasks[k];
/* Skip? */
if (t->skip) continue;
/* Multipole construction */
if (t->type == task_type_grav_up) {
scheduler_addunlock(sched, t, gather);
......@@ -1599,9 +1639,6 @@ void engine_make_extra_hydroloop_tasks(struct engine *e) {
for (int ind = 0; ind < nr_tasks; ind++) {
struct task *t = &sched->tasks[ind];
/* Skip? */
if (t->skip) continue;
/* Self-interaction? */
if (t->type == task_type_self && t->subtype == task_subtype_density) {
......@@ -1874,14 +1911,16 @@ void engine_maketasks(struct engine *e) {
/* Split the tasks. */
scheduler_splittasks(sched);
/* Allocate the list of cell-task links. The maximum number of links
is the number of cells (s->tot_cells) times the number of neighbours (27)
times the number of interaction types (2, density and force). */
/* Allocate the list of cell-task links. The maximum number of links is the
* number of cells (s->tot_cells) times the number of neighbours (26) times
* the number of interaction types, so 26 * 3 (density, force, grav) pairs
* and 4 (density, force, grav, ext_grav) self.
*/
if (e->links != NULL) free(e->links);
#ifdef EXTRA_HYDRO_LOOP
e->size_links = s->tot_cells * 27 * 3;
e->size_links = s->tot_cells * (26 * 4 + 4);
#else
e->size_links = s->tot_cells * 27 * 2;
e->size_links = s->tot_cells * (26 * 3 + 4);
#endif
if ((e->links = malloc(sizeof(struct link) * e->size_links)) == NULL)
error("Failed to allocate cell-task links.");
......@@ -1956,7 +1995,7 @@ void engine_maketasks(struct engine *e) {
}
/**
* @brief Mark tasks to be skipped and set the sort flags accordingly.
* @brief Mark tasks to be un-skipped and set the sort flags accordingly.
* Threadpool mapper function for fixdt version.
*
* @param map_data pointer to the tasks
......@@ -1967,11 +2006,15 @@ void engine_marktasks_fixdt_mapper(void *map_data, int num_elements,
void *extra_data) {
/* Unpack the arguments. */
struct task *tasks = (struct task *)map_data;
int *rebuild_space = (int *)extra_data;
size_t *rebuild_space = &((size_t *)extra_data)[0];
struct scheduler *s = (struct scheduler *)(((size_t *)extra_data)[1]);
for (int ind = 0; ind < num_elements; ind++) {
struct task *t = &tasks[ind];
/* All tasks are unskipped (we skip by default). */
scheduler_activate(s, t);
/* Pair? */
if (t->type == task_type_pair || t->type == task_type_sub_pair) {
......@@ -1998,28 +2041,7 @@ void engine_marktasks_fixdt_mapper(void *map_data, int num_elements,
}
/**
* @brief Mark any sort tasks as initially skipped.
* Threadpool mapper function.
*
* @param map_data pointer to the tasks
* @param num_elements number of tasks
* @param extra_data unused
*/
void engine_marktasks_sorts_mapper(void *map_data, int num_elements,
void *extra_data) {
/* Unpack the arguments. */
struct task *tasks = (struct task *)map_data;
for (int ind = 0; ind < num_elements; ind++) {
struct task *t = &tasks[ind];
if (t->type == task_type_sort) {
t->flags = 0;
t->skip = 1;
}
}
}
/**
* @brief Mark tasks to be skipped and set the sort flags accordingly.
* @brief Mark tasks to be un-skipped and set the sort flags accordingly.
* Threadpool mapper function.
*
* @param map_data pointer to the tasks
......@@ -2030,18 +2052,20 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
void *extra_data) {
/* Unpack the arguments. */
struct task *tasks = (struct task *)map_data;
const int ti_end = ((int *)extra_data)[0];
int *rebuild_space = &((int *)extra_data)[1];
const int ti_end = ((size_t *)extra_data)[0];
size_t *rebuild_space = &((size_t *)extra_data)[1];
struct scheduler *s = (struct scheduler *)(((size_t *)extra_data)[2]);
for (int ind = 0; ind < num_elements; ind++) {
struct task *t = &tasks[ind];
/* Single-cell task? */
if (t->type == task_type_self || t->type == task_type_ghost ||
t->type == task_type_sub_self) {
t->type == task_type_extra_ghost || t->type == task_type_cooling ||
t->type == task_type_sourceterms || t->type == task_type_sub_self) {
/* Set this task's skip. */
t->skip = (t->ci->ti_end_min > ti_end);
if (t->ci->ti_end_min <= ti_end) scheduler_activate(s, t);
}
/* Pair? */
......@@ -2058,19 +2082,24 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
cj->dx_max > space_maxreldx * cj->h_max))
*rebuild_space = 1;
/* Set this task's skip. */
if ((t->skip = (ci->ti_end_min > ti_end && cj->ti_end_min > ti_end)) == 1)
/* Set this task's skip, otherwise nothing to do. */
if (ci->ti_end_min <= ti_end || cj->ti_end_min <= ti_end)
scheduler_activate(s, t);
else
continue;
/* If this is not a density task, we don't have to do any of the below. */
if (t->subtype != task_subtype_density) continue;
/* Set the sort flags. */
if (t->type == task_type_pair && t->subtype != task_subtype_grav) {
if (t->type == task_type_pair) {
if (!(ci->sorted & (1 << t->flags))) {
atomic_or(&ci->sorts->flags, (1 << t->flags));
ci->sorts->skip = 0;
scheduler_activate(s, ci->sorts);
}
if (!(cj->sorted & (1 << t->flags))) {
atomic_or(&cj->sorts->flags, (1 << t->flags));
cj->sorts->skip = 0;
scheduler_activate(s, cj->sorts);
}
}
......@@ -2080,9 +2109,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
if (ci->nodeID != engine_rank) {
/* Activate the tasks to recv foreign cell ci's data. */
ci->recv_xv->skip = 0;
ci->recv_rho->skip = 0;
ci->recv_ti->skip = 0;
scheduler_activate(s, ci->recv_xv);
scheduler_activate(s, ci->recv_rho);
scheduler_activate(s, ci->recv_ti);
/* Look for the local cell cj's send tasks. */
struct link *l = NULL;
......@@ -2090,45 +2119,46 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
l = l->next)
;
if (l == NULL) error("Missing link to send_xv task.");
l->t->skip = 0;
scheduler_activate(s, l->t);
for (l = cj->send_rho; l != NULL && l->t->cj->nodeID != ci->nodeID;
l = l->next)
;
if (l == NULL) error("Missing link to send_rho task.");
l->t->skip = 0;
scheduler_activate(s, l->t);
for (l = cj->send_ti; l != NULL && l->t->cj->nodeID != ci->nodeID;
l = l->next)
;
if (l == NULL) error("Missing link to send_ti task.");
l->t->skip = 0;
scheduler_activate(s, l->t);
} else if (cj->nodeID != engine_rank) {
/* Activate the tasks to recv foreign cell cj's data. */
cj->recv_xv->skip = 0;
cj->recv_rho->skip = 0;
cj->recv_ti->skip = 0;
scheduler_activate(s, cj->recv_xv);
scheduler_activate(s, cj->recv_rho);
scheduler_activate(s, cj->recv_ti);
/* Look for the local cell ci's send tasks. */
struct link *l = NULL;
for (l = ci->send_xv; l != NULL && l->t->cj->nodeID != cj->nodeID;
l = l->next)
;
if (l == NULL) error("Missing link to send_xv task.");