Commit 997e69f7 authored by James Willis

Added debug statements and barriers.

parent 9a1068a2
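For orientation, the recurring pattern this commit inserts after steps of engine_rebuild() and engine_maketasks() is: synchronise the ranks with a barrier, print a progress marker, and in a few places dump a per-rank memory-use report. Below is a minimal sketch of that checkpoint, assuming the relevant SWIFT headers are included so that message(), memuse_log_dump() and engine_rank are in scope; the step label and buffer size are illustrative, not taken from the commit.

```c
#include <mpi.h>
#include <stdio.h>

/* Sketch of the debug checkpoint added throughout this commit: wait for all
 * ranks, print a progress marker, then write a per-rank memory-use report.
 * message(), memuse_log_dump() and engine_rank are SWIFT helpers (see the
 * hunks below); the "example_step" label is purely illustrative. */
static void debug_checkpoint_sketch(void) {

  /* Make sure every rank has reached this point before reporting. */
  MPI_Barrier(MPI_COMM_WORLD);
  message("example_step finished.");

  /* Memory-use snapshot for this rank and this step. */
  char dumpfile[60];
  snprintf(dumpfile, sizeof(dumpfile),
           "memuse_report-rank%d-example_step.dat", engine_rank);
  memuse_log_dump(dumpfile);
}
```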
......@@ -1161,7 +1161,7 @@ int main(int argc, char *argv[]) {
#else
snprintf(dumpfile, 40, "memuse_report-step%d.dat", 0);
#endif // WITH_MPI
memuse_log_dump(dumpfile);
//memuse_log_dump(dumpfile);
}
#endif
......@@ -1222,7 +1222,7 @@ int main(int argc, char *argv[]) {
#else
snprintf(dumpfile, 40, "memuse_report-step%d.dat", j + 1);
#endif // WITH_MPI
memuse_log_dump(dumpfile);
//memuse_log_dump(dumpfile);
}
#endif
......
......@@ -602,7 +602,7 @@ int main(int argc, char *argv[]) {
engine_fof(&e, /*dump_results=*/1, /*seed_black_holes=*/0);
/* Write output. */
engine_dump_snapshot(&e);
//engine_dump_snapshot(&e);
#ifdef WITH_MPI
MPI_Barrier(MPI_COMM_WORLD);
......
......@@ -2618,6 +2618,13 @@ void engine_rebuild(struct engine *e, int repartitioned,
engine_exchange_cells(e);
#endif
MPI_Barrier(MPI_COMM_WORLD);
message("engine_exchange_cells() finished.");
char dumpfile[40];
snprintf(dumpfile, 40, "memuse_report-rank%d-exchange_cells.dat", engine_rank);
memuse_log_dump(dumpfile);
#ifdef SWIFT_DEBUG_CHECKS
/* Let's check that what we received makes sense */
......
......@@ -2924,6 +2924,8 @@ void engine_maketasks(struct engine *e) {
threadpool_map(&e->threadpool, engine_make_hydroloop_tasks_mapper, NULL,
s->nr_cells, 1, 0, e);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Making hydro tasks took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
......@@ -2936,6 +2938,8 @@ void engine_maketasks(struct engine *e) {
s->nr_cells, 1, 0, e);
}
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Making gravity tasks took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
......@@ -2952,6 +2956,8 @@ void engine_maketasks(struct engine *e) {
/* Split the tasks. */
scheduler_splittasks(sched, /*fof_tasks=*/0);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Splitting tasks took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
......@@ -2968,6 +2974,9 @@ void engine_maketasks(struct engine *e) {
if (e->links != NULL) swift_free("links", e->links);
e->size_links = e->sched.nr_tasks * e->links_per_tasks;
MPI_Barrier(MPI_COMM_WORLD);
message("Freed old list of cell-task links.");
/* Make sure that we have space for more links than last time. */
if (e->size_links < e->nr_links * engine_rebuild_link_alloc_margin)
e->size_links = e->nr_links * engine_rebuild_link_alloc_margin;
......@@ -2977,6 +2986,9 @@ void engine_maketasks(struct engine *e) {
"links", sizeof(struct link) * e->size_links)) == NULL)
error("Failed to allocate cell-task links.");
e->nr_links = 0;
MPI_Barrier(MPI_COMM_WORLD);
message("Allocated new link list.");
tic2 = getticks();
......@@ -2986,6 +2998,8 @@ void engine_maketasks(struct engine *e) {
threadpool_map(&e->threadpool, engine_count_and_link_tasks_mapper,
sched->tasks, sched->nr_tasks, sizeof(struct task), 0, e);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Counting and linking tasks took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
......@@ -3002,6 +3016,8 @@ void engine_maketasks(struct engine *e) {
* pointers. */
threadpool_map(&e->threadpool, cell_set_super_mapper, cells, nr_cells,
sizeof(struct cell), 0, e);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Setting super-pointers took %.3f %s.",
......@@ -3011,6 +3027,9 @@ void engine_maketasks(struct engine *e) {
threadpool_map(&e->threadpool, engine_make_hierarchical_tasks_mapper, cells,
nr_cells, sizeof(struct cell), 0, e);
MPI_Barrier(MPI_COMM_WORLD);
message("Appended hierarchical tasks to each cell.");
tic2 = getticks();
/* Run through the tasks and make force tasks for each density task.
......@@ -3020,6 +3039,8 @@ void engine_maketasks(struct engine *e) {
threadpool_map(&e->threadpool, engine_make_extra_hydroloop_tasks_mapper,
sched->tasks, sched->nr_tasks, sizeof(struct task), 0, e);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Making extra hydroloop tasks took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
......@@ -3029,6 +3050,8 @@ void engine_maketasks(struct engine *e) {
/* Add the dependencies for the gravity stuff */
if (e->policy & (engine_policy_self_gravity | engine_policy_external_gravity))
engine_link_gravity_tasks(e);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Linking gravity tasks took %.3f %s.",
......@@ -3036,6 +3059,10 @@ void engine_maketasks(struct engine *e) {
tic2 = getticks();
char dumpfile[40];
snprintf(dumpfile, 40, "memuse_report-rank%d-linked-gravity-tasks.dat", engine_rank);
memuse_log_dump(dumpfile);
#ifdef WITH_MPI
/* Add the communication tasks if MPI is being used. */
if (e->policy & engine_policy_mpi) {
......@@ -3071,16 +3098,25 @@ void engine_maketasks(struct engine *e) {
/*chunk=*/0, e);
free(send_cell_type_pairs);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Creating send tasks took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
char dumpfile2[40];
snprintf(dumpfile2, 40, "memuse_report-rank%d-after-send-tasks.dat", engine_rank);
memuse_log_dump(dumpfile2);
tic2 = getticks();
/* Exchange the cell tags. */
proxy_tags_exchange(e->proxies, e->nr_proxies, s);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Exchanging cell tags took %.3f %s.",
clocks_from_ticks(getticks() - tic2), clocks_getunit());
......@@ -3111,6 +3147,8 @@ void engine_maketasks(struct engine *e) {
sizeof(struct cell_type_pair),
/*chunk=*/0, e);
free(recv_cell_type_pairs);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Creating recv tasks took %.3f %s.",
......@@ -3119,6 +3157,9 @@ void engine_maketasks(struct engine *e) {
/* Allocate memory for foreign particles */
engine_allocate_foreign_particles(e);
MPI_Barrier(MPI_COMM_WORLD);
message("Allocated memory for foreign particles.");
#endif
......@@ -3141,6 +3182,8 @@ void engine_maketasks(struct engine *e) {
/* Set the unlocks per task. */
scheduler_set_unlocks(sched);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Setting unlocks took %.3f %s.",
......@@ -3150,6 +3193,8 @@ void engine_maketasks(struct engine *e) {
/* Rank the tasks. */
scheduler_ranktasks(sched);
MPI_Barrier(MPI_COMM_WORLD);
if (e->verbose)
message("Ranking the tasks took %.3f %s.",
......@@ -3157,6 +3202,8 @@ void engine_maketasks(struct engine *e) {
/* Weight the tasks. */
scheduler_reweight(sched, e->verbose);
MPI_Barrier(MPI_COMM_WORLD);
/* Set the tasks age. */
e->tasks_age = 0;
......
......@@ -21,15 +21,15 @@
__attribute__((always_inline)) INLINE static void gravity_debug_particle(
const struct gpart* p) {
printf(
"mass=%.3e time_bin=%d\n"
"x=[%.5e,%.5e,%.5e], v_full=[%.5e,%.5e,%.5e], a=[%.5e,%.5e,%.5e]\n",
p->mass, p->time_bin, p->x[0], p->x[1], p->x[2], p->v_full[0],
p->v_full[1], p->v_full[2], p->a_grav[0], p->a_grav[1], p->a_grav[2]);
#ifdef SWIFT_DEBUG_CHECKS
printf("num_interacted=%lld ti_drift=%lld ti_kick=%lld\n", p->num_interacted,
p->ti_drift, p->ti_kick);
#endif
// printf(
// "mass=%.3e time_bin=%d\n"
// "x=[%.5e,%.5e,%.5e], v_full=[%.5e,%.5e,%.5e], a=[%.5e,%.5e,%.5e]\n",
// p->mass, p->time_bin, p->x[0], p->x[1], p->x[2], p->v_full[0],
// p->v_full[1], p->v_full[2], p->a_grav[0], p->a_grav[1], p->a_grav[2]);
//#ifdef SWIFT_DEBUG_CHECKS
// printf("num_interacted=%lld ti_drift=%lld ti_kick=%lld\n", p->num_interacted,
// p->ti_drift, p->ti_kick);
//#endif
}
#endif /* SWIFT_DEFAULT_GRAVITY_DEBUG_H */
......@@ -26,15 +26,12 @@ struct gpart {
which this gpart is linked. */
long long id_or_neg_offset;
/* Particle group ID and size in the FOF. */
size_t group_id, group_size;
/*! Particle position. */
double x[3];
/*! Particle velocity. */
float v_full[3];
/*! Particle acceleration. */
float a_grav[3];
/*! Particle mass. */
float mass;
......@@ -44,8 +41,11 @@ struct gpart {
/*! Type of the #gpart (DM, gas, star, ...) */
enum part_type type;
/* Particle group ID and size in the FOF. */
size_t group_id, group_size;
/*! Particle velocity. */
char v_full[3];
/*! Particle acceleration. */
char a_grav[3];
#ifdef SWIFT_DEBUG_CHECKS
......
......@@ -66,6 +66,9 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
ticks tic2 = getticks();
message("Getting size of outgoing tags...");
MPI_Barrier(MPI_COMM_WORLD);
/* Run through the cells and get the size of the tags that will be sent off.
*/
int count_out = 0;
......@@ -77,6 +80,9 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
}
}
message("Getting size of incoming tags...");
MPI_Barrier(MPI_COMM_WORLD);
/* Run through the proxies and get the count of incoming tags. */
int count_in = 0;
int offset_in[s->nr_cells];
......@@ -87,6 +93,10 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
}
}
message("Allocating %d tags_in and %d tags_out.", count_in, count_out);
MPI_Barrier(MPI_COMM_WORLD);
/* Allocate the tags. */
int *tags_in = NULL;
int *tags_out = NULL;
......
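A side effect worth noting: several of the new barriers sit between a section's work and its existing verbose timer, so the time reported via clocks_from_ticks(getticks() - tic2) now also includes the wait for the slowest rank. A hedged sketch of that timing idiom, using the same SWIFT clock helpers that appear in the hunks above (the label argument is illustrative):

```c
#include <mpi.h>

/* Sketch of the timing idiom used above. ticks, getticks(),
 * clocks_from_ticks(), clocks_getunit() and message() are SWIFT helpers;
 * because the barrier is taken before the clock is read, the reported time
 * also covers the wait for the slowest rank. */
static void report_section_time_sketch(const char *label, ticks tic,
                                       int verbose) {
  MPI_Barrier(MPI_COMM_WORLD); /* the barrier added by this commit */
  if (verbose)
    message("%s took %.3f %s.", label,
            clocks_from_ticks(getticks() - tic), clocks_getunit());
}
```

In the diff above the same lines appear inline, e.g. after scheduler_set_unlocks() or scheduler_ranktasks(), with tic2 = getticks() taken just before the call being timed.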