Commit 2f9a746e authored by Matthieu Schaller

Allocate fewer links. Base the estimate on the number of tasks created at this point.

parent 1c89e79c
1 merge request: !712 Better link size estimates
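In outline, the change replaces the fixed per-cell worst case with an estimate based on the tasks the scheduler has already created, kept above what the previous rebuild actually used. A minimal sketch of that rule, with stand-in names rather than the real engine fields:

#include <stddef.h>

/* Sketch of the new sizing rule: tasks created so far times a per-task
   budget, never below the previous rebuild's consumption padded by a
   safety margin. Names are illustrative, not SWIFT's. */
static size_t estimate_size_links(size_t nr_tasks, size_t links_per_tasks,
                                  size_t nr_links_last, double margin) {
  size_t size = nr_tasks * links_per_tasks;
  const size_t floor = (size_t)(nr_links_last * margin);
  return size < floor ? floor : size;
}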
@@ -68,6 +68,7 @@ Scheduler:
   cell_extra_sparts: 400 # (Optional) Number of spare sparts per top-level allocated at rebuild time for on-the-fly creation.
   max_top_level_cells: 12 # (Optional) Maximal number of top-level cells in any dimension. The number of top-level cells will be the cube of this (this is the default value).
   tasks_per_cell: 0 # (Optional) The average number of tasks per cell. If not large enough the simulation will fail (means guess...).
+  links_per_tasks: 10 # (Optional) The average number of links per tasks (before splitting and communications). Defaults to 10.
   mpi_message_limit: 4096 # (Optional) Maximum MPI task message size to send non-buffered, KB.
 
 # Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
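As a rough back-of-the-envelope for the new parameter (all numbers hypothetical, and assuming struct link holds two pointers), the reservation scales directly with the number of tasks:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical arithmetic behind the links_per_tasks reservation; not
   SWIFT code, just the memory footprint implied by the default of 10. */
int main(void) {
  const size_t nr_tasks = 500000;     /* assumed task count at rebuild time */
  const size_t links_per_tasks = 10;  /* the new default */
  const size_t size_links = nr_tasks * links_per_tasks;
  const size_t link_bytes = 2 * sizeof(void *); /* assumed sizeof(struct link) */
  printf("%zu link slots, ~%.1f MB\n", size_links,
         (double)(size_links * link_bytes) / (1024. * 1024.));
  return 0;
}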
@@ -145,7 +145,9 @@ void engine_addlink(struct engine *e, struct link **l, struct task *t) {
 
   /* Get the next free link. */
   const size_t ind = atomic_inc(&e->nr_links);
   if (ind >= e->size_links) {
-    error("Link table overflow.");
+    error(
+        "Link table overflow. Increase the value of "
+        "`Scheduler:links_per_tasks`.");
   }
   struct link *res = &e->links[ind];
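The overflow above can only happen because links are handed out from a fixed, pre-sized pool through an atomic counter. A simplified sketch of that pattern, using C11 atomics in place of SWIFT's atomic_inc and a plain abort in place of error():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task;
struct link { struct task *t; struct link *next; };

/* Grab the next free slot from a pre-allocated pool; many threads may call
   this concurrently, hence the atomic counter. */
static struct link *pool_get(struct link *pool, atomic_size_t *nr_links,
                             size_t size_links) {
  const size_t ind = atomic_fetch_add(nr_links, 1);
  if (ind >= size_links) {
    /* Same situation as the error above: the pool was sized too small, so
       the remedy is a larger Scheduler:links_per_tasks. */
    fprintf(stderr, "Link table overflow.\n");
    exit(EXIT_FAILURE);
  }
  return &pool[ind];
}

int main(void) {
  struct link pool[4];
  atomic_size_t nr_links = 0;
  struct link *l = pool_get(pool, &nr_links, 4);
  l->t = NULL;
  l->next = NULL;
  return 0;
}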
@@ -4661,6 +4663,10 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
   else
     maxtasks = engine_estimate_nr_tasks(e);
 
+  /* Estimated number of links per tasks */
+  e->links_per_tasks =
+      parser_get_opt_param_int(params, "Scheduler:links_per_tasks", 10);
+
   /* Init the scheduler. */
   scheduler_init(&e->sched, e->s, maxtasks, nr_queues,
                  (e->policy & scheduler_flag_steal), e->nodeID, &e->threadpool);
@@ -99,6 +99,7 @@ enum engine_step_properties {
 #define engine_parts_size_grow 1.05
 #define engine_max_proxy_centre_frac 0.2
 #define engine_redistribute_alloc_margin 1.2
+#define engine_rebuild_link_alloc_margin 1.2
 #define engine_default_energy_file_name "energy"
 #define engine_default_timesteps_file_name "timesteps"
 #define engine_max_parts_per_ghost 1000
@@ -329,6 +330,11 @@ struct engine {
    * of the various task arrays. */
   size_t tasks_per_cell;
 
+  /* Average number of links per tasks. This number is used before
+     the splitting and creation of communications so needs to be large
+     enough. */
+  size_t links_per_tasks;
+
   /* Are we talkative ? */
   int verbose;
@@ -2070,30 +2070,11 @@ void engine_maketasks(struct engine *e) {
 
   /* Free the old list of cell-task links. */
   if (e->links != NULL) free(e->links);
-  e->size_links = 0;
-
-  /* The maximum number of links is the
-   * number of cells (s->tot_cells) times the number of neighbours (26) times
-   * the number of interaction types, so 26 * 2 (density, force) pairs
-   * and 2 (density, force) self.
-   */
-#ifdef EXTRA_HYDRO_LOOP
-  const size_t hydro_tasks_per_cell = 27 * 3;
-#else
-  const size_t hydro_tasks_per_cell = 27 * 2;
-#endif
-  const size_t self_grav_tasks_per_cell = 125;
-  const size_t ext_grav_tasks_per_cell = 1;
-  const size_t stars_tasks_per_cell = 27;
-
-  if (e->policy & engine_policy_hydro)
-    e->size_links += s->tot_cells * hydro_tasks_per_cell;
-  if (e->policy & engine_policy_external_gravity)
-    e->size_links += s->tot_cells * ext_grav_tasks_per_cell;
-  if (e->policy & engine_policy_self_gravity)
-    e->size_links += s->tot_cells * self_grav_tasks_per_cell;
-  if (e->policy & engine_policy_stars)
-    e->size_links += s->tot_cells * stars_tasks_per_cell;
+  e->size_links = e->sched.nr_tasks * e->links_per_tasks;
+
+  /* Make sure that we have space for more links than last time. */
+  if (e->size_links < e->nr_links * engine_rebuild_link_alloc_margin)
+    e->size_links = e->nr_links * engine_rebuild_link_alloc_margin;
 
   /* Allocate the new link list */
   if ((e->links = (struct link *)malloc(sizeof(struct link) * e->size_links)) ==
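To see why this allocates fewer links in practice, one can compare the two estimates on made-up numbers (purely illustrative; real counts depend on the run and the enabled policies):

#include <stddef.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical run with hydro and self-gravity, no extra hydro loop. */
  const size_t tot_cells = 100000;
  const size_t nr_tasks = 600000; /* assumed tasks created at this point */
  /* Old rule: fixed per-cell worst case, 27*2 hydro + 125 self-gravity. */
  const size_t old_links = tot_cells * (27 * 2 + 125);
  /* New rule: 10 links per task already created. */
  const size_t new_links = nr_tasks * 10;
  printf("old estimate: %zu links, new estimate: %zu links\n", old_links,
         new_links);
  return 0;
}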