Skip to content
Snippets Groups Projects
Commit 05795237 authored by Matthieu Schaller's avatar Matthieu Schaller
Browse files

Merge branch 'newTimeDefinition' of gitlab.cosma.dur.ac.uk:swift/swiftsim into newTimeDefinition

parents b3a684df 43719f43
Branches
Tags
2 merge requests!136Master,!79First version of the multiple time-stepping
...@@ -361,7 +361,8 @@ void engine_repartition(struct engine *e) { ...@@ -361,7 +361,8 @@ void engine_repartition(struct engine *e) {
/* Skip un-interesting tasks. */ /* Skip un-interesting tasks. */
if (t->type != task_type_self && t->type != task_type_pair && if (t->type != task_type_self && t->type != task_type_pair &&
t->type != task_type_sub && t->type != task_type_ghost && t->type != task_type_sub && t->type != task_type_ghost &&
t->type != task_type_kick1 && t->type != task_type_kick) t->type != task_type_drift && t->type != task_type_kick &&
t->type != task_type_init)
continue; continue;
/* Get the task weight. */ /* Get the task weight. */
...@@ -391,8 +392,8 @@ void engine_repartition(struct engine *e) { ...@@ -391,8 +392,8 @@ void engine_repartition(struct engine *e) {
cid = ci - cells; cid = ci - cells;
/* Different weights for different tasks. */ /* Different weights for different tasks. */
if (t->type == task_type_ghost || t->type == task_type_kick1 || if (t->type == task_type_ghost || t->type == task_type_drift ||
t->type == task_type_kick) { t->type == task_type_kick || t->type == task_type_init) {
/* Particle updates add only to vertex weight. */ /* Particle updates add only to vertex weight. */
weights_v[cid] += w; weights_v[cid] += w;
...@@ -1800,16 +1801,16 @@ void engine_step(struct engine *e) { ...@@ -1800,16 +1801,16 @@ void engine_step(struct engine *e) {
/* Aggregate the data from the different nodes. */ /* Aggregate the data from the different nodes. */
#ifdef WITH_MPI #ifdef WITH_MPI
double in[3], out[3]; double in[3], out[3];
out[0] = dt_min; out[0] = t_end_min;
if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD) != if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD) !=
MPI_SUCCESS) MPI_SUCCESS)
error("Failed to aggregate dt_min."); error("Failed to aggregate dt_min.");
dt_min = in[0]; t_end_min = in[0];
out[0] = dt_max; out[0] = t_end_max;
if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD) != if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD) !=
MPI_SUCCESS) MPI_SUCCESS)
error("Failed to aggregate dt_max."); error("Failed to aggregate dt_max.");
dt_max = in[0]; t_end_max = in[0];
out[0] = count; out[0] = count;
out[1] = ekin; out[1] = ekin;
out[2] = epot; out[2] = epot;
...@@ -2052,9 +2053,11 @@ void engine_split(struct engine *e, int *grid) { ...@@ -2052,9 +2053,11 @@ void engine_split(struct engine *e, int *grid) {
* @param dt The initial time step to use. * @param dt The initial time step to use.
* @param nr_threads The number of threads to spawn. * @param nr_threads The number of threads to spawn.
* @param nr_queues The number of task queues to create. * @param nr_queues The number of task queues to create.
* @param nr_nodes The number of MPI ranks * @param nr_nodes The number of MPI ranks.
* @param nodeID The MPI rank of this node * @param nodeID The MPI rank of this node.
* @param policy The queueing policy to use. * @param policy The queueing policy to use.
* @param timeBegin Time at the beginning of the simulation.
* @param timeEnd Time at the end of the simulation.
*/ */
void engine_init(struct engine *e, struct space *s, float dt, int nr_threads, void engine_init(struct engine *e, struct space *s, float dt, int nr_threads,
......
...@@ -274,8 +274,8 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts, ...@@ -274,8 +274,8 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
COMPULSORY); COMPULSORY);
readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, id, readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, id,
COMPULSORY); COMPULSORY);
readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, /* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, */
OPTIONAL); /* OPTIONAL); */
readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a, readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
OPTIONAL); OPTIONAL);
readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho, readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
...@@ -582,8 +582,8 @@ void write_output_parallel(struct engine* e, struct UnitSystem* us, ...@@ -582,8 +582,8 @@ void write_output_parallel(struct engine* e, struct UnitSystem* us,
N_total, mpi_rank, offset, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS); N_total, mpi_rank, offset, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts, writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS); N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, N_total, /* writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, N_total, */
mpi_rank, offset, dt, us, UNIT_CONV_TIME); /* mpi_rank, offset, dt, us, UNIT_CONV_TIME); */
writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION); N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, N_total, writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, N_total,
......
...@@ -314,8 +314,8 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N, ...@@ -314,8 +314,8 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N,
u, COMPULSORY); u, COMPULSORY);
readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset,
id, COMPULSORY); id, COMPULSORY);
readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, /* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, */
OPTIONAL); /* OPTIONAL); */
readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a, readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
OPTIONAL); OPTIONAL);
readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho, readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
...@@ -627,8 +627,8 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank, ...@@ -627,8 +627,8 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank,
us, UNIT_CONV_ENERGY_PER_UNIT_MASS); us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
prepareArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N_total, 1, prepareArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N_total, 1,
us, UNIT_CONV_NO_UNITS); us, UNIT_CONV_NO_UNITS);
prepareArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N_total, 1, us, /* prepareArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N_total, 1, us, */
UNIT_CONV_TIME); /* UNIT_CONV_TIME); */
prepareArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N_total, 3, prepareArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N_total, 3,
us, UNIT_CONV_ACCELERATION); us, UNIT_CONV_ACCELERATION);
prepareArray(h_grp, fileName, xmfFile, "Density", FLOAT, N_total, 1, us, prepareArray(h_grp, fileName, xmfFile, "Density", FLOAT, N_total, 1, us,
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment