diff --git a/src/engine.c b/src/engine.c
index 4ac457614b288ec943124a4ab3c08c227c981476..bc08795fab7db6dd327a8499bee936131761e9b9 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -361,7 +361,8 @@ void engine_repartition(struct engine *e) {
     /* Skip un-interesting tasks. */
     if (t->type != task_type_self && t->type != task_type_pair &&
         t->type != task_type_sub && t->type != task_type_ghost &&
-        t->type != task_type_kick1 && t->type != task_type_kick)
+        t->type != task_type_drift && t->type != task_type_kick &&
+        t->type != task_type_init)
       continue;
 
     /* Get the task weight. */
@@ -391,8 +392,8 @@ void engine_repartition(struct engine *e) {
     cid = ci - cells;
 
     /* Different weights for different tasks. */
-    if (t->type == task_type_ghost || t->type == task_type_kick1 ||
-        t->type == task_type_kick) {
+    if (t->type == task_type_ghost || t->type == task_type_drift ||
+        t->type == task_type_kick || t->type == task_type_init) {
 
       /* Particle updates add only to vertex weight. */
       weights_v[cid] += w;
@@ -1800,16 +1801,16 @@ void engine_step(struct engine *e) {
 /* Aggregate the data from the different nodes. */
 #ifdef WITH_MPI
   double in[3], out[3];
-  out[0] = dt_min;
+  out[0] = t_end_min;
   if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD) !=
       MPI_SUCCESS)
     error("Failed to aggregate dt_min.");
-  dt_min = in[0];
-  out[0] = dt_max;
+  t_end_min = in[0];
+  out[0] = t_end_max;
   if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD) !=
       MPI_SUCCESS)
     error("Failed to aggregate dt_max.");
-  dt_max = in[0];
+  t_end_max = in[0];
   out[0] = count;
   out[1] = ekin;
   out[2] = epot;
@@ -2052,9 +2053,11 @@ void engine_split(struct engine *e, int *grid) {
  * @param dt The initial time step to use.
  * @param nr_threads The number of threads to spawn.
  * @param nr_queues The number of task queues to create.
- * @param nr_nodes The number of MPI ranks
- * @param nodeID The MPI rank of this node
+ * @param nr_nodes The number of MPI ranks.
+ * @param nodeID The MPI rank of this node.
  * @param policy The queueing policy to use.
+ * @param timeBegin Time at the beginning of the simulation.
+ * @param timeEnd Time at the end of the simulation.
  */
 
 void engine_init(struct engine *e, struct space *s, float dt, int nr_threads,
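
Aside on the engine_step() hunk above: the patch switches the MPI aggregation from the old per-particle dt_min/dt_max to the global t_end_min/t_end_max bounds. The standalone sketch below (not part of the patch; the per-rank values are made-up placeholders) shows the same MPI_Allreduce min/max reduction pattern in isolation:

/* Minimal sketch of the min/max reduction used in engine_step().
 * Compile with mpicc and run under mpirun; the local values are
 * placeholders, not SWIFT data. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* Pretend each rank has computed bounds on its particles' end-of-step times. */
  double t_end_min = 1.0 + 0.1 * rank;
  double t_end_max = 2.0 + 0.1 * rank;

  /* Global minimum of the per-rank minima. */
  if (MPI_Allreduce(MPI_IN_PLACE, &t_end_min, 1, MPI_DOUBLE, MPI_MIN,
                    MPI_COMM_WORLD) != MPI_SUCCESS)
    fprintf(stderr, "Failed to aggregate t_end_min.\n");

  /* Global maximum of the per-rank maxima. */
  if (MPI_Allreduce(MPI_IN_PLACE, &t_end_max, 1, MPI_DOUBLE, MPI_MAX,
                    MPI_COMM_WORLD) != MPI_SUCCESS)
    fprintf(stderr, "Failed to aggregate t_end_max.\n");

  if (rank == 0)
    printf("global t_end_min = %g, t_end_max = %g\n", t_end_min, t_end_max);

  MPI_Finalize();
  return 0;
}
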
diff --git a/src/parallel_io.c b/src/parallel_io.c
index 8ffd59a591f4f035d912156be08c33ef90b5d8ee..63c9c26b08e497bc809304f8c46942babb4336a9 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -274,8 +274,8 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
             COMPULSORY);
   readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, id,
             COMPULSORY);
-  readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt,
-            OPTIONAL);
+  /* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, */
+  /*           OPTIONAL); */
   readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
             OPTIONAL);
   readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
@@ -582,8 +582,8 @@ void write_output_parallel(struct engine* e, struct UnitSystem* us,
              N_total, mpi_rank, offset, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
   writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
              N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, N_total,
-             mpi_rank, offset, dt, us, UNIT_CONV_TIME);
+  /* writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, N_total, */
+  /*            mpi_rank, offset, dt, us, UNIT_CONV_TIME); */
   writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
              N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
   writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, N_total,
diff --git a/src/serial_io.c b/src/serial_io.c
index e3c5fd6de5835bca33398b0c4e14e584c7ab804c..c98135d24055c1a69bf836172cd3b2518703bef7 100644
--- a/src/serial_io.c
+++ b/src/serial_io.c
@@ -314,8 +314,8 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N,
                 u, COMPULSORY);
       readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset,
                 id, COMPULSORY);
-      readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt,
-                OPTIONAL);
+      /* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, */
+      /*           OPTIONAL); */
       readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
                 OPTIONAL);
       readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
@@ -627,8 +627,8 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank,
                  us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
     prepareArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N_total, 1,
                  us, UNIT_CONV_NO_UNITS);
-    prepareArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N_total, 1, us,
-                 UNIT_CONV_TIME);
+    /* prepareArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N_total, 1, us, */
+    /*              UNIT_CONV_TIME); */
     prepareArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N_total, 3,
                  us, UNIT_CONV_ACCELERATION);
     prepareArray(h_grp, fileName, xmfFile, "Density", FLOAT, N_total, 1, us,