diff --git a/examples/Logger/SimpleOrbits/README b/examples/Logger/SimpleOrbits/README
new file mode 100644
index 0000000000000000000000000000000000000000..70ab1370584c6f45f776f4ccf554b4f61ff88866
--- /dev/null
+++ b/examples/Logger/SimpleOrbits/README
@@ -0,0 +1,5 @@
+This example simulates a planet orbiting around the Sun and then compares
+the solutions given by the snapshots to those read from the logger.
+
+It requires compilation with the logger, Python and the external point-mass potential
+(./configure --with-python=/usr --enable-logger --with-ext-potential=point-mass).
\ No newline at end of file
diff --git a/examples/Logger/SimpleOrbits/plotSolution.py b/examples/Logger/SimpleOrbits/plotSolution.py
index b160e24964240b7a8a89fc03594aa4fb149b65bc..a2d216c035dc53e65a29c34b937c143b908f326f 100644
--- a/examples/Logger/SimpleOrbits/plotSolution.py
+++ b/examples/Logger/SimpleOrbits/plotSolution.py
@@ -143,75 +143,40 @@ def doLogger():
     """
     Read the logfile and plot the corresponding variables.
     """
-    basename = "index"
+    basename = "index_0000"
     N = 1000
-    verbose = 0
 
     # Get time limits
-    t_min, t_max = logger.getTimeLimits(basename, verbose)
-    times = np.linspace(t_min, t_max, N)
-
-    # Create output arrays
-    E = np.zeros((N, makeIC.num_part))
-    E_parts = np.zeros((N, makeIC.num_part))
-    p = np.zeros((N, 3))
-    v = np.zeros((N, 3))
-    t_parts = np.zeros((N, makeIC.num_part))
-    p_parts = np.zeros((N, 3))
-    v_parts = np.zeros((N, 3))
-
-    # Read the particles
-    parts = logger.loadSnapshotAtTime(
-        basename, times[0], verbose)
-
-    for i, t in enumerate(times):
-        # Get the next particles
-        interp = logger.moveForwardInTime(
-            basename, parts, t, verbose)
-        ids = interp["ids"]
-        sort = np.argsort(ids)
-        ids = ids[sort]
-        rel_pos = interp["positions"][sort, :] - center
-        vel = interp["velocities"][sort, :]
-
-        rel_pos_parts = parts["positions"][sort, :] - center
-        vel_parts = parts["velocities"][sort, :]
-
-        # Compute the interpolated variables
-        r = np.sum(rel_pos**2, axis=1)**0.5
-        v2 = np.sum(vel**2, axis=1)
-        E[i, :] = 0.5 * v2 - G * M / r
-        ind = ids == id_focus
-        p[i, :] = rel_pos[ind, :]
-        v[i, :] = vel[ind, :]
-
-        # Compute the variables of the last record
-        r = np.sum(rel_pos_parts**2, axis=1)**0.5
-        v2 = np.sum(vel_parts**2, axis=1)
-        E_parts[i, :] = 0.5 * v2 - G * M / r
-        t_parts[i, :] = parts["times"][sort]
-        ind = ids == id_focus
-        p_parts[i, :] = rel_pos_parts[ind, :]
-        v_parts[i, :] = vel_parts[ind, :]
+    with logger.Reader(basename, verbose=0) as reader:
+        t_min, t_max = reader.get_time_limits()
+        times = np.linspace(t_min, t_max, N)
+
+        # Create output arrays
+        E = np.zeros((N, makeIC.num_part))
+        p = np.zeros((N, 3))
+        v = np.zeros((N, 3))
+
+        for i, t in enumerate(times):
+            # Get the next particles
+            pos, vel, ids = reader.get_particle_data(
+                ["Coordinates", "Velocities", "ParticleIDs"], t)
+            sort = np.argsort(ids)
+            ids = ids[sort]
+            rel_pos = pos[sort, :] - center
+            vel = vel[sort, :]
+
+            # Compute the derived values
+            r = np.sum(rel_pos**2, axis=1)**0.5
+            v2 = np.sum(vel**2, axis=1)
+            E[i, :] = 0.5 * v2 - G * M / r
+            ind = ids == id_focus
+            p[i, :] = rel_pos[ind, :]
+            v[i, :] = vel[ind, :]
 
     # compute the plotting variables
     plt.figure(fig_1.number)
-    plotRelative(t_parts, E_parts, "x", label="Logger")
     plotRelative(times, E, "--", label="Logger (Interpolation)")
 
-    # Compute the solution
-    y0 = np.zeros(4)
-    y0[:2] = p[0, :2]
-    y0[2:] = v[0, :2]
-    t_parts, ind = np.unique(t_parts[:, 0], return_index=True)
-
-    # plot the solution
-    plt.figure(fig_2.number)
-    plt.plot(p_parts[:, 0], p_parts[:, 1], "x", label="Logger")
-
-    plt.figure(fig_3.number)
-    plt.plot(v_parts[:, 0], v_parts[:, 1], "x", label="Logger")
-
     # Compute the solution
     y0 = np.zeros(4)
     y0[:2] = p[0, :2]
diff --git a/examples/Logger/SimpleOrbits/run.sh b/examples/Logger/SimpleOrbits/run.sh
index e1742c357d445faf5fc92330c47300fe2dcff602..944a19c0a4791b72d8a106d770abd5aae6df3ad2 100644
--- a/examples/Logger/SimpleOrbits/run.sh
+++ b/examples/Logger/SimpleOrbits/run.sh
@@ -5,7 +5,7 @@ echo "Generating initial conditions for the Simple Orbits example..."
 python makeIC.py
 
 # Run SWIFT
-../../swift --external-gravity --threads=1 simple_orbits.yml 2>&1 | tee output.log
+../../swift --logger --external-gravity --threads=1 simple_orbits.yml 2>&1 | tee output.log
 
 # Plot the solution
 python3 plotSolution.py
diff --git a/logger/logger_tools.h b/logger/logger_tools.h
index 968fc41a81f27cc8f8a0aece897bf0fcc73fcc67..1b5a031019d2cd0dbdf05d46fc56d52615588ec7 100644
--- a/logger/logger_tools.h
+++ b/logger/logger_tools.h
@@ -79,7 +79,7 @@ float logger_tools_cubic_hermite_spline(double t0, float v0, float a0,
                                         double t);
 
 #ifndef HAVE_PYTHON
-#define error_python(...) error(##__VA_ARGS__);
+#define error_python(s, ...) error(s, ##__VA_ARGS__);
 #else
 /**
  * @brief Print the python trace back
diff --git a/src/Makefile.am b/src/Makefile.am
index b75d8cf5bde1779539002c9f398309472c9857bc..94365cdb785eb5a9b8b98735dc5aa321088ea350 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -61,6 +61,7 @@ include_HEADERS += velociraptor_struct.h velociraptor_io.h random.h memuse.h mpi
 include_HEADERS += black_holes.h black_holes_io.h black_holes_properties.h black_holes_struct.h 
 include_HEADERS += feedback.h feedback_struct.h feedback_properties.h 
 include_HEADERS += space_unique_id.h line_of_sight.h io_compression.h
+include_HEADERS += logger_history.h
 
 # source files for EAGLE cooling
 QLA_COOLING_SOURCES =
@@ -125,7 +126,7 @@ AM_SOURCES += gravity_properties.c gravity.c multipole.c
 AM_SOURCES += collectgroup.c hydro_space.c equation_of_state.c io_compression.c 
 AM_SOURCES += chemistry.c cosmology.c mesh_gravity.c velociraptor_interface.c 
 AM_SOURCES += output_list.c velociraptor_dummy.c logger_io.c memuse.c mpiuse.c memuse_rnodes.c fof.c 
-AM_SOURCES += hashmap.c pressure_floor.c 
+AM_SOURCES += hashmap.c pressure_floor.c logger_history.c
 AM_SOURCES += $(QLA_COOLING_SOURCES) 
 AM_SOURCES += $(EAGLE_COOLING_SOURCES) $(EAGLE_FEEDBACK_SOURCES) 
 AM_SOURCES += $(GRACKLE_COOLING_SOURCES) $(GEAR_FEEDBACK_SOURCES) 
diff --git a/src/cell_drift.c b/src/cell_drift.c
index 91292a8742924302b115931968b3ae4e86707b04..9b4c93d6ba3482dcfea0a7e97544b65ba5ba2f3b 100644
--- a/src/cell_drift.c
+++ b/src/cell_drift.c
@@ -182,9 +182,8 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
 #ifdef WITH_LOGGER
             if (e->policy & engine_policy_logger) {
               /* Log the particle one last time. */
-              logger_log_part(
-                  e->logger, p, xp, e, /* log_all */ 1,
-                  logger_pack_flags_and_data(logger_flag_delete, 0));
+              logger_log_part(e->logger, p, xp, e, /* log_all */ 1,
+                              logger_flag_delete, /* data */ 0);
             }
 #endif
 
@@ -363,9 +362,8 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
 #ifdef WITH_LOGGER
               if (e->policy & engine_policy_logger) {
                 /* Log the particle one last time. */
-                logger_log_gpart(
-                    e->logger, gp, e, /* log_all */ 1,
-                    logger_pack_flags_and_data(logger_flag_delete, 0));
+                logger_log_gpart(e->logger, gp, e, /* log_all */ 1,
+                                 logger_flag_delete, /* data */ 0);
               }
 #endif
 
@@ -514,9 +512,8 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
 #ifdef WITH_LOGGER
             if (e->policy & engine_policy_logger) {
               /* Log the particle one last time. */
-              logger_log_spart(
-                  e->logger, sp, e, /* log_all */ 1,
-                  logger_pack_flags_and_data(logger_flag_delete, 0));
+              logger_log_spart(e->logger, sp, e, /* log_all */ 1,
+                               logger_flag_delete, /* data */ 0);
             }
 #endif
 
diff --git a/src/dump.c b/src/dump.c
index 37b570e68bfcc9685491d45d50254d75d8ab8744..5d2c1ae69194f76e5ebd70e873f7064f0d6e5b33 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -173,8 +173,17 @@ void dump_restart(struct dump *d, const char *filename) {
     error("Failed to open dump file '%s' (%s).", filename, strerror(errno));
   }
 
+  /* Round the size up to a multiple of the page size. */
+  const size_t page_mask = ~(sysconf(_SC_PAGE_SIZE) - 1);
+  size_t size = (d->size + ~page_mask) & page_mask;
+
+  /* Pre-allocate the file size. */
+  if (posix_fallocate(d->fd, 0, size) != 0) {
+    error("Failed to pre-allocate the dump file.");
+  }
+
   /* Map memory to the created file. */
-  if ((d->data = mmap(NULL, d->size, PROT_WRITE, MAP_SHARED, d->fd,
+  if ((d->data = mmap(NULL, size, PROT_WRITE, MAP_SHARED, d->fd,
                       d->file_offset)) == MAP_FAILED) {
     error("Failed to allocate map of size %zi bytes (%s).", d->size,
           strerror(errno));
diff --git a/src/engine.c b/src/engine.c
index 19e88a633c7482d7d702afab1dc2f10a88ec1ebb..5f1fcb012f44638887f43009d7b8bc6b8fff73ac 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -2924,6 +2924,7 @@ void engine_init(struct engine *e, struct space *s, struct swift_params *params,
 
   engine_init_output_lists(e, params);
 }
+
 /**
  * @brief Prints the current policy of an engine
  *
diff --git a/src/engine_config.c b/src/engine_config.c
index d769f99cf5a9a42f4af97006c80618d711fbbf55..f94900c46ed4fc58496b8a04974967cc7515dddb 100644
--- a/src/engine_config.c
+++ b/src/engine_config.c
@@ -496,7 +496,7 @@ void engine_config(int restart, int fof, struct engine *e,
     if ((e->policy & engine_policy_logger) && e->nodeID == 0)
       message(
           "WARNING: There is currently no way of predicting the output "
-          "size, please use it carefully");
+          "size, please use the logger carefully");
 #endif
 
     /* Find the time of the first snapshot output */
diff --git a/src/engine_io.c b/src/engine_io.c
index ba6dadc0b40b90c5c9443a879da7379eb2242162..0355c69c9d10ffc84a405e5d6ea36fdd0ae14c88 100644
--- a/src/engine_io.c
+++ b/src/engine_io.c
@@ -58,8 +58,16 @@ void engine_check_for_index_dump(struct engine *e) {
   const size_t index_file_size =
       total_nr_parts * sizeof(struct logger_part_data);
 
+  size_t number_part_history = 0;
+  for (int i = 0; i < swift_type_count; i++) {
+    number_part_history +=
+        log->history_new[i].size + log->history_removed[i].size;
+  }
+  const int history_too_large = number_part_history > log->maximal_size_history;
+
   /* Check if we should write a file */
-  if (mem_frac * (dump_size - old_dump_size) > index_file_size) {
+  if (mem_frac * (dump_size - old_dump_size) > index_file_size ||
+      history_too_large) {
     /* Write an index file */
     engine_dump_index(e);
 
diff --git a/src/engine_redistribute.c b/src/engine_redistribute.c
index ddd86886c16af16395d972b040e28386cb356ad0..0e0ea2ad186d27e81c37a2b15bcb6545d0498a6a 100644
--- a/src/engine_redistribute.c
+++ b/src/engine_redistribute.c
@@ -984,7 +984,8 @@ void engine_redistribute(struct engine *e) {
     nr_bparts_new += b_counts[k * nr_nodes + nodeID];
 
 #ifdef WITH_LOGGER
-  if (e->policy & engine_policy_logger) {
+  const int initial_redistribute = e->ti_current == 0;
+  if (!initial_redistribute && e->policy & engine_policy_logger) {
     /* Log the particles before sending them out */
     size_t part_offset = 0;
     size_t spart_offset = 0;
@@ -1002,19 +1003,19 @@ void engine_redistribute(struct engine *e) {
         bpart_offset += b_counts[c_ind];
         continue;
       }
-      const uint32_t flag = logger_pack_flags_and_data(logger_flag_mpi_exit, i);
 
       /* Log the hydro parts. */
       logger_log_parts(e->logger, &parts[part_offset], &xparts[part_offset],
-                       counts[c_ind], e, /* log_all_fields */ 1, flag);
+                       counts[c_ind], e, /* log_all_fields */ 1,
+                       logger_flag_mpi_exit, i);
 
       /* Log the stellar parts. */
       logger_log_sparts(e->logger, &sparts[spart_offset], s_counts[c_ind], e,
-                        /* log_all_fields */ 1, flag);
+                        /* log_all_fields */ 1, logger_flag_mpi_exit, i);
 
       /* Log the gparts */
       logger_log_gparts(e->logger, &gparts[gpart_offset], g_counts[c_ind], e,
-                        /* log_all_fields */ 1, flag);
+                        /* log_all_fields */ 1, logger_flag_mpi_exit, i);
 
       /* Log the bparts */
       if (b_counts[c_ind] > 0) {
@@ -1083,7 +1084,7 @@ void engine_redistribute(struct engine *e) {
      stuff we just received */
 
 #ifdef WITH_LOGGER
-  if (e->policy & engine_policy_logger) {
+  if (!initial_redistribute && e->policy & engine_policy_logger) {
     size_t part_offset = 0;
     size_t spart_offset = 0;
     size_t gpart_offset = 0;
@@ -1101,21 +1102,18 @@ void engine_redistribute(struct engine *e) {
         continue;
       }
 
-      const uint32_t flag =
-          logger_pack_flags_and_data(logger_flag_mpi_enter, i);
-
       /* Log the hydro parts. */
       logger_log_parts(e->logger, &s->parts[part_offset],
                        &s->xparts[part_offset], counts[c_ind], e,
-                       /* log_all_fields */ 1, flag);
+                       /* log_all_fields */ 1, logger_flag_mpi_enter, i);
 
       /* Log the stellar parts. */
       logger_log_sparts(e->logger, &s->sparts[spart_offset], s_counts[c_ind], e,
-                        /* log_all_fields */ 1, flag);
+                        /* log_all_fields */ 1, logger_flag_mpi_enter, i);
 
       /* Log the gparts */
       logger_log_gparts(e->logger, &s->gparts[gpart_offset], g_counts[c_ind], e,
-                        /* log_all_fields */ 1, flag);
+                        /* log_all_fields */ 1, logger_flag_mpi_enter, i);
 
       /* Log the bparts */
       if (b_counts[c_ind] > 0) {
diff --git a/src/engine_strays.c b/src/engine_strays.c
index c096b2d671ca01d9c0b73ebb57b751b6e48fa785..6330ee0bde28597cd137fb4f9d3d9872d7a1cf0e 100644
--- a/src/engine_strays.c
+++ b/src/engine_strays.c
@@ -120,10 +120,9 @@ void engine_exchange_strays(struct engine *e, const size_t offset_parts,
 #ifdef WITH_LOGGER
     if (e->policy & engine_policy_logger) {
       /* Log the particle when leaving a rank. */
-      logger_log_part(
-          e->logger, &s->parts[offset_parts + k], &s->xparts[offset_parts + k],
-          e, /* log_all_fields */ 1,
-          logger_pack_flags_and_data(logger_flag_mpi_exit, node_id));
+      logger_log_part(e->logger, &s->parts[offset_parts + k],
+                      &s->xparts[offset_parts + k], e, /* log_all_fields */ 1,
+                      logger_flag_mpi_exit, node_id);
     }
 #endif
   }
@@ -165,10 +164,8 @@ void engine_exchange_strays(struct engine *e, const size_t offset_parts,
 #ifdef WITH_LOGGER
     if (e->policy & engine_policy_logger) {
       /* Log the particle when leaving a rank. */
-      logger_log_spart(
-          e->logger, &s->sparts[offset_sparts + k], e,
-          /* log_all_fields */ 1,
-          logger_pack_flags_and_data(logger_flag_mpi_exit, node_id));
+      logger_log_spart(e->logger, &s->sparts[offset_sparts + k], e,
+                       /* log_all_fields */ 1, logger_flag_mpi_exit, node_id);
     }
 #endif
   }
@@ -248,10 +245,8 @@ void engine_exchange_strays(struct engine *e, const size_t offset_parts,
         s->gparts[offset_gparts + k].type == swift_type_dark_matter) {
 
       /* Log the particle when leaving a rank. */
-      logger_log_gpart(
-          e->logger, &s->gparts[offset_gparts + k], e,
-          /* log_all_fields */ 1,
-          logger_pack_flags_and_data(logger_flag_mpi_exit, node_id));
+      logger_log_gpart(e->logger, &s->gparts[offset_gparts + k], e,
+                       /* log_all_fields */ 1, logger_flag_mpi_exit, node_id);
     }
 #endif
   }
@@ -476,9 +471,6 @@ void engine_exchange_strays(struct engine *e, const size_t offset_parts,
 
 #ifdef WITH_LOGGER
       if (e->policy & engine_policy_logger) {
-        const uint32_t flag =
-            logger_pack_flags_and_data(logger_flag_mpi_enter, prox->nodeID);
-
         struct part *parts = &s->parts[offset_parts + count_parts];
         struct xpart *xparts = &s->xparts[offset_parts + count_parts];
         struct spart *sparts = &s->sparts[offset_sparts + count_sparts];
@@ -486,15 +478,18 @@ void engine_exchange_strays(struct engine *e, const size_t offset_parts,
 
         /* Log the gas particles */
         logger_log_parts(e->logger, parts, xparts, prox->nr_parts_in, e,
-                         /* log_all_fields */ 1, flag);
+                         /* log_all_fields */ 1, logger_flag_mpi_enter,
+                         prox->nodeID);
 
         /* Log the stellar particles */
         logger_log_sparts(e->logger, sparts, prox->nr_sparts_in, e,
-                          /* log_all_fields */ 1, flag);
+                          /* log_all_fields */ 1, logger_flag_mpi_enter,
+                          prox->nodeID);
 
         /* Log the gparts */
         logger_log_gparts(e->logger, gparts, prox->nr_gparts_in, e,
-                          /* log_all_fields */ 1, flag);
+                          /* log_all_fields */ 1, logger_flag_mpi_enter,
+                          prox->nodeID);
 
         /* Log the bparts */
         if (prox->nr_bparts_in > 0) {
diff --git a/src/gravity/MultiSoftening/gravity_part.h b/src/gravity/MultiSoftening/gravity_part.h
index 9183637c7940b63ec3576ecb8665feacb448f0d2..8cf8e241a5cb3b5427e6138b65738fdb7c180a2c 100644
--- a/src/gravity/MultiSoftening/gravity_part.h
+++ b/src/gravity/MultiSoftening/gravity_part.h
@@ -20,6 +20,7 @@
 #define SWIFT_MULTI_SOFTENING_GRAVITY_PART_H
 
 #include "fof_struct.h"
+#include "logger.h"
 
 /* Gravity particle. */
 struct gpart {
diff --git a/src/hydro/Gadget2/hydro_logger.h b/src/hydro/Gadget2/hydro_logger.h
index 6d70f32601cb6194e5d9be457e4fcbd873c315e4..e7a12a5b0b3defec623aca3101ef879e6fba5ba6 100644
--- a/src/hydro/Gadget2/hydro_logger.h
+++ b/src/hydro/Gadget2/hydro_logger.h
@@ -180,9 +180,9 @@ INLINE static char *hydro_logger_write_particle(
 
     /* Compute the acceleration due to hydro and gravity */
     float *acc = (float *)buff;
-    acc[0] = p->a_hydro[0] + xp->a_grav[0];
-    acc[1] = p->a_hydro[1] + xp->a_grav[1];
-    acc[2] = p->a_hydro[2] + xp->a_grav[2];
+    acc[0] = p->a_hydro[0] + p->gpart->a_grav[0];
+    acc[1] = p->a_hydro[1] + p->gpart->a_grav[1];
+    acc[2] = p->a_hydro[2] + p->gpart->a_grav[2];
 
     memcpy(buff, acc, 3 * sizeof(float));
     buff += 3 * sizeof(float);
diff --git a/src/hydro_logger.h b/src/hydro_logger.h
index fe4a3b75c80ffa0893f3f4d6fb65be11a3acc4be..503a3b4acab6dae349694009943088e4056945b0 100644
--- a/src/hydro_logger.h
+++ b/src/hydro_logger.h
@@ -28,7 +28,7 @@
 #include "part_type.h"
 #include "timeline.h"
 
-/* Import the right functloggerns */
+/* Import the right functions */
 #if defined(MINIMAL_SPH)
 #error TODO
 #elif defined(GADGET2_SPH)
diff --git a/src/logger.c b/src/logger.c
index a5d88a96b929d0145c845deed49f59788f20837e..be462bcfe58bf78c3f571bb1c03648604789cbd6 100644
--- a/src/logger.c
+++ b/src/logger.c
@@ -1,6 +1,7 @@
 /*******************************************************************************
  * This file is part of SWIFT.
- * Copyright (c) 2017 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
+ * Copyright (c) 2020 Loic Hausammann (loic.hausammann@epfl.ch)
+ *               2017 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published
@@ -140,15 +141,17 @@ void logger_log_all_particles(struct logger_writer *log,
 
   /* log the parts. */
   logger_log_parts(log, s->parts, s->xparts, s->nr_parts, e,
-                   /* log_all_fields */ 1, /* Special flags */ 0);
+                   /* log_all_fields= */ 1, /* flag= */ 0, /* flag_data= */ 0);
 
   /* log the gparts */
   logger_log_gparts(log, s->gparts, s->nr_gparts, e,
-                    /* log_all_fields */ 1, /* Special flags */ 0);
+                    /* log_all_fields= */ 1, /* flag= */ 0,
+                    /* flag_data= */ 0);
 
   /* log the parts */
   logger_log_sparts(log, s->sparts, s->nr_sparts, e,
-                    /* log_all_fields */ 1, /* Special flags */ 0);
+                    /* log_all_fields= */ 1, /* flag= */ 0,
+                    /* flag_data= */ 0);
 
   if (e->total_nr_bparts > 0) error("Not implemented");
 }
@@ -208,13 +211,17 @@ void logger_copy_part_fields(const struct logger_writer *log,
  * @param xp The #xpart to dump.
  * @param e The #engine.
  * @param log_all_fields Should we log all the fields?
- * @param special_flags The value of the special flag.
+ * @param flag The value of the special flags.
+ * @param flag_data The data to write for the flag.
  */
 void logger_log_part(struct logger_writer *log, const struct part *p,
                      struct xpart *xp, const struct engine *e,
-                     const int log_all_fields, const uint32_t special_flags) {
+                     const int log_all_fields,
+                     const enum logger_special_flags flag,
+                     const int flag_data) {
 
-  logger_log_parts(log, p, xp, /* count */ 1, e, log_all_fields, special_flags);
+  logger_log_parts(log, p, xp, /* count= */ 1, e, log_all_fields, flag,
+                   flag_data);
 }
 
 /**
@@ -226,11 +233,17 @@ void logger_log_part(struct logger_writer *log, const struct part *p,
  * @param count The number of particle to dump.
  * @param e The #engine.
  * @param log_all_fields Should we log all the fields?
- * @param special_flags The value of the special flags.
+ * @param flag The value of the special flags.
+ * @param flag_data The data to write for the flag.
  */
 void logger_log_parts(struct logger_writer *log, const struct part *p,
                       struct xpart *xp, int count, const struct engine *e,
-                      const int log_all_fields, const uint32_t special_flags) {
+                      const int log_all_fields,
+                      const enum logger_special_flags flag,
+                      const int flag_data) {
+
+  /* Build the special flag */
+  const uint32_t special_flags = logger_pack_flags_and_data(flag, flag_data);
 
   /* Compute the size of the buffer. */
   size_t size_total = 0;
@@ -268,6 +281,17 @@ void logger_log_parts(struct logger_writer *log, const struct part *p,
                             &xp[i].logger_data.last_offset, offset_new, buff,
                             special_flags);
 
+    /* Write the particle into the history if needed. */
+    if (flag & logger_flag_create || flag & logger_flag_mpi_enter) {
+      logger_history_log(&log->history_new[swift_type_gas], p->id,
+                         xp->logger_data.last_offset);
+
+    } else if (flag & logger_flag_change_type || flag & logger_flag_delete ||
+               flag & logger_flag_mpi_exit) {
+      logger_history_log(&log->history_removed[swift_type_gas], p->id,
+                         xp->logger_data.last_offset);
+    }
+
     /* Update the pointers */
     xp[i].logger_data.last_offset = offset_new;
     xp[i].logger_data.steps_since_last_output = 0;
@@ -329,13 +353,15 @@ void logger_copy_spart_fields(const struct logger_writer *log,
  * @param sp The #spart to dump.
  * @param e The #engine.
  * @param log_all_fields Should we log all the fields?
- * @param special_flags The value of the special flag.
+ * @param flag The value of the special flags.
+ * @param flag_data The data to write for the flag.
  */
 void logger_log_spart(struct logger_writer *log, struct spart *sp,
                       const struct engine *e, const int log_all_fields,
-                      const uint32_t special_flags) {
+                      const enum logger_special_flags flag,
+                      const int flag_data) {
 
-  logger_log_sparts(log, sp, /* count */ 1, e, log_all_fields, special_flags);
+  logger_log_sparts(log, sp, /* count */ 1, e, log_all_fields, flag, flag_data);
 }
 
 /**
@@ -346,11 +372,15 @@ void logger_log_spart(struct logger_writer *log, struct spart *sp,
  * @param e The #engine.
  * @param log_all_fields Should we log all the fields?
  * @param count The number of particle to dump.
- * @param special_flags The value of the special flags.
+ * @param flag The value of the special flags.
+ * @param flag_data The data to write for the flag.
  */
 void logger_log_sparts(struct logger_writer *log, struct spart *sp, int count,
                        const struct engine *e, const int log_all_fields,
-                       const uint32_t special_flags) {
+                       const enum logger_special_flags flag,
+                       const int flag_data) {
+  /* Build the special flag */
+  const uint32_t special_flags = logger_pack_flags_and_data(flag, flag_data);
 
   /* Compute the size of the buffer. */
   size_t size_total = 0;
@@ -387,6 +417,17 @@ void logger_log_sparts(struct logger_writer *log, struct spart *sp, int count,
                              &sp[i].logger_data.last_offset, offset_new, buff,
                              special_flags);
 
+    /* Write the particle into the history if needed. */
+    if (flag & logger_flag_create || flag & logger_flag_mpi_enter) {
+      logger_history_log(&log->history_new[swift_type_stars], sp->id,
+                         sp->logger_data.last_offset);
+
+    } else if (flag & logger_flag_change_type || flag & logger_flag_delete ||
+               flag & logger_flag_mpi_exit) {
+      logger_history_log(&log->history_removed[swift_type_stars], sp->id,
+                         sp->logger_data.last_offset);
+    }
+
     /* Update the pointers */
     sp[i].logger_data.last_offset = offset_new;
     sp[i].logger_data.steps_since_last_output = 0;
@@ -448,12 +489,14 @@ void logger_copy_gpart_fields(const struct logger_writer *log,
  * @param p The #gpart to dump.
  * @param e The #engine.
  * @param log_all_fields Should we log all the fields?
- * @param special_flags The value of the special flags.
+ * @param flag The value of the special flags.
+ * @param flag_data The data to write for the flag.
  */
 void logger_log_gpart(struct logger_writer *log, struct gpart *p,
                       const struct engine *e, const int log_all_fields,
-                      const uint32_t special_flags) {
-  logger_log_gparts(log, p, /* count */ 1, e, log_all_fields, special_flags);
+                      const enum logger_special_flags flag,
+                      const int flag_data) {
+  logger_log_gparts(log, p, /* count */ 1, e, log_all_fields, flag, flag_data);
 }
 
 /**
@@ -464,11 +507,15 @@ void logger_log_gpart(struct logger_writer *log, struct gpart *p,
  * @param count The number of particle to dump.
  * @param e The #engine.
  * @param log_all_fields Should we log all the fields?
- * @param special_flags The value of the special flags.
+ * @param flag The value of the special flags.
+ * @param flag_data The data to write for the flag.
  */
 void logger_log_gparts(struct logger_writer *log, struct gpart *p, int count,
                        const struct engine *e, const int log_all_fields,
-                       const uint32_t special_flags) {
+                       const enum logger_special_flags flag,
+                       const int flag_data) {
+  /* Build the special flag */
+  const uint32_t special_flags = logger_pack_flags_and_data(flag, flag_data);
 
   /* Compute the size of the buffer. */
   size_t size_total = 0;
@@ -510,6 +557,17 @@ void logger_log_gparts(struct logger_writer *log, struct gpart *p, int count,
     logger_copy_gpart_fields(log, &p[i], e, mask, &p[i].logger_data.last_offset,
                              offset_new, buff, special_flags);
 
+    /* Write the particle into the history if needed. */
+    if (flag & logger_flag_create || flag & logger_flag_mpi_enter) {
+      logger_history_log(&log->history_new[swift_type_dark_matter],
+                         p->id_or_neg_offset, p->logger_data.last_offset);
+
+    } else if (flag & logger_flag_change_type || flag & logger_flag_delete ||
+               flag & logger_flag_mpi_exit) {
+      logger_history_log(&log->history_removed[swift_type_dark_matter],
+                         p->id_or_neg_offset, p->logger_data.last_offset);
+    }
+
     /* Update the pointers */
     p[i].logger_data.last_offset = offset_new;
     p[i].logger_data.steps_since_last_output = 0;
@@ -790,6 +848,7 @@ void logger_init(struct logger_writer *log, const struct engine *e,
   /* set initial value of parameters. */
   log->timestamp_offset = 0;
   log->index.dump_size_last_output = 0;
+  log->index_file_number = 0;
 
   /* generate dump filename. */
   char logger_name_file[PARSER_MAX_LINE_SIZE];
@@ -809,6 +868,23 @@ void logger_init(struct logger_writer *log, const struct engine *e,
 
   /* init dump. */
   dump_init(&log->dump, logger_name_file, buffer_size);
+
+  /* Read the maximal size of the history. */
+  const float max_memory_size =
+      parser_get_opt_param_float(params, "Logger:maximal_memory_size", 1.);
+  log->maximal_size_history =
+      max_memory_size / sizeof(struct logger_index_data);
+
+  if (e->nodeID == 0) {
+    message("Maximal memory size for the logger history: %g GB",
+            max_memory_size);
+  }
+
+  /* initialize the history */
+  for (int i = 0; i < swift_type_count; i++) {
+    logger_history_init(&log->history_removed[i]);
+    logger_history_init(&log->history_new[i]);
+  }
 }
 
 /**
@@ -822,6 +898,11 @@ void logger_free(struct logger_writer *log) {
   free(log->logger_mask_data);
   log->logger_mask_data = NULL;
   log->logger_count_mask = 0;
+
+  for (int i = 0; i < swift_type_count; i++) {
+    logger_history_free(&log->history_new[i]);
+    logger_history_free(&log->history_removed[i]);
+  }
 }
 
 /**
@@ -1094,6 +1175,12 @@ void logger_struct_dump(const struct logger_writer *log, FILE *stream) {
   restart_write_blocks((void *)log->logger_mask_data, sizeof(struct mask_data),
                        log->logger_count_mask, stream, "logger_masks",
                        "logger_masks");
+
+  /* Dump the logger MPI history */
+  for (int i = 0; i < swift_type_count; i++) {
+    logger_history_dump(&log->history_new[i], stream);
+    logger_history_dump(&log->history_removed[i], stream);
+  }
 }
 
 /**
@@ -1109,17 +1196,35 @@ void logger_struct_restore(struct logger_writer *log, FILE *stream) {
                       NULL, "logger");
 
   /* Read the masks */
+  const struct mask_data *old_logger_mask_data = log->logger_mask_data;
   log->logger_mask_data = (struct mask_data *)malloc(sizeof(struct mask_data) *
                                                      log->logger_count_mask);
 
   restart_read_blocks((void *)log->logger_mask_data, sizeof(struct mask_data),
                       log->logger_count_mask, stream, NULL, "logger_masks");
 
-  /* generate dump filename */
+  /* Restore the pointers */
+  log->mask_data_pointers.hydro =
+      log->logger_mask_data +
+      (log->mask_data_pointers.hydro - old_logger_mask_data);
+  log->mask_data_pointers.gravity =
+      log->logger_mask_data +
+      (log->mask_data_pointers.gravity - old_logger_mask_data);
+  log->mask_data_pointers.stars =
+      log->logger_mask_data +
+      (log->mask_data_pointers.stars - old_logger_mask_data);
+
+  /* Restart the dump file. */
   char logger_name_file[PARSER_MAX_LINE_SIZE];
   logger_get_dump_name(log, logger_name_file);
 
   dump_restart(&log->dump, logger_name_file);
+
+  /* Restore the logger MPI history */
+  for (int i = 0; i < swift_type_count; i++) {
+    logger_history_restore(&log->history_new[i], stream);
+    logger_history_restore(&log->history_removed[i], stream);
+  }
 }
 
 #endif /* WITH_LOGGER */
diff --git a/src/logger.h b/src/logger.h
index 78d8bbeabeaca191de1ec3dc928f5a7cc1a2a96f..c2810219f7f8dc4ba695ab1df6edf433a442be65 100644
--- a/src/logger.h
+++ b/src/logger.h
@@ -29,6 +29,7 @@
 #include "dump.h"
 #include "error.h"
 #include "inline.h"
+#include "logger_history.h"
 #include "timeline.h"
 #include "units.h"
 
@@ -39,7 +40,7 @@ struct part;
 struct engine;
 
 #define logger_major_version 0
-#define logger_minor_version 4
+#define logger_minor_version 5
 /* Size of the strings. */
 #define logger_string_length 200
 
@@ -108,6 +109,18 @@ struct logger_writer {
     size_t dump_size_last_output;
   } index;
 
+  /* Index file number for the filename. */
+  int index_file_number;
+
+  /* History of the new particles since the last index file. */
+  struct logger_history history_new[swift_type_count];
+
+  /* History of the particles removed since the last index file. */
+  struct logger_history history_removed[swift_type_count];
+
+  /* Maximal number of particles stored in the history. */
+  size_t maximal_size_history;
+
   /*  Dump file (In the reader, the dump is cleaned, therefore it is renamed
    * logfile). */
   struct dump dump;
@@ -164,22 +177,29 @@ void logger_log_all_particles(struct logger_writer *log,
                               const struct engine *e);
 void logger_log_part(struct logger_writer *log, const struct part *p,
                      struct xpart *xp, const struct engine *e,
-                     const int log_all_fields, const uint32_t special_flags);
+                     const int log_all_fields,
+                     const enum logger_special_flags flag, const int flag_data);
 void logger_log_parts(struct logger_writer *log, const struct part *p,
                       struct xpart *xp, int count, const struct engine *e,
-                      const int log_all_fields, const uint32_t special_flags);
+                      const int log_all_fields,
+                      const enum logger_special_flags flag,
+                      const int flag_data);
 void logger_log_spart(struct logger_writer *log, struct spart *p,
                       const struct engine *e, const int log_all_fields,
-                      const uint32_t special_flags);
+                      const enum logger_special_flags flag,
+                      const int flag_data);
 void logger_log_sparts(struct logger_writer *log, struct spart *sp, int count,
                        const struct engine *e, const int log_all_fields,
-                       const uint32_t special_flags);
+                       const enum logger_special_flags flag,
+                       const int flag_data);
 void logger_log_gpart(struct logger_writer *log, struct gpart *p,
                       const struct engine *e, const int log_all_fields,
-                      const uint32_t special_flags);
+                      const enum logger_special_flags flag,
+                      const int flag_data);
 void logger_log_gparts(struct logger_writer *log, struct gpart *gp, int count,
                        const struct engine *e, const int log_all_fields,
-                       const uint32_t special_flags);
+                       const enum logger_special_flags flag,
+                       const int flag_data);
 void logger_init(struct logger_writer *log, const struct engine *e,
                  struct swift_params *params);
 void logger_free(struct logger_writer *log);
@@ -202,23 +222,23 @@ void logger_struct_restore(struct logger_writer *log, FILE *stream);
  * @brief Generate the data for the special flags.
  *
  * @param flag The special flag to use.
- * @param data The data to write in the .
+ * @param flag_data The data to write in the record.
  */
 INLINE static uint32_t logger_pack_flags_and_data(
-    enum logger_special_flags flag, int data) {
+    enum logger_special_flags flag, int flag_data) {
 #ifdef SWIFT_DEBUG_CHECKS
   if (flag & 0xFFFFFF00) {
     error(
         "The special flag in the particle logger cannot be larger than 1 "
         "byte.");
   }
-  if (data & ~0xFFFFFF) {
+  if (flag_data & ~0xFFFFFF) {
     error(
         "The data for the special flag in the particle logger cannot be larger "
         "than 3 bytes.");
   }
 #endif
-  return ((uint32_t)flag << (3 * 8)) | (data & 0xFFFFFF);
+  return ((uint32_t)flag << (3 * 8)) | (flag_data & 0xFFFFFF);
 }
 
 /**
diff --git a/src/logger_history.c b/src/logger_history.c
new file mode 100644
index 0000000000000000000000000000000000000000..6ae10d4aa0f655a9db9241c6764e37a3d7a97cad
--- /dev/null
+++ b/src/logger_history.c
@@ -0,0 +1,176 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2020 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Include header */
+#include "logger_history.h"
+
+/* Standard includes */
+#include <string.h>
+
+/* Local include */
+#include "logger_io.h"
+#include "part.h"
+
+#if defined(WITH_LOGGER)
+
+#define LOGGER_HISTORY_INIT_SIZE 1024
+
+/**
+ * @brief Initialize the structure for the first time.
+ *
+ * @param hist The #logger_history.
+ */
+void logger_history_init(struct logger_history *hist) {
+
+  /* Set the counters to their initial value */
+  hist->size = 0;
+  hist->capacity = LOGGER_HISTORY_INIT_SIZE;
+
+  hist->data = (struct logger_index_data *)swift_malloc(
+      "logger_history",
+      sizeof(struct logger_index_data) * LOGGER_HISTORY_INIT_SIZE);
+  if (hist->data == NULL) {
+    error("Failed to allocate memory for the logger_history.");
+  }
+}
+
+/**
+ * @brief Reset the structure (for example just after a dump).
+ *
+ * @param hist The #logger_history.
+ * Frees the currently allocated data buffer and re-initializes
+ * the history to its default (empty) state.
+ */
+void logger_history_reset(struct logger_history *hist) {
+
+  swift_free("logger_history", hist->data);
+
+  logger_history_init(hist);
+}
+
+/**
+ * @brief Free the structure (e.g. just before exiting).
+ *
+ * @param hist The #logger_history.
+ */
+void logger_history_free(struct logger_history *hist) {
+  /* Set the counters to 0 */
+  hist->size = 0;
+  hist->capacity = 0;
+
+  /* Free the memory */
+  if (hist->data != NULL) {
+    swift_free("logger_history", hist->data);
+    hist->data = NULL;
+  }
+}
+
+/**
+ * @brief Log the particle information into the #logger_history.
+ * @param hist The #logger_history.
+ * @param id The ID of the logged particle.
+ * @param last_offset The offset of the particle's last record in the logfile.
+ */
+void logger_history_log(struct logger_history *hist, const long long id,
+                        const uint64_t last_offset) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (id < 0) {
+    error(
+        "Negative ID for a particle. "
+        "Are you trying to log a gpart linked to another type of particles?");
+  }
+#endif
+  const struct logger_index_data data = {id, last_offset};
+
+  /* Check if enough space is left */
+  if (hist->size == hist->capacity) {
+    /* Compute the previous amount of memory */
+    const size_t memsize = sizeof(struct logger_index_data) * hist->capacity;
+
+    /* Increase the capacity of the array */
+    hist->capacity *= 2;
+
+    /* Allocate the new array and copy the content of the previous one */
+    struct logger_index_data *tmp =
+        (struct logger_index_data *)swift_malloc("logger_history", 2 * memsize);
+    if (tmp == NULL) error("Failed to grow the logger_history array.");
+    memcpy(tmp, hist->data, memsize);
+
+    /* Free the previous array and switch the pointers */
+    swift_free("logger_history", hist->data);
+    hist->data = tmp;
+  }
+
+  /* Save the new particle */
+  hist->data[hist->size] = data;
+
+  /* Increase the element counter */
+  hist->size += 1;
+}
+
+/**
+ * @brief Write the history into an index file.
+ *
+ * @param hist The #logger_history.
+ * @param e The #engine.
+ * @param f The file where to write the history.
+ */
+void logger_history_write(struct logger_history *hist, struct engine *e,
+                          FILE *f) {
+  /* Generate the structures for writing the index file */
+  const int num_fields = 2;
+  struct io_props list[2];
+  list[0] =
+      io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f,
+                           hist->data, id, "Field not used");
+  list[1] = io_make_output_field("Offset", UINT64, 1, UNIT_CONV_NO_UNITS, 0.f,
+                                 hist->data, offset, "Field not used");
+
+  write_index_array(e, f, list, num_fields, hist->size);
+
+  /* Reset the logger history */
+  logger_history_reset(hist);
+}
+
+void logger_history_dump(const struct logger_history *hist, FILE *stream) {
+  restart_write_blocks((void *)hist, sizeof(struct logger_history), 1, stream,
+                       "logger_history", "logger_history");
+
+  if (hist->size != 0)
+    restart_write_blocks((void *)hist->data, sizeof(struct logger_index_data),
+                         hist->size, stream, "logger_history_data",
+                         "logger_history_data");
+}
+
+void logger_history_restore(struct logger_history *hist, FILE *stream) {
+  restart_read_blocks((void *)hist, sizeof(struct logger_history), 1, stream,
+                      NULL, "logger_history");
+
+  /* Use swift_malloc: this buffer is released with swift_free elsewhere. */
+  hist->data = (struct logger_index_data *)swift_malloc(
+      "logger_history", hist->capacity * sizeof(struct logger_index_data));
+  if (hist->data == NULL) error("Failed to allocate array for logger history");
+
+  if (hist->size != 0)
+    restart_read_blocks((void *)hist->data, sizeof(struct logger_index_data),
+                        hist->size, stream, NULL, "logger_history_data");
+}
+
+#endif  // WITH_LOGGER
diff --git a/src/logger_history.h b/src/logger_history.h
new file mode 100644
index 0000000000000000000000000000000000000000..cee7db5b4ad6c1b9baac559157f5059190c5702d
--- /dev/null
+++ b/src/logger_history.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_LOGGER_HISTORY_H
+#define SWIFT_LOGGER_HISTORY_H
+
+#include "../config.h"
+
+/* Standard includes */
+#include <stdint.h>
+
+/* Local include */
+#include "error.h"
+#include "part_type.h"
+
+#if defined(WITH_LOGGER)
+
+/* Forward declaration */
+struct xpart;
+struct part;
+struct gpart;
+struct spart;
+struct bpart;
+struct engine;
+struct swift_params;
+
+/**
+ * @brief Contains the information concerning
+ * a particle for the index files.
+ */
+struct logger_index_data {
+  /* Id of the particle. */
+  int64_t id;
+
+  /* Offset of the particle in the file. */
+  uint64_t offset;
+};
+
+/**
+ * @brief Structure dealing with the changes in the number
+ * of particles (e.g. creation, deletion, transformation).
+ */
+struct logger_history {
+
+  /* Number of elements currently stored */
+  uint64_t size;
+
+  /* Size of the current buffer */
+  size_t capacity;
+
+  /* Buffer containing the particles */
+  struct logger_index_data *data;
+};
+
+void logger_history_init(struct logger_history *hist);
+void logger_history_reset(struct logger_history *hist);
+void logger_history_free(struct logger_history *hist);
+void logger_history_log(struct logger_history *hist, const long long id,
+                        const uint64_t last_offset);
+void logger_history_write(struct logger_history *hist, struct engine *e,
+                          FILE *f);
+
+void logger_history_dump(const struct logger_history *hist, FILE *stream);
+void logger_history_restore(struct logger_history *hist, FILE *stream);
+
+#endif  // WITH_LOGGER
+#endif  // SWIFT_LOGGER_HISTORY_H
diff --git a/src/logger_io.c b/src/logger_io.c
index cd97be44133baf105f9006e17218207a5765a445..39f3dbd52f2369b69d8966383cc0993077ae99a3 100644
--- a/src/logger_io.c
+++ b/src/logger_io.c
@@ -26,12 +26,15 @@
 /* Some standard headers. */
 #include "common_io.h"
 
+#include <errno.h>
 #include <hdf5.h>
 #include <math.h>
 #include <stddef.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
 
 /* This object's header. */
 #include "logger_io.h"
@@ -60,37 +63,6 @@
 #include "version.h"
 #include "xmf.h"
 
-/**
- * @brief Mapper function to copy #part or #gpart fields into a buffer.
- * WARNING Assumes two io_props in extra_data.
- */
-void logger_io_copy_mapper(void* restrict temp, int N,
-                           void* restrict extra_data) {
-
-  /* Get the io_props */
-  const struct io_props* props = (const struct io_props*)(extra_data);
-  const struct io_props props1 = props[0];
-  const struct io_props props2 = props[1];
-
-  /* Get the sizes */
-  const size_t typeSize1 = io_sizeof_type(props1.type);
-  const size_t copySize1 = typeSize1 * props1.dimension;
-  const size_t typeSize2 = io_sizeof_type(props2.type);
-  const size_t copySize2 = typeSize2 * props2.dimension;
-  const size_t copySize = copySize1 + copySize2;
-
-  /* How far are we with this chunk? */
-  char* restrict temp_c = (char*)temp;
-  const ptrdiff_t delta = (temp_c - props1.start_temp_c) / copySize;
-
-  /* Copy the memory to the buffer */
-  for (int k = 0; k < N; k++) {
-    memcpy(&temp_c[k * copySize], props1.field + (delta + k) * props1.partSize,
-           copySize1);
-    memcpy(&temp_c[k * copySize + copySize1],
-           props2.field + (delta + k) * props2.partSize, copySize2);
-  }
-}
 /**
  * @brief Writes the data array in the index file.
  *
@@ -100,42 +72,82 @@ void logger_io_copy_mapper(void* restrict temp, int N,
  * @param n_props The number of element in @props.
  * @param N The number of particles to write.
  */
-void writeIndexArray(const struct engine* e, FILE* f, struct io_props* props,
-                     size_t n_props, size_t N) {
+void write_index_array(const struct engine* e, FILE* f, struct io_props* props,
+                       size_t n_props, size_t N) {
 
   /* Check that the assumptions are corrects */
   if (n_props != 2)
     error("Not implemented: The index file can only write two props.");
 
-  if (props[0].dimension != 1 || props[1].dimension != 1)
-    error("Not implemented: cannot use multidimensional data");
-
   /* Get a few variables */
-  const size_t typeSize =
-      io_sizeof_type(props[0].type) + io_sizeof_type(props[1].type);
+  const size_t type_size0 = io_sizeof_type(props[0].type) * props[0].dimension;
+  const size_t type_size1 = io_sizeof_type(props[1].type) * props[1].dimension;
+  const size_t type_size = type_size0 + type_size1;
+
+  /* Convert FILE to int */
+  int fd = fileno(f);
+  if (fd == -1) {
+    error("Failed to get the integer descriptor");
+  }
 
-  const size_t num_elements = N;
+  /* Get a few variables for the mapping */
+  char *data;
+  const size_t offset = ftell(f);
+  const size_t count = N * type_size + offset;
 
-  /* Allocate temporary buffer */
-  void* temp = NULL;
-  if (posix_memalign((void**)&temp, IO_BUFFER_ALIGNMENT,
-                     num_elements * typeSize) != 0)
-    error("Unable to allocate temporary i/o buffer");
+  /* Truncate the file to the correct length. */
+  if (ftruncate(fd, count) != 0) {
+    error("Failed to truncate dump file (%s).", strerror(errno));
+  }
 
-  /* Copy the particle data to the temporary buffer */
-  /* Set initial buffer position */
-  props[0].start_temp_c = temp;
-  props[1].start_temp_c = temp;
 
-  /* Copy the whole thing into a buffer */
-  threadpool_map((struct threadpool*)&e->threadpool, logger_io_copy_mapper,
-                 temp, N, typeSize, threadpool_auto_chunk_size, props);
+  /* Map the file */
+  if ((data = mmap(NULL, count, PROT_WRITE, MAP_SHARED, fd, 0)) ==
+      MAP_FAILED) {
+    error("Failed to allocate map of size %zi bytes (%s).", count,
+          strerror(errno));
+  }
+
+  /* Copy the data into the file */
+  char *first = data + offset;
+  char *second = data + type_size0 + offset;
+  for (size_t i = 0; i < N; i++) {
+    memcpy(first + i * type_size, props[0].field + i * props[0].partSize,
+           type_size0);
+    memcpy(second + i * type_size, props[1].field + i * props[1].partSize,
+           type_size1);
+  }
 
-  /* Write data to file */
-  fwrite(temp, typeSize, num_elements, f);
+  /* Unmap the data in memory. */
+  if (munmap(data, count) != 0) {
+    error("Failed to unmap dump data (%s).", strerror(errno));
+  }
 
-  /* Free everything */
-  free(temp);
+  /* Move the file position */
+  fseek(f, count, SEEK_SET);
+}
+
+/**
+ * @brief Write the history (created or deleted) for all the particles type.
+ *
+ * @param history The list of history to write.
+ * @param e The #engine.
+ * @param f The opened file to use.
+ */
+void logger_write_history(struct logger_history* history, struct engine* e,
+                          FILE* f) {
+
+  /* Write the number of particles. */
+  uint64_t size[swift_type_count];
+  for (int i = 0; i < swift_type_count; i++) {
+    size[i] = history[i].size;
+  }
+  fwrite(size, sizeof(uint64_t), swift_type_count, f);
+
+  /* Write the data */
+  for (int i = 0; i < swift_type_count; i++) {
+    logger_history_write(&history[i], e, f);
+  }
 }
 
 /**
@@ -161,7 +173,6 @@ void logger_write_index_file(struct logger_writer* log, struct engine* e) {
   struct xpart* xparts = e->s->xparts;
   struct gpart* gparts = e->s->gparts;
   struct spart* sparts = e->s->sparts;
-  static int outputCount = 0;
 
   /* Number of particles currently in the arrays */
   const size_t Ntot = e->s->nr_gparts;
@@ -191,11 +202,12 @@ void logger_write_index_file(struct logger_writer* log, struct engine* e) {
   /* File name */
   char fileName[FILENAME_BUFFER_SIZE];
   snprintf(fileName, FILENAME_BUFFER_SIZE, "%.100s_%04i_%04i.index",
-           e->logger->base_name, engine_rank, outputCount);
+           e->logger->base_name, engine_rank, log->index_file_number);
+  log->index_file_number++;
 
-  /* Open file */
+  /* Open file (include reading for mmap) */
   FILE* f = NULL;
-  f = fopen(fileName, "wb");
+  f = fopen(fileName, "w+b");
 
   if (f == NULL) {
     error("Failed to open file %s", fileName);
@@ -341,7 +353,7 @@ void logger_write_index_file(struct logger_writer* log, struct engine* e) {
     }
 
     /* Write ids */
-    writeIndexArray(e, f, list, num_fields, N);
+    write_index_array(e, f, list, num_fields, N);
 
     /* Free temporary arrays */
     if (parts_written) swift_free("parts_written", parts_written);
@@ -353,10 +365,14 @@ void logger_write_index_file(struct logger_writer* log, struct engine* e) {
     if (bparts_written) swift_free("bparts_written", bparts_written);
   }
 
+  /* Write the particles created */
+  logger_write_history(log->history_new, e, f);
+
+  /* Write the particles removed */
+  logger_write_history(log->history_removed, e, f);
+
   /* Close file */
   fclose(f);
-
-  ++outputCount;
 }
 
 /**
diff --git a/src/logger_io.h b/src/logger_io.h
index 0a11de5ca9d0acbb650e4ef4fc305dc5266b5a9f..2c0cfd5a800ce72c75e5dc69e765c712a4768922 100644
--- a/src/logger_io.h
+++ b/src/logger_io.h
@@ -65,6 +65,8 @@ struct mask_data {
   } reader;
 };
 
+void write_index_array(const struct engine* e, FILE* f, struct io_props* props,
+                       size_t n_props, size_t N);
 /**
  * @brief Initialize the mask_data with a given field.
  *
diff --git a/src/runner_others.c b/src/runner_others.c
index d0227c95ebcf5caa3a246a55aec2250ec2d4d1c6..7efb403213f15c7ad8a539cc30047d4b053e8912 100644
--- a/src/runner_others.c
+++ b/src/runner_others.c
@@ -283,8 +283,7 @@ void runner_do_star_formation(struct runner *r, struct cell *c, int timer) {
             /* Logs all the fields request by the user */
             // TODO select only the requested fields
             logger_log_part(e->logger, p, xp, e, /* log_all */ 1,
-                            logger_pack_flags_and_data(logger_flag_change_type,
-                                                       swift_type_stars));
+                            logger_flag_change_type, swift_type_stars);
 #endif
 
             /* Convert the gas particle to a star particle */
@@ -348,9 +347,8 @@ void runner_do_star_formation(struct runner *r, struct cell *c, int timer) {
               sp->logger_data = xp->logger_data;
 
               /* Write the s-particle */
-              logger_log_spart(e->logger, sp, e,
-                               /* log_all */ 1,
-                               /* special flags */ 0);
+              logger_log_spart(e->logger, sp, e, /* log_all */ 1,
+                               logger_flag_create, /* data */ 0);
 #endif
             } else if (swift_star_formation_model_creates_stars) {
 
@@ -743,8 +741,8 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
         if (logger_should_write(&xp->logger_data, e->logger)) {
           /* Write particle */
           /* Currently writing everything, should adapt it through time */
-          logger_log_part(e->logger, p, xp, e, /* log_all */ 0,
-                          /* special flags */ 0);
+          logger_log_part(e->logger, p, xp, e, /* log_all_fields= */ 0,
+                          /* flag= */ 0, /* flag_data= */ 0);
         } else
           /* Update counter */
           xp->logger_data.steps_since_last_output += 1;
@@ -766,8 +764,8 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
         if (logger_should_write(&gp->logger_data, e->logger)) {
           /* Write particle */
           /* Currently writing everything, should adapt it through time */
-          logger_log_gpart(e->logger, gp, e, /* log_all */ 0,
-                           /* Special flags */ 0);
+          logger_log_gpart(e->logger, gp, e, /* log_all_fields= */ 0,
+                           /* flag= */ 0, /* flag_data= */ 0);
 
         } else
           /* Update counter */
@@ -787,8 +785,8 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
         if (logger_should_write(&sp->logger_data, e->logger)) {
           /* Write particle */
           /* Currently writing everything, should adapt it through time */
-          logger_log_spart(e->logger, sp, e, /* Log_all */ 0,
-                           /* Special flags */ 0);
+          logger_log_spart(e->logger, sp, e, /* Log_all_fields= */ 0,
+                           /* flag= */ 0, /* flag_data= */ 0);
         } else
           /* Update counter */
           sp->logger_data.steps_since_last_output += 1;
diff --git a/src/stars/Default/stars_logger.h b/src/stars/Default/stars_logger.h
index 891e274048c482659d08826f14658fe40a983124..2637cef126fa2ce4b8980b7f22265c80c463f7be 100644
--- a/src/stars/Default/stars_logger.h
+++ b/src/stars/Default/stars_logger.h
@@ -21,6 +21,8 @@
 
 #ifdef WITH_LOGGER
 
+#include "logger_io.h"
+
 /*
  * List of all possible mask.
  * Outside the module, only stars_logger_field_count is used.
diff --git a/tools/read_index_file.py b/tools/read_index_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..96ab1df2e7e8426b5c84e3822b8fb6a05aabfd46
--- /dev/null
+++ b/tools/read_index_file.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+
+import sys
+import numpy as np
+
+filename = sys.argv[-1]
+n_type = 6
+
+
+# dtype for the particle's data
+dt = np.dtype([("ids", np.ulonglong),
+               ("offset", np.uint64)])
+
+# Read the file
+with open(filename, "rb") as f:
+    # read the time
+    time = np.fromfile(f, dtype=float, count=1)
+    time_int = np.fromfile(f, dtype=np.longlong, count=1)
+    print("Time: {}, integer time: {}".format(
+        time[0], time_int[0]))
+
+    # read the number of particles
+    nparts = np.fromfile(f, dtype=np.uint64, count=n_type)
+
+    print("Number of particles:", nparts)
+
+    # read if the file is sorted
+    sort = np.fromfile(f, dtype=bool, count=1)
+    print("File is sorted?", sort[0])
+
+    # read the memory alignment garbage
+    n = ((f.tell() + 7) & ~7) - f.tell()
+    f.read(n)
+
+    # read the particles
+    print("Particles data (ids / offset):")
+    for n in nparts:
+        if n == 0:
+            continue
+
+        data = np.fromfile(f, dtype=dt, count=n)
+
+        print("\t", data)
+
+    # print the history of new particles
+    n_new = np.fromfile(f, dtype=np.uint64, count=n_type)
+    print("New particles: ", n_new)
+
+    for n in n_new:
+        if n == 0:
+            continue
+
+        data = np.fromfile(f, dtype=dt, count=n)
+
+        print("\t", data)
+
+    # print the history of particles removed
+    n_rem = np.fromfile(f, dtype=np.uint64, count=n_type)
+    print("Particles removed: ", n_rem)
+
+    for n in n_rem:
+        if n == 0:
+            continue
+
+        data = np.fromfile(f, dtype=dt, count=n)
+
+        print("\t", data)