diff --git a/examples/HydroTests/SedovBlast_3D/sedov.yml b/examples/HydroTests/SedovBlast_3D/sedov.yml
index 00419ba94b262a1c94c0d3cd31acf70c949b9164..df7a2526b74e01d24ab7d7e164f2f54ef6437680 100644
--- a/examples/HydroTests/SedovBlast_3D/sedov.yml
+++ b/examples/HydroTests/SedovBlast_3D/sedov.yml
@@ -34,3 +34,12 @@ InitialConditions:
   file_name:                    ./sedov.hdf5          
   periodic:                     1
   smoothing_length_scaling:     3.33
+
+# Parameters governing the logger snapshot system
+Logger:
+  delta_step:           10     # Update the particle log every this many steps
+  basename:             index  # Common part of the filenames
+  initial_buffer_size:  0.01      # (Optional) Buffer size in GB
+  buffer_scale:         10     # (Optional) When the buffer is too small, resize it to the required memory times buffer_scale
+  time_first_index:     0.    # Time of the first output (in internal units)
+  delta_time_index:     1e-2  # Time difference between consecutive outputs (in internal units)
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml
index d60006842b383195e07c812942a2ecfdf7aa8615..82e2569f9bdb7742305894562ce79efc3f88bbbc 100644
--- a/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml
@@ -56,4 +56,3 @@ StructureFinding:
   basename:             ./stf
   scale_factor_first:   0.1       # z = 9
   delta_time:           1.04
-
diff --git a/examples/main.c b/examples/main.c
index 9782a754abbfce20c1be323915484ddc9182f50c..8cbbf6917bd5bfa447482bb695a600bb0ad878f1 100644
--- a/examples/main.c
+++ b/examples/main.c
@@ -1419,7 +1419,10 @@ int main(int argc, char *argv[]) {
     }
 #ifdef WITH_LOGGER
     logger_log_all(e.logger, &e);
-    engine_dump_index(&e);
+
+    /* Write a sentinel timestamp */
+    logger_log_timestamp(e.logger, e.ti_current, e.time,
+                         &e.logger->timestamp_offset);
 #endif
 
     /* Write final snapshot? */
diff --git a/logger/Makefile.am b/logger/Makefile.am
index 3bfd5af848c504d50fe201e02f49186287fbfb5a..d4dd4b08456cd3fd3de3aea02069b2bcc89f0266 100644
--- a/logger/Makefile.am
+++ b/logger/Makefile.am
@@ -48,11 +48,11 @@ SUBDIRS = tests
 
 # List required headers
 include_HEADERS = logger_header.h logger_loader_io.h logger_particle.h logger_time.h logger_tools.h logger_reader.h \
-	logger_logfile.h
+	logger_logfile.h logger_index.h quick_sort.h
 
 # Common source files
 AM_SOURCES = logger_header.c logger_loader_io.c logger_particle.c logger_time.c logger_tools.c logger_reader.c \
-	logger_logfile.c
+	logger_logfile.c logger_index.c quick_sort.c
 if HAVEPYTHON
 AM_SOURCES += logger_python_wrapper.c
 endif
diff --git a/logger/examples/reader_example.py b/logger/examples/reader_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..038a8ea5039c841007a1f07ec72210f08162a83d
--- /dev/null
+++ b/logger/examples/reader_example.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+"""
+Read a logger file by using an index file.
+Example: ./reader_example.py ../../examples/HydroTests/SedovBlast_3D/index 0.1
+"""
+import sys
+import numpy as np
+import matplotlib.pyplot as plt
+sys.path.append("../.libs/")
+
+import liblogger as logger
+
+
+def plot3D(pos):
+    from mpl_toolkits.mplot3d import Axes3D
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection="3d")
+    ax.plot(pos[:, 0], pos[:, 1], pos[:, 2], ".", markersize=0.2)
+
+
+def plot2D():
+    center = np.array([0.5]*3)
+    r2 = np.sum((pos - center)**2, axis=1)
+
+    # plot entropy vs distance
+    plt.plot(np.sqrt(r2), data["entropies"], '.',
+             markersize=0.2)
+
+    plt.xlim(0., 0.5)
+    plt.ylim(-1, 50)
+    plt.xlabel("Radius")
+    plt.ylabel("Entropy")
+
+
+basename = "../../examples/HydroTests/SedovBlast_3D/index"
+time = 0.05
+if len(sys.argv) >= 2:
+    basename = sys.argv[1]
+else:
+    print("No basename supplied (first argument), using default.")
+if len(sys.argv) >= 3:
+    time = float(sys.argv[2])
+else:
+    print("No time supplied (second argument), using default.")
+if len(sys.argv) > 3:
+    print("Ignoring excess arguments '%s'." % sys.argv[3:])
+print("basename: %s" % basename)
+print("time: %g" % time)
+
+# read dump
+
+t = logger.getTimeLimits(basename)
+data = logger.loadSnapshotAtTime(basename, time)
+
+pos = data["positions"]
+
+plot3D(pos)
+plt.show()
diff --git a/logger/logger_header.c b/logger/logger_header.c
index 61e5da246c9aa07eeeb42e751832f017fa04ca0a..d1c70736d4f895c4ec58e7a625a4c10c6597d281 100644
--- a/logger/logger_header.c
+++ b/logger/logger_header.c
@@ -45,9 +45,9 @@ void header_print(const struct header *h) {
 #endif
   message("First Offset:     %lu.", h->offset_first_record);
   message("Offset direction: %s.", logger_offset_name[h->offset_direction]);
-  message("Number masks:     %i.", h->number_mask);
+  message("Number masks:     %i.", h->masks_count);
 
-  for (size_t i = 0; i < h->number_mask; i++) {
+  for (size_t i = 0; i < h->masks_count; i++) {
     message("  Mask:  %s.", h->masks[i].name);
     message("  Value: %u.", h->masks[i].mask);
     message("  Size:  %i.", h->masks[i].size);
@@ -70,7 +70,7 @@ void header_free(struct header *h) { free(h->masks); };
  * @return Index of the field (-1 if not found).
  */
 int header_get_field_index(const struct header *h, const char *field) {
-  for (size_t i = 0; i < h->number_mask; i++) {
+  for (size_t i = 0; i < h->masks_count; i++) {
     if (strcmp(h->masks[i].name, field) == 0) {
       return i;
     }
@@ -135,10 +135,12 @@ void header_read(struct header *h, struct logger_logfile *log) {
     error("Wrong offset value in the header (%i).", h->offset_direction);
 
   /* Read offset to first record. */
+  h->offset_first_record = 0;
   map = logger_loader_io_read_data(map, LOGGER_OFFSET_SIZE,
                                    &h->offset_first_record);
 
   /* Read the size of the strings. */
+  h->string_length = 0;
   map =
       logger_loader_io_read_data(map, sizeof(unsigned int), &h->string_length);
 
@@ -148,13 +150,14 @@ void header_read(struct header *h, struct logger_logfile *log) {
   }
 
   /* Read the number of masks. */
-  map = logger_loader_io_read_data(map, sizeof(unsigned int), &h->number_mask);
+  map = logger_loader_io_read_data(map, sizeof(unsigned int), &h->masks_count);
 
   /* Allocate the masks memory. */
-  h->masks = malloc(sizeof(struct mask_data) * h->number_mask);
+  h->masks = malloc(sizeof(struct mask_data) * h->masks_count);
 
   /* Loop over all masks. */
-  for (size_t i = 0; i < h->number_mask; i++) {
+  h->timestamp_mask = 0;
+  for (size_t i = 0; i < h->masks_count; i++) {
     /* Read the mask name. */
     map = logger_loader_io_read_data(map, h->string_length, h->masks[i].name);
 
@@ -164,6 +167,16 @@ void header_read(struct header *h, struct logger_logfile *log) {
     /* Read the mask data size. */
     map = logger_loader_io_read_data(map, sizeof(unsigned int),
                                      &h->masks[i].size);
+
+    /* Keep the timestamp mask in memory */
+    if (strcmp(h->masks[i].name, "timestamp") == 0) {
+      h->timestamp_mask = h->masks[i].mask;
+    }
+  }
+
+  /* Check that the timestamp mask exists */
+  if (h->timestamp_mask == 0) {
+    error("Unable to find the timestamp mask.");
   }
 
   /* Check the logfile header's size. */
@@ -187,7 +200,7 @@ size_t header_get_record_size_from_mask(const struct header *h,
                                         const size_t mask) {
   size_t count = 0;
   /* Loop over each masks. */
-  for (size_t i = 0; i < h->number_mask; i++) {
+  for (size_t i = 0; i < h->masks_count; i++) {
     if (mask & h->masks[i].mask) {
       count += h->masks[i].size;
     }
diff --git a/logger/logger_header.h b/logger/logger_header.h
index c388ef65cda21d00f53ddc54e97f43671edf1aeb..a68b98bc9a4807e6fe2b10132911132daad54813 100644
--- a/logger/logger_header.h
+++ b/logger/logger_header.h
@@ -24,9 +24,13 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+/* Number of character for the version information */
 #define LOGGER_VERSION_SIZE 20
-#define LOGGER_OFFSET_SIZE 7
-#define LOGGER_MASK_SIZE 1
+/* Number of bytes for the mask information in the headers
+   (needs to be large enough for the largest mask) */
+#define LOGGER_MASK_SIZE 2
+/* Number of bytes for the offset size in the headers */
+#define LOGGER_OFFSET_SIZE (8 - LOGGER_MASK_SIZE)
 
 enum logger_offset_direction {
   logger_offset_backward = 0,
@@ -68,7 +72,7 @@ struct header {
   unsigned int string_length;
 
   /* Number of masks. */
-  unsigned int number_mask;
+  unsigned int masks_count;
 
   /* List of masks. */
   struct mask_data *masks;
@@ -78,6 +82,9 @@ struct header {
 
   /* The corresponding log. */
   struct logger_logfile *log;
+
+  /* Timestamp mask */
+  size_t timestamp_mask;
 };
 
 void header_print(const struct header *h);
diff --git a/logger/logger_index.c b/logger/logger_index.c
new file mode 100644
index 0000000000000000000000000000000000000000..774bc102279e5b874cfcf0bdac123a7ba2e53a66
--- /dev/null
+++ b/logger/logger_index.c
@@ -0,0 +1,224 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Include the corresponding header */
+#include "logger_index.h"
+
+/* Include the standard headers */
+#include <errno.h>
+#include <fcntl.h>
+
+/* Include local headers */
+#include "logger_loader_io.h"
+#include "logger_reader.h"
+#include "quick_sort.h"
+
+/* Define the place and size of each element in the header. */
+/* The time in double precision. */
+#define logger_index_time_offset 0
+#define logger_index_time_size sizeof(double)
+/* The time on the integer timeline. */
+#define logger_index_integer_time_offset \
+  logger_index_time_offset + logger_index_time_size
+#define logger_index_integer_time_size sizeof(integertime_t)
+/* The number of particle (for each type). */
+#define logger_index_npart_offset \
+  logger_index_integer_time_offset + logger_index_integer_time_size
+#define logger_index_npart_size sizeof(uint64_t) * swift_type_count
+/* The flag used for checking if the file is sorted or not. */
+#define logger_index_is_sorted_offset \
+  logger_index_npart_offset + logger_index_npart_size
+#define logger_index_is_sorted_size sizeof(char)
+/* The array containing the offsets and ids. Rounded up to a multiple of 8 in
+ * order to align the memory. */
+#define logger_index_data_offset \
+  ((logger_index_is_sorted_offset + logger_index_is_sorted_size + 7) & ~7)
+
+/**
+ * @brief Read the index file header.
+ *
+ * @param index The #logger_index.
+ * @param filename The filename of the index file.
+ */
+void logger_index_read_header(struct logger_index *index,
+                              const char *filename) {
+
+  /* Open the file */
+  message("Reading %s", filename);
+  logger_index_map_file(index, filename, 0);
+
+  /* Read times */
+  memcpy(&index->time, index->index.map + logger_index_time_offset,
+         logger_index_time_size);
+  memcpy(&index->integer_time,
+         index->index.map + logger_index_integer_time_offset,
+         logger_index_integer_time_size);
+
+  /* Read the number of particles */
+  memcpy(index->nparts, index->index.map + logger_index_npart_offset,
+         logger_index_npart_size);
+
+  /* Read if the file is sorted */
+  memcpy(&index->is_sorted, index->index.map + logger_index_is_sorted_offset,
+         logger_index_is_sorted_size);
+
+  /* Close the file */
+  logger_index_free(index);
+}
+
+/**
+ * @brief Write that the file is sorted.
+ *
+ * WARNING The file must be mapped.
+ *
+ * @param index The #logger_index.
+ */
+void logger_index_write_sorted(struct logger_index *index) {
+  /* Set the value */
+  const char is_sorted = 1;
+
+  /* Write the value */
+  memcpy(index->index.map + logger_index_is_sorted_offset, &is_sorted,
+         logger_index_is_sorted_size);
+}
+
+/**
+ * @brief Get the #index_data of a particle type.
+ *
+ * @param index The #logger_index.
+ * @param type The particle type.
+ */
+struct index_data *logger_index_get_data(struct logger_index *index, int type) {
+  /* Get the offset of particles of this type by skipping entries of lower
+   * types. */
+  size_t count = 0;
+  for (int i = 0; i < type; i++) {
+    count += index->nparts[i];
+  }
+  const size_t type_offset = count * sizeof(struct index_data);
+
+  return index->index.map + logger_index_data_offset + type_offset;
+}
+
+/**
+ * @brief Map the file and if required sort it.
+ *
+ * @param index The #logger_index.
+ * @param filename The index filename.
+ * @param sorted Does the file need to be sorted?
+ */
+void logger_index_map_file(struct logger_index *index, const char *filename,
+                           int sorted) {
+  /* Un-map previous file */
+  if (index->index.map != NULL) {
+    error("Trying to remap.");
+  }
+
+  /* Check if need to sort the file */
+  if (sorted && !index->is_sorted) {
+    if (index->reader->verbose > 0) {
+      message("Sorting the index file.");
+    }
+    /* Map the index file */
+    logger_loader_io_mmap_file(&index->index, filename,
+                               /* read_only */ 0);
+    /* Sort the file */
+    for (int i = 0; i < swift_type_count; i++) {
+      if (index->nparts[i] != 0) {
+        struct index_data *data = logger_index_get_data(index, i);
+        quick_sort(data, index->nparts[i]);
+      }
+    }
+
+    /* Write that the file is sorted */
+    logger_index_write_sorted(index);
+
+    /* Free the index file before opening it again in read only */
+    logger_index_free(index);
+
+    if (index->reader->verbose > 0) {
+      message("Sorting done.");
+    }
+  }
+
+  /* Map the index file */
+  logger_loader_io_mmap_file(&index->index, filename,
+                             /* read_only */ 1);
+}
+
+/**
+ * @brief Cleanup the memory of a logger_index
+ *
+ * @param index The #logger_index.
+ */
+void logger_index_free(struct logger_index *index) {
+  if (index->index.map == NULL) {
+    error("Trying to unmap an unexisting map");
+  }
+  logger_loader_io_munmap_file(&index->index);
+}
+
+/**
+ * @brief Get the offset of a given particle
+ *
+ * @param index The #logger_index.
+ * @param id The ID of the particle.
+ * @param type The type of the particle.
+ *
+ * @return The offset of the particle or 0 if not found.
+ */
+size_t logger_index_get_particle_offset(struct logger_index *index,
+                                        long long id, int type) {
+  /* Define a few variables */
+  const struct index_data *data = logger_index_get_data(index, type);
+  size_t left = 0;
+  size_t right = index->nparts[type] - 1;
+
+  /* Search for the value (binary search) */
+  while (left <= right) {
+    size_t m = (left + right) / 2;
+    if (data[m].id < id) {
+      left = m + 1;
+    } else if (data[m].id > id) {
+      right = m - 1;
+    } else {
+      return data[m].offset;
+    }
+  }
+
+  return 0;
+}
+
+/**
+ * @brief Initialize the #logger_index.
+ *
+ * @param index The #logger_index.
+ * @param reader The #logger_reader.
+ */
+void logger_index_init(struct logger_index *index,
+                       struct logger_reader *reader) {
+  /* Set the mapped file to NULL */
+  index->index.map = NULL;
+
+  /* Set the pointer to the reader */
+  index->reader = reader;
+
+  /* Set the time to its default value */
+  index->time = -1;
+}
diff --git a/logger/logger_index.h b/logger/logger_index.h
new file mode 100644
index 0000000000000000000000000000000000000000..8419e0aaa18e00846e2c3ad9f1218780bf44f431
--- /dev/null
+++ b/logger/logger_index.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef LOGGER_LOGGER_INDEX_H
+#define LOGGER_LOGGER_INDEX_H
+
+#include "logger_loader_io.h"
+#include "logger_tools.h"
+
+/* predefine the structure */
+struct logger_reader;
+
+/**
+ * @brief Data structure contained in the logger files.
+ */
+struct index_data {
+  /* Id of the particle. */
+  int64_t id;
+
+  /* Offset of the particle in the file. */
+  uint64_t offset;
+};
+
+/**
+ * @brief Structure dealing with the index files.
+ *
+ * The structure is initialized with #logger_index_init and
+ * then a file can be read with #logger_index_read_header and
+ * #logger_index_map_file.
+ *
+ * The functions #logger_index_get_particle_offset and
+ * #logger_index_get_data should be used to access the element
+ * stored inside the index file.
+ * The first one accesses a particle through its id and the second one
+ * gives a pointer to the first element that can be looped through.
+ */
+struct logger_index {
+  /* Pointer to the reader */
+  struct logger_reader *reader;
+
+  /* Time of the index file */
+  double time;
+
+  /* Integer time of the index file */
+  integertime_t integer_time;
+
+  /* Number of particles in the file */
+  uint64_t nparts[swift_type_count];
+
+  /* Is the file sorted ? */
+  char is_sorted;
+
+  /* The mapped file */
+  struct mapped_file index;
+};
+
+void logger_index_write_sorted(struct logger_index *index);
+void logger_index_init(struct logger_index *index,
+                       struct logger_reader *reader);
+void logger_index_read_header(struct logger_index *index, const char *filename);
+void logger_index_map_file(struct logger_index *index, const char *filename,
+                           int sorted);
+size_t logger_index_get_particle_offset(struct logger_index *index,
+                                        long long id, int type);
+void logger_index_free(struct logger_index *index);
+void logger_index_sort_file(struct logger_index *index);
+struct index_data *logger_index_get_data(struct logger_index *index, int type);
+
+#endif  // LOGGER_LOGGER_INDEX_H
diff --git a/logger/logger_loader_io.c b/logger/logger_loader_io.c
index f18f9bb7eb2eaf88ba11eaf916c0a68a27cfd2d2..6bebe3aefa619052b1f7688ad627fde6812d8d81 100644
--- a/logger/logger_loader_io.c
+++ b/logger/logger_loader_io.c
@@ -44,13 +44,13 @@ size_t logger_loader_io_get_file_size(int fd) {
  *
  * #logger_loader_io_munmap_file should be called to unmap the file.
  *
+ * @param map The #mapped_file.
  * @param filename file to read.
- * @param file_size (out) size of the file.
  * @param read_only Open the file in read only mode?
  *
  */
-void *logger_loader_io_mmap_file(char *filename, size_t *file_size,
-                                 int read_only) {
+void logger_loader_io_mmap_file(struct mapped_file *map, const char *filename,
+                                int read_only) {
   /* open the file. */
   int fd;
 
@@ -63,33 +63,34 @@ void *logger_loader_io_mmap_file(char *filename, size_t *file_size,
     error("Unable to open file %s (%s).", filename, strerror(errno));
 
   /* get the file size. */
-  *file_size = logger_loader_io_get_file_size(fd);
+  map->mmap_size = logger_loader_io_get_file_size(fd);
 
   /* map the memory. */
   int mode = PROT_READ;
   if (!read_only) mode |= PROT_WRITE;
 
-  void *map = mmap(NULL, *file_size, mode, MAP_SHARED, fd, 0);
-  if (map == MAP_FAILED)
-    error("Failed to allocate map of size %zi bytes (%s).", *file_size,
+  map->map = mmap(NULL, map->mmap_size, mode, MAP_SHARED, fd, 0);
+  if (map->map == MAP_FAILED)
+    error("Failed to allocate map of size %zi bytes (%s).", map->mmap_size,
           strerror(errno));
 
   /* Close the file. */
   close(fd);
-
-  return map;
 }
 
 /**
  * @brief Unmap a file.
  *
- * @param map file mapping.
- * @param file_size The file size.
+ * @param map The #mapped_file.
  *
  */
-void logger_loader_io_munmap_file(void *map, size_t file_size) {
+void logger_loader_io_munmap_file(struct mapped_file *map) {
   /* unmap the file. */
-  if (munmap(map, file_size) != 0) {
+  if (munmap(map->map, map->mmap_size) != 0) {
     error("Unable to unmap the file (%s).", strerror(errno));
   }
+
+  /* Reset values */
+  map->map = NULL;
+  map->mmap_size = 0;
 }
diff --git a/logger/logger_loader_io.h b/logger/logger_loader_io.h
index d44fea673017644306e73261afdbc6dec26948c6..b90337fe3e1e19c9a554fe36a93bf243218e56e1 100644
--- a/logger/logger_loader_io.h
+++ b/logger/logger_loader_io.h
@@ -29,10 +29,19 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+/* Structure for mapping a file. */
+struct mapped_file {
+  /* Mapped data. */
+  void *map;
+
+  /* File size. */
+  size_t mmap_size;
+};
+
 size_t logger_loader_io_get_file_size(int fd);
-void *logger_loader_io_mmap_file(char *filename, size_t *file_size,
-                                 int read_only);
-void logger_loader_io_munmap_file(void *map, size_t file_size);
+void logger_loader_io_mmap_file(struct mapped_file *map, const char *filename,
+                                int read_only);
+void logger_loader_io_munmap_file(struct mapped_file *map);
 
 /**
  * @brief read a mask with its offset.
diff --git a/logger/logger_logfile.c b/logger/logger_logfile.c
index c70068cd24c01a5ba231e97e343a0c076dc0ecb4..7c1660ec7530af7fc5440abd20c5ab627dbc9882 100644
--- a/logger/logger_logfile.c
+++ b/logger/logger_logfile.c
@@ -42,8 +42,8 @@ void logger_logfile_init_from_file(struct logger_logfile *log, char *filename,
 
   /* Open file, map it and get its size. */
   if (reader->verbose > 1) message("Mapping the log file.");
-  log->log.map = logger_loader_io_mmap_file(filename, &log->log.file_size,
-                                            /* read_only */ 1);
+  logger_loader_io_mmap_file(&log->log, filename,
+                             /* read_only */ 1);
 
   /* Read the header. */
   if (reader->verbose > 1) message("Reading the header.");
@@ -69,7 +69,7 @@ void logger_logfile_init_from_file(struct logger_logfile *log, char *filename,
   }
 
   /* Initialize the time array. */
-  if (reader->verbose > 1) message("Reading the time stamps.");
+  if (reader->verbose > 1) message("Reading the timestamps.");
   time_array_populate(&log->times, log);
 
   /* Print the time array. */
@@ -84,7 +84,7 @@ void logger_logfile_init_from_file(struct logger_logfile *log, char *filename,
  * @param log The #logger_logfile.
  */
 void logger_logfile_free(struct logger_logfile *log) {
-  logger_loader_io_munmap_file(log->log.map, log->log.file_size);
+  logger_loader_io_munmap_file(&log->log);
 
   time_array_free(&log->times);
 }
@@ -98,9 +98,9 @@ void logger_logfile_free(struct logger_logfile *log) {
 void logger_logfile_reverse_offset(struct logger_logfile *log, char *filename) {
 
   /* Close and reopen the file in write mode. */
-  logger_loader_io_munmap_file(log->log.map, log->log.file_size);
-  log->log.map = logger_loader_io_mmap_file(filename, &log->log.file_size,
-                                            /* read_only */ 0);
+  logger_loader_io_munmap_file(&log->log);
+  logger_loader_io_mmap_file(&log->log, filename,
+                             /* read_only */ 0);
 
   /* Get pointers */
   struct header *header = &log->header;
@@ -119,7 +119,7 @@ void logger_logfile_reverse_offset(struct logger_logfile *log, char *filename) {
 
   /* check that the record offset points to another record. */
   for (size_t offset_debug = header->offset_first_record;
-       offset_debug < log->log.file_size;
+       offset_debug < log->log.mmap_size;
        offset_debug = tools_check_record_consistency(reader, offset_debug)) {
   }
 
@@ -138,7 +138,7 @@ void logger_logfile_reverse_offset(struct logger_logfile *log, char *filename) {
   }
 
   /* reverse the record's offset. */
-  for (size_t offset = header->offset_first_record; offset < log->log.file_size;
+  for (size_t offset = header->offset_first_record; offset < log->log.mmap_size;
        offset = tools_reverse_offset(header, log->log.map, offset)) {
   }
 
@@ -159,7 +159,7 @@ void logger_logfile_reverse_offset(struct logger_logfile *log, char *filename) {
 
   /* check that the record offset points to another record. */
   for (size_t offset_debug = header->offset_first_record;
-       offset_debug < log->log.file_size;
+       offset_debug < log->log.mmap_size;
        offset_debug = tools_check_record_consistency(reader, offset_debug)) {
   }
 
@@ -169,7 +169,7 @@ void logger_logfile_reverse_offset(struct logger_logfile *log, char *filename) {
 #endif
 
   /* Close and reopen the file in read mode. */
-  logger_loader_io_munmap_file(log->log.map, log->log.file_size);
-  log->log.map = logger_loader_io_mmap_file(filename, &log->log.file_size,
-                                            /* read_only */ 1);
+  logger_loader_io_munmap_file(&log->log);
+  logger_loader_io_mmap_file(&log->log, filename,
+                             /* read_only */ 1);
 }
diff --git a/logger/logger_logfile.h b/logger/logger_logfile.h
index 0b6ef728d524bb104b83fc28b9250c51a764dfd4..15cd8bc3fb98be7f7dff2248256f1bdcf32ad799 100644
--- a/logger/logger_logfile.h
+++ b/logger/logger_logfile.h
@@ -24,6 +24,7 @@
 #define LOGGER_LOGGER_LOGFILE_H
 
 #include "logger_header.h"
+#include "logger_loader_io.h"
 #include "logger_time.h"
 
 struct logger_reader;
@@ -49,15 +50,8 @@ struct logger_logfile {
   /* Information about the time records. */
   struct time_array times;
 
-  /* The log's variables. */
-  struct {
-    /* Mapped data. */
-    void *map;
-
-    /* File size. */
-    size_t file_size;
-
-  } log;
+  /* The file. */
+  struct mapped_file log;
 };
 
 void logger_logfile_init_from_file(struct logger_logfile *log, char *filename,
diff --git a/logger/logger_particle.c b/logger/logger_particle.c
index 6809e0edf6125e66cbb8807cc98eeb31b5e04ecd..d45c61cf66b720d8bf68cef7b63c2499bf59a6de 100644
--- a/logger/logger_particle.c
+++ b/logger/logger_particle.c
@@ -33,7 +33,7 @@
  * @param p The #logger_particle to print
  */
 void logger_particle_print(const struct logger_particle *p) {
-  message("ID:            %lu.", p->id);
+  message("ID:            %lli.", p->id);
   message("Mass:          %g", p->mass);
   message("Time:          %g.", p->time);
   message("Cutoff Radius: %g.", p->h);
@@ -42,6 +42,7 @@ void logger_particle_print(const struct logger_particle *p) {
   message("Accelerations: (%g, %g, %g).", p->acc[0], p->acc[1], p->acc[2]);
   message("Entropy:       %g.", p->entropy);
   message("Density:       %g.", p->density);
+  message("Type:          %i.", p->type);
 }
 
 /**
@@ -61,6 +62,8 @@ void logger_particle_init(struct logger_particle *part) {
   part->h = -1;
   part->mass = -1;
   part->id = SIZE_MAX;
+
+  part->type = 0;
 }
 
 /**
@@ -92,6 +95,8 @@ void *logger_particle_read_field(struct logger_particle *part, void *map,
     p = &part->density;
   } else if (strcmp("consts", field) == 0) {
     p = malloc(size);
+  } else if (strcmp("special flags", field) == 0) {
+    p = &part->type;
   } else {
     error("Type %s not defined.", field);
   }
@@ -119,7 +124,7 @@ void *logger_particle_read_field(struct logger_particle *part, void *map,
  * @param reader The #logger_reader.
  * @param part The #logger_particle to update.
  * @param offset offset of the record to read.
- * @param time time to interpolate.
+ * @param time time to interpolate (not used if constant interpolation).
  * @param reader_type #logger_reader_type.
  *
  * @return position after the record.
@@ -129,6 +134,9 @@ size_t logger_particle_read(struct logger_particle *part,
                             const double time,
                             const enum logger_reader_type reader_type) {
 
+  /* Save the offset */
+  part->offset = offset;
+
   /* Get a few pointers. */
   const struct header *h = &reader->log.header;
   void *map = reader->log.log.map;
@@ -143,11 +151,18 @@ size_t logger_particle_read(struct logger_particle *part,
   /* Read the record's mask. */
   map = logger_loader_io_read_mask(h, map + offset, &mask, &h_offset);
 
+  /* Check that the mask is meaningful */
+  if (mask > (unsigned int)(1 << h->masks_count)) {
+    error("Found an unexpected mask %zi", mask);
+  }
+
   /* Check if it is not a time record. */
-  if (mask == 128) error("Unexpected mask: %lu.", mask);
+  if (mask == h->timestamp_mask) {
+    error("Unexpected timestamp while reading a particle: %lu.", mask);
+  }
 
   /* Read all the fields. */
-  for (size_t i = 0; i < h->number_mask; i++) {
+  for (size_t i = 0; i < h->masks_count; i++) {
     if (mask & h->masks[i].mask) {
       map = logger_particle_read_field(part, map, h->masks[i].name,
                                        h->masks[i].size);
@@ -215,8 +230,9 @@ void logger_particle_interpolate(struct logger_particle *part_curr,
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check the particle order. */
-  if (part_next->time <= part_curr->time)
-    error("Wrong particle order (next before current).");
+  if (part_next->time < part_curr->time)
+    error("Wrong particle order (next before current): %g, %g", part_next->time,
+          part_curr->time);
   if ((time < part_curr->time) || (part_next->time < time))
     error(
         "Cannot extrapolate (particle time: %f, "
diff --git a/logger/logger_particle.h b/logger/logger_particle.h
index addd23564b65a734152ae8f538596d79019dd36f..657f62ef4113c6bc306ad8df145c93cd0920bdcc 100644
--- a/logger/logger_particle.h
+++ b/logger/logger_particle.h
@@ -74,10 +74,16 @@ struct logger_particle {
   float mass;
 
   /* unique id. */
-  size_t id;
+  long long id;
 
   /* time of the record. */
   double time;
+
+  /* offset of the particle */
+  size_t offset;
+
+  /* The particle type */
+  int type;
 };
 
 /**
diff --git a/logger/logger_python_wrapper.c b/logger/logger_python_wrapper.c
index 07c87b4989896977c56ddff4df243a5310d393a7..a422a163f67c7664c057ddd282328a15cd250f9b 100644
--- a/logger/logger_python_wrapper.c
+++ b/logger/logger_python_wrapper.c
@@ -30,204 +30,120 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+typedef struct {
+  PyObject_HEAD struct logger_particle part;
+} PyLoggerParticle;
+
+static PyTypeObject PyLoggerParticle_Type;
+
+PyArray_Descr *logger_particle_descr;
+
 /**
- * @brief load data from the offset without any interpolation
+ * @brief load data from the index files.
  *
- * <b>offset</b> PyArrayObject list of offset for each particle.
+ * <b>basename</b> Base name of the logger files.
  *
- * <b>filename</b> string filename of the log file.
+ * <b>time</b> The time requested.
  *
  * <b>verbose</b> Verbose level.
  *
  * <b>returns</b> dictionnary containing the data read.
  */
-static PyObject *loadFromIndex(__attribute__((unused)) PyObject *self,
-                               PyObject *args) {
+static PyObject *loadSnapshotAtTime(__attribute__((unused)) PyObject *self,
+                                    PyObject *args) {
 
-  /* input variables. */
-  PyArrayObject *offset = NULL;
-  char *filename = NULL;
+  /* declare variables. */
+  char *basename = NULL;
 
-  /* output variables. */
-  PyArrayObject *pos = NULL;
-  PyArrayObject *vel = NULL;
-  PyArrayObject *acc = NULL;
-  PyArrayObject *entropy = NULL;
-  PyArrayObject *h_sph = NULL;
-  PyArrayObject *rho = NULL;
-  PyArrayObject *mass = NULL;
-  PyArrayObject *id = NULL;
-
-  size_t time_offset;
+  double time = 0;
   int verbose = 2;
 
   /* parse arguments. */
-  if (!PyArg_ParseTuple(args, "OsL|i", &offset, &filename, &time_offset,
-                        &verbose))
-    return NULL;
-
-  if (!PyArray_Check(offset)) {
-    error("Offset is not a numpy array.");
-  }
-  if (PyArray_NDIM(offset) != 1) {
-    error("Offset is not a 1 dimensional array.");
-  }
-  if (PyArray_TYPE(offset) != NPY_UINT64) {
-    error("Offset does not contain unsigned int.");
-  }
+  if (!PyArg_ParseTuple(args, "sd|i", &basename, &time, &verbose)) return NULL;
 
   /* initialize the reader. */
   struct logger_reader reader;
-  logger_reader_init(&reader, filename, verbose);
-  struct header *h = &reader.log.header;
-
-  /* init array. */
-  npy_intp dim[2];
-  dim[0] = PyArray_DIMS(offset)[0];
-  dim[1] = DIM;
-
-  /* Get required time. */
-  double time = time_array_get_time(&reader.log.times, time_offset);
+  logger_reader_init(&reader, basename, verbose);
 
-  /* init output. */
-  if (header_get_field_index(h, "positions") != -1) {
-    pos = (PyArrayObject *)PyArray_SimpleNew(2, dim, NPY_DOUBLE);
-  }
+  if (verbose > 1) message("Reading particles.");
 
-  if (header_get_field_index(h, "velocities") != -1) {
-    vel = (PyArrayObject *)PyArray_SimpleNew(2, dim, NPY_FLOAT);
-  }
+  /* Number of particles in the index files */
+  npy_intp n_tot = 0;
 
-  if (header_get_field_index(h, "accelerations") != -1) {
-    acc = (PyArrayObject *)PyArray_SimpleNew(2, dim, NPY_FLOAT);
-  }
+  /* Set the reading time */
+  logger_reader_set_time(&reader, time);
 
-  if (header_get_field_index(h, "entropy") != -1) {
-    entropy =
-        (PyArrayObject *)PyArray_SimpleNew(1, PyArray_DIMS(offset), NPY_FLOAT);
+  /* Get the number of particles */
+  int n_type = 0;
+  const uint64_t *n_parts =
+      logger_reader_get_number_particles(&reader, &n_type);
+  for (int i = 0; i < n_type; i++) {
+    n_tot += n_parts[i];
   }
 
-  if (header_get_field_index(h, "smoothing length") != -1) {
-    h_sph =
-        (PyArrayObject *)PyArray_SimpleNew(1, PyArray_DIMS(offset), NPY_FLOAT);
-  }
+#ifdef SWIFT_DEBUG_CHECKS
+  message("Found %ld particles", (long)n_tot);
+#endif  // SWIFT_DEBUG_CHECKS
 
-  if (header_get_field_index(h, "density") != -1) {
-    rho =
-        (PyArrayObject *)PyArray_SimpleNew(1, PyArray_DIMS(offset), NPY_FLOAT);
-  }
+  /* Allocate the output memory */
+  PyArrayObject *out = (PyArrayObject *)PyArray_SimpleNewFromDescr(
+      1, &n_tot, logger_particle_descr);
 
-  if (header_get_field_index(h, "consts") != -1) {
-    mass =
-        (PyArrayObject *)PyArray_SimpleNew(1, PyArray_DIMS(offset), NPY_FLOAT);
-    id = (PyArrayObject *)PyArray_SimpleNew(1, PyArray_DIMS(offset), NPY_ULONG);
-  }
+  /* Allows to use threads */
+  Py_BEGIN_ALLOW_THREADS;
 
-  if (verbose > 1) message("Reading particles.");
+  /* Read the particle. */
+  logger_reader_read_all_particles(&reader, time, logger_reader_const,
+                                   PyArray_DATA(out), n_tot);
 
-  /* loop over all particles. */
-  for (npy_intp i = 0; i < PyArray_DIMS(offset)[0]; i++) {
-    struct logger_particle part;
-
-    /* Get the offset. */
-    size_t offset_particle = *(size_t *)PyArray_GETPTR1(offset, i);
-
-    /* Read the particle. */
-    logger_particle_read(&part, &reader, offset_particle, time,
-                         logger_reader_lin);
-
-    double *dtmp;
-    float *ftmp;
-    size_t *stmp;
-
-    /* copy the data. */
-    for (size_t k = 0; k < DIM; k++) {
-      if (pos) {
-        dtmp = PyArray_GETPTR2(pos, i, k);
-        *dtmp = part.pos[k];
-      }
-
-      if (vel) {
-        ftmp = PyArray_GETPTR2(vel, i, k);
-        *ftmp = part.vel[k];
-      }
-
-      if (acc) {
-        ftmp = PyArray_GETPTR2(acc, i, k);
-        *ftmp = part.acc[k];
-      }
-    }
-
-    if (entropy) {
-      ftmp = PyArray_GETPTR1(entropy, i);
-      *ftmp = part.entropy;
-    }
-
-    if (rho) {
-      ftmp = PyArray_GETPTR1(rho, i);
-      *ftmp = part.density;
-    }
-
-    if (h_sph) {
-      ftmp = PyArray_GETPTR1(h_sph, i);
-      *ftmp = part.h;
-    }
-
-    if (mass) {
-      ftmp = PyArray_GETPTR1(mass, i);
-      *ftmp = part.mass;
-    }
-
-    if (id) {
-      stmp = PyArray_GETPTR1(id, i);
-      *stmp = part.id;
-    }
-  }
+  /* No need of threads anymore */
+  Py_END_ALLOW_THREADS;
 
   /* Free the memory. */
   logger_reader_free(&reader);
 
-  /* construct return value. */
-  PyObject *dict = PyDict_New();
-  PyObject *key = PyUnicode_FromString("positions");
-  PyDict_SetItem(dict, key, PyArray_Return(pos));
+  return (PyObject *)out;
+}
 
-  if (vel) {
-    key = PyUnicode_FromString("velocities");
-    PyDict_SetItem(dict, key, PyArray_Return(vel));
-  }
+/**
+ * @brief Read the minimal and maximal time.
+ *
+ * <b>basename</b> Base name of the logger files.
+ *
+ * <b>verbose</b> Verbose level.
+ *
+ * <b>returns</b> tuple containing min and max time.
+ */
+static PyObject *getTimeLimits(__attribute__((unused)) PyObject *self,
+                               PyObject *args) {
 
-  if (acc) {
-    key = PyUnicode_FromString("accelerations");
-    PyDict_SetItem(dict, key, PyArray_Return(acc));
-  }
+  /* declare variables. */
+  char *basename = NULL;
 
-  if (entropy) {
-    key = PyUnicode_FromString("entropy");
-    PyDict_SetItem(dict, key, PyArray_Return(entropy));
-  }
+  int verbose = 2;
 
-  if (rho) {
-    key = PyUnicode_FromString("rho");
-    PyDict_SetItem(dict, key, PyArray_Return(rho));
-  }
+  /* parse arguments. */
+  if (!PyArg_ParseTuple(args, "s|i", &basename, &verbose)) return NULL;
 
-  if (h_sph) {
-    key = PyUnicode_FromString("h_sph");
-    PyDict_SetItem(dict, key, PyArray_Return(h_sph));
-  }
+  /* initialize the reader. */
+  struct logger_reader reader;
+  logger_reader_init(&reader, basename, verbose);
 
-  if (mass) {
-    key = PyUnicode_FromString("mass");
-    PyDict_SetItem(dict, key, PyArray_Return(mass));
-  }
+  if (verbose > 1) message("Reading particles.");
 
-  if (id) {
-    key = PyUnicode_FromString("id");
-    PyDict_SetItem(dict, key, PyArray_Return(id));
-  }
+  /* Get the time limits */
+  double time_min = logger_reader_get_time_begin(&reader);
+  double time_max = logger_reader_get_time_end(&reader);
+
+  /* Free the memory. */
+  logger_reader_free(&reader);
+
+  /* Create the output */
+  PyObject *out = PyTuple_New(2);
+  PyTuple_SetItem(out, 0, PyFloat_FromDouble(time_min));
+  PyTuple_SetItem(out, 1, PyFloat_FromDouble(time_max));
 
-  return dict;
+  return (PyObject *)out;
 }
 
 /**
@@ -259,10 +175,40 @@ static PyObject *pyReverseOffset(__attribute__((unused)) PyObject *self,
 /* definition of the method table. */
 
 static PyMethodDef libloggerMethods[] = {
-    {"loadFromIndex", loadFromIndex, METH_VARARGS,
-     "Load snapshot directly from the offset in an index file."},
+    {"loadSnapshotAtTime", loadSnapshotAtTime, METH_VARARGS,
+     "Load a snapshot directly from the logger using the index files.\n\n"
+     "Parameters\n"
+     "----------\n\n"
+     "basename: str\n"
+     "  The basename of the index files.\n\n"
+     "time: double\n"
+     "  The (double) time of the snapshot.\n\n"
+     "verbose: int, optional\n"
+     "  The verbose level of the loader.\n\n"
+     "Returns\n"
+     "-------\n\n"
+     "snapshot: numpy structured array\n"
+     "  The particle data at the requested time.\n"},
     {"reverseOffset", pyReverseOffset, METH_VARARGS,
-     "Reverse the offset (from pointing backward to forward)."},
+     "Reverse the offset (from pointing backward to forward).\n\n"
+     "Parameters\n"
+     "----------\n\n"
+     "filename: str\n"
+     "  The filename of the log file.\n\n"
+     "verbose: int, optional\n"
+     "  The verbose level of the loader.\n"},
+    {"getTimeLimits", getTimeLimits, METH_VARARGS,
+     "Read the time limits of the simulation.\n\n"
+     "Parameters\n"
+     "----------\n\n"
+     "basename: str\n"
+     "  The basename of the index files.\n\n"
+     "verbose: int, optional\n"
+     "  The verbose level of the loader.\n\n"
+     "Returns\n"
+     "-------\n\n"
+     "times: tuple\n"
+     "  time min, time max\n"},
 
     {NULL, NULL, 0, NULL} /* Sentinel */
 };
@@ -279,12 +225,104 @@ static struct PyModuleDef libloggermodule = {
     NULL  /* m_free */
 };
 
+#define CREATE_FIELD(fields, name, field_name, type)                      \
+  ({                                                                      \
+    PyObject *tuple = PyTuple_New(2);                                     \
+    PyTuple_SetItem(tuple, 0, (PyObject *)PyArray_DescrFromType(type));   \
+    PyTuple_SetItem(                                                      \
+        tuple, 1,                                                         \
+        PyLong_FromSize_t(offsetof(struct logger_particle, field_name))); \
+    PyDict_SetItem(fields, PyUnicode_FromString(name), tuple);            \
+  })
+
+#define CREATE_FIELD_3D(fields, name, field_name, type)                   \
+  ({                                                                      \
+    /* Create the 3D descriptor */                                        \
+    PyArray_Descr *vec = PyArray_DescrNewFromType(type);                  \
+    vec->subarray = malloc(sizeof(PyArray_ArrayDescr));                   \
+    vec->subarray->base = PyArray_DescrFromType(type);                    \
+    vec->subarray->shape = PyTuple_New(1);                                \
+    PyTuple_SetItem(vec->subarray->shape, 0, PyLong_FromSize_t(3));       \
+                                                                          \
+    /* Create the field */                                                \
+    PyObject *tuple = PyTuple_New(2);                                     \
+    PyTuple_SetItem(tuple, 0, (PyObject *)vec);                           \
+    PyTuple_SetItem(                                                      \
+        tuple, 1,                                                         \
+        PyLong_FromSize_t(offsetof(struct logger_particle, field_name))); \
+    PyDict_SetItem(fields, PyUnicode_FromString(name), tuple);            \
+  })
+
+void pylogger_particle_define_descr(void) {
+  /* Generate list of field names */
+  PyObject *names = PyTuple_New(9);
+  PyTuple_SetItem(names, 0, PyUnicode_FromString("positions"));
+  PyTuple_SetItem(names, 1, PyUnicode_FromString("velocities"));
+  PyTuple_SetItem(names, 2, PyUnicode_FromString("accelerations"));
+  PyTuple_SetItem(names, 3, PyUnicode_FromString("entropies"));
+  PyTuple_SetItem(names, 4, PyUnicode_FromString("smoothing_lengths"));
+  PyTuple_SetItem(names, 5, PyUnicode_FromString("densities"));
+  PyTuple_SetItem(names, 6, PyUnicode_FromString("masses"));
+  PyTuple_SetItem(names, 7, PyUnicode_FromString("ids"));
+  PyTuple_SetItem(names, 8, PyUnicode_FromString("times"));
+
+  /* Generate list of fields */
+  PyObject *fields = PyDict_New();
+  CREATE_FIELD_3D(fields, "positions", pos, NPY_DOUBLE);
+  CREATE_FIELD(fields, "velocities", vel, NPY_FLOAT32);
+  CREATE_FIELD(fields, "accelerations", acc, NPY_FLOAT32);
+  CREATE_FIELD(fields, "entropies", entropy, NPY_FLOAT32);
+  CREATE_FIELD(fields, "smoothing_lengths", h, NPY_FLOAT32);
+  CREATE_FIELD(fields, "densities", density, NPY_FLOAT32);
+  CREATE_FIELD(fields, "masses", mass, NPY_FLOAT32);
+  CREATE_FIELD(fields, "ids", id, NPY_ULONGLONG);
+  CREATE_FIELD(fields, "times", time, NPY_DOUBLE);
+
+  /* Generate descriptor */
+  logger_particle_descr = PyObject_New(PyArray_Descr, &PyArrayDescr_Type);
+  logger_particle_descr->typeobj = &PyLoggerParticle_Type;
+  // V is for an arbitrary kind of array
+  logger_particle_descr->kind = 'V';
+  // Not well documented (seems any value is fine)
+  logger_particle_descr->type = 'p';
+  // Native byte ordering
+  logger_particle_descr->byteorder = '=';
+  // Flags
+  logger_particle_descr->flags = NPY_USE_GETITEM | NPY_USE_SETITEM;
+  // id of the data type (assigned automatically)
+  logger_particle_descr->type_num = 0;
+  // Size of an element
+  logger_particle_descr->elsize = sizeof(struct logger_particle);
+  // alignment (doc magic)
+  logger_particle_descr->alignment = offsetof(
+      struct {
+        char c;
+        struct logger_particle v;
+      },
+      v);
+  // no subarray
+  logger_particle_descr->subarray = NULL;
+  // functions
+  logger_particle_descr->f = NULL;
+  // Meta data
+  logger_particle_descr->metadata = NULL;
+  logger_particle_descr->c_metadata = NULL;
+  logger_particle_descr->names = names;
+  logger_particle_descr->fields = fields;
+}
+
 PyMODINIT_FUNC PyInit_liblogger(void) {
   PyObject *m;
   m = PyModule_Create(&libloggermodule);
   if (m == NULL) return NULL;
 
+  /* Deal with SWIFT clock */
+  clocks_set_cpufreq(0);
+
   import_array();
 
+  /* Define the descr of the logger_particle */
+  pylogger_particle_define_descr();
+
   return m;
 }
diff --git a/logger/logger_reader.c b/logger/logger_reader.c
index 0954b9c5a8e56213de4d5b2a445aeeb9105e327c..f9f3fb558f887cc2d2bb3143ce13d06aacb1cfdd 100644
--- a/logger/logger_reader.c
+++ b/logger/logger_reader.c
@@ -17,29 +17,98 @@
  *
  ******************************************************************************/
 
+/* Include corresponding header */
 #include "logger_reader.h"
 
+/* Include standard library */
+#include <sys/sysinfo.h>
+#include <unistd.h>
+
+/* Include local headers */
+#include "threadpool.h"
+
+#define nr_threads get_nprocs()
+
 /**
  * @brief Initialize the reader.
  *
  * @param reader The #logger_reader.
- * @param filename The log filename.
+ * @param basename The basename of the logger files.
  * @param verbose The verbose level.
  */
-void logger_reader_init(struct logger_reader *reader, char *filename,
+void logger_reader_init(struct logger_reader *reader, const char *basename,
                         int verbose) {
   if (verbose > 1) message("Initializing the reader.");
 
+  /* Set the variable to the default values */
+  reader->time.time = -1.;
+  reader->time.int_time = 0;
+  reader->time.time_offset = 0;
+
+  /* Copy the base name */
+  strcpy(reader->basename, basename);
+
   /* Initialize the reader variables. */
   reader->verbose = verbose;
 
+  /* Generate the logfile filename */
+  char logfile_name[STRING_SIZE];
+  sprintf(logfile_name, "%s.dump", basename);
+
   /* Initialize the log file. */
-  logger_logfile_init_from_file(&reader->log, filename, reader,
+  logger_logfile_init_from_file(&reader->log, logfile_name, reader,
                                 /* only_header */ 0);
 
+  /* Initialize the index files */
+  logger_reader_init_index(reader);
+
   if (verbose > 1) message("Initialization done.");
 }
 
+/**
+ * @brief Initialize the index part of the reader.
+ *
+ * @param reader The #logger_reader.
+ */
+void logger_reader_init_index(struct logger_reader *reader) {
+  /* Initialize the logger_index */
+  logger_index_init(&reader->index.index, reader);
+
+  /* Count the number of files */
+  int count = 0;
+  while (1) {
+    char filename[STRING_SIZE + 50];
+    sprintf(filename, "%s_%04i.index", reader->basename, count);
+
+    /* Check if file exists */
+    if (access(filename, F_OK) != -1) {
+      count++;
+    } else {
+      break;
+    }
+  }
+
+  reader->index.n_files = count;
+
+  /* Initialize the arrays */
+  reader->index.times = (double *)malloc(count * sizeof(double));
+  reader->index.int_times =
+      (integertime_t *)malloc(count * sizeof(integertime_t));
+
+  /* Get the information contained in the headers */
+  for (int i = 0; i < reader->index.n_files; i++) {
+    char filename[STRING_SIZE + 50];
+    sprintf(filename, "%s_%04i.index", reader->basename, i);
+
+    /* Read the header */
+    logger_index_read_header(&reader->index.index, filename);
+
+    /* Save the required information */
+    reader->index.times[i] = reader->index.index.time;
+    reader->index.int_times[i] = reader->index.index.integer_time;
+  }
+}
+
 /**
  * @brief Free the reader.
  *
@@ -48,6 +117,10 @@ void logger_reader_init(struct logger_reader *reader, char *filename,
 void logger_reader_free(struct logger_reader *reader) {
   /* Free the log. */
   logger_logfile_free(&reader->log);
+
+  if (reader->time.time != -1.) {
+    logger_index_free(&reader->index.index);
+  }
 }
 
 /**
@@ -88,3 +161,301 @@ size_t reader_read_record(struct logger_reader *reader,
 
   return offset;
 }
+
+/**
+ * @brief Set the reader to a given time and read the correct index file.
+ *
+ * @param reader The #logger_reader.
+ * @param time The requested time.
+ */
+void logger_reader_set_time(struct logger_reader *reader, double time) {
+  /* Set the time */
+  reader->time.time = time;
+
+  /* Find the correct index */
+  unsigned int left = 0;
+  unsigned int right = reader->index.n_files - 1;
+
+  while (left != right) {
+    /* Do a ceil - division */
+    unsigned int m = (left + right + 1) / 2;
+    if (reader->index.times[m] > time) {
+      right = m - 1;
+    } else {
+      left = m;
+    }
+  }
+
+  /* Generate the filename */
+  char filename[STRING_SIZE + 50];
+  sprintf(filename, "%s_%04u.index", reader->basename, left);
+
+  /* Check if the file is already mapped */
+  if (reader->index.index.index.map != NULL) {
+    logger_index_free(&reader->index.index);
+  }
+
+  /* Read the file */
+  logger_index_read_header(&reader->index.index, filename);
+  logger_index_map_file(&reader->index.index, filename, /* sorted */ 1);
+
+  /* Get the offset of the time chunk */
+  size_t ind = time_array_get_index_from_time(&reader->log.times, time);
+
+  /* Check if we requested exactly a time step  */
+  if (reader->log.times.records[ind].time != time) {
+    /* In order to interpolate, we need to be above and not below the time */
+    ind += 1;
+  }
+
+  /* Save the values */
+  reader->time.index = ind;
+  reader->time.int_time = reader->log.times.records[ind].int_time;
+  reader->time.time_offset = reader->log.times.records[ind].offset;
+}
+
+/**
+ * @brief Provides the number of particle (per type) from the index file.
+ *
+ * @param reader The #logger_reader.
+ * @param n_type (output) The number of particle type possible.
+ *
+ * @return For each type possible, the number of particle.
+ */
+const uint64_t *logger_reader_get_number_particles(struct logger_reader *reader,
+                                                   int *n_type) {
+  *n_type = swift_type_count;
+  return reader->index.index.nparts;
+}
+
+struct extra_data_read {
+  struct logger_reader *reader;
+  struct logger_particle *parts;
+  struct index_data *data;
+  enum logger_reader_type type;
+};
+
+/**
+ * @brief Mapper function of logger_reader_read_all_particles().
+ *
+ * @param map_data The array of #logger_particle.
+ * @param num_elements The number of element to process.
+ * @param extra_data The #extra_data_read.
+ */
+void logger_reader_read_all_particles_mapper(void *map_data, int num_elements,
+                                             void *extra_data) {
+
+  struct logger_particle *parts = (struct logger_particle *)map_data;
+  struct extra_data_read *read = (struct extra_data_read *)extra_data;
+  const struct logger_reader *reader = read->reader;
+  struct index_data *data = read->data + (parts - read->parts);
+
+  const uint64_t *nparts = reader->index.index.nparts;
+  const size_t shift = parts - read->parts;
+
+  /* Read the particles */
+  for (int i = 0; i < num_elements; i++) {
+    const size_t part_ind = shift + i;
+
+    /* Get the offset */
+    size_t prev_offset = data[i].offset;
+    size_t next_offset = prev_offset;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    /* check with the offset of the next timestamp.
+     * (the sentinel protects against overflow)
+     */
+    const size_t ind = reader->time.index + 1;
+    if (prev_offset >= reader->log.times.records[ind].offset) {
+      error("An offset is out of range (%zi >= %zi).", prev_offset,
+            reader->log.times.records[ind].offset);
+    }
+#endif
+
+    while (next_offset < reader->time.time_offset) {
+      prev_offset = next_offset;
+      int test = tools_get_next_record(&reader->log.header, reader->log.log.map,
+                                       &next_offset, reader->log.log.mmap_size);
+
+      if (test == -1) {
+        size_t mask = 0;
+        logger_loader_io_read_mask(&reader->log.header,
+                                   reader->log.log.map + prev_offset, &mask,
+                                   &next_offset);
+        error(
+            "Trying to get a particle without next record (mask: %zi, diff "
+            "offset: %zi)",
+            mask, next_offset);
+      }
+    }
+
+    /* Read the particle */
+    logger_particle_read(&parts[i], reader, prev_offset, reader->time.time,
+                         read->type);
+
+    /* Set the type */
+    size_t count = 0;
+    for (int ptype = 0; ptype < swift_type_count; ptype++) {
+      count += nparts[ptype];
+      if (part_ind < count) {
+        parts[i].type = ptype;
+        break;
+      }
+    }
+  }
+}
+
+/**
+ * @brief Read all the particles from the index file.
+ *
+ * @param reader The #logger_reader.
+ * @param time The requested time for the particle.
+ * @param interp_type The type of interpolation.
+ * @param parts The array of particles to use.
+ * @param n_tot The total number of particles
+ */
+void logger_reader_read_all_particles(struct logger_reader *reader, double time,
+                                      enum logger_reader_type interp_type,
+                                      struct logger_particle *parts,
+                                      size_t n_tot) {
+
+  /* Initialize the thread pool */
+  struct threadpool threadpool;
+  threadpool_init(&threadpool, nr_threads);
+
+  /* Shortcut to some structures */
+  struct logger_index *index = &reader->index.index;
+
+  /* Get the correct index file */
+  logger_reader_set_time(reader, time);
+  struct index_data *data = logger_index_get_data(index, 0);
+
+  /* Read the particles */
+  struct extra_data_read read;
+  read.reader = reader;
+  read.parts = parts;
+  read.data = data;
+  read.type = interp_type;
+  threadpool_map(&threadpool, logger_reader_read_all_particles_mapper, parts,
+                 n_tot, sizeof(struct logger_particle), 0, &read);
+
+  /* Cleanup the threadpool */
+  threadpool_clean(&threadpool);
+}
+
+/**
+ * @brief Get the simulation initial time.
+ *
+ * @param reader The #logger_reader.
+ *
+ * @return The initial time
+ */
+double logger_reader_get_time_begin(struct logger_reader *reader) {
+  return reader->log.times.records[0].time;
+}
+
+/**
+ * @brief Get the simulation final time.
+ *
+ * @param reader The #logger_reader.
+ *
+ * @return The final time
+ */
+double logger_reader_get_time_end(struct logger_reader *reader) {
+  const size_t ind = reader->log.times.size;
+  return reader->log.times.records[ind - 1].time;
+}
+
+/**
+ * @brief Get the offset of the first timestamp after a given time.
+ *
+ * @param reader The #logger_reader.
+ * @param time The requested time.
+ *
+ * @return The offset of the timestamp.
+ */
+size_t logger_reader_get_next_offset_from_time(struct logger_reader *reader,
+                                               double time) {
+  size_t ind = time_array_get_index_from_time(&reader->log.times, time);
+  return reader->log.times.records[ind + 1].offset;
+}
+
+/**
+ * @brief Get the two particle records around the requested time.
+ *
+ * @param reader The #logger_reader.
+ * @param prev (in) A record before the requested time. (out) The last record
+ * before the time.
+ * @param next (out) The first record after the requested time.
+ * @param time_offset The offset of the requested time.
+ */
+void logger_reader_get_next_particle(struct logger_reader *reader,
+                                     struct logger_particle *prev,
+                                     struct logger_particle *next,
+                                     size_t time_offset) {
+
+  void *map = reader->log.log.map;
+  size_t prev_offset = prev->offset;
+  size_t next_offset = 0;
+
+  /* Get the mask index of the special flags */
+  const int spec_flag_ind =
+      header_get_field_index(&reader->log.header, "special flags");
+  if (spec_flag_ind < 0) {
+    error("The logfile does not contain the special flags field.");
+  }
+
+  /* Keep the type in memory */
+  const int prev_type = prev->type;
+  int new_type = -1;
+
+  while (1) {
+    /* Read the offset to the next particle */
+    size_t mask = 0;
+    logger_loader_io_read_mask(&reader->log.header, map + prev_offset, &mask,
+                               &next_offset);
+
+    /* Check if something special happened */
+    if (mask & reader->log.header.masks[spec_flag_ind].mask) {
+      struct logger_particle tmp;
+      logger_particle_read(&tmp, reader, prev_offset, /* Time */ -1,
+                           logger_reader_const);
+      new_type = tmp.type;
+    }
+
+    /* Are we at the end of the file? */
+    if (next_offset == 0) {
+      time_array_print(&reader->log.times);
+      error("End of file for offset %zi", prev_offset);
+    }
+
+    next_offset += prev_offset;
+
+    /* Have we found the next particle? */
+    if (next_offset > time_offset) {
+      break;
+    }
+
+    /* Update the previous offset */
+    prev_offset = next_offset;
+  }
+
+  /* Read the previous offset if required */
+  if (prev_offset != prev->offset) {
+    logger_particle_read(prev, reader, prev_offset, /* Time */ 0,
+                         logger_reader_const);
+  }
+
+  /* Read the next particle */
+  logger_particle_read(next, reader, next_offset, /* Time */ 0,
+                       logger_reader_const);
+
+  /* Set the types */
+  if (new_type == -1) {
+    next->type = prev_type;
+    prev->type = prev_type;
+  } else {
+    next->type = new_type;
+    prev->type = new_type;
+  }
+}
diff --git a/logger/logger_reader.h b/logger/logger_reader.h
index 124d271f57587a26dbfb59299678f0ce5cfbdf79..95a8ab8ef0b800e77d6de2bb483c1b4cd1092a3b 100644
--- a/logger/logger_reader.h
+++ b/logger/logger_reader.h
@@ -27,7 +27,7 @@
  * files.
  *
  * The <b>parameter file</b> contains all the information related to the code
- * (e.g. boxsize).
+ * (e.g. boxsize, scheme, ...).
  *
  * The <b>index files</b> are not mandatory files that indicates the position of
  * the particles in the log file at a given time step. They are useful to
@@ -47,6 +47,7 @@
 #ifndef LOGGER_LOGGER_READER_H
 #define LOGGER_LOGGER_READER_H
 
+#include "logger_index.h"
 #include "logger_loader_io.h"
 #include "logger_logfile.h"
 #include "logger_particle.h"
@@ -62,20 +63,71 @@
  */
 struct logger_reader {
 
-  /* Time of each index file. #TODO */
-  double *times;
+  /* Base name of the files */
+  char basename[STRING_SIZE];
+
+  struct {
+    /* Information contained in the index file */
+    struct logger_index index;
+
+    /* Number of index files */
+    int n_files;
+
+    /* Time of each index file */
+    double *times;
+
+    /* Integer time of each index file */
+    integertime_t *int_times;
+  } index;
 
   /* Informations contained in the file header. */
   struct logger_logfile log;
 
+  /* Information about the current time */
+  struct {
+    /* Double time */
+    double time;
+
+    /* Integer time */
+    integertime_t int_time;
+
+    /* Offset of the chunk */
+    size_t time_offset;
+
+    /* Index of the element in the time array */
+    size_t index;
+  } time;
+
   /* Level of verbosity. */
   int verbose;
 };
 
-void logger_reader_init(struct logger_reader *reader, char *filename,
+void logger_reader_init_index(struct logger_reader *reader);
+void logger_reader_init(struct logger_reader *reader, const char *basename,
                         int verbose);
 void logger_reader_free(struct logger_reader *reader);
 size_t reader_read_record(struct logger_reader *reader,
                           struct logger_particle *lp, double *time,
                           int *is_particle, size_t offset);
+
+void logger_reader_set_time(struct logger_reader *reader, double time);
+
+double logger_reader_get_time_begin(struct logger_reader *reader);
+double logger_reader_get_time_end(struct logger_reader *reader);
+size_t logger_reader_get_next_offset_from_time(struct logger_reader *reader,
+                                               double time);
+void logger_reader_get_next_particle(struct logger_reader *reader,
+                                     struct logger_particle *prev,
+                                     struct logger_particle *next,
                                     size_t time_offset);
+
+const uint64_t *logger_reader_get_number_particles(struct logger_reader *reader,
+                                                   int *n_type);
+
+void logger_reader_read_all_particles_mapper(void *map_data, int num_elements,
+                                             void *extra_data);
+void logger_reader_read_all_particles(struct logger_reader *reader, double time,
+                                      enum logger_reader_type interp_type,
+                                      struct logger_particle *parts,
+                                      size_t n_tot);
+
 #endif  // LOGGER_LOGGER_READER_H
diff --git a/logger/logger_time.c b/logger/logger_time.c
index d2c6ebc3f9e3171ba7fdec6c6a63eb23d7001df6..e6eb778637faf7bdc8841a996751245077b1b5b1 100644
--- a/logger/logger_time.c
+++ b/logger/logger_time.c
@@ -166,9 +166,9 @@ void time_array_populate(struct time_array *t, struct logger_logfile *log) {
   double time = 0;
 
   /* get file size. */
-  size_t file_size = log->log.file_size;
+  size_t file_size = log->log.mmap_size;
 
-  /* get first time stamp. */
+  /* get first timestamp. */
   size_t offset = time_offset_first_record(&log->header);
   while (offset < file_size) {
     /* read current time record and store it. */
@@ -178,7 +178,7 @@ void time_array_populate(struct time_array *t, struct logger_logfile *log) {
 
     /* get next record. */
     int test = tools_get_next_record(&log->header, log->log.map, &offset,
-                                     log->log.file_size);
+                                     log->log.mmap_size);
     if (test == -1) break;
   }
 }
@@ -224,14 +224,16 @@ size_t time_array_get_index(const struct time_array *t, const size_t offset) {
   if (!t) error("NULL pointer.");
 
   if (offset < t->records[0].offset || offset > t->records[t->size - 1].offset)
-    error("Offset outside of range.");
+    error("Offset outside of range. %zi > %zi > %zi",
+          t->records[t->size - 1].offset, offset, t->records[0].offset);
 #endif
 
-  /* left will contain the index at the end of the loop */
+  /* right will contain the index at the end of the loop */
   size_t left = 0;
   size_t right = t->size - 1;
 
   /* Find the time_array with the correct offset through a bisection method. */
+  // TODO use interpolation search (same for the other binary searches)
   while (left <= right) {
     size_t center = (left + right) / 2;
     const size_t offset_center = t->records[center].offset;
@@ -245,6 +247,71 @@ size_t time_array_get_index(const struct time_array *t, const size_t offset) {
     }
   }
 
+  /* Avoid the sentinel */
+  if (right == t->size - 1) {
+    right = right - 1;
+  }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (t->records[right].offset > offset ||
+      t->records[right + 1].offset <= offset) {
+    error("Found the wrong element");
+  }
+
+#endif
+
+  return right;
+}
+
+/**
+ * @brief Find the index of the last time record written before a given time.
+ *
+ * @param t #time_array to access.
+ * @param time The time requested.
+ *
+ * @return The index of the last time record.
+ */
+size_t time_array_get_index_from_time(const struct time_array *t,
+                                      const double time) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (!t) error("NULL pointer.");
+
+  if (time < t->records[0].time || time > t->records[t->size - 1].time)
+    error("Time outside of range (%g > %g).", time,
+          t->records[t->size - 1].time);
+#endif
+
+  /* right will contain the index at the end of the loop */
+  size_t left = 0;
+  size_t right = t->size - 1;
+
+  /* Find the time_array with the correct time through a bisection method. */
+  while (left <= right) {
+    size_t center = (left + right) / 2;
+    const double time_center = t->records[center].time;
+
+    if (time > time_center) {
+      left = center + 1;
+    } else if (time < time_center) {
+      right = center - 1;
+    } else {
+      return center;
+    }
+  }
+
+  /* Avoid the sentinel */
+  if (right == t->size - 1) {
+    right = right - 1;
+  }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (t->records[right].time > time || t->records[right + 1].time <= time) {
+    error("Found the wrong element");
+  }
+
+#endif
+
   return right;
 }
 
@@ -269,7 +336,11 @@ void time_array_free(struct time_array *t) {
  * @param t #time_array to print
  */
 void time_array_print(const struct time_array *t) {
-  const size_t threshold = 4;
+#ifdef SWIFT_DEBUG_CHECKS
+  const size_t threshold = 1000;
+#else
+  const size_t threshold = 5;
+#endif
 
   size_t n = t->size;
   size_t up_threshold = n - threshold;
@@ -281,7 +352,7 @@ void time_array_print(const struct time_array *t) {
   for (size_t i = 1; i < n; i++) {
     /* Skip the times at the center of the array. */
     if (i < threshold || i > up_threshold)
-      printf(", %lli (%g)", t->records[i].int_time, t->records[i].time);
+      printf(", %zi (%g)", t->records[i].offset, t->records[i].time);
 
     if (i == threshold) printf(", ...");
   }
diff --git a/logger/logger_time.h b/logger/logger_time.h
index b27abffb9c1b3aa02c82c1739d1206b43f3ac431..9094f700da687a2699ba54a061ea049189a6572a 100644
--- a/logger/logger_time.h
+++ b/logger/logger_time.h
@@ -84,6 +84,9 @@ double time_array_get_time(const struct time_array *t, const size_t offset);
 
 size_t time_array_get_index(const struct time_array *t, const size_t offset);
 
+size_t time_array_get_index_from_time(const struct time_array *t,
+                                      const double time);
+
 void time_array_free(struct time_array *t);
 
 void time_array_print(const struct time_array *t);
diff --git a/logger/logger_tools.c b/logger/logger_tools.c
index a9a6ecfcb0acf72b11898d00fdfeff90fd70406d..1c8c813158330b934730a467edc1d81b85fd5de1 100644
--- a/logger/logger_tools.c
+++ b/logger/logger_tools.c
@@ -150,8 +150,9 @@ size_t tools_reverse_offset(const struct header *h, void *file_map,
   map -= LOGGER_MASK_SIZE + LOGGER_OFFSET_SIZE;
   logger_loader_io_read_mask(h, map, &prev_mask, NULL);
 
-  /* Check if we are not mixing time stamp and particles */
-  if ((prev_mask != 128 && mask == 128) || (prev_mask == 128 && mask != 128))
+  /* Check if we are not mixing timestamp and particles */
+  if ((prev_mask != h->timestamp_mask && mask == h->timestamp_mask) ||
+      (prev_mask == h->timestamp_mask && mask != h->timestamp_mask))
     error("Unexpected mask: %lu, got %lu.", mask, prev_mask);
 
 #endif  // SWIFT_DEBUG_CHECKS
@@ -209,23 +210,24 @@ size_t tools_check_record_consistency(const struct logger_reader *reader,
   logger_loader_io_read_mask(h, file_init + pointed_offset, &pointed_mask,
                              NULL);
 
-  /* check if not mixing time stamp and particles. */
-  if ((pointed_mask != 128 && mask == 128) ||
-      (pointed_mask == 128 && mask != 128))
+  /* check if not mixing timestamp and particles. */
+  if ((pointed_mask != h->timestamp_mask && mask == h->timestamp_mask) ||
+      (pointed_mask == h->timestamp_mask && mask != h->timestamp_mask))
     error("Error in the offset (mask %lu at %lu != %lu at %lu).", mask, offset,
           pointed_mask, pointed_offset);
 
-  if (pointed_mask == 128) return (size_t)(map - file_init);
+  if (pointed_mask == h->timestamp_mask) return (size_t)(map - file_init);
 
   struct logger_particle part;
   logger_particle_read(&part, reader, offset, 0, logger_reader_const);
 
-  size_t id = part.id;
+  long long id = part.id;
   logger_particle_read(&part, reader, pointed_offset, 0, logger_reader_const);
 
-  if (id != part.id)
-    error("Offset wrong, id incorrect (%lu != %lu) at %lu.", id, part.id,
+  if (id != part.id) {
+    error("Offset wrong, id incorrect (%lli != %lli) at %lu.", id, part.id,
           pointed_offset);
+  }
 
   return (size_t)(map - file_init);
 }
diff --git a/logger/python/reader_example.py b/logger/python/reader_example.py
deleted file mode 100644
index 6ace309c5b68b4fc4f1088b6206cd1ae3ccd69a5..0000000000000000000000000000000000000000
--- a/logger/python/reader_example.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-"""
-Read a logger file by using an index.
-Example: ./reader_example.py ../../examples/SedovBlast_3D/index.dump ../../examples/SedovBlast_3D/index_0005.hdf5
-"""
-import sys
-from h5py import File
-import numpy as np
-import matplotlib.pyplot as plt
-sys.path.append("../.libs/")
-
-import liblogger as logger
-
-# Get filenames
-if len(sys.argv) != 3:
-    print("WARNING missing arguments. Will use the default ones")
-    index = "../../examples/HydroTests/SedovBlast_3D/index_0002.hdf5"
-    dump = "../../examples/HydroTests/SedovBlast_3D/index.dump"
-else:
-    index = sys.argv[-1]
-    dump = sys.argv[-2]
-
-# constant
-offset_name = "PartType0/Offset"
-header = "Header"
-time_name = "Time Offset"
-
-# Read index file
-with File(index, "r") as f:
-    if offset_name not in f:
-        raise Exception("Unable to find the offset dataset")
-    offset = f[offset_name][:]
-
-    if header not in f:
-        raise Exception("Unable to find the header")
-    if time_name not in f[header].attrs:
-        raise Exception("Unable to find the time offset")
-    time_offset = f[header].attrs[time_name]
-
-# read dump
-data = logger.loadFromIndex(offset, dump, time_offset)
-
-# Compute distance from center
-pos = data["positions"]
-center = pos.mean()
-r2 = np.sum((pos - center)**2, axis=1)
-
-# plot entropy vs distance
-plt.plot(np.sqrt(r2), data["entropy"], '.')
-
-plt.xlim(0., 0.5)
-plt.ylim(-5, 50)
-plt.xlabel("Radius")
-plt.ylabel("Entropy")
-plt.show()
diff --git a/logger/quick_sort.c b/logger/quick_sort.c
new file mode 100644
index 0000000000000000000000000000000000000000..a1b76c864bbdb6d66a2bc84cee0de03a05379b4d
--- /dev/null
+++ b/logger/quick_sort.c
@@ -0,0 +1,125 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Include corresponding header */
+#include "quick_sort.h"
+
+/* Include local headers */
+#include "logger_index.h"
+
+struct qstack {
+  int64_t lo;
+  int64_t hi;
+};
+
+/**
+ * @brief Sort the data with the quicksort according to the ids.
+ */
+void quick_sort(struct index_data *data, size_t N) {
+  int64_t qpos, i, j, lo, hi, imin, pivot;
+  struct index_data temp;
+
+  /* Allocate a stack of operations */
+  int stack_size = log(N) + 1;
+  struct qstack *qstack =
+      (struct qstack *)malloc(sizeof(struct qstack) * stack_size);
+
+  /* Sort parts in decreasing order with quicksort */
+  qstack[0].lo = 0;
+  qstack[0].hi = N - 1;
+  qpos = 0;
+  while (qpos >= 0) {
+    lo = qstack[qpos].lo;
+    hi = qstack[qpos].hi;
+    qpos -= 1;
+    /* Do we have a low number of elements to sort? */
+    if (hi - lo < 15) {
+      /* Sort the last elements. */
+      for (i = lo; i < hi; i++) {
+        imin = i;
+        /* Find the minimal value. */
+        for (j = i + 1; j <= hi; j++)
+          if (data[j].id < data[imin].id) imin = j;
+        /* Did we find the minimal value? */
+        if (imin != i) {
+          /* Swap the elements. */
+          temp = data[imin];
+          data[imin] = data[i];
+          data[i] = temp;
+        }
+      }
+    } else {
+      /* Select a pivot */
+      pivot = data[(lo + hi) / 2].id;
+      i = lo;
+      j = hi;
+      /* Ensure that the elements before/after the pivot
+         are smaller/larger than the pivot. */
+      while (i <= j) {
+        /* Find the first elements that do not respect
+           the order. */
+        while (data[i].id < pivot) i++;
+        while (data[j].id > pivot) j--;
+
+        /* Did we get two elements? */
+        if (i <= j) {
+          /* Are they different? */
+          if (i < j) {
+            /* Swap them */
+            temp = data[i];
+            data[i] = data[j];
+            data[j] = temp;
+          }
+          /* The two elements are good now */
+          i += 1;
+          j -= 1;
+        }
+      }
+      /* Add the next operations to the stack.
+       * The order is important in order to decrease the stack size.
+       */
+      if (j > (lo + hi) / 2) {
+        if (lo < j) {
+          qpos += 1;
+          qstack[qpos].lo = lo;
+          qstack[qpos].hi = j;
+        }
+        if (i < hi) {
+          qpos += 1;
+          qstack[qpos].lo = i;
+          qstack[qpos].hi = hi;
+        }
+      } else {
+        if (i < hi) {
+          qpos += 1;
+          qstack[qpos].lo = i;
+          qstack[qpos].hi = hi;
+        }
+        if (lo < j) {
+          qpos += 1;
+          qstack[qpos].lo = lo;
+          qstack[qpos].hi = j;
+        }
+      }
+    }
+  }
+
+  /* Free the allocated memory */
+  free(qstack);
+}
diff --git a/logger/quick_sort.h b/logger/quick_sort.h
new file mode 100644
index 0000000000000000000000000000000000000000..57ed5779213d16cd6f67dc7cb4ecd8f86b9af2e6
--- /dev/null
+++ b/logger/quick_sort.h
@@ -0,0 +1,27 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef LOGGER_LOGGER_QUICK_SORT_H
+#define LOGGER_LOGGER_QUICK_SORT_H
+
+#include "logger_index.h"
+
+void quick_sort(struct index_data *data, size_t N);
+
+#endif  // LOGGER_LOGGER_QUICK_SORT_H
diff --git a/logger/tests/Makefile.am b/logger/tests/Makefile.am
index dd94462b8b98b0a089d0f959b81c603c29911a76..d5c88e234531e0f0685cd3fb73e6da61c513b9bb 100644
--- a/logger/tests/Makefile.am
+++ b/logger/tests/Makefile.am
@@ -17,13 +17,13 @@
 # Add the source directory and the non-standard paths to the included library headers to CFLAGS
 AM_CFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/logger $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS)
 
-AM_LDFLAGS = ../../src/.libs/libswiftsim.a ../.libs/liblogger.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS)
+AM_LDFLAGS = ../../src/.libs/libswiftsim.a ../.libs/liblogger.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS) -lswiftsim
 
 # List of programs and scripts to run in the test suite
-TESTS = testLogfileHeader testLogfileReader testTimeArray
+TESTS = testLogfileHeader testLogfileReader testTimeArray testQuickSort testVR
 
 # List of test programs to compile
-check_PROGRAMS = testLogfileHeader testLogfileReader testTimeArray
+check_PROGRAMS = testLogfileHeader testLogfileReader testTimeArray testQuickSort testVR
 
 # Rebuild tests when SWIFT is updated.
 $(check_PROGRAMS): ../../src/.libs/libswiftsim.a ../.libs/liblogger.a
@@ -32,6 +32,8 @@ $(check_PROGRAMS): ../../src/.libs/libswiftsim.a ../.libs/liblogger.a
 testLogfileHeader_SOURCES = testLogfileHeader.c
 testLogfileReader_SOURCES = testLogfileReader.c
 testTimeArray_SOURCES = testTimeArray.c
+testQuickSort_SOURCES = testQuickSort.c
+testVR_SOURCES = testVR.c
 
 # Files necessary for distribution
 EXTRA_DIST = testLogfileHeader.yml testLogfileReader.yml
diff --git a/logger/tests/generate_log.h b/logger/tests/generate_log.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ef753b0abd39ac72a722be66cc2e804b6d54c56
--- /dev/null
+++ b/logger/tests/generate_log.h
@@ -0,0 +1,209 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (C) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Include config */
+#include "../../config.h"
+
+/* Local headers */
+#include "engine.h"
+#include "hydro.h"
+#include "logger.h"
+
+/* Not all the fields are written at every step.
+ * Here we define how often a few fields are written.
+ */
+#define period_rho 2
+#define period_h 4
+#define const_time_base 1e-4
+
+/**
+ * @brief Generate the data of a bunch of particles.
+ *
+ * @param parts The list of particles.
+ * @param xparts The list of extra particles.
+ * @param nparts The number of particles.
+ */
+void generate_particles(struct part *parts, struct xpart *xparts,
+                        size_t nparts) {
+  struct hydro_space hs;
+
+  for (size_t i = 0; i < nparts; i++) {
+    /* Set internal energy. */
+    hydro_set_init_internal_energy(&parts[i], 100);
+
+    /* Initialize particle. */
+    hydro_first_init_part(&parts[i], &xparts[i]);
+    hydro_init_part(&parts[i], &hs);
+    logger_part_data_init(&xparts[i].logger_data);
+
+    for (int j = 0; j < 3; j++) {
+      parts[i].x[j] = 0;
+      parts[i].v[j] = (j == 0) ? -1 : 0;
+      parts[i].a_hydro[j] = (j == 1) ? 1e-2 : 0;
+    }
+    parts[i].h = 15;
+    parts[i].rho = 50;
+    parts[i].id = i;
+    hydro_set_mass(&parts[i], 1.5);
+
+    /* Add time bin in order to skip particles. */
+    parts[i].time_bin = (i % 10) + 1;
+  }
+}
+
+/** Provides an integer time given the step number. */
+integertime_t get_integer_time(int step) { return step; }
+
+/** Provides a double time given the step number. */
+double get_double_time(int step) { return step * const_time_base; }
+
+/**
+ * @brief Write a few particles during multiple time steps.
+ *
+ * As only the logger is tested, there is no need to really
+ * evolve the particles.
+ *
+ * @param log The #logger_writer.
+ * @param e The #engine.
+ */
+void write_particles(struct logger_writer *log, struct engine *e) {
+
+  size_t nparts = e->total_nr_parts;
+  struct part *parts = e->s->parts;
+  struct xpart *xparts = e->s->xparts;
+
+  const int number_steps = 100;
+  const int number_index = 5;
+
+  /* Loop over all the steps. */
+  for (int i = 0; i < number_steps; i++) {
+    e->time = get_double_time(i);
+    e->ti_current = get_integer_time(i);
+    /* Dump an index file if required */
+    if (i % (number_steps / number_index) == number_index - 1) {
+      engine_dump_index(e);
+    }
+    integertime_t ti_int = get_integer_time(i);
+    double ti_double = get_double_time(i);
+
+    /* Mark the current time step in the particle logger file. */
+    logger_log_timestamp(log, ti_int, ti_double, &log->timestamp_offset);
+    /* Make sure that we have enough space in the particle logger file
+     * to store the particles in current time step. */
+    logger_ensure_size(log, nparts, /* number gpart */ 0, 0);
+
+    /* Loop over all the particles. */
+    for (size_t j = 0; j < nparts; j++) {
+
+      /* Skip some particles. */
+      if (i % parts[j].time_bin != 0) continue;
+
+      /* Write a time information to check that the correct particle is read. */
+      parts[j].x[0] = i;
+
+      /* Write this particle. */
+      unsigned int mask =
+          logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask |
+          logger_mask_data[logger_a].mask | logger_mask_data[logger_u].mask |
+          logger_mask_data[logger_consts].mask;
+
+      int number_particle_step = i / parts[j].time_bin;
+
+      if (number_particle_step % period_h == 0)
+        mask |= logger_mask_data[logger_h].mask;
+      if (number_particle_step % period_rho == 0)
+        mask |= logger_mask_data[logger_rho].mask;
+
+      logger_log_part(log, &parts[j], mask, &xparts[j].logger_data.last_offset,
+                      /* special flags */ 0);
+    }
+  }
+}
+
+void generate_log(struct swift_params *params, struct part *parts,
+                  struct xpart *xparts, size_t nparts) {
+  /* Initialize the particles */
+  generate_particles(parts, xparts, nparts);
+
+  /* Initialize the writer */
+  struct logger_writer log;
+  logger_init(&log, params);
+
+  /* initialize the engine */
+  struct engine e;
+  e.total_nr_parts = nparts;
+  e.total_nr_gparts = 0;
+  e.total_nr_sparts = 0;
+  e.total_nr_bparts = 0;
+  e.verbose = 1;
+  e.policy = 0;
+  e.ti_current = 0;
+  e.time = 0;
+  e.time_base = const_time_base;
+  e.time_begin = 0;
+  e.logger = &log;
+  threadpool_init(&e.threadpool, 1);
+  struct space s;
+  e.s = &s;
+  s.xparts = xparts;
+  s.parts = parts;
+  s.gparts = NULL;
+  s.nr_parts = nparts;
+  s.nr_gparts = 0;
+  s.nr_sparts = 0;
+  s.nr_bparts = 0;
+  s.nr_inhibited_parts = 0;
+  s.nr_inhibited_gparts = 0;
+  s.nr_inhibited_sparts = 0;
+  s.nr_inhibited_bparts = 0;
+  s.nr_extra_gparts = 0;
+  s.nr_extra_parts = 0;
+  s.nr_extra_sparts = 0;
+  s.nr_extra_bparts = 0;
+
+  /* Write file header */
+  logger_write_file_header(&log);
+
+  /* Mark the current time step in the particle logger file. */
+  logger_log_timestamp(&log, e.ti_current, e.time, &log.timestamp_offset);
+  /* Make sure that we have enough space in the particle logger file
+   * to store the particles in current time step. */
+  logger_ensure_size(&log, nparts, /* number gpart */ 0, 0);
+
+  /* Log all the particles before starting */
+  logger_log_all(&log, &e);
+  engine_dump_index(&e);
+
+  /* Write particles */
+  write_particles(&log, &e);
+
+  /* Write a sentinel timestamp */
+  logger_log_timestamp(e.logger, e.ti_current, e.time,
+                       &e.logger->timestamp_offset);
+
+  /* Write all the particles at the end */
+  logger_log_all(e.logger, &e);
+
+  /* Write a sentinel timestamp */
+  logger_log_timestamp(e.logger, e.ti_current, e.time,
+                       &e.logger->timestamp_offset);
+
+  /* Cleanup the memory */
+  logger_free(&log);
+}
diff --git a/logger/tests/testLogfileHeader.c b/logger/tests/testLogfileHeader.c
index 0f2c8a5df7942d50cbb641b99e3173a05fe1d539..6044ffae36c68066672e8279e0f5e65e2b40bafb 100644
--- a/logger/tests/testLogfileHeader.c
+++ b/logger/tests/testLogfileHeader.c
@@ -76,10 +76,10 @@ int main(int argc, char *argv[]) {
   assert(h->minor_version == logger_minor_version);
 
   message("Checking offset of first record");
-  assert(h->offset_first_record == logfile->log.file_size);
+  assert(h->offset_first_record == logfile->log.mmap_size);
 
   message("Checking number of masks");
-  assert(h->number_mask == logger_count_mask);
+  assert(h->masks_count == logger_count_mask);
 
   message("Checking masks");
   for (int i = 0; i < logger_count_mask; i++) {
diff --git a/logger/tests/testLogfileReader.c b/logger/tests/testLogfileReader.c
index 751c6b7d628fcd1191e8deba9135cddd8cd04bf8..0741b7c3e58045480480b6a7da011b9da31b9ea8 100644
--- a/logger/tests/testLogfileReader.c
+++ b/logger/tests/testLogfileReader.c
@@ -17,116 +17,18 @@
  *
  ******************************************************************************/
 
+/* Local header */
 #include "logger_header.h"
 #include "logger_loader_io.h"
 #include "logger_particle.h"
 #include "logger_reader.h"
 #include "swift.h"
 
-#define number_parts 100
-/* Not all the fields are written at every step.
- * Here we define how often a few fields are written.
- */
-#define period_rho 2
-#define period_h 4
-
-/**
- * @brief Initialize the particles.
- *
- * @param p The array of #part.
- * @param xp The array of #xpart.
- */
-void init_particles(struct part *p, struct xpart *xp) {
-  struct hydro_space hs;
-
-  for (int i = 0; i < number_parts; i++) {
-    /* Set internal energy. */
-    hydro_set_init_internal_energy(&p[i], 100);
-
-    /* Initialize particle. */
-    hydro_first_init_part(&p[i], &xp[i]);
-    hydro_init_part(&p[i], &hs);
-
-    for (int j = 0; j < 3; j++) {
-      p[i].x[j] = i;
-      p[i].v[j] = (j == 0) ? -1 : 0;
-      p[i].a_hydro[j] = (j == 1) ? 1e-2 : 0;
-    }
-    p[i].h = 15;
-    p[i].rho = 50;
-    p[i].id = i;
-    hydro_set_mass(&p[i], 1.5);
-    xp[i].logger_data.last_offset = 0;
-
-    /* Add time bin in order to skip particles. */
-    p[i].time_bin = (i % 10) + 1;
-  }
-}
-
-/** Provides a integer time given the step number.*/
-integertime_t get_integer_time(int step) { return step; }
-
-/** Provides a double time given the step number. */
-double get_double_time(int step) {
-  const double time_base = 1e-4;
-  return step * time_base;
-}
-
-/**
- * @brief Write a few particles during multiple time steps.
- *
- * As only the logger is tested, there is no need to really
- * evolve the particles.
- */
-void write_particles(struct logger_writer *log, struct part *parts,
-                     struct xpart *xparts) {
-
-  const int number_steps = 100;
-
-  /* Loop over all the steps. */
-  for (int i = 0; i < number_steps; i++) {
-    integertime_t ti_int = get_integer_time(i);
-    double ti_double = get_double_time(i);
+/* Tests header */
+#include "generate_log.h"
 
-    /* Mark the current time step in the particle logger file. */
-    logger_log_timestamp(log, ti_int, ti_double, &log->timestamp_offset);
-    /* Make sure that we have enough space in the particle logger file
-     * to store the particles in current time step. */
-    logger_ensure_size(log, number_parts, /* number gpart */ 0, 0);
-
-    /* Loop over all the particles. */
-    for (int j = 0; j < number_parts; j++) {
-
-      /* Skip some particles. */
-      if (i % parts[j].time_bin != 0) continue;
-
-      /* Write a time information to check that the correct particle is read. */
-      parts[j].x[0] = i;
-
-      /* Write this particle. */
-      unsigned int mask =
-          logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask |
-          logger_mask_data[logger_a].mask | logger_mask_data[logger_u].mask |
-          logger_mask_data[logger_consts].mask;
-
-      int number_particle_step = i / parts[j].time_bin;
-
-      if (number_particle_step % period_h == 0)
-        mask |= logger_mask_data[logger_h].mask;
-      if (number_particle_step % period_rho == 0)
-        mask |= logger_mask_data[logger_rho].mask;
-
-      logger_log_part(log, &parts[j], mask, &xparts[j].logger_data.last_offset);
-    }
-
-    // TODO write index files.
-  }
-
-  /* Mark the current time step in the particle logger file. */
-  integertime_t ti_int = get_integer_time(number_steps);
-  double ti_double = get_double_time(number_steps);
-  logger_log_timestamp(log, ti_int, ti_double, &log->timestamp_offset);
-}
+#define number_parts 100
+#define max_step 99
 
 /** Count the number of active particles. */
 int get_number_active_particles(int step, struct part *p) {
@@ -136,6 +38,7 @@ int get_number_active_particles(int step, struct part *p) {
   }
   return count;
 }
+
 /**
  * @brief Check that the reader contains the correct data
  *
@@ -155,18 +58,19 @@ void check_data(struct logger_reader *reader, struct part *parts,
   /* Define a few variables */
   double time = get_double_time(0);
   int is_particle = 0;
-  int step = -1;
+  int step = 0;
+  int init_log_all_done = -1;
 
   /* Number of particle found during this time step. */
   int count = 0;
   /* Set it to an impossible value in order to flag it. */
   const size_t id_flag = 5 * number_parts;
-  size_t previous_id = id_flag;
+  long long previous_id = id_flag;
 
   /* Loop over each record. */
   for (size_t offset = reader_read_record(reader, &lp, &time, &is_particle,
                                           logfile->header.offset_first_record);
-       offset < logfile->log.file_size;
+       offset < logfile->log.mmap_size;
        offset = reader_read_record(reader, &lp, &time, &is_particle, offset)) {
 
     /* Do the particle case */
@@ -183,16 +87,21 @@ void check_data(struct logger_reader *reader, struct part *parts,
       }
 
       /* Get the corresponding particle */
-      if (lp.id >= number_parts) error("Wrong id %zi", lp.id);
+      if (lp.id >= number_parts) error("Wrong id %lli", lp.id);
 
       struct part *p = &parts[lp.id];
 
       /* Check the record's data. */
       for (int i = 0; i < 3; i++) {
         /* in the first index, we are storing the step information. */
-        if (i == 0)
-          assert(step == lp.pos[i]);
-        else
+        if (i == 0) {
+          double tmp = step;
+          /* At the end, we are not updating the particle */
+          if (step >= max_step) {
+            tmp = max_step - max_step % p->time_bin;
+          }
+          assert(tmp == lp.pos[i]);
+        } else
           assert(p->x[i] == lp.pos[i]);
         assert(p->v[i] == lp.vel[i]);
         assert(p->a_hydro[i] == lp.acc[i]);
@@ -203,12 +112,12 @@ void check_data(struct logger_reader *reader, struct part *parts,
 
       /* Check optional fields. */
       int number_steps = step / p->time_bin;
-      if (number_steps % period_h == 0) {
+      if (number_steps % period_h == 0 || step > max_step) {
         assert(p->h == lp.h);
       } else {
         assert(-1 == lp.h);
       }
-      if (number_steps % period_rho == 0) {
+      if (number_steps % period_rho == 0 || step > max_step) {
         assert(p->rho == lp.density);
       } else {
         assert(-1 == lp.density);
@@ -216,22 +125,28 @@ void check_data(struct logger_reader *reader, struct part *parts,
     }
     /* Time stamp case. */
     else {
-
+      message("Step: %i", step);
       /* Check if we have the current amount of particles in previous step. */
-      if (step != -1 && count != get_number_active_particles(step, parts))
+      if (step != 0 && count != get_number_active_particles(step, parts))
         error(
             "The reader did not find the correct number of particles during "
-            "step %i",
-            step);
+            "step %i: %i != %i",
+            step, count, get_number_active_particles(step, parts));
+
+      /* Avoid the initial log */
+      if (init_log_all_done > 0) {
+        step += 1;
+      }
 
-      step += 1;
+      init_log_all_done += 1;
 
       /* Reset some variables. */
       previous_id = id_flag;
       count = 0;
 
       /* Check the record's data. */
-      assert(time == get_double_time(step));
+      const int tmp_step = step >= max_step ? max_step : step;
+      assert(time == get_double_time(tmp_step));
     }
   }
 }
@@ -245,7 +160,6 @@ int main(int argc, char *argv[]) {
   message("Generating the dump.");
 
   /* Create required structures. */
-  struct logger_writer log;
   struct swift_params params;
   char filename[200] = "testLogfileReader.yml";
 
@@ -263,25 +177,9 @@ int main(int argc, char *argv[]) {
       NULL)
     error("Failed to allocate xparticles array.");
 
-  init_particles(parts, xparts);
-
-  /* Initialize the logger. */
-  logger_init(&log, &params);
-
-  /* get dump filename. */
-  char dump_filename[PARSER_MAX_LINE_SIZE];
-  message("%s", log.base_name);
-  strcpy(dump_filename, log.base_name);
-  strcat(dump_filename, ".dump");
-
-  /* Write file header. */
-  logger_write_file_header(&log);
-
-  /* Write particles. */
-  write_particles(&log, parts, xparts);
+  /* Write a 'simulation' */
+  generate_log(&params, parts, xparts, number_parts);
 
-  /* clean memory */
-  logger_free(&log);
   /*
     Then read the file.
   */
@@ -295,7 +193,9 @@ int main(int argc, char *argv[]) {
   reader.verbose = 1;
 
   /* Read the header. */
-  logger_reader_init(&reader, dump_filename, /* verbose */ 1);
+  char basename[200];
+  parser_get_param_string(&params, "Logger:basename", basename);
+  logger_reader_init(&reader, basename, /* verbose */ 1);
 
   /*
     Finally check everything.
diff --git a/logger/tests/testQuickSort.c b/logger/tests/testQuickSort.c
new file mode 100644
index 0000000000000000000000000000000000000000..85e2a13ce0bd7204666fb09ae540297036ad7054
--- /dev/null
+++ b/logger/tests/testQuickSort.c
@@ -0,0 +1,97 @@
+
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#include "memswap.h"
+#include "quick_sort.h"
+
+#define N 10000
+
+int64_t getRandom(void) {
+  return llabs(((int64_t)lrand48() << 32) + (int64_t)lrand48());
+}
+/**
+ * @brief Initialize the array.
+ */
+void init_array(struct index_data *data) {
+  /* Assign the ids */
+  for (int i = 0; i < N; i++) {
+    data[i].id = getRandom();
+  }
+
+  /* randomize the array */
+  for (int i = 0; i < N; i++) {
+    /* Select an index randomly */
+    int j = rand() % N;
+
+    /* Swap the two elements */
+    memswap(&data[i], &data[j], sizeof(struct index_data));
+  }
+}
+
+/**
+ * @brief Ensure that the array is sorted
+ */
+void check_sort(struct index_data *data) {
+  for (size_t i = 1; i < N; i++) {
+    if (data[i].id < data[i - 1].id) {
+      error("The array is not sorted index=%zi, prev=%li, cur=%li", i,
+            data[i - 1].id, data[i].id);
+    }
+  }
+}
+
+/**
+ * @brief print the array
+ */
+void print_array(struct index_data *data) {
+  for (int i = 0; i < N; i++) {
+    printf("%li \t %zi\n", data[i].id, data[i].offset);
+  }
+}
+
+int main(int argc, char *argv[]) {
+
+  /* Create the array */
+  struct index_data *data =
+      (struct index_data *)malloc(N * sizeof(struct index_data));
+
+  if (data == NULL) {
+    error("Failed to allocate the memory");
+  }
+
+  /* Initialize the array */
+  init_array(data);
+
+  /* Print the array */
+  message("\nArray before sort\n");
+  print_array(data);
+
+  /* Sort the array */
+  quick_sort(data, N);
+
+  /* Print the array */
+  message("\nArray after sort\n");
+  print_array(data);
+
+  /* Check the results */
+  check_sort(data);
+
+  return 0;
+}
diff --git a/logger/tests/testVR.c b/logger/tests/testVR.c
new file mode 100644
index 0000000000000000000000000000000000000000..6e7828ee6380730972c260c249f22c876d3189b4
--- /dev/null
+++ b/logger/tests/testVR.c
@@ -0,0 +1,116 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (C) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *                    Florian Cabot (florian.cabot@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Include configuration */
+#include "config.h"
+
+/* Standard include */
+#include <stdlib.h>
+
+/* Local include */
+#include "generate_log.h"
+#include "hydro.h"
+#include "logger_reader.h"
+
+#define number_steps 10.
+#define number_parts 100
+
+/**
+ * This function test the logger in the VR mode.
+ * The idea is to simply read a snapshot at a given time and
+ * then simply advance in time the particles.
+ */
+int main(int argc, char *argv[]) {
+  /* Create required structures. */
+  struct swift_params params;
+  char filename[200] = "testVR.yml";
+
+  /* Read parameters. */
+  parser_read_file(filename, &params);
+
+  /* Initialize the particles. */
+  struct part *parts;
+  if ((parts = (struct part *)malloc(sizeof(struct part) * number_parts)) ==
+      NULL)
+    error("Failed to allocate particles array.");
+
+  struct xpart *xparts;
+  if ((xparts = (struct xpart *)malloc(sizeof(struct xpart) * number_parts)) ==
+      NULL)
+    error("Failed to allocate xparticles array.");
+
+  /* Write a 'simulation' */
+  generate_log(&params, parts, xparts, number_parts);
+
+  /* Initialize the reader */
+  struct logger_reader reader;
+  char basename[200];
+  parser_get_param_string(&params, "Logger:basename", basename);
+  logger_reader_init(&reader, basename,
+                     /* Verbose */ 0);
+
+  /* Read the time limits */
+  double begin = logger_reader_get_time_begin(&reader);
+  double end = logger_reader_get_time_end(&reader);
+
+  /* Set the time */
+  message("Time begin: %f end: %f", begin, end);
+  logger_reader_set_time(&reader, begin);
+
+  /* Get the number of particles */
+  int n_type = 0;
+  uint64_t n_tot = 0;
+  const uint64_t *n_parts =
+      logger_reader_get_number_particles(&reader, &n_type);
+  for (int i = 0; i < n_type; i++) {
+    n_tot += n_parts[i];
+  }
+
+  /* Allocate the particles memory */
+  struct logger_particle *particles =
+      malloc(n_tot * sizeof(struct logger_particle));
+
+  logger_reader_read_all_particles(&reader, begin, logger_reader_const,
+                                   particles, n_tot);
+
+  /* Loop over time for a single particle */
+  size_t id = 0;
+  struct logger_particle p = particles[id];
+  for (double t = begin; t < end; t += (end - begin) / number_steps) {
+    /* Get the offset of the given time */
+    size_t o = logger_reader_get_next_offset_from_time(&reader, t);
+    message("time: %f offset: %ld", t, o);
+
+    /* Read the next particle */
+    struct logger_particle n;
+    logger_reader_get_next_particle(&reader, &p, &n, o);
+
+    message("Particle %zi: %f %f %f %f", id, p.pos[0], p.pos[1], p.pos[2],
+            p.time);
+
+    /* Now you can interpolate */
+    logger_particle_interpolate(&p, &n, t);
+  }
+
+  /* Cleanup the memory */
+  free(particles);
+  logger_reader_free(&reader);
+  return 0;
+}
diff --git a/logger/tests/testVR.yml b/logger/tests/testVR.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e9297e337f84062630756550f0ed8cc5f68016cc
--- /dev/null
+++ b/logger/tests/testVR.yml
@@ -0,0 +1,6 @@
+# Parameter file for the tests
+Logger:
+  delta_step: 10
+  initial_buffer_size: 0.01 # in GB
+  buffer_scale: 10
+  basename: testvr
diff --git a/src/common_io.c b/src/common_io.c
index 25abb187a7ac1ae86feab50bfd5cdbd0633d17ac..2ad47f12beec32d0f3532c4107e4317f2c403ca4 100644
--- a/src/common_io.c
+++ b/src/common_io.c
@@ -79,6 +79,8 @@ hid_t io_hdf5_type(enum IO_DATA_TYPE type) {
       return H5T_NATIVE_INT;
     case UINT:
       return H5T_NATIVE_UINT;
+    case UINT64:
+      return H5T_NATIVE_UINT64;
     case LONG:
       return H5T_NATIVE_LONG;
     case ULONG:
@@ -1021,6 +1023,8 @@ size_t io_sizeof_type(enum IO_DATA_TYPE type) {
       return sizeof(int);
     case UINT:
       return sizeof(unsigned int);
+    case UINT64:
+      return sizeof(uint64_t);
     case LONG:
       return sizeof(long);
     case ULONG:
@@ -1035,6 +1039,8 @@ size_t io_sizeof_type(enum IO_DATA_TYPE type) {
       return sizeof(double);
     case CHAR:
       return sizeof(char);
+    case SIZE_T:
+      return sizeof(size_t);
     default:
       error("Unknown type");
       return 0;
diff --git a/src/common_io.h b/src/common_io.h
index b4a846dc4f683cb4e18e3bba03c4cc95235a8854..de73db3476eeefa1ca6db869ebb4f4de1bbb8906 100644
--- a/src/common_io.h
+++ b/src/common_io.h
@@ -55,11 +55,13 @@ enum IO_DATA_TYPE {
   LONG,
   LONGLONG,
   UINT,
+  UINT64,
   ULONG,
   ULONGLONG,
   FLOAT,
   DOUBLE,
-  CHAR
+  CHAR,
+  SIZE_T,
 };
 
 #if defined(HAVE_HDF5)
diff --git a/src/engine.c b/src/engine.c
index 5655e4612d8a9dfdaf26ec921927d33b9ac3349f..1b87ab2ab9a35bf4cfec7c7d36e510b156e1bc55 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -1952,7 +1952,8 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
                        &e->logger->timestamp_offset);
   /* Make sure that we have enough space in the particle logger file
    * to store the particles in current time step. */
-  logger_ensure_size(e->logger, e->total_nr_parts, e->total_nr_gparts, 0);
+  logger_ensure_size(e->logger, s->nr_parts, s->nr_gparts, s->nr_sparts);
+  logger_write_description(e->logger, e);
 #endif
 
   /* Now, launch the calculation */
@@ -2264,7 +2265,8 @@ void engine_step(struct engine *e) {
                        &e->logger->timestamp_offset);
   /* Make sure that we have enough space in the particle logger file
    * to store the particles in current time step. */
-  logger_ensure_size(e->logger, e->total_nr_parts, e->total_nr_gparts, 0);
+  logger_ensure_size(e->logger, e->s->nr_parts, e->s->nr_gparts,
+                     e->s->nr_sparts);
 #endif
 
   /* Are we drifting everything (a la Gadget/GIZMO) ? */
@@ -2353,6 +2355,9 @@ void engine_step(struct engine *e) {
   engine_dump_restarts(e, 0, e->restart_onexit && engine_is_done(e));
 
   engine_check_for_dumps(e);
+#ifdef WITH_LOGGER
+  engine_check_for_index_dump(e);
+#endif
 
   TIMER_TOC2(timer_step);
 
@@ -2382,7 +2387,7 @@ void engine_check_for_dumps(struct engine *e) {
     output_none,
     output_snapshot,
     output_statistics,
-    output_stf
+    output_stf,
   };
 
   /* What kind of output do we want? And at which time ?
@@ -2440,6 +2445,7 @@ void engine_check_for_dumps(struct engine *e) {
 
     /* Write some form of output */
     switch (type) {
+
       case output_snapshot:
 
         /* Do we want a corresponding VELOCIraptor output? */
@@ -2455,13 +2461,8 @@ void engine_check_for_dumps(struct engine *e) {
 #endif
         }
 
-          /* Dump... */
-#ifdef WITH_LOGGER
-        /* Write a file containing the offsets in the particle logger. */
-        engine_dump_index(e);
-#else
+        /* Dump... */
         engine_dump_snapshot(e);
-#endif
 
         /* Free the memory allocated for VELOCIraptor i/o. */
         if (with_stf && e->snapshot_invoke_stf && e->s->gpart_group_data) {
@@ -2549,6 +2550,38 @@ void engine_check_for_dumps(struct engine *e) {
   e->time = time;
 }
 
+/**
+ * @brief Check whether an index file has to be written during this
+ * step.
+ *
+ * @param e The #engine.
+ */
+void engine_check_for_index_dump(struct engine *e) {
+#ifdef WITH_LOGGER
+  /* Get a few variables */
+  struct logger_writer *log = e->logger;
+  const size_t dump_size = log->dump.count;
+  const size_t old_dump_size = log->index.dump_size_last_output;
+  const float mem_frac = log->index.mem_frac;
+  const size_t total_nr_parts =
+      (e->total_nr_parts + e->total_nr_gparts + e->total_nr_sparts +
+       e->total_nr_bparts + e->total_nr_DM_background_gparts);
+  const size_t index_file_size =
+      total_nr_parts * sizeof(struct logger_part_data);
+
+  /* Check if we should write a file */
+  if (mem_frac * (dump_size - old_dump_size) > index_file_size) {
+    /* Write an index file */
+    engine_dump_index(e);
+
+    /* Update the dump size for last output */
+    log->index.dump_size_last_output = dump_size;
+  }
+#else
+  error("This function should not be called without the logger.");
+#endif
+}
+
 /**
  * @brief dump restart files if it is time to do so and dumps are enabled.
  *
@@ -3217,8 +3250,7 @@ void engine_dump_index(struct engine *e) {
   }
 
   /* Dump... */
-  write_index_single(e, e->logger->base_name, e->internal_units,
-                     e->snapshot_units);
+  logger_write_index_file(e->logger, e);
 
   /* Flag that we dumped a snapshot */
   e->step_props |= engine_step_prop_logger_index;
diff --git a/src/engine.h b/src/engine.h
index 4c74ec4ec45a8952be27a047be431151c3440482..65105e4a8054c97879be75b431678576e27effbd 100644
--- a/src/engine.h
+++ b/src/engine.h
@@ -380,9 +380,8 @@ struct engine {
   int forcerepart;
   struct repartition *reparttype;
 
-#ifdef WITH_LOGGER
+  /* The particle logger */
   struct logger_writer *logger;
-#endif
 
   /* How many steps have we done with the same set of tasks? */
   int tasks_age;
@@ -495,6 +494,7 @@ void engine_reconstruct_multipoles(struct engine *e);
 void engine_allocate_foreign_particles(struct engine *e);
 void engine_print_stats(struct engine *e);
 void engine_check_for_dumps(struct engine *e);
+void engine_check_for_index_dump(struct engine *e);
 void engine_collect_end_of_step(struct engine *e, int apply);
 void engine_dump_snapshot(struct engine *e);
 void engine_init_output_lists(struct engine *e, struct swift_params *params);
diff --git a/src/gravity/Default/gravity_part.h b/src/gravity/Default/gravity_part.h
index 29f6bb6a64202c3fc3407407932e8f2e31f994af..bf0d612b077579829bffd69b5e821d86c75345c1 100644
--- a/src/gravity/Default/gravity_part.h
+++ b/src/gravity/Default/gravity_part.h
@@ -51,6 +51,11 @@ struct gpart {
   /*! Type of the #gpart (DM, gas, star, ...) */
   enum part_type type;
 
+#ifdef WITH_LOGGER
+  /* Additional data for the particle logger */
+  struct logger_part_data logger_data;
+#endif
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Numer of gparts this gpart interacted with */
diff --git a/src/gravity/MultiSoftening/gravity_part.h b/src/gravity/MultiSoftening/gravity_part.h
index de410fbbb988136af46c6f9085bdafee863b9f99..e865d99484735820ebb81e7a10fc3b5bb3edfdb2 100644
--- a/src/gravity/MultiSoftening/gravity_part.h
+++ b/src/gravity/MultiSoftening/gravity_part.h
@@ -52,6 +52,11 @@ struct gpart {
   /*! Type of the #gpart (DM, gas, star, ...) */
   enum part_type type;
 
+#ifdef WITH_LOGGER
+  /* Additional data for the particle logger */
+  struct logger_part_data logger_data;
+#endif
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Numer of gparts this gpart interacted with */
diff --git a/src/logger.c b/src/logger.c
index 762eb516077ef82f08b6c34da09cd7bc9eb6a280..20d77783166984304f5f8f5801cd20ff9a442112 100644
--- a/src/logger.c
+++ b/src/logger.c
@@ -46,7 +46,7 @@
  */
 /* Number of bytes for a mask. */
 // TODO change this to number of bits
-#define logger_mask_size 1
+#define logger_mask_size 2
 
 /* Number of bits for chunk header. */
 #define logger_header_bytes 8
@@ -77,10 +77,13 @@ const struct mask_data logger_mask_data[logger_count_mask] = {
     {sizeof(float), 1 << logger_rho, "density"},
     /* Particle's constants: mass (float) and ID (long long). */
     {sizeof(float) + sizeof(long long), 1 << logger_consts, "consts"},
+    /* Flag for special cases (e.g. change of MPI rank, star formation, ...) */
+    {sizeof(int), 1 << logger_special_flags, "special flags"},
     /* Simulation time stamp: integertime and double time (e.g. scale
        factor or time). */
     {sizeof(integertime_t) + sizeof(double), 1 << logger_timestamp,
-     "timestamp"}};
+     "timestamp"},
+};
 
 /**
  * @brief Write the header of a chunk (offset + mask).
@@ -101,7 +104,7 @@ char *logger_write_chunk_header(char *buff, const unsigned int *mask,
   buff += logger_mask_size;
 
   /* write offset. */
-  size_t diff_offset = offset_new - *offset;
+  uint64_t diff_offset = offset_new - *offset;
   memcpy(buff, &diff_offset, logger_offset_size);
   buff += logger_offset_size;
 
@@ -165,50 +168,73 @@ int logger_compute_chunk_size(unsigned int mask) {
 /**
  * @brief log all particles in the engine.
  *
- * @param log The #logger
+ * @param log The #logger_writer
  * @param e The #engine
  */
 void logger_log_all(struct logger_writer *log, const struct engine *e) {
 
   /* Ensure that enough space is available. */
-  logger_ensure_size(log, e->total_nr_parts, e->total_nr_gparts, 0);
-#ifdef SWIFT_DEBUG_CHECKS
-  message("Need to implement stars");
-#endif
+  logger_ensure_size(log, e->s->nr_parts, e->s->nr_gparts, e->s->nr_sparts);
 
   /* some constants. */
   const struct space *s = e->s;
-  const unsigned int mask =
+  const unsigned int mask_hydro =
       logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask |
       logger_mask_data[logger_a].mask | logger_mask_data[logger_u].mask |
       logger_mask_data[logger_h].mask | logger_mask_data[logger_rho].mask |
       logger_mask_data[logger_consts].mask;
 
   /* loop over all parts. */
-  for (long long i = 0; i < e->total_nr_parts; i++) {
-    logger_log_part(log, &s->parts[i], mask,
-                    &s->xparts[i].logger_data.last_offset);
+  for (size_t i = 0; i < s->nr_parts; i++) {
+    logger_log_part(log, &s->parts[i], mask_hydro,
+                    &s->xparts[i].logger_data.last_offset,
+                    /* Special flags */ 0);
     s->xparts[i].logger_data.steps_since_last_output = 0;
   }
 
-  /* loop over all gparts. */
-  if (e->total_nr_gparts > 0) error("Not implemented");
+  const unsigned int mask_grav =
+      logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask |
+      logger_mask_data[logger_a].mask | logger_mask_data[logger_consts].mask;
+
+  /* loop over all gparts */
+  for (size_t i = 0; i < s->nr_gparts; i++) {
+    /* Log only the dark matter */
+    if (s->gparts[i].type != swift_type_dark_matter) continue;
+
+    logger_log_gpart(log, &s->gparts[i], mask_grav,
+                     &s->gparts[i].logger_data.last_offset,
+                     /* Special flags */ 0);
+    s->gparts[i].logger_data.steps_since_last_output = 0;
+  }
+
+  const unsigned int mask_stars = logger_mask_data[logger_x].mask |
+                                  logger_mask_data[logger_v].mask |
+                                  logger_mask_data[logger_consts].mask;
+
+  /* loop over all sparts */
+  for (size_t i = 0; i < s->nr_sparts; i++) {
+    logger_log_spart(log, &s->sparts[i], mask_stars,
+                     &s->sparts[i].logger_data.last_offset,
+                     /* Special flags */ 0);
+    s->sparts[i].logger_data.steps_since_last_output = 0;
+  }
 
-  /* loop over all sparts. */
-  // TODO
+  if (e->total_nr_bparts > 0) error("Not implemented");
 }
 
 /**
  * @brief Dump a #part to the log.
  *
- * @param log The #logger
+ * @param log The #logger_writer
  * @param p The #part to dump.
  * @param mask The mask of the data to dump.
  * @param offset Pointer to the offset of the previous log of this particle;
+ * @param special_flags The value of the special flag.
  * (return) offset of this log.
  */
 void logger_log_part(struct logger_writer *log, const struct part *p,
-                     unsigned int mask, size_t *offset) {
+                     unsigned int mask, size_t *offset,
+                     const int special_flags) {
 
   /* Make sure we're not writing a timestamp. */
   if (mask & logger_mask_data[logger_timestamp].mask)
@@ -267,12 +293,85 @@ void logger_log_part(struct logger_writer *log, const struct part *p,
     // TODO make it dependent of logger_mask_data
     memcpy(buff, &p->mass, sizeof(float));
     buff += sizeof(float);
-    memcpy(buff, &p->id, sizeof(long long));
-    buff += sizeof(long long);
+    const int64_t id = p->id;
+    memcpy(buff, &id, sizeof(int64_t));
+    buff += sizeof(int64_t);
   }
 
 #endif
 
+  /* Special flags */
+  if (mask & logger_mask_data[logger_special_flags].mask) {
+    memcpy(buff, &special_flags, logger_mask_data[logger_special_flags].size);
+    buff += logger_mask_data[logger_special_flags].size;
+  }
+
+  /* Update the log message offset. */
+  *offset = offset_new;
+}
+
+/**
+ * @brief Dump a #spart to the log.
+ *
+ * @param log The #logger_writer
+ * @param sp The #spart to dump.
+ * @param mask The mask of the data to dump.
+ * @param offset Pointer to the offset of the previous log of this particle;
+ * @param special_flags The value of the special flag.
+ * (return) offset of this log.
+ */
+void logger_log_spart(struct logger_writer *log, const struct spart *sp,
+                      unsigned int mask, size_t *offset,
+                      const int special_flags) {
+
+  /* Make sure we're not writing a timestamp. */
+  if (mask & logger_mask_data[logger_timestamp].mask)
+    error("You should not log particles as timestamps.");
+
+  /* Make sure we're not logging fields not supported by sparts. */
+  if (mask &
+      (logger_mask_data[logger_u].mask | logger_mask_data[logger_rho].mask |
+       logger_mask_data[logger_a].mask))
+    error("Can't log SPH quantities for sparts.");
+
+  /* Start by computing the size of the message. */
+  const int size = logger_compute_chunk_size(mask);
+
+  /* Allocate a chunk of memory in the dump of the right size. */
+  size_t offset_new;
+  char *buff = (char *)dump_get(&log->dump, size, &offset_new);
+
+  /* Write the header. */
+  buff = logger_write_chunk_header(buff, &mask, offset, offset_new);
+
+  /* Particle position as three doubles. */
+  if (mask & logger_mask_data[logger_x].mask) {
+    memcpy(buff, sp->x, logger_mask_data[logger_x].size);
+    buff += logger_mask_data[logger_x].size;
+  }
+
+  /* Particle velocity as three floats. */
+  if (mask & logger_mask_data[logger_v].mask) {
+    memcpy(buff, sp->v, logger_mask_data[logger_v].size);
+    buff += logger_mask_data[logger_v].size;
+  }
+
+  /* Particle constants, which is a bit more complicated. */
+  if (mask & logger_mask_data[logger_consts].mask) {
+    // TODO make it dependent of logger_mask_data
+    memcpy(buff, &sp->mass, sizeof(float));
+    buff += sizeof(float);
+    const int64_t id = sp->id;
+    memcpy(buff, &id, sizeof(int64_t));
+    buff += sizeof(int64_t);
+  }
+
+  /* Special flags */
+  if (mask & logger_mask_data[logger_special_flags].mask) {
+    memcpy(buff, &special_flags, logger_mask_data[logger_special_flags].size);
+    buff += logger_mask_data[logger_special_flags].size;
+  }
+
   /* Update the log message offset. */
   *offset = offset_new;
 }
@@ -280,14 +379,22 @@ void logger_log_part(struct logger_writer *log, const struct part *p,
 /**
  * @brief Dump a #gpart to the log.
  *
- * @param log The #logger
+ * @param log The #logger_writer
  * @param p The #gpart to dump.
  * @param mask The mask of the data to dump.
  * @param offset Pointer to the offset of the previous log of this particle;
+ * @param special_flags The value of the special flags.
  * (return) offset of this log.
  */
 void logger_log_gpart(struct logger_writer *log, const struct gpart *p,
-                      unsigned int mask, size_t *offset) {
+                      unsigned int mask, size_t *offset,
+                      const int special_flags) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (p->id_or_neg_offset < 0) {
+    error("Cannot log a gpart attached to another particle");
+  }
+#endif
 
   /* Make sure we're not writing a timestamp. */
   if (mask & logger_mask_data[logger_timestamp].mask)
@@ -331,8 +438,15 @@ void logger_log_gpart(struct logger_writer *log, const struct gpart *p,
     // TODO make it dependent of logger_mask_data.
     memcpy(buff, &p->mass, sizeof(float));
     buff += sizeof(float);
-    memcpy(buff, &p->id_or_neg_offset, sizeof(long long));
-    buff += sizeof(long long);
+    const int64_t id = p->id_or_neg_offset;
+    memcpy(buff, &id, sizeof(int64_t));
+    buff += sizeof(int64_t);
+  }
+
+  /* Special flags */
+  if (mask & logger_mask_data[logger_special_flags].mask) {
+    memcpy(buff, &special_flags, logger_mask_data[logger_special_flags].size);
+    buff += logger_mask_data[logger_special_flags].size;
   }
 
   /* Update the log message offset. */
@@ -342,7 +456,7 @@ void logger_log_gpart(struct logger_writer *log, const struct gpart *p,
 /**
  * @brief write a timestamp
  *
- * @param log The #logger
+ * @param log The #logger_writer
  * @param timestamp time to write
  * @param time time or scale factor
  * @param offset Pointer to the offset of the previous log of this particle;
@@ -382,7 +496,7 @@ void logger_log_timestamp(struct logger_writer *log, integertime_t timestamp,
  * Check if logger parameters are large enough to write all particles
  * and ensure that enough space is available in the buffer.
  *
- * @param log The #logger
+ * @param log The #logger_writer
  * @param total_nr_parts total number of part
  * @param total_nr_gparts total number of gpart
  * @param total_nr_sparts total number of spart
@@ -390,25 +504,29 @@ void logger_log_timestamp(struct logger_writer *log, integertime_t timestamp,
 void logger_ensure_size(struct logger_writer *log, size_t total_nr_parts,
                         size_t total_nr_gparts, size_t total_nr_sparts) {
 
-  /* count part memory. */
-  size_t limit = log->max_chunk_size;
+  /* count part memory */
+  size_t limit = 0;
 
-  limit *= total_nr_parts;
+  /* count part memory */
+  limit += total_nr_parts;
 
-  /* count gpart memory. */
-  if (total_nr_gparts > 0) error("Not implemented");
+  /* count gpart memory */
+  limit += total_nr_gparts;
 
   /* count spart memory. */
-  if (total_nr_sparts > 0) error("Not implemented");
+  limit += total_nr_sparts;
 
-  /* ensure enough space in dump. */
+  // TODO improve estimate with the size of each particle
+  limit *= log->max_chunk_size;
+
+  /* ensure enough space in dump */
   dump_ensure(&log->dump, limit, log->buffer_scale * limit);
 }
 
 /**
  * @brief intialize the logger structure
  *
- * @param log The #logger
+ * @param log The #logger_writer
  * @param params The #swift_params
  */
 void logger_init(struct logger_writer *log, struct swift_params *params) {
@@ -421,8 +539,12 @@ void logger_init(struct logger_writer *log, struct swift_params *params) {
       parser_get_opt_param_float(params, "Logger:buffer_scale", 10);
   parser_get_param_string(params, "Logger:basename", log->base_name);
 
+  log->index.mem_frac =
+      parser_get_opt_param_float(params, "Logger:index_mem_frac", 0.05);
+
   /* set initial value of parameters. */
   log->timestamp_offset = 0;
+  log->index.dump_size_last_output = 0;
 
   /* generate dump filename. */
   char logger_name_file[PARSER_MAX_LINE_SIZE];
@@ -445,14 +567,14 @@ void logger_init(struct logger_writer *log, struct swift_params *params) {
 /**
  * @brief Close dump file and desallocate memory
  *
- * @param log The #logger
+ * @param log The #logger_writer
  */
 void logger_free(struct logger_writer *log) { dump_close(&log->dump); }
 
 /**
  * @brief Write a file header to a logger file
  *
- * @param log The #logger
+ * @param log The #logger_writer
  *
  */
 void logger_write_file_header(struct logger_writer *log) {
@@ -460,7 +582,7 @@ void logger_write_file_header(struct logger_writer *log) {
   /* get required variables. */
   struct dump *dump = &log->dump;
 
-  size_t file_offset = dump->file_offset;
+  uint64_t file_offset = dump->file_offset;
 
   if (file_offset != 0)
     error(
@@ -516,7 +638,7 @@ void logger_write_file_header(struct logger_writer *log) {
  * @param buff The reading buffer
  * @param mask The mask to read
  * @param offset (return) the offset pointed by this chunk (absolute)
- * @param offset_cur The current chunk offset
+ * @param cur_offset The current chunk offset
  *
  * @return Number of bytes read
  */
@@ -599,8 +721,10 @@ int logger_read_part(struct part *p, size_t *offset, const char *buff) {
     // TODO make it dependent of logger_mask_data.
     memcpy(&p->mass, buff, sizeof(float));
     buff += sizeof(float);
-    memcpy(&p->id, buff, sizeof(long long));
-    buff += sizeof(long long);
+    int64_t id = 0;
+    memcpy(&id, buff, sizeof(int64_t));
+    p->id = id;
+    buff += sizeof(int64_t);
   }
 
 #endif
@@ -661,8 +785,9 @@ int logger_read_gpart(struct gpart *p, size_t *offset, const char *buff) {
     // TODO make it dependent of logger_mask_data
     memcpy(&p->mass, buff, sizeof(float));
     buff += sizeof(float);
-    memcpy(&p->id_or_neg_offset, buff, sizeof(long long));
-    buff += sizeof(long long);
+    int64_t id = p->id_or_neg_offset;
+    memcpy(&id, buff, sizeof(int64_t));
+    buff += sizeof(int64_t);
   }
 
   /* Finally, return the mask of the values we just read. */
@@ -673,6 +798,7 @@ int logger_read_gpart(struct gpart *p, size_t *offset, const char *buff) {
  * @brief Read a logger message for a timestamp.
  *
  * @param t The timestamp in which to store the value.
+ * @param time The time in which to store the value.
  * @param offset Pointer to the offset of the logger message in the buffer,
  *        will be overwritten with the offset of the previous message.
  * @param buff Pointer to the start of an encoded logger message.
diff --git a/src/logger.h b/src/logger.h
index ed2d6374fa9031f526e79e790572c89f6176df4b..3f17cb92c6ccd5870f79f4a39f3520e9dbbed94e 100644
--- a/src/logger.h
+++ b/src/logger.h
@@ -1,6 +1,7 @@
 /*******************************************************************************
  * This file is part of SWIFT.
  * Copyright (c) 2017 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
+ *               2018 Loic Hausammann (loic.hausammann@epfl.ch)
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published
@@ -19,6 +20,8 @@
 #ifndef SWIFT_LOGGER_H
 #define SWIFT_LOGGER_H
 
+#include "../config.h"
+
 #ifdef WITH_LOGGER
 
 /* Includes. */
@@ -35,7 +38,7 @@ struct part;
 struct engine;
 
 #define logger_major_version 0
-#define logger_minor_version 1
+#define logger_minor_version 2
 
 /**
  * Logger entries contain messages representing the particle data at a given
@@ -84,8 +87,11 @@ enum logger_masks_number {
   logger_h = 4,
   logger_rho = 5,
   logger_consts = 6,
-  logger_timestamp = 7,  /* expect it to be before count. */
-  logger_count_mask = 8, /* Need to be the last. */
+  logger_special_flags =
+      7,                 /* < 0 for MPI rank changes, 0 for none,
+                            > 0 for particle type changes, > part_type for deletion */
+  logger_timestamp = 8,  /* expect it to be before count. */
+  logger_count_mask = 9, /* Need to be the last. */
 } __attribute__((packed));
 
 struct mask_data {
@@ -104,7 +110,9 @@ extern const struct mask_data logger_mask_data[logger_count_mask];
 /* Size of the strings. */
 #define logger_string_length 200
 
-/* structure containing global data. */
+/**
+ * @brief structure containing global data for the particle logger.
+ */
 struct logger_writer {
   /* Number of particle steps between dumping a chunk of data. */
   short int delta_step;
@@ -112,6 +120,14 @@ struct logger_writer {
   /* Logger basename. */
   char base_name[logger_string_length];
 
+  struct {
+    /* The total memory fraction reserved for the index files. */
+    float mem_frac;
+
+    /* Size of the dump at the time of the last output */
+    size_t dump_size_last_output;
+  } index;
+
   /*  Dump file (In the reader, the dump is cleaned, therefore it is renamed
    * logfile). */
   struct dump dump;
@@ -133,16 +149,21 @@ struct logger_part_data {
   int steps_since_last_output;
 
   /* offset of last particle log entry. */
-  size_t last_offset;
+  uint64_t last_offset;
 };
 
 /* Function prototypes. */
 int logger_compute_chunk_size(unsigned int mask);
 void logger_log_all(struct logger_writer *log, const struct engine *e);
 void logger_log_part(struct logger_writer *log, const struct part *p,
-                     unsigned int mask, size_t *offset);
+                     unsigned int mask, size_t *offset,
+                     const int special_flags);
+void logger_log_spart(struct logger_writer *log, const struct spart *p,
+                      unsigned int mask, size_t *offset,
+                      const int special_flags);
 void logger_log_gpart(struct logger_writer *log, const struct gpart *p,
-                      unsigned int mask, size_t *offset);
+                      unsigned int mask, size_t *offset,
+                      const int special_flags);
 void logger_init(struct logger_writer *log, struct swift_params *params);
 void logger_free(struct logger_writer *log);
 void logger_log_timestamp(struct logger_writer *log, integertime_t t,
@@ -170,9 +191,9 @@ INLINE static void logger_part_data_init(struct logger_part_data *logger) {
  * @brief Should this particle write its data now ?
  *
  * @param logger_data The #logger_part_data of a particle.
- * @param log The #logger.
+ * @param log The #logger_writer.
  *
- * @return 1 if the particule should be writen, 0 otherwise.
+ * @return 1 if the particle should be written, 0 otherwise.
  */
 __attribute__((always_inline)) INLINE static int logger_should_write(
     const struct logger_part_data *logger_data,
diff --git a/src/logger_io.c b/src/logger_io.c
index c6be1f292434c759e20064542e91caa2cd238a4d..06e7d0131451fdcb106d5c763465761ecee2bbc5 100644
--- a/src/logger_io.c
+++ b/src/logger_io.c
@@ -21,7 +21,7 @@
 /* Config parameters. */
 #include "../config.h"
 
-#if defined(WITH_LOGGER) && defined(HAVE_HDF5) && !defined(WITH_MPI)
+#if defined(WITH_LOGGER)
 
 /* Some standard headers. */
 #include <hdf5.h>
@@ -31,13 +31,15 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "common_io.h"
+
 /* This object's header. */
 #include "logger_io.h"
 
 /* Local includes. */
 #include "chemistry_io.h"
 #include "common_io.h"
-#include "cooling.h"
+#include "cooling_io.h"
 #include "dimension.h"
 #include "engine.h"
 #include "error.h"
@@ -52,248 +54,336 @@
 #include "serial_io.h"
 #include "single_io.h"
 #include "stars_io.h"
+#include "tracers_io.h"
 #include "units.h"
+#include "version.h"
 #include "xmf.h"
 
 /**
- * @brief Writes an HDF5 index file
+ * @brief Mapper function to copy #part or #gpart fields into a buffer.
+ * WARNING Assumes two io_props in extra_data.
+ */
+void logger_io_copy_mapper(void* restrict temp, int N,
+                           void* restrict extra_data) {
+
+  /* Get the io_props */
+  const struct io_props* props = (const struct io_props*)(extra_data);
+  const struct io_props props1 = props[0];
+  const struct io_props props2 = props[1];
+
+  /* Get the sizes */
+  const size_t typeSize1 = io_sizeof_type(props1.type);
+  const size_t copySize1 = typeSize1 * props1.dimension;
+  const size_t typeSize2 = io_sizeof_type(props2.type);
+  const size_t copySize2 = typeSize2 * props2.dimension;
+  const size_t copySize = copySize1 + copySize2;
+
+  /* How far are we with this chunk? */
+  char* restrict temp_c = (char*)temp;
+  const ptrdiff_t delta = (temp_c - props1.start_temp_c) / copySize;
+
+  /* Copy the memory to the buffer */
+  for (int k = 0; k < N; k++) {
+    memcpy(&temp_c[k * copySize], props1.field + (delta + k) * props1.partSize,
+           copySize1);
+    memcpy(&temp_c[k * copySize + copySize1],
+           props2.field + (delta + k) * props2.partSize, copySize2);
+  }
+}
+/**
+ * @brief Writes the data array in the index file.
+ *
+ * @param e The #engine we are writing from.
+ * @param f The file to use.
+ * @param props The #io_props array.
+ * @param n_props The number of element in @props.
+ * @param N The number of particles to write.
+ */
+void writeIndexArray(const struct engine* e, FILE* f, struct io_props* props,
+                     size_t n_props, size_t N) {
+
+  /* Check that the assumptions are correct */
+  if (n_props != 2)
+    error("Not implemented: The index file can only write two props.");
+
+  if (props[0].dimension != 1 || props[1].dimension != 1)
+    error("Not implemented: cannot use multidimensional data");
+
+  /* Get a few variables */
+  const size_t typeSize =
+      io_sizeof_type(props[0].type) + io_sizeof_type(props[1].type);
+
+  const size_t num_elements = N;
+
+  /* Allocate temporary buffer */
+  void* temp = NULL;
+  if (posix_memalign((void**)&temp, IO_BUFFER_ALIGNMENT,
+                     num_elements * typeSize) != 0)
+    error("Unable to allocate temporary i/o buffer");
+
+  /* Copy the particle data to the temporary buffer */
+  /* Set initial buffer position */
+  props[0].start_temp_c = temp;
+  props[1].start_temp_c = temp;
+
+  /* Copy the whole thing into a buffer */
+  threadpool_map((struct threadpool*)&e->threadpool, logger_io_copy_mapper,
+                 temp, N, typeSize, 0, props);
+
+  /* Write data to file */
+  fwrite(temp, typeSize, num_elements, f);
+
+  /* Free everything */
+  free(temp);
+}
+
+/**
+ * @brief Writes a logger index file
  *
+ * @param log The #logger_writer.
  * @param e The engine containing all the system.
- * @param baseName The common part of the snapshot file name.
- * @param internal_units The #unit_system used internally
- * @param snapshot_units The #unit_system used in the snapshots
  *
- * Creates an HDF5 output file and writes the offset and id of particles
+ * Creates an output file and writes the offset and id of particles
  * contained in the engine. If such a file already exists, it is erased and
  * replaced by the new one.
  *
+ * An index file is constructed by writing first a few variables (e.g. time,
+ * number of particles, if the file is sorted, ...) and then an array of index
+ * and offset for each particle type.
+ *
  * Calls #error() if an error occurs.
  *
  */
-void write_index_single(struct engine* e, const char* baseName,
-                        const struct unit_system* internal_units,
-                        const struct unit_system* snapshot_units) {
+void logger_write_index_file(struct logger_writer* log, struct engine* e) {
 
-  hid_t h_file = 0, h_grp = 0;
-  const size_t Ngas = e->s->nr_parts;
-  const size_t Nstars = e->s->nr_sparts;
-  const size_t Ntot = e->s->nr_gparts;
-  const int periodic = e->s->periodic;
-  int numFiles = 1;
   struct part* parts = e->s->parts;
   struct xpart* xparts = e->s->xparts;
-  // struct gpart* gparts = e->s->gparts;
-  struct gpart* dmparts = NULL;
-  // struct spart* sparts = e->s->sparts;
+  struct gpart* gparts = e->s->gparts;
+  struct spart* sparts = e->s->sparts;
   static int outputCount = 0;
 
-  struct logger_writer* log = e->logger;
-
-  /* Number of unassociated gparts */
-  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
-
-  long long N_total[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
+  /* Number of particles currently in the arrays */
+  const size_t Ntot = e->s->nr_gparts;
+  const size_t Ngas = e->s->nr_parts;
+  const size_t Nstars = e->s->nr_sparts;
+  /* const size_t Nblackholes = e->s->nr_bparts; */
+
+  /* Number of particles that we will write */
+  const size_t Ntot_written =
+      e->s->nr_gparts - e->s->nr_inhibited_gparts - e->s->nr_extra_gparts;
+  const size_t Ngas_written =
+      e->s->nr_parts - e->s->nr_inhibited_parts - e->s->nr_extra_parts;
+  const size_t Nstars_written =
+      e->s->nr_sparts - e->s->nr_inhibited_sparts - e->s->nr_extra_sparts;
+  const size_t Nblackholes_written =
+      e->s->nr_bparts - e->s->nr_inhibited_bparts - e->s->nr_extra_bparts;
+  const size_t Nbaryons_written =
+      Ngas_written + Nstars_written + Nblackholes_written;
+  const size_t Ndm_written =
+      Ntot_written > 0 ? Ntot_written - Nbaryons_written : 0;
+
+  /* Format things in a Gadget-friendly array */
+  uint64_t N_total[swift_type_count] = {
+      (uint64_t)Ngas_written,   (uint64_t)Ndm_written,        0, 0,
+      (uint64_t)Nstars_written, (uint64_t)Nblackholes_written};
 
   /* File name */
   char fileName[FILENAME_BUFFER_SIZE];
-  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
-           outputCount);
+  snprintf(fileName, FILENAME_BUFFER_SIZE, "%.100s_%04i.index",
+           e->logger->base_name, outputCount);
 
   /* Open file */
-  /* message("Opening file '%s'.", fileName); */
-  h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-  if (h_file < 0) {
-    error("Error while opening file '%s'.", fileName);
-  }
+  FILE* f = NULL;
+  f = fopen(fileName, "wb");
 
-  /* Open header to write simulation properties */
-  /* message("Writing runtime parameters..."); */
-  h_grp =
-      H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-  if (h_grp < 0) error("Error while creating runtime parameters group\n");
-
-  /* Write the relevant information */
-  io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);
-
-  /* Close runtime parameters */
-  H5Gclose(h_grp);
-
-  /* Open header to write simulation properties */
-  /* message("Writing file header..."); */
-  h_grp = H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-  if (h_grp < 0) error("Error while creating file header\n");
-
-  /* Print the relevant information and print status */
-  io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
-  double dblTime = e->time;
-  io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
-  io_write_attribute(h_grp, "Time Offset", UINT, &log->timestamp_offset, 1);
-  int dimension = (int)hydro_dimension;
-  io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);
-
-  /* GADGET-2 legacy values */
-  /* Number of particles of each type */
-  unsigned int numParticles[swift_type_count] = {0};
-  unsigned int numParticlesHighWord[swift_type_count] = {0};
-  for (int ptype = 0; ptype < swift_type_count; ++ptype) {
-    numParticles[ptype] = (unsigned int)N_total[ptype];
-    numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
-  }
-  io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total,
-                     swift_type_count);
-  io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
-                     swift_type_count);
-  io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT,
-                     numParticlesHighWord, swift_type_count);
-  double MassTable[swift_type_count] = {0};
-  io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count);
-  unsigned int flagEntropy[swift_type_count] = {0};
-  flagEntropy[0] = writeEntropyFlag();
-  io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy,
-                     swift_type_count);
-  io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1);
-
-  /* Close header */
-  H5Gclose(h_grp);
-
-  /* Print the code version */
-  io_write_code_description(h_file);
-
-  /* Print the SPH parameters */
-  if (e->policy & engine_policy_hydro) {
-    h_grp = H5Gcreate(h_file, "/HydroScheme", H5P_DEFAULT, H5P_DEFAULT,
-                      H5P_DEFAULT);
-    if (h_grp < 0) error("Error while creating SPH group");
-    hydro_props_print_snapshot(h_grp, e->hydro_properties);
-    hydro_write_flavour(h_grp);
-    H5Gclose(h_grp);
+  if (f == NULL) {
+    error("Failed to open file %s", fileName);
   }
 
-  /* Print the gravity parameters */
-  if (e->policy & engine_policy_self_gravity) {
-    h_grp = H5Gcreate(h_file, "/GravityScheme", H5P_DEFAULT, H5P_DEFAULT,
-                      H5P_DEFAULT);
-    if (h_grp < 0) error("Error while creating gravity group");
-    gravity_props_print_snapshot(h_grp, e->gravity_properties);
-    H5Gclose(h_grp);
-  }
+  /* Write double time */
+  fwrite(&e->time, sizeof(double), 1, f);
 
-  /* Print the runtime parameters */
-  h_grp =
-      H5Gcreate(h_file, "/Parameters", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-  if (h_grp < 0) error("Error while creating parameters group");
-  parser_write_params_to_hdf5(e->parameter_file, h_grp, 1);
-  H5Gclose(h_grp);
-
-  /* Print the runtime unused parameters */
-  h_grp = H5Gcreate(h_file, "/UnusedParameters", H5P_DEFAULT, H5P_DEFAULT,
-                    H5P_DEFAULT);
-  if (h_grp < 0) error("Error while creating parameters group");
-  parser_write_params_to_hdf5(e->parameter_file, h_grp, 0);
-  H5Gclose(h_grp);
-
-  /* Print the system of Units used in the spashot */
-  io_write_unit_system(h_file, snapshot_units, "Units");
-
-  /* Print the system of Units used internally */
-  io_write_unit_system(h_file, internal_units, "InternalCodeUnits");
-
-  /* Tell the user if a conversion will be needed */
-  if (e->verbose) {
-    if (units_are_equal(snapshot_units, internal_units)) {
-
-      message("Snapshot and internal units match. No conversion needed.");
-
-    } else {
-
-      message("Conversion needed from:");
-      message("(Snapshot) Unit system: U_M =      %e g.",
-              snapshot_units->UnitMass_in_cgs);
-      message("(Snapshot) Unit system: U_L =      %e cm.",
-              snapshot_units->UnitLength_in_cgs);
-      message("(Snapshot) Unit system: U_t =      %e s.",
-              snapshot_units->UnitTime_in_cgs);
-      message("(Snapshot) Unit system: U_I =      %e A.",
-              snapshot_units->UnitCurrent_in_cgs);
-      message("(Snapshot) Unit system: U_T =      %e K.",
-              snapshot_units->UnitTemperature_in_cgs);
-      message("to:");
-      message("(internal) Unit system: U_M = %e g.",
-              internal_units->UnitMass_in_cgs);
-      message("(internal) Unit system: U_L = %e cm.",
-              internal_units->UnitLength_in_cgs);
-      message("(internal) Unit system: U_t = %e s.",
-              internal_units->UnitTime_in_cgs);
-      message("(internal) Unit system: U_I = %e A.",
-              internal_units->UnitCurrent_in_cgs);
-      message("(internal) Unit system: U_T = %e K.",
-              internal_units->UnitTemperature_in_cgs);
-    }
+  /* Write integer time */
+  fwrite(&e->ti_current, sizeof(integertime_t), 1, f);
+
+  /* Write number of particles */
+  fwrite(N_total, sizeof(uint64_t), swift_type_count, f);
+
+  /* Write if the file is sorted */
+  const char sorted = 0;
+  fwrite(&sorted, sizeof(char), 1, f);
+
+  /* Ensure the data that follows is aligned on an 8-byte boundary */
+  size_t cur_pos = ftell(f);
+  size_t d_align = ((cur_pos + 7) & ~7) - cur_pos;
+  if (d_align > 0) {
+    uint64_t zero_padding = 0;
+    /* Pad with zero bytes; d_align <= 7, so an 8-byte source is sufficient */
+    fwrite(&zero_padding, d_align, 1, f);
   }
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
     /* Don't do anything if no particle of this kind */
-    if (numParticles[ptype] == 0) continue;
-
-    /* Open the particle group in the file */
-    char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
-    snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
-             ptype);
-    h_grp = H5Gcreate(h_file, partTypeGroupName, H5P_DEFAULT, H5P_DEFAULT,
-                      H5P_DEFAULT);
-    if (h_grp < 0) {
-      error("Error while creating particle group.\n");
-    }
+    if (N_total[ptype] == 0) continue;
 
-    int num_fields = 0;
-    struct io_props list[100];
+    /* Number of properties (the code cannot deal with more than two props
+       per particle type) */
     size_t N = 0;
+    int num_fields = 0;
+    struct io_props list[2];
+
+    struct part* parts_written = NULL;
+    struct xpart* xparts_written = NULL;
+    struct gpart* gparts_written = NULL;
+    struct velociraptor_gpart_data* gpart_group_data_written = NULL;
+    struct spart* sparts_written = NULL;
+    struct bpart* bparts_written = NULL;
 
     /* Write particle fields from the particle structure */
     switch (ptype) {
 
       case swift_type_gas:
-        N = Ngas;
-        hydro_write_index(parts, xparts, list, &num_fields);
+        if (Ngas == Ngas_written) {
+
+          /* No inhibited particles: easy case */
+          N = Ngas;
+          num_fields += hydro_write_index(parts, xparts, list);
+
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          N = Ngas_written;
+
+          /* Allocate temporary arrays */
+          if (swift_memalign("parts_written", (void**)&parts_written,
+                             part_align,
+                             Ngas_written * sizeof(struct part)) != 0)
+            error("Error while allocating temporary memory for parts");
+          if (swift_memalign("xparts_written", (void**)&xparts_written,
+                             xpart_align,
+                             Ngas_written * sizeof(struct xpart)) != 0)
+            error("Error while allocating temporary memory for xparts");
+
+          /* Collect the particles we want to write */
+          io_collect_parts_to_write(parts, xparts, parts_written,
+                                    xparts_written, Ngas, Ngas_written);
+
+          /* Select the fields to write */
+          num_fields += hydro_write_index(parts_written, xparts_written, list);
+        }
         break;
 
       case swift_type_dark_matter:
-        error("TODO");
+        if (Ntot == Ndm_written) {
+
+          /* This is a DM-only run without inhibited particles */
+          N = Ntot;
+          num_fields += darkmatter_write_index(gparts, list);
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          N = Ndm_written;
+
+          /* Allocate temporary array */
+          if (swift_memalign("gparts_written", (void**)&gparts_written,
+                             gpart_align,
+                             Ndm_written * sizeof(struct gpart)) != 0)
+            error("Error while allocating temporary memory for gparts");
+
+          /* Collect the non-inhibited DM particles from gpart */
+          const int with_stf = 0;
+          io_collect_gparts_to_write(gparts, e->s->gpart_group_data,
+                                     gparts_written, gpart_group_data_written,
+                                     Ntot, Ndm_written, with_stf);
+
+          /* Select the fields to write */
+          num_fields += darkmatter_write_index(gparts_written, list);
+        }
         break;
 
       case swift_type_stars:
-        N = Nstars;
-        error("TODO");
-        // star_write_index(sparts, list, &num_fields);
+        if (Nstars == Nstars_written) {
+
+          /* No inhibited particles: easy case */
+          N = Nstars;
+          num_fields += stars_write_index(sparts, list);
+
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          N = Nstars_written;
+
+          /* Allocate temporary arrays */
+          if (swift_memalign("sparts_written", (void**)&sparts_written,
+                             spart_align,
+                             Nstars_written * sizeof(struct spart)) != 0)
+            error("Error while allocating temporary memory for sparts");
+
+          /* Collect the particles we want to write */
+          io_collect_sparts_to_write(sparts, sparts_written, Nstars,
+                                     Nstars_written);
+
+          /* Select the fields to write */
+          num_fields += stars_write_index(sparts_written, list);
+        }
         break;
 
       default:
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
-    /* Write everything */
-    for (int i = 0; i < num_fields; ++i)
-      writeArray(e, h_grp, fileName, NULL, partTypeGroupName, list[i], N,
-                 internal_units, snapshot_units);
-
-    /* Free temporary array */
-    if (dmparts) {
-      free(dmparts);
-      dmparts = NULL;
+    if (num_fields != 2) {
+      error(
+          "The code expects only two fields per particle type for the logger");
     }
 
-    /* Close particle group */
-    H5Gclose(h_grp);
+    /* Write ids */
+    writeIndexArray(e, f, list, num_fields, N);
+
+    /* Free temporary arrays */
+    if (parts_written) swift_free("parts_written", parts_written);
+    if (xparts_written) swift_free("xparts_written", xparts_written);
+    if (gparts_written) swift_free("gparts_written", gparts_written);
+    if (gpart_group_data_written)
+      swift_free("gpart_group_written", gpart_group_data_written);
+    if (sparts_written) swift_free("sparts_written", sparts_written);
+    if (bparts_written) swift_free("bparts_written", bparts_written);
   }
 
-  /* message("Done writing particles..."); */
-
   /* Close file */
-  H5Fclose(h_file);
+  fclose(f);
 
   ++outputCount;
 }
 
-#endif /* WITH_LOGGER && HAVE_HDF5 && !WITH_MPI */
+/**
+ * @brief Write the parameters into a yaml file.
+ *
+ * @param log The #logger_writer.
+ * @param e The #engine.
+ */
+void logger_write_description(struct logger_writer* log, struct engine* e) {
+  /* const struct unit_system *internal_units = e->internal_units; */
+  /* const struct unit_system *snapshot_units = e->snapshot_units; */
+
+  /* File name */
+  char fileName[FILENAME_BUFFER_SIZE];
+  snprintf(fileName, FILENAME_BUFFER_SIZE, "%.100s.yml", e->logger->base_name);
+
+  /* Open file */
+  FILE* f = NULL;
+  f = fopen(fileName, "wb");
+
+  if (f == NULL) {
+    error("Failed to open file %s", fileName);
+  }
+
+  /* TODO Write stuff */
+
+  /* Close file */
+  fclose(f);
+}
+
+#endif /* WITH_LOGGER */
diff --git a/src/logger_io.h b/src/logger_io.h
index a424c5c104b9f1090c69f7e0bb37e72635636f82..f1a0415ab6967f04e85375f26dd9b080530cc05e 100644
--- a/src/logger_io.h
+++ b/src/logger_io.h
@@ -30,34 +30,77 @@
 #include "part.h"
 #include "units.h"
 
-void write_index_single(struct engine* e, const char* baseName,
-                        const struct unit_system* internal_units,
-                        const struct unit_system* snapshot_units);
+void logger_write_index_file(struct logger_writer* log, struct engine* e);
+void logger_write_description(struct logger_writer* log, struct engine* e);
 
 /**
  * @brief Specifies which particle fields to write to a dataset
  *
  * @param parts The particle array.
- * @param list The list of i/o properties to write.
- * @param num_fields The number of i/o fields to write.
+ * @param xparts The extra particle array.
+ * @param list (out) The parameters to write.
  *
  * In this version, we only want the ids and the offset.
  */
-__attribute__((always_inline)) INLINE static void hydro_write_index(
-    const struct part* parts, const struct xpart* xparts, struct io_props* list,
-    int* num_fields) {
+__attribute__((always_inline)) INLINE static int hydro_write_index(
+    const struct part* parts, const struct xpart* xparts,
+    struct io_props* list) {
 
-  *num_fields = 2;
+  /* List what we want to write */
+  list[0] =
+      io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f,
+                           parts, id, "Field not used");
+  list[1] =
+      io_make_output_field("Offset", UINT64, 1, UNIT_CONV_NO_UNITS, 0.f, xparts,
+                           logger_data.last_offset, "Field not used");
+
+  return 2;
+}
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param gparts The gparticle array.
+ * @param list (out) The parameters to write.
+ *
+ * In this version, we only want the ids and the offset.
+ */
+__attribute__((always_inline)) INLINE static int darkmatter_write_index(
+    const struct gpart* gparts, struct io_props* list) {
 
   /* List what we want to write */
   list[0] =
       io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f,
-                           parts, id, "will be erased");
+                           gparts, id_or_neg_offset, "Field not used");
+  list[1] =
+      io_make_output_field("Offset", UINT64, 1, UNIT_CONV_NO_UNITS, 0.f, gparts,
+                           logger_data.last_offset, "Field not used");
 
+  return 2;
+}
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param sparts The sparticle array.
+ * @param list (out) The parameters to write.
+ *
+ * In this version, we only want the ids and the offset.
+ */
+__attribute__((always_inline)) INLINE static int stars_write_index(
+    const struct spart* sparts, struct io_props* list) {
+
+  /* List what we want to write */
+  list[0] =
+      io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f,
+                           sparts, id, "Field not used");
   list[1] =
-      io_make_output_field("Offset", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f,
-                           xparts, logger_data.last_offset, "will be erased");
+      io_make_output_field("Offset", UINT64, 1, UNIT_CONV_NO_UNITS, 0.f, sparts,
+                           logger_data.last_offset, "Field not used");
+
+  return 2;
 }
+
 #endif
 
 #endif /* SWIFT_LOGGER_IO_H */
diff --git a/src/runner_others.c b/src/runner_others.c
index 9c111a7d129c5755982e5a08ce00f6d2078fa5bc..60607a1ef07d828d4d70cdc15bc0d722d04b9c9e 100644
--- a/src/runner_others.c
+++ b/src/runner_others.c
@@ -298,6 +298,23 @@ void runner_do_star_formation(struct runner *r, struct cell *c, int timer) {
           if (star_formation_should_convert_to_star(p, xp, sf_props, e,
                                                     dt_star)) {
 
+#ifdef WITH_LOGGER
+            /* Write the particle */
+            /* Logs all the fields request by the user */
+            // TODO select only the requested fields
+            logger_log_part(e->logger, p,
+                            logger_mask_data[logger_x].mask |
+                                logger_mask_data[logger_v].mask |
+                                logger_mask_data[logger_a].mask |
+                                logger_mask_data[logger_u].mask |
+                                logger_mask_data[logger_h].mask |
+                                logger_mask_data[logger_rho].mask |
+                                logger_mask_data[logger_consts].mask |
+                                logger_mask_data[logger_special_flags].mask,
+                            &xp->logger_data.last_offset,
+                            /* special flags */ swift_type_stars);
+#endif
+
             /* Convert the gas particle to a star particle */
             struct spart *sp = cell_convert_part_to_spart(e, c, p, xp);
 
@@ -314,6 +331,22 @@ void runner_do_star_formation(struct runner *r, struct cell *c, int timer) {
 
               /* Update the Star formation history */
               star_formation_logger_log_new_spart(sp, &c->stars.sfh);
+
+#ifdef WITH_LOGGER
+              /* Copy the properties back to the stellar particle */
+              sp->logger_data = xp->logger_data;
+
+              /* Write the s-particle */
+              logger_log_spart(e->logger, sp,
+                               logger_mask_data[logger_x].mask |
+                                   logger_mask_data[logger_v].mask |
+                                   logger_mask_data[logger_consts].mask,
+                               &sp->logger_data.last_offset,
+                               /* special flags */ 0);
+
+              /* Set counter back to zero */
+              sp->logger_data.steps_since_last_output = 0;
+#endif
             }
           }
 
@@ -545,10 +578,16 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
   const struct engine *e = r->e;
   struct part *restrict parts = c->hydro.parts;
   struct xpart *restrict xparts = c->hydro.xparts;
+  struct gpart *restrict gparts = c->grav.parts;
+  struct spart *restrict sparts = c->stars.parts;
   const int count = c->hydro.count;
+  const int gcount = c->grav.count;
+  const int scount = c->stars.count;
 
   /* Anything to do here? */
-  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e)) return;
+  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e) &&
+      !cell_is_active_stars(c, e))
+    return;
 
   /* Recurse? Avoid spending too much time in useless cells. */
   if (c->split) {
@@ -564,8 +603,6 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
       struct xpart *restrict xp = &xparts[k];
 
       /* If particle needs to be log */
-      /* This is the same function than part_is_active, except for
-       * debugging checks */
       if (part_is_active(p, e)) {
 
         if (logger_should_write(&xp->logger_data, e->logger)) {
@@ -579,7 +616,8 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
                               logger_mask_data[logger_h].mask |
                               logger_mask_data[logger_rho].mask |
                               logger_mask_data[logger_consts].mask,
-                          &xp->logger_data.last_offset);
+                          &xp->logger_data.last_offset,
+                          /* special flags */ 0);
 
           /* Set counter back to zero */
           xp->logger_data.steps_since_last_output = 0;
@@ -588,11 +626,65 @@ void runner_do_logger(struct runner *r, struct cell *c, int timer) {
           xp->logger_data.steps_since_last_output += 1;
       }
     }
-  }
 
-  if (c->grav.count > 0) error("gparts not implemented");
+    /* Loop over the gparts in this cell. */
+    for (int k = 0; k < gcount; k++) {
+
+      /* Get a handle on the part. */
+      struct gpart *restrict gp = &gparts[k];
+
+      /* Write only the dark matter particles */
+      if (gp->type != swift_type_dark_matter) continue;
+
+      /* If particle needs to be log */
+      if (gpart_is_active(gp, e)) {
+
+        if (logger_should_write(&gp->logger_data, e->logger)) {
+          /* Write particle */
+          /* Currently writing everything, should adapt it through time */
+          logger_log_gpart(e->logger, gp,
+                           logger_mask_data[logger_x].mask |
+                               logger_mask_data[logger_v].mask |
+                               logger_mask_data[logger_a].mask |
+                               logger_mask_data[logger_consts].mask,
+                           &gp->logger_data.last_offset,
+                           /* Special flags */ 0);
+
+          /* Set counter back to zero */
+          gp->logger_data.steps_since_last_output = 0;
+        } else
+          /* Update counter */
+          gp->logger_data.steps_since_last_output += 1;
+      }
+    }
+
+    /* Loop over the sparts in this cell. */
+    for (int k = 0; k < scount; k++) {
 
-  if (c->stars.count > 0) error("sparts not implemented");
+      /* Get a handle on the part. */
+      struct spart *restrict sp = &sparts[k];
+
+      /* If particle needs to be log */
+      if (spart_is_active(sp, e)) {
+
+        if (logger_should_write(&sp->logger_data, e->logger)) {
+          /* Write particle */
+          /* Currently writing everything, should adapt it through time */
+          logger_log_spart(e->logger, sp,
+                           logger_mask_data[logger_x].mask |
+                               logger_mask_data[logger_v].mask |
+                               logger_mask_data[logger_consts].mask,
+                           &sp->logger_data.last_offset,
+                           /* Special flags */ 0);
+
+          /* Set counter back to zero */
+          sp->logger_data.steps_since_last_output = 0;
+        } else
+          /* Update counter */
+          sp->logger_data.steps_since_last_output += 1;
+      }
+    }
+  }
 
   if (timer) TIMER_TOC(timer_logger);
 
diff --git a/src/runner_sort.c b/src/runner_sort.c
index 730e61825598983429616800739904bb6ba93ab2..21626d2a6e1b1e208ff3889b7b09b454eea4bd96 100644
--- a/src/runner_sort.c
+++ b/src/runner_sort.c
@@ -68,14 +68,21 @@ void runner_do_stars_resort(struct runner *r, struct cell *c, const int timer) {
  * @param N The number of entries.
  */
 void runner_do_sort_ascending(struct sort_entry *sort, int N) {
+  const int stack_size = 10;
 
   struct {
     short int lo, hi;
-  } qstack[10];
+  } qstack[stack_size];
   int qpos, i, j, lo, hi, imin;
   struct sort_entry temp;
   float pivot;
 
+  if (N >= (1LL << stack_size)) {
+    error(
+        "The stack size for sorting is too small. "
+        "Either increase it or reduce the number of parts per cell.");
+  }
+
   /* Sort parts in cell_i in decreasing order with quicksort */
   qstack[0].lo = 0;
   qstack[0].hi = N - 1;
@@ -84,11 +91,18 @@ void runner_do_sort_ascending(struct sort_entry *sort, int N) {
     lo = qstack[qpos].lo;
     hi = qstack[qpos].hi;
     qpos -= 1;
+    /* Do we have a low number of elements to sort? */
     if (hi - lo < 15) {
+      /* Sort the last elements. */
       for (i = lo; i < hi; i++) {
         imin = i;
-        for (j = i + 1; j <= hi; j++)
-          if (sort[j].d < sort[imin].d) imin = j;
+        /* Find the minimal value. */
+        for (j = i + 1; j <= hi; j++) {
+          if (sort[j].d < sort[imin].d) {
+            imin = j;
+          }
+        }
+        /* Swap the elements if a smaller element exists. */
         if (imin != i) {
           temp = sort[imin];
           sort[imin] = sort[i];
@@ -96,14 +110,21 @@ void runner_do_sort_ascending(struct sort_entry *sort, int N) {
         }
       }
     } else {
+      /* Select a pivot */
       pivot = sort[(lo + hi) / 2].d;
       i = lo;
       j = hi;
+      /* Ensure that the elements before/after the pivot
+         are smaller/larger than the pivot. */
       while (i <= j) {
+        /* Find the first elements that do not respect
+           the order. */
         while (sort[i].d < pivot) i++;
         while (sort[j].d > pivot) j--;
+        /* Did we get two different elements? */
         if (i <= j) {
           if (i < j) {
+            /* Swap the elements */
             temp = sort[i];
             sort[i] = sort[j];
             sort[j] = temp;
@@ -112,6 +133,9 @@ void runner_do_sort_ascending(struct sort_entry *sort, int N) {
           j -= 1;
         }
       }
+      /* Add the next operations to the stack.
+       * The order is important: it keeps the required stack depth small.
+       */
       if (j > (lo + hi) / 2) {
         if (lo < j) {
           qpos += 1;
diff --git a/src/space.c b/src/space.c
index 64eef6864d361a3a41b3563103d8953749795dde..8e9b4543bd756b82f94479077e0f039fe061609d 100644
--- a/src/space.c
+++ b/src/space.c
@@ -4338,6 +4338,10 @@ void space_first_init_gparts_mapper(void *restrict map_data, int count,
 
     gravity_first_init_gpart(&gp[k], grav_props);
 
+#ifdef WITH_LOGGER
+    logger_part_data_init(&gp[k].logger_data);
+#endif
+
 #ifdef SWIFT_DEBUG_CHECKS
     /* Initialise the time-integration check variables */
     gp[k].ti_drift = 0;
@@ -4419,6 +4423,10 @@ void space_first_init_sparts_mapper(void *restrict map_data, int count,
 
     stars_first_init_spart(&sp[k], stars_properties);
 
+#ifdef WITH_LOGGER
+    logger_part_data_init(&sp[k].logger_data);
+#endif
+
     /* Also initialise the chemistry */
     chemistry_first_init_spart(chemistry, &sp[k]);
 
diff --git a/src/stars/Default/stars_part.h b/src/stars/Default/stars_part.h
index 05035039d22c7767bde99b5148b49c4b72492f09..29bf1409879cbde54ee75979fa14fad7e2c195cb 100644
--- a/src/stars/Default/stars_part.h
+++ b/src/stars/Default/stars_part.h
@@ -87,6 +87,11 @@ struct spart {
   /*! Chemistry structure */
   struct chemistry_part_data chemistry_data;
 
+#ifdef WITH_LOGGER
+  /*! Additional data for the particle logger */
+  struct logger_part_data logger_data;
+#endif
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/stars/EAGLE/stars_part.h b/src/stars/EAGLE/stars_part.h
index 9114cb9107e1259698c365be82e5318d60d37ac7..94b9315ba77cca700a4bfe5109db2873e678829f 100644
--- a/src/stars/EAGLE/stars_part.h
+++ b/src/stars/EAGLE/stars_part.h
@@ -103,6 +103,11 @@ struct spart {
   /*! Particle time bin */
   timebin_t time_bin;
 
+#ifdef WITH_LOGGER
+  /*! Additional data for the particle logger */
+  struct logger_part_data logger_data;
+#endif
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/tests/testLogger.c b/tests/testLogger.c
index d2c64e7fa3330ebd20cf8abc01a76e1dff08c8fc..5976597054c639a65f827876f1f244bc1a6024ce 100644
--- a/tests/testLogger.c
+++ b/tests/testLogger.c
@@ -51,12 +51,13 @@ void test_log_parts(struct logger_writer *log) {
           logger_mask_data[logger_a].mask | logger_mask_data[logger_u].mask |
           logger_mask_data[logger_h].mask | logger_mask_data[logger_rho].mask |
           logger_mask_data[logger_consts].mask,
-      &offset);
+      &offset, /* special flags */ 0);
   printf("Wrote part at offset %#016zx.\n", offset);
 
   /* Write only the position. */
   p.x[0] = 2.0;
-  logger_log_part(log, &p, logger_mask_data[logger_x].mask, &offset);
+  logger_log_part(log, &p, logger_mask_data[logger_x].mask, &offset,
+                  /* special flags */ 0);
   printf("Wrote part at offset %#016zx.\n", offset);
 
   /* Write the position and velocity. */
@@ -65,7 +66,7 @@ void test_log_parts(struct logger_writer *log) {
   logger_log_part(
       log, &p,
       logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask,
-      &offset);
+      &offset, /* special flags */ 0);
   printf("Wrote part at offset %#016zx.\n", offset);
 
   /* Recover the last part from the dump. */
@@ -126,12 +127,13 @@ void test_log_gparts(struct logger_writer *log) {
       logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask |
           logger_mask_data[logger_a].mask | logger_mask_data[logger_h].mask |
           logger_mask_data[logger_consts].mask,
-      &offset);
+      &offset, /* special flags */ 0);
   printf("Wrote gpart at offset %#016zx.\n", offset);
 
   /* Write only the position. */
   p.x[0] = 2.0;
-  logger_log_gpart(log, &p, logger_mask_data[logger_x].mask, &offset);
+  logger_log_gpart(log, &p, logger_mask_data[logger_x].mask, &offset,
+                   /* special flags */ 0);
   printf("Wrote gpart at offset %#016zx.\n", offset);
 
   /* Write the position and velocity. */
@@ -140,7 +142,7 @@ void test_log_gparts(struct logger_writer *log) {
   logger_log_gpart(
       log, &p,
       logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask,
-      &offset);
+      &offset, /* special flags */ 0);
   printf("Wrote gpart at offset %#016zx.\n", offset);
 
   /* Recover the last part from the dump. */