diff --git a/Jenkinsfile b/Jenkinsfile
index 798c38a6f4885ee6ec61fa0d162497c2016dc773..46e101c6d8d6f6194c63e57de3caff84c179eaed 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -39,6 +39,7 @@ pipeline {
              set -e
 
              module purge
+             module avail
              module load gnu_comp/10.2.0
              module load boost
              module load llvm/10.0.1
@@ -48,10 +49,12 @@ pipeline {
              git clean -fdx
              ./autogen.sh
 
-             ./configure --enable-python
-             make -j 2
-             make check
-             make clean
+             ls /cosma/local/boost/gnu_10.2.0/1_67_0/lib
+             # Python is messy => cannot fail on warning
+             # ./configure --enable-python --enable-compiler-warnings=yes
+             # make -j 2
+             # make check
+             # make clean
              '''
 
              updateGitlabCommitStatus name: 'Python', state: 'success'
diff --git a/configure.ac b/configure.ac
index bacfc4a3cd83c95e6c0d4958bc153daa5f79a69b..8f121395217431cc1731a37bc92c2bfa34de99c0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -55,6 +55,9 @@ AC_CONFIG_MACRO_DIR([m4])
 # Stop default CXXFLAGS from anyone except the environment.
 : ${CXXFLAGS=""}
 
+# Use C++11
+CXXFLAGS="$CXXFLAGS -std=c++11"
+
 # Generate header file.
 AM_CONFIG_HEADER(config.hpp)
 
@@ -147,6 +150,7 @@ if test "$enable_san" = "yes"; then
       AC_MSG_WARN([Compiler does not support address sanitizer option])
    fi
 fi
+AM_CONDITIONAL([HAVE_SANITIZER], [test x$enable_san = xyes])
 
 # Add the undefined sanitizer option to flags. Only useful for GCC
 # version 4.9 and later and clang to detected undefined code behaviour
@@ -219,9 +223,16 @@ if test "$enable_python" != "no"; then
    AC_DEFINE([HAVE_PYTHON],1,[Python appears to be present.])
 
    # Now add boost
-   AX_BOOST_BASE([1.58.0], [],
-       [AC_MSG_ERROR(something is wrong with teh boost library!)])
+   AX_BOOST_BASE([1.63.0], [],
+       [AC_MSG_ERROR(something is wrong with the boost library!)])
    AX_BOOST_PYTHON
+
+   # Add the boost numpy library (its name depends on the python version)
+   PYTHON_VERSION=`$PYTHON -c "import sys; \
+                           ver = sys.version[[:3]]; \
+                           print(ver.replace('.', ''))"`
+   BOOST_PYTHON_LIB="$BOOST_PYTHON_LIB -lboost_numpy$PYTHON_VERSION"
+
 fi
 AC_SUBST([PYTHON_CPPFLAGS])
 AC_SUBST([PYTHON_LIBS])
diff --git a/doc/source/QuickStart/using_python.rst b/doc/source/QuickStart/using_python.rst
index 60f9604e2b0a2c41033028e6344b2fde1f999587..e0ef0a530a76eb871beaff8aa615c64f3bd3d0c5 100644
--- a/doc/source/QuickStart/using_python.rst
+++ b/doc/source/QuickStart/using_python.rst
@@ -43,23 +43,21 @@ Now the Reader can provide some information about the simulation:
 .. code-block:: python
 
    reader.get_list_fields(part_type=None)
-   reader.gas.get_list_fields()
-
-The two calls get the fields that exists for the particles.
-The only difference is that the second call will look at only the fields
-for the gas particles.
-The parameter ``part_type`` can be used to get the same result by simply providing 0.
-In SWIFT, this type corresponds to the gas.
-Multiple types of particles can be provided through a list.
+
+The parameter ``part_type`` can be used to get the fields of a given particle type.
+It can be an int (e.g. 0 for the gas) or a list of ints (e.g. [0, 1] for the gas and dark matter).
 In such cases, the fields will be restricted to the common ones.
-We do not only provide ``reader.gas`` but also for other type of particles
-(``dark_matter``, ``dark_matter_background``, ``sinks``, ``stars`` and ``black_holes``).
+We provide enums in ``csds.particle_types`` to simplify the handling of particle types:
+``gas``, ``dark_matter``, ``dark_matter_background``, ``sinks``, ``stars``, ``black_holes``, ``neutrino``
+and ``count``.
 
 .. code-block:: python
 
-   reader.get_time_limits()
+   t0, t1 = reader.get_time_limits()
+   t0 = reader.get_time_begin()
+   t1 = reader.get_time_end()
 
-This function returns the minimal and maximal time contained within the simulation.
+These functions return the minimal and maximal times contained within the simulation.
 Any requested time should stay within the two limits (boundary included).
 
 .. code-block:: python
@@ -74,7 +72,6 @@ Depending on the parameters, this function will have different optimizations:
 .. code-block:: python
 
    reader.get_data(fields, time=0, filter_by_ids=None, part_type=None)
-   reader.gas.get_data(fields)
 
 It is mandatory to provide the required fields. If you wish to get all
 of them, simply provide the output of ``reader.get_list_fields``.
@@ -88,6 +85,8 @@ that should be within the limits provided by ``get_time_limits``.
 ``filter_by_ids`` allows to pick only a subset of the particles.
 This parameter takes a list containing an array for each type of particle (or None).
 The list should have the same size than the number of particle types in the CSDS.
+The list of IDs will be modified by the CSDS so that it contains only the particles actually present
+within the simulation.
 The last parameter allows to read only some type of particles (e.g. ``part_type=0`` or
 ``part_type=[0, 1]``).
 The two last parameters cannot be used together.
diff --git a/examples/reader_example.py b/examples/reader_example.py
index ca36f1d88724c9757220b8ca548b4544d12e57d6..6fcc1a0491b01c86df3582ce439b951e506ae13f 100644
--- a/examples/reader_example.py
+++ b/examples/reader_example.py
@@ -73,11 +73,8 @@ for f in args.files:
 
         # Ensure that the fields are present
         fields = ["Coordinates", "InternalEnergies", "ParticleIDs"]
-        missing = set(fields).difference(set(reader.gas.get_list_fields()))
-
-        # Could be also be called like this:
-        # reader.get_list_fields(part_type=0)
-        # or reader.get_list_fields(part_type=[0, 1, 4]) for multiple types.
+        missing = set(fields).difference(
+            set(reader.get_list_fields(part_type=csds.particle_types.gas)))
 
         if missing:
             raise Exception("Fields %s not found in the logfile." % missing)
@@ -91,9 +88,8 @@ for f in args.files:
 
         # Read all the particles
         out = reader.get_data(
-            fields=fields, time=args.time - dt, part_type=csds.gas)
-        # Could be also called like this
-        # reader.gas.get_data(fields=fields, time=args.time)
+            fields=fields, time=args.time - dt,
+            part_type=csds.particle_types.gas)
 
         # Update the particles
         out = reader.update_particles(fields=fields, time=args.time)
@@ -103,15 +99,12 @@ for f in args.files:
         gas_ids = gas_ids[:len(gas_ids)//2]
 
         # Read from the ids
-        # As we are filtering by particle ids, the field "ParticleIDs" is required
-        # in order to verify the particle obtained.
-        out = reader.gas.get_data(
-            fields=fields, time=args.time, filter_by_ids=gas_ids)
-        # Could be also called like this:
-        # ids = [None] * 6
-        # ids[csds.gas] = gas_ids
-        # out = reader.get_data(
-        #     fields=fields, time=args.time, filter_by_ids=ids)
+        # As we are filtering by particle ids, the field "ParticleIDs" is
+        # required in order to verify the particle obtained.
+        ids = [None] * csds.particle_types.count
+        ids[csds.particle_types.gas] = gas_ids
+        out = reader.get_data(
+            fields=fields, time=args.time, filter_by_ids=ids)
 
         # Print the missing ids
         gas_ids, ids_found = set(gas_ids), set(out["ParticleIDs"])
diff --git a/src/Makefile.am b/src/Makefile.am
index 4746e93890322ad394fd38fda3c1d0b8b3ce4247..72c7c05dc74e77891954f848571adb8d85ce401c 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -28,7 +28,7 @@ include_HEADERS += csds_reader.hpp csds_logfile.hpp csds_index.hpp csds_quick_so
 include_HEADERS += csds_interpolation.hpp csds_parameters.hpp csds_cosmology.hpp csds_fields.hpp
 include_HEADERS += csds_hashmap.hpp csds_error.hpp csds_definitions.hpp
 include_HEADERS += csds_parser.hpp csds_inline.hpp csds_part_type.hpp csds_openmp.hpp
-include_HEADERS += csds_logfile_writer.h csds_version.hpp csds_cache.hpp
+include_HEADERS += csds_logfile_writer.h csds_version.hpp csds_cache.hpp csds_array.hpp
 
 # Source files for the reader
 AM_SOURCES = csds_header.cpp csds_mapped_file.cpp csds_time.cpp csds_tools.cpp csds_reader.cpp
@@ -38,6 +38,7 @@ AM_SOURCES += csds_parser.cpp csds_part_type.cpp
 
 if HAVEPYTHON
 AM_SOURCES += csds_python_wrapper.cpp
+include_HEADERS += csds_python_wrapper.hpp
 endif
 
 # Source files for writer
@@ -54,7 +55,7 @@ libcsds_writer_la_CXXFLAGS = $(AM_CXXFLAGS)
 libcsds_writer_la_LDFLAGS = $(AM_LDFLAGS)
 
 # Generate the version file
-csds_version.hpp: csds_version.hpp.in Makefile $(AM_SOURCES_WRITER) $(AM_SOURCES) $(include_HEADERS)
+csds_version.hpp: csds_version.hpp.in Makefile
 	  sed -e "s,@major_version \@,$(MAJOR_VERSION)," \
         -e "s,@minor_version \@,$(MINOR_VERSION)," $< > csds_version.hpp
 
diff --git a/src/csds_array.hpp b/src/csds_array.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5254ef71ecad790fc40cc1aecd95a47829334e9f
--- /dev/null
+++ b/src/csds_array.hpp
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ * This file is part of CSDS.
+ * Copyright (c) 2021 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef CSDS_ARRAY_HPP
+#define CSDS_ARRAY_HPP
+
+/* Include configuration */
+#include "config.hpp"
+
+#ifdef HAVE_PYTHON
+#undef error
+#include <boost/python/numpy.hpp>
+namespace bp = boost::python;
+#endif
+
+/* Local headers */
+#include "csds_error.hpp"
+#include "csds_fields.hpp"
+
+/**
+ * @brief An abstract array for the output data.
+ *
+ * This array can hand its buffer over to numpy without copying (see ToNumpy()).
+ */
+class CsdsArray {
+
+ public:
+  CsdsArray(std::size_t size, enum field_enum field)
+      : mStolen(false), mSize(size), mField(field) {
+    mElementSize = field_enum_get_size(field);
+    mpData = new char[size * mElementSize];
+  }
+
+  /* The arrays are expected to be large => no copy constructor */
+  CsdsArray(CsdsArray const &) = delete;
+  CsdsArray(CsdsArray &&other) :
+    mpData(other.mpData), mElementSize(other.mElementSize),
+    mStolen(other.mStolen), mSize(other.mSize),
+    mField(other.mField) {
+    // Prevent `other` from freeing the buffer when it is destroyed.
+    other.mStolen = true;
+  };
+  void operator=(CsdsArray const &) = delete;
+
+  ~CsdsArray() {
+    if (!mStolen) {
+      delete[] mpData;
+    }
+  };
+
+  /**
+   * @brief Access an element
+   */
+  char *operator[](std::size_t i) { return mpData + i * mElementSize; }
+
+#ifdef HAVE_PYTHON
+  bp::numpy::ndarray ToNumpy(bool stealing_buffer) {
+    mStolen = stealing_buffer;
+    bp::tuple shape = bp::make_tuple(mSize);
+    bp::tuple stride = bp::make_tuple(mElementSize);
+    bp::numpy::dtype dt = field_enum_get_python_dtype(mField);
+    bp::numpy::ndarray out =
+        bp::numpy::from_data(mpData, dt, shape, stride, bp::object());
+    return out;
+  };
+#endif
+
+ protected:
+  /*! The array */
+  char *mpData;
+  /*! The size of each element */
+  int mElementSize;
+  /*! Whether the buffer was stolen (e.g. handed over to numpy) */
+  bool mStolen;
+  /*! The size of the array */
+  std::size_t mSize;
+  /*! The field contained within the array */
+  enum field_enum mField;
+};
+
+#endif  // CSDS_ARRAY_HPP
diff --git a/src/csds_cache.hpp b/src/csds_cache.hpp
index b5e29301b72c0a01c26f4d69af8e4fdb0128121a..6b01edde384db2812dc0f2c20cb28216464efa5e 100644
--- a/src/csds_cache.hpp
+++ b/src/csds_cache.hpp
@@ -130,18 +130,17 @@ INLINE static void csds_cache_empty_init(struct csds_cache *cache) {
  * @param cache The #csds_cache
  * @param n_parts The number of particles to allocate
  * @param fields The fields that will be read
- * @param n_fields The size of fields
  * @param over_allocation The over allocation (required for creation / deletion
  * of particles)
  * @param h The logfile's header.
  * @param init_size The initial size of the arrays if no particles are present
  * @param time_offset The time offset of the current reading
  */
-INLINE static void csds_cache_init(struct csds_cache *cache,
-                                   const uint64_t *number_particles,
-                                   const enum field_enum *fields, int n_fields,
-                                   float over_allocation, struct header *h,
-                                   int64_t *init_size, size_t time_offset) {
+INLINE static void csds_cache_init(
+    struct csds_cache *cache,
+    const std::array<uint64_t, csds_type_count> &number_particles,
+    const std::vector<field_enum> &fields, float over_allocation,
+    struct header *h, int64_t *init_size, size_t time_offset) {
 
   cache->last_time_offset = time_offset;
 
@@ -154,6 +153,7 @@ INLINE static void csds_cache_init(struct csds_cache *cache,
   }
 
   /* Set the fields */
+  const int n_fields = fields.size();
   cache->n_fields = n_fields;
   for (int part = 0; part < csds_type_count; part++) {
 
@@ -166,7 +166,8 @@ INLINE static void csds_cache_init(struct csds_cache *cache,
     /* Allocate the fields */
     cache->fields[part] = (struct field_information *)malloc(
         n_fields * sizeof(struct field_information));
-    if (cache->fields[part] == NULL) error("Failed to allocate the fields");
+    if (cache->fields[part] == NULL)
+      csds_error("Failed to allocate the fields");
 
     /* Copy the header's field into the cache */
     for (int field = 0; field < n_fields; field++) {
@@ -179,7 +180,7 @@ INLINE static void csds_cache_init(struct csds_cache *cache,
   /* Allocate the offsets */
   cache->offset = (int *)malloc(n_fields * sizeof(int));
   if (cache->offset == NULL) {
-    error("Failed to allocate the offset");
+    csds_error("Failed to allocate the offset");
   }
 
   /* Get the size of all fields together and set the offsets */
@@ -220,7 +221,7 @@ INLINE static void csds_cache_init(struct csds_cache *cache,
       if (cache->last.fields[i] == NULL || cache->last.time[i] == NULL ||
           cache->next.fields[i] == NULL || cache->next.time[i] == NULL ||
           cache->next.offset[i] == NULL) {
-        error("Failed to allocate the cache memory");
+        csds_error("Failed to allocate the cache memory");
       }
     } else {
       cache->last.fields[i] = NULL;
@@ -277,8 +278,8 @@ INLINE static int csds_cache_get_field_index(const struct csds_cache *cache,
 
   /* Check that we found a field */
   if (field_index < 0) {
-    error("The field not found in the cache" <<
-          field_enum_get_name(field));
+    csds_error("Field not found in the cache: "
+               << field_enum_get_name(field));
   }
   return field_index;
 }
@@ -319,7 +320,7 @@ INLINE static int64_t csds_cache_save(struct csds_cache *cache, int64_t index,
 
   /* Ensures that we have enough place */
   if (index >= cache->capacity[type]) {
-    error("The cache is too small. Please increase Cache:OverAllocation.");
+    csds_error("The cache is too small. Please increase Cache:OverAllocation.");
   }
 
   /* Get the index of the field */
@@ -330,7 +331,7 @@ INLINE static int64_t csds_cache_save(struct csds_cache *cache, int64_t index,
     if (time_before != cache->last.time[type][index] ||
         time_after != cache->next.time[type][index] ||
         offset_next != cache->next.offset[type][index]) {
-      error("The data within the cache are not compatible");
+      csds_error("The data within the cache are not compatible");
     }
   }
 
@@ -353,7 +354,7 @@ INLINE static int64_t csds_cache_save(struct csds_cache *cache, int64_t index,
   enum field_enum first = field_enum_get_first_derivative(field);
   if (first != field_enum_none) {
     if (before->first_deriv == NULL || after->first_deriv == NULL) {
-      error("First derivative is missing");
+      csds_error("First derivative is missing");
     }
     memcpy(cache->last.fields[type] + shift, before->first_deriv,
            field_enum_get_size(first));
@@ -366,7 +367,7 @@ INLINE static int64_t csds_cache_save(struct csds_cache *cache, int64_t index,
   enum field_enum second = field_enum_get_second_derivative(field);
   if (second != field_enum_none) {
     if (before->second_deriv == NULL || after->second_deriv == NULL) {
-      error("Second derivative is missing");
+      csds_error("Second derivative is missing");
     }
     memcpy(cache->last.fields[type] + shift, before->second_deriv,
            field_enum_get_size(second));
diff --git a/src/csds_cosmology.cpp b/src/csds_cosmology.cpp
index e8da39e7c55de0eb8bdb3a12aaf49a37ff90f3c6..9a3f7845d8916ee1d8bf0999bd35b36f0f8186b8 100644
--- a/src/csds_cosmology.cpp
+++ b/src/csds_cosmology.cpp
@@ -56,7 +56,7 @@ void csds_cosmology_init(struct csds_cosmology *cosmo,
       parser_get_opt_param_double(params, "Cosmology:Omega_nu_0", 0);
 
   if (Omega_nu_0 != 0)
-    error("Neutrinos are not implemented in the cosmology.");
+    csds_error("Neutrinos are not implemented in the cosmology.");
 
   /* Read w_0 */
   cosmo->w_0 = parser_get_param_double(params, "Cosmology:w_0");
diff --git a/src/csds_error.hpp b/src/csds_error.hpp
index f9576d4755ed4ddc60141855c00c3c2707c0b67a..8cbffdad7b565c1bdec8e1605937c95570a2ef27 100644
--- a/src/csds_error.hpp
+++ b/src/csds_error.hpp
@@ -43,11 +43,11 @@
 
 using namespace std::chrono;
 const high_resolution_clock::time_point INITIAL_TIME =
-  high_resolution_clock::now();
+    high_resolution_clock::now();
 
-INLINE static int GetDeltaTime(
-    const high_resolution_clock::time_point init,
-    const high_resolution_clock::time_point now = high_resolution_clock::now()) {
+INLINE static int GetDeltaTime(const high_resolution_clock::time_point init,
+                               const high_resolution_clock::time_point now =
+                                   high_resolution_clock::now()) {
   duration<double, std::milli> span = now - init;
   return span.count();
 }
@@ -55,30 +55,29 @@ INLINE static int GetDeltaTime(
 /**
  * @brief Error macro. Prints the message given in argument and aborts.
  */
-#define error(input)                                                    \
-  {                                                                     \
-    float delta_time_error = GetDeltaTime(INITIAL_TIME);                \
-    delta_time_error /= 1000; /* into seconds */                        \
-                                                                        \
-    std::cout << std::flush;                                            \
-    std::cerr << "["<< std::setfill('0') << std::setw(6)                \
-              << std::setprecision(1) << delta_time_error << "] "       \
-              << __FILE__ << ":" << __FUNCTION__ << ":"                 \
-              << __LINE__ << ": " << input                              \
-              << std::endl;                                             \
-    csds_abort(1);                                                      \
+#define csds_error(input)                                                     \
+  {                                                                           \
+    float delta_time_error = GetDeltaTime(INITIAL_TIME);                      \
+    delta_time_error /= 1000; /* into seconds */                              \
+                                                                              \
+    std::cout << std::flush;                                                  \
+    std::cerr << "[" << std::setfill('0') << std::setw(6)                     \
+              << std::setprecision(1) << delta_time_error << "] " << __FILE__ \
+              << ":" << __FUNCTION__ << ":" << __LINE__ << ": " << input      \
+              << std::endl;                                                   \
+    csds_abort(1);                                                            \
   }
 
 /**
  * @brief Macro to print a localized message with variable arguments.
  */
-#define message(input)                                                  \
-  {                                                                     \
-    float delta_time_error = GetDeltaTime(INITIAL_TIME);                \
-    delta_time_error /= 1000; /* into seconds */                        \
-    std::cout << "["<< std::setfill('0') << std::setw(6)                \
-              << std::setprecision(1) << delta_time_error << "] "       \
-              << __FUNCTION__ << ": " << input << std::endl;            \
+#define message(input)                                            \
+  {                                                               \
+    float delta_time_error = GetDeltaTime(INITIAL_TIME);          \
+    delta_time_error /= 1000; /* into seconds */                  \
+    std::cout << "[" << std::setfill('0') << std::setw(6)         \
+              << std::setprecision(1) << delta_time_error << "] " \
+              << __FUNCTION__ << ": " << input << std::endl;      \
   }
 
 #endif  // CSDS_ERROR_H
diff --git a/src/csds_fields.hpp b/src/csds_fields.hpp
index 90c737d1da05ebb0691622e6166713b8e7e990e9..2973a847c2d76977e405a7bd375aa3073cbf3c06 100644
--- a/src/csds_fields.hpp
+++ b/src/csds_fields.hpp
@@ -22,8 +22,18 @@
 #ifndef CSDS_FIELDS_H
 #define CSDS_FIELDS_H
 
+/* Configuration options */
+#include "config.hpp"
+
+/* Standard headers */
+#ifdef HAVE_PYTHON
+#include <boost/python.hpp>
+#include <boost/python/numpy.hpp>
+#endif
+
 /* Include CSDS headers */
 #include "csds_definitions.hpp"
+#include "csds_error.hpp"
 #include "csds_tools.hpp"
 
 #ifndef GEAR_CHEMISTRY_ELEMENT_COUNT
@@ -97,7 +107,7 @@ INLINE static enum field_enum field_enum_get(const char *name) {
     if (strcmp(name, fields_names[i]) == 0) return (enum field_enum)i;
   }
 
-  error("Field not found: " << name);
+  csds_error("Field not found: " << name);
 }
 
 /**
@@ -177,11 +187,10 @@ INLINE static unsigned int field_enum_get_size(enum field_enum field) {
       return GEAR_CHEMISTRY_ELEMENT_COUNT * sizeof(double);
 
     default:
-      error("The field " <<  fields_names[field] << "is not implemented");
+      csds_error("The field " << fields_names[field] << " is not implemented");
   }
 }
 
-
 struct mask_data {
   /* Number of bytes for a mask. */
   int size;
@@ -230,8 +239,9 @@ INLINE static void field_information_set_derivative(struct single_field *der,
     der->mask = data->mask;
     der->size = field_enum_get_size(der->field);
     if (der->size != data->size) {
-      error("Inconsistent size for " << fields_names[field] <<
-            " (" << der->size << " != " << data->size << ")");
+      csds_error("Inconsistent size for " << fields_names[field] << " ("
+                                          << der->size << " != " << data->size
+                                          << ")");
     }
   }
 }
@@ -255,8 +265,9 @@ INLINE static void field_information_init(struct field_information *info,
   info->field.mask = data->mask;
   info->field.size = field_enum_get_size(field);
   if (info->field.size != data->size) {
-    error("Inconsistent size for " << fields_names[field] << " ("
-          << info->field.size << " != " << data->size << ")");
+    csds_error("Inconsistent size for " << fields_names[field] << " ("
+                                        << info->field.size
+                                        << " != " << data->size << ")");
   }
   info->field.position = -1;
 
@@ -306,4 +317,105 @@ INLINE static void field_information_set_positions(
   }
 }
 
+#ifdef HAVE_PYTHON
+#define PYTHON_SET_FIELD(name, element_size, format) \
+  {                                                  \
+    names.append(name);                              \
+    formats.append(format);                          \
+    offsets.append(current_offset);                  \
+    current_offset += element_size;                  \
+  }
+
+namespace bp = boost::python;
+/**
+ * @brief Set a #csds_python_field for the field.
+ */
+INLINE static bp::numpy::dtype field_enum_get_python_dtype(
+    enum field_enum field) {
+
+  /* Initialize */
+  bp::list formats;
+  bp::list names;
+  bp::list offsets;
+  int current_offset = 0;
+
+  switch (field) {
+    case field_enum_coordinates:
+      formats.append("3f8");
+      break;
+
+    case field_enum_velocities:
+    case field_enum_accelerations:
+      formats.append("3f4");
+      break;
+
+    case field_enum_masses:
+    case field_enum_smoothing_lengths:
+    case field_enum_internal_energies:
+    case field_enum_entropies:
+    case field_enum_birth_time:
+    case field_enum_densities:
+      formats.append("f4");
+      break;
+
+    case field_enum_particles_ids:
+      formats.append("q");
+      break;
+
+    case field_enum_sphenix_secondary_fields: {
+      PYTHON_SET_FIELD("Entropies", sizeof(float), "f4");
+      PYTHON_SET_FIELD("Pressure", sizeof(float), "f4");
+      PYTHON_SET_FIELD("ViscosityParameters", sizeof(float), "f4");
+      PYTHON_SET_FIELD("DiffusionParameters", sizeof(float), "f4");
+      PYTHON_SET_FIELD("LaplacianInternalEnergies", sizeof(float), "f4");
+      PYTHON_SET_FIELD("VelocityDivergences", sizeof(float), "f4");
+      PYTHON_SET_FIELD("VelocityDivergenceTimeDifferentials", sizeof(float),
+                       "f4");
+      break;
+    }
+
+    case field_enum_gear_star_formation: {
+      PYTHON_SET_FIELD("BirthDensities", sizeof(float), "f4");
+      PYTHON_SET_FIELD("BirthMasses", sizeof(float), "f4");
+      PYTHON_SET_FIELD("ProgenitorIDs", sizeof(long long), "q");
+      break;
+    }
+
+    case field_enum_gear_chemistry_part: {
+      PYTHON_SET_FIELD("SmoothedMetalMassFractions",
+                       GEAR_CHEMISTRY_ELEMENT_COUNT * sizeof(double),
+                       std::to_string(GEAR_CHEMISTRY_ELEMENT_COUNT) + "f8");
+      PYTHON_SET_FIELD("MetalMassFractions",
+                       GEAR_CHEMISTRY_ELEMENT_COUNT * sizeof(double),
+                       std::to_string(GEAR_CHEMISTRY_ELEMENT_COUNT) + "f8");
+      break;
+    }
+
+    case field_enum_gear_chemistry_spart:
+      formats.append(std::to_string(GEAR_CHEMISTRY_ELEMENT_COUNT) + "f8");
+      break;
+
+    default:
+      csds_error("The field " << fields_names[field] << " is not implemented");
+  }
+
+  /* Creates the dtype */
+  if (len(formats) == 1) {
+    /* Here we store only a single field */
+    return bp::numpy::dtype(formats[0]);
+  } else {
+    /* Here we store multiple fields */
+    bp::dict out;
+    if (len(names) != 0) {
+      out["names"] = names;
+    }
+    if (len(offsets) != 0) {
+      out["offsets"] = offsets;
+    }
+    out["formats"] = formats;
+    return bp::numpy::dtype(out);
+  }
+}
+#endif
+
 #endif  // CSDS_FIELDS_H
diff --git a/src/csds_hashmap.cpp b/src/csds_hashmap.cpp
index b6c8b3e3c0191595260bacd7a8205202012097dd..a99fcd80a03d85e2c5bc589e039f01072a85ac66 100644
--- a/src/csds_hashmap.cpp
+++ b/src/csds_hashmap.cpp
@@ -155,12 +155,12 @@ static int resize(struct csds_hashmap *map, size_t new_cap) {
  */
 void *csds_hashmap_set(struct csds_hashmap *map, struct index_data *item) {
   if (!item) {
-    error("item is null");
+    csds_error("item is null");
   }
   /* Increase the size of the map if needed */
   if (map->count == map->growat) {
     if (!resize(map, map->nbuckets * 2)) {
-      error("Failed to reallocate memory");
+      csds_error("Failed to reallocate memory");
     }
   }
 
@@ -294,14 +294,14 @@ void csds_hashmap_write(struct csds_hashmap *map, ofstream &f) {
     /* Do only the non-empty buckets */
     if (bucket->dib) {
       count += 1;
-      f.write((const char *) bucket_item(bucket), sizeof(struct index_data));
+      f.write((const char *)bucket_item(bucket), sizeof(struct index_data));
     }
   }
 
   /* Ensure that the correct number of elements
    * have been written. */
   if (count != map->count) {
-    error("Written a wrong number of elements.");
+    csds_error("Written a wrong number of elements.");
   }
 }
 
diff --git a/src/csds_hashmap.hpp b/src/csds_hashmap.hpp
index 1b8a4b704458998d0bd8f8d2edc781004100887f..b766dd2d2c4d4a5216c577e7c567836fc097c080 100644
--- a/src/csds_hashmap.hpp
+++ b/src/csds_hashmap.hpp
@@ -9,8 +9,8 @@
 #ifndef CSDS_HASHMAP_H
 #define CSDS_HASHMAP_H
 
-#include <iostream>
 #include <fstream>
+#include <iostream>
 #include <stddef.h>
 #include <stdint.h>
 #include <stdio.h>
diff --git a/src/csds_header.cpp b/src/csds_header.cpp
index 693f9bb6c1335644489535adbfab4be2a31a1d27..162f4c79514b21988d9d17bdcd310e687c9d928e 100644
--- a/src/csds_header.cpp
+++ b/src/csds_header.cpp
@@ -23,10 +23,10 @@
 #include "csds_reader.hpp"
 #include "csds_tools.hpp"
 
+#include <iomanip>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <iomanip>
 
 /* Name of each offset direction. */
 const char *csds_offset_name[csds_offset_count] = {
@@ -53,19 +53,22 @@ void header_print(const struct header *h) {
       /* Do the field */
       struct field_information *field = &h->fields[type][k];
       const char *name = field_enum_get_name(field->field.field);
-      message(std::setw(15) << name << ":\t mask=" << field->field.mask
+      message(std::setfill(' ')
+              << std::setw(15) << name << ":\t mask=" << field->field.mask
               << "\t size=" << field->field.size);
 
       /* Do the first derivative */
       if (field->first.field != field_enum_none) {
         const char *name_first = field_enum_get_name(field->first.field);
-        message(std::setw(30) << "First derivative: " << name_first);
+        message(std::setfill(' ')
+                << std::setw(30) << "First derivative: " << name_first);
       }
 
       /* Do the second derivative */
       if (field->second.field != field_enum_none) {
         const char *name_second = field_enum_get_name(field->second.field);
-        message(std::setw(30) << "Second derivative: " << name_second);
+        message(std::setfill(' ')
+                << std::setw(30) << "Second derivative: " << name_second);
       }
     }
   }
@@ -99,7 +102,7 @@ void header_change_offset_direction(struct header *h,
   /* Skip file format and version numbers. */
   size_t offset = CSDS_STRING_SIZE + 2 * sizeof(int);
 
-  csds_mapped_file_write_data(&h->log->log, offset, sizeof(unsigned int),
+  csds_mapped_file_write_data(h->log->log, offset, sizeof(unsigned int),
                               &new_value);
 }
 
@@ -120,74 +123,73 @@ void header_read(struct header *h, struct csds_logfile *log, int verbose) {
 
   /* read the file format. */
   char file_format[CSDS_STRING_SIZE];
-  offset = csds_mapped_file_read_data(&log->log, offset, CSDS_STRING_SIZE,
+  offset = csds_mapped_file_read_data(log->log, offset, CSDS_STRING_SIZE,
                                       &file_format);
   if (strcmp(file_format, CSDS_FORMAT_STRING))
-    error("Wrong file format: " << file_format);
+    csds_error("Wrong file format: " << file_format);
 
   /* Read the major version number. */
-  offset = csds_mapped_file_read_data(&log->log, offset, sizeof(int),
+  offset = csds_mapped_file_read_data(log->log, offset, sizeof(int),
                                       &h->major_version);
 
   /* Read the minor version number. */
-  offset = csds_mapped_file_read_data(&log->log, offset, sizeof(int),
+  offset = csds_mapped_file_read_data(log->log, offset, sizeof(int),
                                       &h->minor_version);
 
   /* Check the mask size */
   if (sizeof(mask_type) != CSDS_MASK_SIZE)
-    error("The mask sizes are not consistent (" <<
-          sizeof(mask_type) << " != " << CSDS_MASK_SIZE
-          << ")");
+    csds_error("The mask sizes are not consistent ("
+               << sizeof(mask_type) << " != " << CSDS_MASK_SIZE << ")");
 
   if (verbose > 0)
     message("File version " << h->major_version << "." << h->minor_version);
 
   /* Read the offset directions. */
-  offset = csds_mapped_file_read_data(&log->log, offset, sizeof(int),
+  offset = csds_mapped_file_read_data(log->log, offset, sizeof(int),
                                       &h->offset_direction);
 
   if (!header_is_forward(h) && !header_is_backward(h) &&
       !header_is_corrupted(h))
-    error("Wrong offset value in the header:" << h->offset_direction);
+    csds_error("Wrong offset value in the header:" << h->offset_direction);
 
   /* Read offset to first record. */
   h->offset_first_record = 0;
-  offset = csds_mapped_file_read_data(&log->log, offset, CSDS_OFFSET_SIZE,
+  offset = csds_mapped_file_read_data(log->log, offset, CSDS_OFFSET_SIZE,
                                       &h->offset_first_record);
 
   /* Read the size of the strings. */
   unsigned int string_length = 0;
-  offset = csds_mapped_file_read_data(&log->log, offset, sizeof(unsigned int),
+  offset = csds_mapped_file_read_data(log->log, offset, sizeof(unsigned int),
                                       &string_length);
 
   /* Check if value defined in this file is large enough. */
   if (CSDS_STRING_SIZE < string_length) {
-    error("Name too large in log file" << string_length);
+    csds_error("Name too large in log file: " << string_length);
   }
 
   /* Read the number of masks. */
   unsigned int masks_count = 0;
-  offset = csds_mapped_file_read_data(&log->log, offset, sizeof(unsigned int),
+  offset = csds_mapped_file_read_data(log->log, offset, sizeof(unsigned int),
                                       &masks_count);
 
   /* Allocate the masks memory. */
   struct mask_data *masks =
       (struct mask_data *)malloc(sizeof(struct mask_data) * masks_count);
   if (masks == NULL) {
-    error("Failed to allocate the memory for the masks.");
+    csds_error("Failed to allocate the memory for the masks.");
   }
 
   /* Loop over all masks. */
   for (unsigned int i = 0; i < masks_count; i++) {
     /* Read the mask name. */
-    offset = csds_mapped_file_read_data(&log->log, offset, string_length,
+    offset = csds_mapped_file_read_data(log->log, offset, string_length,
                                         masks[i].name);
 
     /* Set the mask value. */
     masks[i].mask = 1 << i;
 
     /* Read the mask data size. */
-    offset = csds_mapped_file_read_data(&log->log, offset, sizeof(unsigned int),
+    offset = csds_mapped_file_read_data(log->log, offset, sizeof(unsigned int),
                                         &masks[i].size);
 
     /* Print the information. */
@@ -199,41 +201,42 @@ void header_read(struct header *h, struct csds_logfile *log, int verbose) {
   /* Check that the special flag is at the expected place. */
   const int special_flag = 0;
   if (strcmp(masks[special_flag].name, CSDS_SPECIAL_FLAGS_NAME) != 0) {
-    error("The special flag should be the first mask: "
-          << masks[special_flag].name);
+    csds_error("The special flag should be the first mask: "
+               << masks[special_flag].name);
   }
 
   /* Check the special flag mask */
   if (masks[special_flag].mask != CSDS_SPECIAL_FLAGS_MASK) {
-    error("The mask of the special flag should be "
-          << CSDS_SPECIAL_FLAGS_MASK);
+    csds_error("The mask of the special flag should be "
+               << CSDS_SPECIAL_FLAGS_MASK);
   }
 
   /* Check the size of the special flag. */
   if (masks[special_flag].size != CSDS_SPECIAL_FLAGS_SIZE) {
-    error("The special flag is expected to have the following size: "
-          << CSDS_SPECIAL_FLAGS_SIZE);
+    csds_error("The special flag is expected to have the following size: "
+               << CSDS_SPECIAL_FLAGS_SIZE);
   }
 
   /* Check that the timestamp is at the expected place */
   const int timestamp = 1;
   if (strcmp(masks[timestamp].name, CSDS_TIMESTAMP_NAME) != 0) {
-    error("The time stamp should be the second mask");
+    csds_error("The time stamp should be the second mask");
   }
 
   /* Check the timestamp mask */
   if (masks[timestamp].mask != CSDS_TIMESTAMP_MASK) {
-    error("The mask of the timestamp should be: " << CSDS_TIMESTAMP_MASK);
+    csds_error("The mask of the timestamp should be: " << CSDS_TIMESTAMP_MASK);
   }
 
   /* Check the size of the timestamp. */
   if (masks[timestamp].size != CSDS_TIMESTAMP_SIZE) {
-    error("The timestamp is expected to be an integertime_t and a double.");
+    csds_error(
+        "The timestamp is expected to be an integertime_t and a double.");
   }
 
   /* Read the number of fields per particle  */
   offset = csds_mapped_file_read_data(
-      &log->log, offset, sizeof(h->number_fields), h->number_fields);
+      log->log, offset, sizeof(h->number_fields), h->number_fields);
 
   /* Read the order of the fields */
   for (int type = 0; type < csds_type_count; type++) {
@@ -245,7 +248,7 @@ void header_read(struct header *h, struct csds_logfile *log, int verbose) {
     /* Allocate and read the order */
     size_t size = h->number_fields[type] * sizeof(int);
     int *order = (int *)malloc(size);
-    offset = csds_mapped_file_read_data(&log->log, offset, size, order);
+    offset = csds_mapped_file_read_data(log->log, offset, size, order);
 
     /* Add the special flag */
     size = (h->number_fields[type] + 1) * sizeof(struct field_information);
@@ -275,8 +278,8 @@ void header_read(struct header *h, struct csds_logfile *log, int verbose) {
 
   /* Check the logfile header's size. */
   if (offset != h->offset_first_record) {
-    error("Wrong header size (in header " << h->offset_first_record
-          << ", current" << offset << ")");
+    csds_error("Wrong header size (in header " << h->offset_first_record
+                                               << ", current" << offset << ")");
   }
 
   /* Cleanup the memory */
@@ -319,7 +322,7 @@ size_t header_get_record_size_from_mask(const struct header *h,
 
   /* Ensure that we found all the masks */
   if (mask != 0) {
-    error("A mask was not found");
+    csds_error("A mask was not found");
   }
   return count;
 }
@@ -348,9 +351,8 @@ const struct field_information *header_get_field_from_name(
 
   /* Check if we obtained a field. */
   if (info == NULL) {
-    error(
-          "The logfile does not contain the field "
-          << name << " for particle type " << part_type);
+    csds_error("The logfile does not contain the field "
+               << name << " for particle type " << part_type);
   }
 
   return info;
diff --git a/src/csds_index.cpp b/src/csds_index.cpp
index 1a95020f6e8f8949505045924225324732bf2db6..881b62b0613ac6d301586c47f6134615c2fe9b3e 100644
--- a/src/csds_index.cpp
+++ b/src/csds_index.cpp
@@ -56,8 +56,8 @@
  * @param filename The filename of the index file.
  * @param verbose The verbose level
  */
-void csds_index_read_header(struct csds_index *index, const std::string filename,
-                            int verbose) {
+void csds_index_read_header(struct csds_index *index,
+                            const std::string filename, int verbose) {
 
   /* Open the file */
   if (verbose > 1) {
@@ -209,7 +209,7 @@ struct index_data *csds_index_get_removed_history(struct csds_index *index,
 int csds_index_contains_time_array(struct csds_index *index) {
   /* Only the first index file should have a time array */
   if (index->integer_time != 0) {
-    error("Only the first index file can have a time array.");
+    csds_error("Only the first index file can have a time array.");
   }
 
   /* Get the file size */
@@ -234,7 +234,7 @@ void csds_index_map_file(struct csds_index *index, const std::string filename,
                          int sorted, int verbose) {
   /* Un-map previous file */
   if (index->index.map != NULL) {
-    error("Trying to remap.");
+    csds_error("Trying to remap.");
   }
 
   /* Check if need to sort the file */
@@ -243,7 +243,7 @@ void csds_index_map_file(struct csds_index *index, const std::string filename,
       message("Sorting the index file.");
     }
     /* Map the index file */
-    csds_mapped_file_mmap_file(&index->index, filename,
+    csds_mapped_file_mmap_file(index->index, filename,
                                /* read_only */ 0, /* track_mmap */ 0);
     /* Sort the file */
     for (int i = 0; i < csds_type_count; i++) {
@@ -265,7 +265,7 @@ void csds_index_map_file(struct csds_index *index, const std::string filename,
   }
 
   /* Map the index file */
-  csds_mapped_file_mmap_file(&index->index, filename,
+  csds_mapped_file_mmap_file(index->index, filename,
                              /* read_only */ 1, /* track_mmap */ 0);
 }
 
@@ -276,9 +276,9 @@ void csds_index_map_file(struct csds_index *index, const std::string filename,
  */
 void csds_index_free(struct csds_index *index) {
   if (index->index.map == NULL) {
-    error("Trying to unmap an unexisting map");
+    csds_error("Trying to unmap an unexisting map");
   }
-  csds_mapped_file_munmap_file(&index->index);
+  csds_mapped_file_munmap_file(index->index);
 }
 
 /**
@@ -340,11 +340,11 @@ int csds_index_get_offset(struct csds_index *index, int64_t id, int type,
 
   /* Ensure that the file is sorted according to the ids. */
   if (!index->is_sorted) {
-    error("The index file should be already sorted.");
+    csds_error("The index file should be already sorted.");
   }
   /* Ensure that we have some particles */
   if (index->nparts[type] == 0) {
-    error("Trying to find a particle in an empty index.");
+    csds_error("Trying to find a particle in an empty index.");
   }
 
   /* Get the data structure. */
@@ -371,7 +371,7 @@ int csds_index_get_offset(struct csds_index *index, int64_t id, int type,
 
   /* Ensure that we got the correct id */
   if (left == right && data[left].id != id) {
-    error("Failed to obtain the correct id");
+    csds_error("Failed to obtain the correct id");
   }
 
   /* Return the correct value if found. */
diff --git a/src/csds_index.hpp b/src/csds_index.hpp
index 0372a26de4f0871c1c99b7aa1b4097bc8749d493..394746f47bcbdd3a29e7edd21cbdfd60534c4251 100644
--- a/src/csds_index.hpp
+++ b/src/csds_index.hpp
@@ -63,8 +63,8 @@ struct csds_index {
 int csds_index_contains_time_array(struct csds_index *index);
 void csds_index_write_sorted(struct csds_index *index);
 void csds_index_init(struct csds_index *index);
-void csds_index_read_header(struct csds_index *index, const std::string filename,
-                            int verbose);
+void csds_index_read_header(struct csds_index *index,
+                            const std::string filename, int verbose);
 void csds_index_map_file(struct csds_index *index, const std::string filename,
                          int sorted, int verbose);
 size_t csds_index_get_particle_offset(struct csds_index *index, long long id,
diff --git a/src/csds_interpolation.hpp b/src/csds_interpolation.hpp
index caaf75684f81eb160fc09daf194584cbaa8de6d9..86a5b4275f03bc89882eefad4081c72415731618 100644
--- a/src/csds_interpolation.hpp
+++ b/src/csds_interpolation.hpp
@@ -240,11 +240,9 @@ INLINE static void interpolate_quintic_double_float_ND(
 #ifdef CSDS_DEBUG_CHECKS
       /* Check if the periodic box is correctly applied */
       if (x[i] >= params->box_size[i] || x[i] < 0) {
-        error(
-              "A particle is outside the periodic box " <<
-              params->box_size[i] << ": before=" <<
-              x0 << ", after=" << x1 << ", interpolation="
-              << x[i]);
+        csds_error("A particle is outside the periodic box "
+                   << params->box_size[i] << ": before=" << x0
+                   << ", after=" << x1 << ", interpolation=" << x[i]);
       }
 #endif
     }
@@ -275,7 +273,7 @@ INLINE static void interpolate_cubic_float_ND(
 #ifdef CSDS_DEBUG_CHECKS
   /* The position is expected to be a double => error with periodic */
   if (periodic) {
-    error("Not implemented yet");
+    csds_error("Not implemented yet");
   }
 #endif
 
@@ -335,7 +333,7 @@ INLINE static void interpolate_linear_float_ND(
 #ifdef CSDS_DEBUG_CHECKS
   /* The position is expected to be a double => error with periodic */
   if (periodic) {
-    error("Not implemented yet");
+    csds_error("Not implemented yet");
   }
 #endif
 
@@ -377,7 +375,7 @@ INLINE static void interpolate_linear_double_ND(
   /* The velocity and acceleration are supposed to be provided => quintic =>
    * error here */
   if (periodic) {
-    error("Not implemented yet");
+    csds_error("Not implemented yet");
   }
 #endif
 
@@ -415,7 +413,7 @@ INLINE static void interpolate_linear_float(
 #ifdef CSDS_DEBUG_CHECKS
   /* The position is expected to be a double => error with periodic */
   if (periodic) {
-    error("Not implemented yet");
+    csds_error("Not implemented yet");
   }
 #endif
 
diff --git a/src/csds_logfile.cpp b/src/csds_logfile.cpp
index b5b74f6c3c5a03322b9eb4df59c9c10b188f7695..6e5af7874f58cd21383b8b2bb358502c4037d2c1 100644
--- a/src/csds_logfile.cpp
+++ b/src/csds_logfile.cpp
@@ -32,19 +32,19 @@ using namespace std;
  * @param verbose The verbose level.
  * @param only_header Read only the header.
  */
-void csds_logfile_init_from_file(struct csds_logfile *log, const string basename,
-                                 int only_header, int verbose) {
+void csds_logfile_init_from_file(struct csds_logfile *log,
+                                 const string basename, int only_header,
+                                 int verbose) {
 
   /* Set pointers to zero. */
   time_array_init(&log->times, CSDS_TIME_INIT_SIZE);
 
   /* Generate the logfile filename */
-  string logfile_name;
-  logfile_name = basename + ".dump";
+  string logfile_name = basename + ".dump";
 
   /* Open file, map it and get its size. */
   if (verbose > 1) message("Mapping the log file.");
-  csds_mapped_file_mmap_file(&log->log, logfile_name,
+  csds_mapped_file_mmap_file(log->log, logfile_name,
                              /* read_only */ 1, /* track_mmap */ 1);
 
   /* Read the header. */
@@ -62,7 +62,7 @@ void csds_logfile_init_from_file(struct csds_logfile *log, const string basename
 
   /* Check if the offset are corrupted. */
   if (header_is_corrupted(&log->header)) {
-    error("The offsets have been corrupted.");
+    csds_error("The offsets have been corrupted.");
   }
 
   /* Reverse the offsets direction. */
@@ -86,7 +86,7 @@ void csds_logfile_init_from_file(struct csds_logfile *log, const string basename
  * @param log The #csds_logfile.
  */
 void csds_logfile_free(struct csds_logfile *log) {
-  csds_mapped_file_munmap_file(&log->log);
+  csds_mapped_file_munmap_file(log->log);
 
   time_array_free(&log->times);
 
@@ -94,7 +94,8 @@ void csds_logfile_free(struct csds_logfile *log) {
 }
 
 /**
- * @brief debugging function checking the offset and the mask of all the records.
+ * @brief debugging function checking the offset and the mask of all the
+ * records.
  *
  * Compare the mask with the one pointed by the header.
  * if the record is a particle, check the id too.
@@ -102,8 +103,8 @@ void csds_logfile_free(struct csds_logfile *log) {
  * @param log The #csds_logfile
  * @param verbose The verbose level
  */
-void csds_logfile_check_record_consistency(
-    ATTR_UNUSED struct csds_logfile *log, ATTR_UNUSED int verbose) {
+void csds_logfile_check_record_consistency(ATTR_UNUSED struct csds_logfile *log,
+                                           ATTR_UNUSED int verbose) {
 #ifdef CSDS_DEBUG_CHECKS
   struct header *header = &log->header;
 
@@ -139,7 +140,8 @@ void csds_logfile_check_record_consistency(
   }
 
 #else
-  error("This function should not be called outside of the debugging checks");
+  csds_error(
+      "This function should not be called outside of the debugging checks");
 #endif
 }
 
@@ -153,8 +155,8 @@ void csds_logfile_reverse_offset(struct csds_logfile *log, std::string filename,
                                  int verbose) {
 
   /* Close and reopen the file in write mode. */
-  csds_mapped_file_munmap_file(&log->log);
-  csds_mapped_file_mmap_file(&log->log, filename,
+  csds_mapped_file_munmap_file(log->log);
+  csds_mapped_file_mmap_file(log->log, filename,
                              /* read_only */ 0, /* track_mmap */ 0);
 
   /* Get pointers */
@@ -162,7 +164,7 @@ void csds_logfile_reverse_offset(struct csds_logfile *log, std::string filename,
 
   /* Check if the offsets need to be reversed. */
   if (!header_is_backward(header)) {
-    error("The offsets are already reversed.");
+    csds_error("The offsets are already reversed.");
   }
 
 #ifdef CSDS_DEBUG_CHECKS
@@ -183,10 +185,9 @@ void csds_logfile_reverse_offset(struct csds_logfile *log, std::string filename,
   const float delta_percentage = 0.2;
   const high_resolution_clock::time_point init = high_resolution_clock::now();
 
-
   /* reverse the record's offset. */
   for (size_t offset = header->offset_first_record; offset < log->log.mmap_size;
-       offset = tools_reverse_offset(header, &log->log, offset)) {
+       offset = tools_reverse_offset(header, log->log, offset)) {
     /* Check if we should update the progress. */
     float current = 100 * ((float)offset) / log->log.mmap_size;
     if (verbose > 0 && current > next_percentage) {
@@ -216,8 +217,8 @@ void csds_logfile_reverse_offset(struct csds_logfile *log, std::string filename,
 #endif
 
   /* Close and reopen the file in read mode. */
-  csds_mapped_file_munmap_file(&log->log);
-  csds_mapped_file_mmap_file(&log->log, filename,
+  csds_mapped_file_munmap_file(log->log);
+  csds_mapped_file_mmap_file(log->log, filename,
                              /* read_only */ 1,
                              /* track_mmap */ 1);
 }
diff --git a/src/csds_logfile.hpp b/src/csds_logfile.hpp
index 72ed4be607639191e96e332214d296d7276193e6..6191d0daed37a2d42d9f7f42ec5f70ebccee5648 100644
--- a/src/csds_logfile.hpp
+++ b/src/csds_logfile.hpp
@@ -49,9 +49,11 @@ struct csds_logfile {
   struct mapped_file log;
 };
 
-void csds_logfile_init_from_file(struct csds_logfile *log, const std::string basename,
-                                 int only_header, int verbose);
-void csds_logfile_reverse_offset(struct csds_logfile *log, std::string filename, int verbose);
+void csds_logfile_init_from_file(struct csds_logfile *log,
+                                 const std::string basename, int only_header,
+                                 int verbose);
+void csds_logfile_reverse_offset(struct csds_logfile *log, std::string filename,
+                                 int verbose);
 void csds_logfile_free(struct csds_logfile *log);
 
 #endif  // CSDS_LOGFILE_H
diff --git a/src/csds_mapped_file.cpp b/src/csds_mapped_file.cpp
index e678999a9e61527fa12fe0d33bb19dde21106271..9aa067aab0d2c1ad51c5c9782487dfd4d9824a4d 100644
--- a/src/csds_mapped_file.cpp
+++ b/src/csds_mapped_file.cpp
@@ -42,8 +42,7 @@ using namespace std;
 size_t csds_loader_io_get_file_size(int fd) {
   struct stat s;
   int status = fstat(fd, &s);
-  if (status != 0)
-    error("Unable to get file size: " << strerror(errno));
+  if (status != 0) csds_error("Unable to get file size: " << strerror(errno));
   return s.st_size;
 }
 
@@ -66,7 +65,7 @@ string strip_ext(string fname) {
  * @param track_mmap Should we track the memory reading?
  *
  */
-void csds_mapped_file_mmap_file(struct mapped_file *map, const string filename,
+void csds_mapped_file_mmap_file(struct mapped_file &map, const string filename,
                                 int read_only, ATTR_UNUSED int track_mmap) {
   /* open the file. */
   int fd;
@@ -77,29 +76,29 @@ void csds_mapped_file_mmap_file(struct mapped_file *map, const string filename,
     fd = open(filename.data(), O_RDWR);
 
   if (fd == -1)
-    error("Unable to open file: " << filename << " ("
-          << strerror(errno) << ")");
+    csds_error("Unable to open file: " << filename << " (" << strerror(errno)
+                                       << ")");
 
   /* get the file size. */
-  map->mmap_size = csds_loader_io_get_file_size(fd);
+  map.mmap_size = csds_loader_io_get_file_size(fd);
 
   /* map the memory. */
   int mode = PROT_READ;
   if (!read_only) mode |= PROT_WRITE;
 
-  map->map = (char *)mmap(NULL, map->mmap_size, mode, MAP_SHARED, fd, 0);
-  if (map->map == MAP_FAILED)
-    error("Failed to allocate map of size " << map->mmap_size
-          << " bytes (" << strerror(errno) << ")");
+  map.map = (char *)mmap(NULL, map.mmap_size, mode, MAP_SHARED, fd, 0);
+  if (map.map == MAP_FAILED)
+    csds_error("Failed to allocate map of size " << map.mmap_size << " bytes ("
+                                                 << strerror(errno) << ")");
 
   /* Close the file. */
   close(fd);
 
 #ifdef CSDS_MMAP_TRACKING
   /* Now deal with the mmap tracking */
-  map->tracking.f = NULL;
-  map->tracking.number_elements = 0;
-  map->tracking.size = 0;
+  map.tracking.f = NULL;
+  map.tracking.number_elements = 0;
+  map.tracking.size = 0;
 
   /* Nothing to do without tracking */
   if (!track_mmap) return;
@@ -109,14 +108,14 @@ void csds_mapped_file_mmap_file(struct mapped_file *map, const string filename,
   tracking_filename = strip_ext(tracking_filename);
   tracking_filename += ".track";
 
-  map->tracking.f = fopen(tracking_filename, "w");
-  if (map->tracking.f == NULL)
-    error("Unable to open file " << tracking_filename
-          << " (" << strerror(errno) << ")");
+  map.tracking.f = fopen(tracking_filename, "w");
+  if (map.tracking.f == NULL)
+    csds_error("Unable to open file " << tracking_filename << " ("
+                                      << strerror(errno) << ")");
 
   /* Write the header */
-  fprintf(map->tracking.f, "%20s", "CSDS_TRACK");
-  fwrite(&map->tracking.number_elements, 1, sizeof(int64_t), map->tracking.f);
+  fprintf(map.tracking.f, "%20s", "CSDS_TRACK");
+  fwrite(&map.tracking.number_elements, 1, sizeof(int64_t), map.tracking.f);
 #endif
 }
 
@@ -126,23 +125,23 @@ void csds_mapped_file_mmap_file(struct mapped_file *map, const string filename,
  * @param map The #mapped_file.
  *
  */
-void csds_mapped_file_munmap_file(struct mapped_file *map) {
+void csds_mapped_file_munmap_file(struct mapped_file &map) {
   /* unmap the file. */
-  if (munmap(map->map, map->mmap_size) != 0) {
-    error("Unable to unmap the file :" << strerror(errno));
+  if (munmap(map.map, map.mmap_size) != 0) {
+    csds_error("Unable to unmap the file :" << strerror(errno));
   }
 
   /* Reset values */
-  map->map = NULL;
-  map->mmap_size = 0;
+  map.map = NULL;
+  map.mmap_size = 0;
 
 #ifdef CSDS_MMAP_TRACKING
   /* Now deal with tracking */
-  if (map->tracking.f == NULL) return;
+  if (map.tracking.f == NULL) return;
 
-  fclose(map->tracking.f);
+  fclose(map.tracking.f);
 
   /* Reset the values */
-  map->tracking.f = NULL;
+  map.tracking.f = NULL;
 #endif
 }
diff --git a/src/csds_mapped_file.hpp b/src/csds_mapped_file.hpp
index a3f1b556899fe415cf202a54c7cc130ad5634f60..dbc879174d4f9cec8e8fc99f9f9107408d7ce690 100644
--- a/src/csds_mapped_file.hpp
+++ b/src/csds_mapped_file.hpp
@@ -79,37 +79,38 @@ INLINE static void mmap_tracking_element_init(struct mmap_tracking_element *el,
 
 /** @brief Write a tracking element into the file */
 INLINE static void mmap_tracking_element_write(struct mmap_tracking_element *el,
-                                               struct mapped_file *file) {
+                                               struct mapped_file &file) {
 #pragma omp critical
   {
     /* Stop when file becomes too huge */
-    if (file->tracking.size < CSDS_TRACKING_SIZE_BYTES) {
+    if (file.tracking.size < CSDS_TRACKING_SIZE_BYTES) {
 
       /* Write the element */
-      fwrite(el, 1, sizeof(struct mmap_tracking_element), file->tracking.f);
+      fwrite(el, 1, sizeof(struct mmap_tracking_element), file.tracking.f);
 
       /* Update the counter */
-      file->tracking.number_elements++;
+      file.tracking.number_elements++;
 
       /* Write it back to the file */
-      const long int pos = ftell(file->tracking.f);
+      const long int pos = ftell(file.tracking.f);
       /* See csds_mapped_file_mmap_file for the value */
-      fseek(file->tracking.f, 20, SEEK_SET);
-      fwrite(&file->tracking.number_elements, 1,
-             sizeof(file->tracking.number_elements), file->tracking.f);
-      fseek(file->tracking.f, pos, SEEK_SET);
+      fseek(file.tracking.f, 20, SEEK_SET);
+      fwrite(&file.tracking.number_elements, 1,
+             sizeof(file.tracking.number_elements), file.tracking.f);
+      fseek(file.tracking.f, pos, SEEK_SET);
 
       /* Update the file size */
-      file->tracking.size += sizeof(struct mmap_tracking_element);
+      file.tracking.size += sizeof(struct mmap_tracking_element);
     }
   }
 }
 #endif
 
 size_t csds_loader_io_get_file_size(int fd);
-void csds_mapped_file_mmap_file(struct mapped_file *map, const std::string filename,
-                                int read_only, int track_mmap);
-void csds_mapped_file_munmap_file(struct mapped_file *map);
+void csds_mapped_file_mmap_file(struct mapped_file &map,
+                                const std::string filename, int read_only,
+                                int track_mmap);
+void csds_mapped_file_munmap_file(struct mapped_file &map);
 
 /**
  * @brief read a mask with its offset.
@@ -122,7 +123,7 @@ void csds_mapped_file_munmap_file(struct mapped_file *map);
  *
  * @return The offset after the record header.
  */
-INLINE static size_t csds_mapped_file_read_mask(struct mapped_file *mmap,
+INLINE static size_t csds_mapped_file_read_mask(struct mapped_file &mmap,
                                                 size_t offset, mask_type *mask,
                                                 size_t *diff_offset) {
 #ifdef CSDS_MMAP_TRACKING
@@ -133,21 +134,21 @@ INLINE static size_t csds_mapped_file_read_mask(struct mapped_file *mmap,
   /* read mask */
   if (mask) {
     *mask = 0;
-    memcpy(mask, mmap->map + offset, CSDS_MASK_SIZE);
+    memcpy(mask, mmap.map + offset, CSDS_MASK_SIZE);
   }
   offset += CSDS_MASK_SIZE;
 
   /* read offset */
   if (diff_offset) {
     *diff_offset = 0;
-    memcpy(diff_offset, mmap->map + offset, CSDS_OFFSET_SIZE);
+    memcpy(diff_offset, mmap.map + offset, CSDS_OFFSET_SIZE);
   }
   offset += CSDS_OFFSET_SIZE;
 
 #ifdef CSDS_MMAP_TRACKING
   /* Write the result into the file */
   el.ticks = getticks() - el.ticks;
-  if (mmap->tracking.f) {
+  if (mmap.tracking.f) {
     mmap_tracking_element_write(&el, mmap);
   }
 #endif
@@ -165,7 +166,7 @@ INLINE static size_t csds_mapped_file_read_mask(struct mapped_file *mmap,
 
  * @return The offset after the data read.
  */
-INLINE static size_t csds_mapped_file_read_data(struct mapped_file *mmap,
+INLINE static size_t csds_mapped_file_read_data(struct mapped_file &mmap,
                                                 size_t offset,
                                                 const size_t size, void *p) {
 #ifdef CSDS_MMAP_TRACKING
@@ -173,12 +174,12 @@ INLINE static size_t csds_mapped_file_read_data(struct mapped_file *mmap,
   mmap_tracking_element_init(&el, offset);
 #endif
 
-  memcpy(p, mmap->map + offset, size);
+  memcpy(p, mmap.map + offset, size);
 
 #ifdef CSDS_MMAP_TRACKING
   /* Write the result into the file */
   el.ticks = getticks() - el.ticks;
-  if (mmap->tracking.f) {
+  if (mmap.tracking.f) {
     mmap_tracking_element_write(&el, mmap);
   }
 #endif
@@ -196,7 +197,7 @@ INLINE static size_t csds_mapped_file_read_data(struct mapped_file *mmap,
  *
  * @return memory after the data written.
  */
-INLINE static size_t csds_mapped_file_write_data(struct mapped_file *mmap,
+INLINE static size_t csds_mapped_file_write_data(struct mapped_file &mmap,
                                                  size_t offset,
                                                  const size_t size,
                                                  const void *p) {
@@ -206,12 +207,12 @@ INLINE static size_t csds_mapped_file_write_data(struct mapped_file *mmap,
   mmap_tracking_element_init(&el, offset);
 #endif
 
-  memcpy(mmap->map + offset, p, size);
+  memcpy(mmap.map + offset, p, size);
 
 #ifdef CSDS_MMAP_TRACKING
   /* Write the result into the file */
   el.ticks = getticks() - el.ticks;
-  if (mmap->tracking.f) {
+  if (mmap.tracking.f) {
     mmap_tracking_element_write(&el, mmap);
   }
 #endif
@@ -225,7 +226,7 @@ INLINE static size_t csds_mapped_file_write_data(struct mapped_file *mmap,
     /* Warn the OS that we will read in a sequential way */          \
     int test_ret = madvise(log.map, log.mmap_size, MADV_SEQUENTIAL); \
     if (test_ret != 0) {                                             \
-      error("Failed to advise the mmap");                            \
+      csds_error("Failed to advise the mmap");                       \
     }                                                                \
   }
 
@@ -234,7 +235,7 @@ INLINE static size_t csds_mapped_file_write_data(struct mapped_file *mmap,
     /* Warn the OS that we will read in a normal way */          \
     int test_ret = madvise(log.map, log.mmap_size, MADV_NORMAL); \
     if (test_ret != 0) {                                         \
-      error("Failed to advise the mmap");                        \
+      csds_error("Failed to advise the mmap");                   \
     }                                                            \
   }
 
@@ -248,7 +249,7 @@ INLINE static size_t csds_mapped_file_write_data(struct mapped_file *mmap,
     int test_ret =                                                        \
         madvise(mmap + mem_align, length + page_size, MADV_WILLNEED);     \
     if (test_ret != 0) {                                                  \
-      error("Failed to advise the mmap");                                 \
+      csds_error("Failed to advise the mmap");                            \
     }                                                                     \
   }
 
diff --git a/src/csds_parameters.cpp b/src/csds_parameters.cpp
index d2a65cc875296aefa2e779eaa855a6de78b38d26..749f871ab3af911ddfcc92456b65c78466d36244 100644
--- a/src/csds_parameters.cpp
+++ b/src/csds_parameters.cpp
@@ -35,8 +35,8 @@ using namespace std;
  * @param verbose the verbose level
  *
  */
-void csds_parameters_init(struct csds_parameters *params,
-                          const string filename, int verbose) {
+void csds_parameters_init(struct csds_parameters *params, const string filename,
+                          int verbose) {
 
   /* Initialize the parser */
   struct csds_parser csds_parser;
diff --git a/src/csds_parser.cpp b/src/csds_parser.cpp
index 98dc954d4e570e9500ee9a3a81a276e709c254b1..aa5c546a85cdb13378f35c9052dc0b3abccca526 100644
--- a/src/csds_parser.cpp
+++ b/src/csds_parser.cpp
@@ -132,7 +132,7 @@ void parser_read_file(const std::string file_name, struct csds_parser *params) {
 
   /* Check if parameter file exits. */
   if (file == NULL) {
-    error("Error opening parameter file: " << file_name);
+    csds_error("Error opening parameter file: " << file_name);
   }
 
   /* Read until the end of the file is reached.*/
@@ -196,8 +196,8 @@ static void find_duplicate_params(const struct csds_parser *params,
                                   const char *param_name) {
   for (int i = 0; i < params->paramCount; i++) {
     if (!strcmp(param_name, params->data[i].name)) {
-      error("Invalid line:" << lineNumber << " '" << param_name
-            << "', parameter is a duplicate.");
+      csds_error("Invalid line:" << lineNumber << " '" << param_name
+                                 << "', parameter is a duplicate.");
     }
   }
 }
@@ -213,8 +213,8 @@ static void find_duplicate_section(const struct csds_parser *params,
                                    const char *section_name) {
   for (int i = 0; i < params->sectionCount; i++) {
     if (!strcmp(section_name, params->section[i].name)) {
-      error("Invalid line:" << lineNumber << " '" << section_name
-            << "', parameter is a duplicate.");
+      csds_error("Invalid line:" << lineNumber << " '" << section_name
+                                 << "', parameter is a duplicate.");
     }
   }
 }
@@ -254,8 +254,7 @@ static void parse_line(char *line, struct csds_parser *params) {
       /* Check for invalid lines,not including the start and end of file. */
       else if (strcmp(trim_line, PARSER_START_OF_FILE) &&
                strcmp(trim_line, PARSER_END_OF_FILE)) {
-        error("Invalid line:" << lineNumber << " '"
-              << trim_line << "'");
+        csds_error("Invalid line:" << lineNumber << " '" << trim_line << "'");
       }
     }
   }
@@ -280,9 +279,9 @@ static void parse_value(char *line, struct csds_parser *params) {
 
   /* Check that standalone parameters have correct indentation. */
   if (!inSection && line[0] == ' ') {
-    error("Invalid line:" << lineNumber << " '" << line
-          << "', standalone parameter defined with "
-          << "incorrect indentation.");
+    csds_error("Invalid line:" << lineNumber << " '" << line
+                               << "', standalone parameter defined with "
+                               << "incorrect indentation.");
   }
 
   /* Check that it is a parameter inside a section.*/
@@ -313,7 +312,7 @@ static void parse_value(char *line, struct csds_parser *params) {
       strcpy(section, tmpSectionName);
       strcpy(params->section[params->sectionCount].name, tmpSectionName);
       if (params->sectionCount == PARSER_MAX_NO_OF_SECTIONS - 1) {
-        error(
+        csds_error(
             "Maximal number of sections in parameter file reached. Aborting !");
       } else {
         params->sectionCount++;
@@ -337,7 +336,7 @@ static void parse_value(char *line, struct csds_parser *params) {
       strcpy(params->data[params->paramCount].name, tmpStr);
       strcpy(params->data[params->paramCount].value, token);
       if (params->paramCount == PARSER_MAX_NO_OF_PARAMS - 1) {
-        error(
+        csds_error(
             "Maximal number of parameters in parameter file reached. Aborting "
             "!");
       } else {
@@ -373,8 +372,8 @@ static void parse_section_param(char *line, int *isFirstParam,
     sectionIndent = count_indentation(line);
     *isFirstParam = 0;
   } else if (count_indentation(line) != sectionIndent) {
-    error("Invalid line:" << lineNumber << " '" << line
-          << "', parameter has incorrect indentation.");
+    csds_error("Invalid line:" << lineNumber << " '" << line
+                               << "', parameter has incorrect indentation.");
   }
 
   /* Take first token as the parameter name and trim leading white space. */
@@ -395,7 +394,8 @@ static void parse_section_param(char *line, int *isFirstParam,
   strcpy(params->data[params->paramCount].name, paramName);
   strcpy(params->data[params->paramCount].value, token);
   if (params->paramCount == PARSER_MAX_NO_OF_PARAMS - 1) {
-    error("Maximal number of parameters in parameter file reached. Aborting !");
+    csds_error(
+        "Maximal number of parameters in parameter file reached. Aborting !");
   } else {
     params->paramCount++;
   }
@@ -412,17 +412,18 @@ static void parse_section_param(char *line, int *isFirstParam,
       if (strcmp(name, params->data[i].name) == 0) {                          \
         /* Check that exactly one number is parsed, capture junk. */          \
         if (sscanf(params->data[i].value, " " FMT "%s ", result, str) != 1) { \
-          error("Tried parsing " << DESC << " '" << params->data[i].name \
-                << "' but found '" << params->data[i].value             \
-                << "' with illegal trailing characters '"               \
-                << str << "'.");                                        \
-        }                                                               \
+          csds_error("Tried parsing "                                         \
+                     << DESC << " '" << params->data[i].name                  \
+                     << "' but found '" << params->data[i].value              \
+                     << "' with illegal trailing characters '" << str         \
+                     << "'.");                                                \
+        }                                                                     \
         return 1;                                                             \
       }                                                                       \
     }                                                                         \
     if (def == NULL)                                                          \
-      error("Cannot find '" << name                                     \
-            << "' in the structure, in file: " << params->fileName);    \
+      csds_error("Cannot find '" << name << "' in the structure, in file: "   \
+                                 << params->fileName);                        \
     return 0;                                                                 \
   }
 
@@ -516,7 +517,7 @@ void parser_get_param_string(struct csds_parser *params, const char *name,
     }
   }
 
-  error("Cannot find '" << name << "' in the structure.");
+  csds_error("Cannot find '" << name << "' in the structure.");
 }
 
 /**
@@ -620,58 +621,57 @@ void parser_get_opt_param_string(struct csds_parser *params, const char *name,
  * single value, so "%f" for a float and DESC the type description
  * i.e. "float".
  */
-#define PARSER_GET_ARRAY(TYPE, FMT, DESC)                             \
-  static int get_param_##TYPE##_array(struct csds_parser *params,     \
-                                      const char *name, int required, \
-                                      int nval, TYPE *values) {       \
-    char str[PARSER_MAX_LINE_SIZE];                                   \
-    char cpy[PARSER_MAX_LINE_SIZE];                                   \
-                                                                      \
-    for (int i = 0; i < params->paramCount; i++) {                    \
-      if (!strcmp(name, params->data[i].name)) {                      \
-        char *cp = cpy;                                               \
-        strcpy(cp, params->data[i].value);                            \
-        cp = trim_both(cp);                                           \
-                                                                      \
-        /* Strip off [], if present. */                               \
-        if (cp[0] == '[') cp++;                                       \
-        int l = strlen(cp);                                           \
-        if (cp[l - 1] == ']') cp[l - 1] = '\0';                       \
-        cp = trim_both(cp);                                           \
-                                                                      \
-        /* Format that captures spaces and trailing junk. */          \
-        char fmt[20];                                                 \
-        sprintf(fmt, " %s%%s ", FMT);                                 \
-                                                                      \
-        /* Parse out values which should now be "v, v, v" with        \
-         * internal     whitespace variations. */                     \
-        char *p = strtok(cp, ",");                                    \
-        for (int k = 0; k < nval; k++) {                              \
-          if (p != NULL) {                                            \
-            TYPE tmp_value;                                           \
-            if (sscanf(p, fmt, &tmp_value, str) != 1) {                 \
-              error("Tried parsing " << DESC << " '" << params->data[i].name \
-                    << "' but found '" << params->data[i].value         \
-                    << "' with illegal trailing characters '"           \
-                    << str << "'.");                                    \
-            }                                                           \
-            values[k] = tmp_value;                                    \
-          } else {                                                    \
-            error(                                                    \
-                  "Array '" << name << "' with value '"                 \
-                  << params->data[i].value                              \
-                  << "' has too few values, expected " << nval);        \
-          }                                                           \
-          if (k < nval - 1) p = strtok(NULL, ",");                    \
-        }                                                             \
-        return 1;                                                     \
-      }                                                               \
-    }                                                                 \
-    if (required)                                                     \
-      error("Cannot find '" << name                                    \
-            << "' in the structure, in file '"                          \
-            << params->fileName << "'.");                                        \
-    return 0;                                                         \
+#define PARSER_GET_ARRAY(TYPE, FMT, DESC)                                   \
+  static int get_param_##TYPE##_array(struct csds_parser *params,           \
+                                      const char *name, int required,       \
+                                      int nval, TYPE *values) {             \
+    char str[PARSER_MAX_LINE_SIZE];                                         \
+    char cpy[PARSER_MAX_LINE_SIZE];                                         \
+                                                                            \
+    for (int i = 0; i < params->paramCount; i++) {                          \
+      if (!strcmp(name, params->data[i].name)) {                            \
+        char *cp = cpy;                                                     \
+        strcpy(cp, params->data[i].value);                                  \
+        cp = trim_both(cp);                                                 \
+                                                                            \
+        /* Strip off [], if present. */                                     \
+        if (cp[0] == '[') cp++;                                             \
+        int l = strlen(cp);                                                 \
+        if (cp[l - 1] == ']') cp[l - 1] = '\0';                             \
+        cp = trim_both(cp);                                                 \
+                                                                            \
+        /* Format that captures spaces and trailing junk. */                \
+        char fmt[20];                                                       \
+        sprintf(fmt, " %s%%s ", FMT);                                       \
+                                                                            \
+        /* Parse out values which should now be "v, v, v" with              \
+         * internal     whitespace variations. */                           \
+        char *p = strtok(cp, ",");                                          \
+        for (int k = 0; k < nval; k++) {                                    \
+          if (p != NULL) {                                                  \
+            TYPE tmp_value;                                                 \
+            if (sscanf(p, fmt, &tmp_value, str) != 1) {                     \
+              csds_error("Tried parsing "                                   \
+                         << DESC << " '" << params->data[i].name            \
+                         << "' but found '" << params->data[i].value        \
+                         << "' with illegal trailing characters '" << str   \
+                         << "'.");                                          \
+            }                                                               \
+            values[k] = tmp_value;                                          \
+          } else {                                                          \
+            csds_error("Array '"                                            \
+                       << name << "' with value '" << params->data[i].value \
+                       << "' has too few values, expected " << nval);       \
+          }                                                                 \
+          if (k < nval - 1) p = strtok(NULL, ",");                          \
+        }                                                                   \
+        return 1;                                                           \
+      }                                                                     \
+    }                                                                       \
+    if (required)                                                           \
+      csds_error("Cannot find '" << name << "' in the structure, in file '" \
+                                 << params->fileName << "'.");              \
+    return 0;                                                               \
   }
 
 /* Instantiations. */
diff --git a/src/csds_part_type.cpp b/src/csds_part_type.cpp
index bf56b33e6d09eba4307b5396d48f41fa945fc348..1643a6f3bc58a22fefa00b4c0e06d6e2fcb4c4bd 100644
--- a/src/csds_part_type.cpp
+++ b/src/csds_part_type.cpp
@@ -20,5 +20,6 @@
 /* This object's header. */
 #include "csds_part_type.hpp"
 
-const char* part_type_names[csds_type_count] = {
-    "Gas", "DM", "DMBackground", "Sink", "Stars", "BH", "Neutrino"};
+const char* part_type_names[csds_type_count+1] = {
+    "gas",         "dark_matter", "dark_matter_background", "sink", "stars",
+    "black_holes", "neutrino", "count"};
diff --git a/src/csds_particle.hpp b/src/csds_particle.hpp
index fd2307a2c1bb9acf2cd2f5943788f34c5e8202ed..d757333da609dcc682984b539434ff0da5dcb192 100644
--- a/src/csds_particle.hpp
+++ b/src/csds_particle.hpp
@@ -52,7 +52,7 @@ INLINE static size_t csds_particle_read_field(
     size_t offset, void *output, const struct field_information *field_read,
     const int derivative, mask_type *mask, size_t *h_offset,
     const struct field_information *fields, int number_fields,
-    struct mapped_file *log_map) {
+    struct mapped_file &log_map) {
 
   *mask = 0;
   *h_offset = 0;
@@ -63,14 +63,14 @@ INLINE static size_t csds_particle_read_field(
 #ifdef CSDS_DEBUG_CHECKS
   /* Check if it is not a time record. */
   if (*mask == CSDS_TIMESTAMP_MASK) {
-    error("Cannot read a particle from timestep record.");
+    csds_error("Cannot read a particle from timestep record.");
   }
 
   if (derivative == 1 && field_read->first.field == field_enum_none) {
-    error("Trying to read the non existing first derivative.");
+    csds_error("Trying to read the non existing first derivative.");
   }
   if (derivative == 2 && field_read->second.field == field_enum_none) {
-    error("Trying to read the non existing second derivative.");
+    csds_error("Trying to read the non existing second derivative.");
   }
 #endif
 
@@ -82,8 +82,9 @@ INLINE static size_t csds_particle_read_field(
 
 #ifdef CSDS_DEBUG_CHECKS
   if (position < 0) {
-    error("Position not set (derivative " << derivative << " of "
-          << field_enum_get_name(field_read->field.field) << ")");
+    csds_error("Position not set (derivative "
+               << derivative << " of "
+               << field_enum_get_name(field_read->field.field) << ")");
   }
 #endif
 
@@ -131,7 +132,7 @@ INLINE static enum csds_special_flags csds_unpack_flags_and_data(
 
 #ifdef CSDS_DEBUG_CHECKS
   if (*part_type >= csds_type_count || *part_type < 0) {
-    error("Invalid particle type found:" << *part_type);
+    csds_error("Invalid particle type found:" << *part_type);
   }
 #endif
   return flag;
@@ -150,7 +151,7 @@ INLINE static enum csds_special_flags csds_unpack_flags_and_data(
  */
 INLINE static enum csds_special_flags csds_particle_read_special_flag(
     size_t offset, mask_type *mask, size_t *h_offset, int *data, int *part_type,
-    struct mapped_file *map) {
+    struct mapped_file &map) {
 
   *mask = 0;
   *h_offset = 0;
@@ -161,7 +162,7 @@ INLINE static enum csds_special_flags csds_particle_read_special_flag(
 #ifdef CSDS_DEBUG_CHECKS
   /* Check if it is not a time record. */
   if (*mask == CSDS_TIMESTAMP_MASK) {
-    error("Cannot read a particle from timestep record.");
+    csds_error("Cannot read a particle from timestep record.");
   }
 #endif
 
@@ -228,7 +229,7 @@ INLINE static void csds_particle_interpolate_field(
       /* Constant float term */
     case field_enum_birth_time:
       if (*(float *)after->field != *(float *)before->field) {
-        error("Interpolating different particles.");
+        csds_error("Interpolating different particles.");
       }
       *(float *)output = *(float *)after->field;
       break;
@@ -237,7 +238,7 @@ INLINE static void csds_particle_interpolate_field(
     case field_enum_particles_ids:
       /* Ensure to have the same particle */
       if (*(long long *)after->field != *(long long *)before->field) {
-        error("Interpolating different particles.");
+        csds_error("Interpolating different particles.");
       }
       *(long long *)output = *(long long *)after->field;
       break;
@@ -259,7 +260,7 @@ INLINE static void csds_particle_interpolate_field(
       long long *id = (long long *)(x + 2);
       *id = *(long long *)(aft + 2);
       if (*id != *(long long *)(bef + 2)) {
-        error("Interpolating different particles.");
+        csds_error("Interpolating different particles.");
       }
       break;
     }
@@ -307,8 +308,8 @@ INLINE static void csds_particle_interpolate_field(
     }
 
     default:
-      error("Field " << field_enum_get_name(field->field.field)
-            << " not implemented");
+      csds_error("Field " << field_enum_get_name(field->field.field)
+                          << " not implemented");
   }
 }
 
diff --git a/src/csds_python_wrapper.cpp b/src/csds_python_wrapper.cpp
index 08174c53ce483ae5f137f3105b6d3b1f3c91fc7b..6b106949ad17af70d7a2309e84f9032cab23b018 100644
--- a/src/csds_python_wrapper.cpp
+++ b/src/csds_python_wrapper.cpp
@@ -17,20 +17,504 @@
  *
  ******************************************************************************/
 
-// /* Standard headers */
-// #include <boost/python.hpp>
-// #include <boost/python/numpy.hpp>
+/* Standard headers */
+#include <boost/python.hpp>
+#include <boost/python/numpy.hpp>
+#include <boost/python/stl_iterator.hpp>
+#include <utility>
 
-// /* Local headers */
-// #include "csds_reader.hpp"
+/* This file's header */
+#include "csds_python_wrapper.hpp"
 
+/* Local headers */
+#include "csds_array.hpp"
+#include "csds_part_type.hpp"
+#include "csds_reader.hpp"
 
-// namespace p = boost::python;
-// namespace np = boost::python::numpy;
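+/* Helper: convert any python object into a std::string (via str()); only
+   used below to format error messages. */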
+#define PYTHON_STR(INPUT) bp::extract<std::string>(bp::str(INPUT))()
 
-// BOOST_PYTHON_MODULE(libcsds) {
-//   using namespace boost::python;
+BOOST_PYTHON_MODULE(libcsds) {
+  using namespace boost::python;
 
-//   Py_Initialize();
-//   np::initialize();
-// }
+  Py_Initialize();
+  np::initialize();
+
+  class_<PyReader>(
+      "Reader",
+      "Deals with a CSDS file and provides the data through"
+      " its different methods. The reader is really open with the method "
+      "__enter__"
+      "(called by `with ...``). Outside the area delimited by with, "
+      "the CSDS is not accessible.\n\n"
+      "Methods\n"
+      "-------\n\n"
+      "get_time_begin\n"
+      "    Provides the initial time of the simulation\n"
+      "get_time_end\n"
+      "    Provides the final time of the simulation\n"
+      "get_time_limits\n"
+      "    Provides the initial and final time of the simulation\n"
+      "get_data\n"
+      "    Reads some particle's data from the logfile\n\n"
+      "update_particles\n"
+      "    Update the particles in the cache and returns them\n\n"
+      "Examples\n"
+      "--------\n\n"
+      ">>> import libcsds as csds\n"
+      ">>> with csds.Reader(\"index_0000\") as reader:\n"
+      ">>>    t0, t1 = reader.get_time_begin(), reader.get_time_end()\n"
+      ">>>    fields = reader.get_list_fields()\n"
+      ">>>    if \"Coordinates\" not in fields:\n"
+      ">>>        raise Exception(\"Field Coordinates not present in the "
+      "logfile.\")\n"
+      ">>>    pos, ent = reader.get_data([\"Coordinates\", "
+      "\"Entropies\"]"
+      ", 0.5 * (t0 + t1))\n",
+      init<std::string, int, int, bool, bool>(
+          (arg("self"), arg("basename"), arg("verbose") = 0,
+           arg("number_index") = 10, arg("restart_init") = false,
+           arg("use_cache") = false),
+          "Parameters\n"
+          "----------\n\n"
+          "basename: str\n"
+          "    Basename of the logfile.\n\n"
+          "verbose: int (optional)\n"
+          "    Verbose level\n\n"
+          "number_index: int (optional)\n"
+          "    Number of index files to generate\n\n"
+          "restart_init: bool (optional)\n"
+          "    Are we restarting an initialization?\n\n"
+          "use_cache: bool (optional)\n"
+          "    Use a cache for the evolution of particles?\n\n"))
+      /* Methods */
+      .def("get_time_begin", &PyReader::GetTimeBegin,
+           "Read the initial time of the simulation.\n\n"
+           "Returns\n"
+           "-------\n\n"
+           "time: float\n"
+           "  The initial time\n")
+      .def("get_time_end", &PyReader::GetTimeEnd,
+           "Read the final time of the simulation.\n\n"
+           "Returns\n"
+           "-------\n\n"
+           "time: float\n"
+           "  The initial time\n")
+      .def("get_time_limits", &PyReader::GetTimeLimits,
+           "Read the initial and final time of the simulation.\n\n"
+           "Returns\n"
+           "-------\n\n"
+           "t0: float\n"
+           "  The initial time\n"
+           "t1: float\n"
+           "  The final time\n")
+      .def("get_list_fields", &PyReader::GetListFields,
+           (arg("self"), arg("part_type") = bp::object()),
+           "Read the list of available fields in the logfile.\n\n"
+           "Parameters\n"
+           "----------\n\n"
+           "type: int, list\n"
+           "  The particle type for the list of fields\n\n"
+           "Returns\n"
+           "-------\n"
+           "fields: tuple\n"
+           "  The list of fields present in the logfile.\n")
+      .def("get_data", &PyReader::GetData,
+           (arg("self"), arg("fields"), arg("time"),
+            arg("part_type") = bp::object(),
+            arg("filter_by_ids") = bp::object()),
+           "Read some fields from the logfile at a given time.\n\n"
+           "Parameters\n"
+           "----------\n\n"
+           "fields: list\n"
+           "  The list of fields (e.g. 'Coordinates', 'Entropies', ...)\n\n"
+           "time: float\n"
+           "  The time at which the fields must be read.\n\n"
+           "filter_by_ids: bool\n"
+           "  Read only the IDs provided.\n\n"
+           "part_type: int, list"
+           "  Types to read (default all). This can be a single int or a list"
+           " of boolean where the index corresponds to the particle type.\n\n"
+           "Returns\n"
+           "-------\n\n"
+           "list_of_fields: dict\n"
+           "  The key is given by the fields name and it contains a numpy array for each field.\n")
+      .def("update_particles", &PyReader::UpdateParticles,
+           (arg("self"), arg("fields"), arg("time")),
+           "Update the particles read with get_data using the cache.\n\n"
+           "Parameters\n"
+           "----------\n\n"
+           "fields: list\n"
+           "  The list of fields (e.g. 'Coordinates', 'Entropies', ...)\n\n"
+           "time: float\n"
+           "  The time at which the fields must be read.\n\n"
+           "Returns\n"
+           "-------\n\n"
+           "list_of_fields: dict\n"
+           "  The key is given by the fields name and it contains a numpy array for each field.\n")
+      .def("__enter__", &PyReader::Enter)
+      .def("__exit__", &PyReader::Exit)
+      /* Attributes */
+      .add_property("verbose", &PyReader::GetVerbose, &PyReader::SetVerbose)
+      .def_readonly("basename", &PyReader::mBasename)
+      .def_readonly("number_index", &PyReader::mNumberIndex)
+      .def_readonly("restart_init", &PyReader::mRestartInit)
+      .def_readonly("use_cache", &PyReader::mUseCache);
+
+  /* Now the particle types */
+  auto test = enum_<part_type>("particle_types");
+  for (int i = 0; i < csds_type_count + 1; i++) {
+    test.value(part_type_names[i], (part_type)i);
+  }
+  test.export_values();
+}
+
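+/* Usage sketch (python side, assuming the module is built as libcsds):
+ *
+ *   >>> import libcsds as csds
+ *   >>> with csds.Reader("index_0000", use_cache=True) as reader:
+ *   >>>     fields = reader.get_list_fields(part_type=0)  # gas only
+ *   >>>     data = reader.get_data(fields, reader.get_time_begin())
+ *   >>>     data = reader.update_particles(fields, reader.get_time_end())
+ */
+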
+/**
+ * @brief Read the particles from a file.
+ *
+ * @param py_fields The list of fields to read.
+ * @param time The requested time.
+ * @param py_part_type The particle types to read (None in order to read all of them).
+ * @param py_ids The IDs of the particles to read (None in order to read everything).
+ *
+ * @return The fields
+ */
+bp::dict PyReader::GetData(bp::list &py_fields, double time,
+                           bp::object &py_part_type, bp::object &py_ids) {
+  if (this->mReader == NULL) {
+    csds_error("You need to open the reader with the keyword 'with'");
+  }
+
+  /* Check the input */
+  const bool use_ids = !py_ids.is_none();
+  if (use_ids && !py_part_type.is_none()) {
+    csds_error("Cannot provide both the type of particles and the ids");
+  }
+
+  /* Convert the fields */
+  std::vector<field_enum> fields = GetFieldsFromNames(py_fields);
+  std::array<bool, csds_type_count> part_type =
+      GetParticleTypesFromObject(py_part_type);
+  const int n_fields = len(py_fields);
+
+  /* Convert the IDs */
+  bp::list py_ids_list;
+  if (use_ids) {
+    bp::extract<bp::list> get_ids(py_ids);
+    if (!get_ids.check()) {
+      csds_error("You need to provide a list for the IDs");
+    }
+    py_ids_list = get_ids();
+    if (len(py_ids_list) != csds_type_count) {
+      csds_error("The list of Ids should have a length of " << csds_type_count);
+    }
+  }
+
+  /* Set the time. */
+  mReader->SetTime(time);
+
+  /* Get the number of particles. */
+  std::array<uint64_t, csds_type_count> n_part;
+  if (use_ids) {
+    for (int i = 0; i < csds_type_count; i++) {
+      bp::extract<bp::numpy::ndarray> get_array(py_ids_list[i]);
+      if (!get_array.check()) {
+        /* Do we need this type? object defaults to None */
+        if (py_ids_list[i] == bp::object()) {
+          n_part[i] = 0;
+          continue;
+        }
+
+        csds_error(
+            "The list of ids should contain numpy arrays or None."
+            " Found: "
+            << PYTHON_STR(py_ids_list[i]));
+      }
+      bp::numpy::ndarray array = get_array();
+      n_part[i] = len(array);
+    }
+  }
+  /* If no ids are provided, use the index files instead */
+  else {
+    /* Read everything from the index files */
+    mReader->GetNumberParticles(n_part, part_type);
+  }
+
+  /* Count the total number of particles. */
+  uint64_t n_tot = 0;
+  for (int i = 0; i < csds_type_count; i++) {
+    n_tot += n_part[i];
+  }
+
+  /* Print the number of particles */
+  if (mReader->mVerbose > 0) {
+    message("Found " << n_tot << " particles");
+  }
+
+  /* Allocate the output memory. */
+  std::vector<CsdsArray> output;
+  output.reserve(n_fields);
+  for (int i = 0; i < n_fields; i++) {
+    output.emplace_back(n_tot, fields[i]);
+  }
+
+  /* Read the particles. */
+  if (use_ids) {
+    /* Get the arrays in a C compatible way */
+    std::array<long long *, csds_type_count> part_ids;
+    for (int i = 0; i < csds_type_count; i++) {
+
+      /* Skip unnecessary types */
+      if (n_part[i] == 0) {
+        part_ids[i] = NULL;
+        continue;
+      }
+
+      /* Now get the array */
+      bp::extract<bp::numpy::ndarray> get_array(py_ids_list[i]);
+      bp::numpy::ndarray array = get_array();
+      part_ids[i] = (long long *)array.get_data();
+    }
+
+    /* Enable multithreading */
+    Py_BEGIN_ALLOW_THREADS;
+
+    /* Read the particles. */
+    mReader->ReadParticlesFromIds(time, fields, output, n_part, part_ids);
+
+    /* Disable multithreading */
+    Py_END_ALLOW_THREADS;
+
+    /* Recompute n_tot for the particles removed */
+    n_tot = 0;
+    for (int i = 0; i < csds_type_count; i++) {
+      n_tot += n_part[i];
+    }
+  } else {
+    /* Enable multithreading */
+    Py_BEGIN_ALLOW_THREADS;
+
+    mReader->ReadAllParticles(time, fields, output, n_part);
+
+    /* Disable multithreading */
+    Py_END_ALLOW_THREADS;
+  }
+
+  /* Create the python output. */
+  bp::dict out;
+
+  for (int i = 0; i < n_fields; i++) {
+    out[py_fields[i]] = output[i].ToNumpy(/* stealing_buffer */ true);
+  }
+
+  return out;
+}
+
+/**
+ * @brief Update the particles previously read with GetData, using the cache.
+ *
+ * @param py_fields The list of fields to read.
+ * @param time The requested time.
+ *
+ * @return The fields
+ */
+bp::dict PyReader::UpdateParticles(bp::list &py_fields, double time) {
+  if (this->mReader == NULL) {
+    csds_error("You need to open the reader with the keyword 'with'");
+  }
+
+  /* Convert the fields */
+  std::vector<field_enum> fields = GetFieldsFromNames(py_fields);
+  bp::object py_part_type = bp::object();
+  std::array<bool, csds_type_count> part_type =
+      GetParticleTypesFromObject(py_part_type);
+  const int n_fields = len(py_fields);
+
+  /* Set the time. */
+  mReader->SetTime(time);
+
+  /* Get the number of particles. */
+  std::array<uint64_t, csds_type_count> n_part;
+  mReader->GetNumberParticles(n_part, part_type);
+
+  /* Count the total number of particles. */
+  uint64_t n_tot = 0;
+  for (int i = 0; i < csds_type_count; i++) {
+    n_tot += n_part[i];
+  }
+
+  /* Check if we have enough cache */
+  for (int i = 0; i < csds_type_count; i++) {
+    if (n_part[i] != 0 && n_part[i] >= (uint64_t)mReader->mCache.capacity[i]) {
+      csds_error("Not enough memory for particle type "
+                 << part_type_names[i]
+                 << ". Are you sure that you are requesting the correct type?"
+                    " You can try to increase Cache:OverAllocation or"
+                    " Cache:InitialAllocation");
+    }
+  }
+
+  /* Print the number of particles */
+  if (mReader->mVerbose > 0) {
+    message("Found " << n_tot << " particles");
+  }
+
+  /* Allocate the output memory. */
+  std::vector<CsdsArray> output;
+  output.reserve(n_fields);
+  for (int i = 0; i < n_fields; i++) {
+    output.emplace_back(n_tot, fields[i]);
+  }
+
+  /* Read the particles. */
+
+  /* Enable multithreading */
+  Py_BEGIN_ALLOW_THREADS;
+
+  mReader->UpdateParticles(time, fields, output, n_part);
+
+  /* Disable multithreading */
+  Py_END_ALLOW_THREADS;
+
+  /* Create the python output. */
+  bp::dict out;
+
+  for (int i = 0; i < n_fields; i++) {
+    out[py_fields[i]] = output[i].ToNumpy(/* stealing_buffer */ true);
+  }
+
+  return out;
+}
+
+/**
+ * @brief Provides the list of fields available in the logfile.
+ *
+ * @param part_type The particle type (int or list of int)
+ *
+ * @return The list of fields by name.
+ */
+bp::list PyReader::GetListFields(bp::object &part_type) {
+  if (this->mReader == NULL) {
+    csds_error("You need to open the reader with the keyword 'with'");
+  }
+
+  /* Convert the inputs */
+  std::array<bool, csds_type_count> types =
+      GetParticleTypesFromObject(part_type);
+
+  /* Inputs are done, now get the fields */
+  const struct header *h = &mReader->mLog->header;
+
+  /* Find if the fields are present in all the types. */
+  std::vector<int> field_present(field_enum_count, 1);
+  for (int i = 0; i < field_enum_count; i++) {
+    /* Check all the types */
+    for (int type = 0; type < csds_type_count; type++) {
+      /* Skip the types that are not required
+         and the field not found */
+      if (types[type] == 0 || field_present[i] == 0) continue;
+
+      /* Search among all the fields of the particle type */
+      int found = 0;
+      for (int j = 0; j < h->number_fields[type]; j++) {
+        if (i == h->fields[type][j].field.field) {
+          found = 1;
+          break;
+        }
+      }
+
+      /* Set the field as not found */
+      if (!found) field_present[i] = 0;
+    }
+  }
+
+  /* Remove the special flags */
+  field_present[field_enum_special_flags] = 0;
+
+  /* Count the number of fields found */
+  int number_fields = 0;
+  for (int i = 0; i < field_enum_count; i++) {
+    number_fields += field_present[i];
+  }
+
+  /* Create the python list for the output*/
+  bp::list list;
+  for (int i = 0; i < field_enum_count; i++) {
+    /* Keep only the field present. */
+    if (field_present[i] == 0) continue;
+
+    list.append(field_enum_get_name((field_enum)i));
+  }
+
+  return list;
+}
+
+/**
+ * @brief Convert the python fields into a C++ vector
+ *
+ * @param fields The list of fields to read in the form of strings.
+ *
+ * @return The list of fields to read as enum.
+ */
+std::vector<field_enum> PyReader::GetFieldsFromNames(bp::list &fields) {
+
+  /* Get the field enum from the header. */
+  std::vector<field_enum> enums(len(fields));
+  for (int i = 0; i < len(fields); i++) {
+    /* Get an item from the list. */
+    bp::extract<std::string> get_string(fields[i]);
+    if (!get_string.check()) {
+      csds_error(
+          "You need to provide a field name, found: " << PYTHON_STR(fields[i]));
+    }
+
+    /* Set the enum */
+    enums[i] = field_enum_get(get_string().data());
+  }
+
+  return enums;
+}
+
+/**
+ * @brief Convert the requested particle types into a C++ array
+ *
+ * @param py_types The object representing the types (None, int or list).
+ *
+ * @return An array telling, for each particle type, whether it should be read
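+ *
+ * For instance (illustrative sketch): None selects every type, 1 selects
+ * only the dark matter, and [0, 4] selects the gas and the stars.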
+ */
+std::array<bool, csds_type_count> PyReader::GetParticleTypesFromObject(
+    bp::object &py_types) {
+
+  bp::extract<int> get_type_int(py_types);
+  bp::extract<bp::list> get_type_list(py_types);
+  std::array<bool, csds_type_count> types;
+  for (int i = 0; i < csds_type_count; i++) {
+    types[i] = false;
+  }
+  /* The user does not provide anything */
+  if (py_types.is_none()) {
+    for (int i = 0; i < csds_type_count; i++) {
+      types[i] = true;
+    }
+  }
+  /* The user provides a single int */
+  else if (get_type_int.check()) {
+    types[get_type_int()] = true;
+  }
+  /* The user provides a list */
+  else if (get_type_list.check()) {
+    bp::list list_type = get_type_list();
+    for (int i = 0; i < len(list_type); i++) {
+      bp::extract<int> extract_int(list_type[i]);
+      if (!extract_int.check())
+        csds_error("Unexpected element: " << PYTHON_STR(list_type[i]));
+      types[extract_int()] = true;
+    }
+  }
+  /* Unexpected input */
+  else {
+    csds_error(
+        "Unexpected input for the particle type: " << PYTHON_STR(py_types));
+  }
+
+  return types;
+}
diff --git a/src/csds_python_wrapper.hpp b/src/csds_python_wrapper.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9f34f139d56d7b96b85e9e63c6e761f610c1536d
--- /dev/null
+++ b/src/csds_python_wrapper.hpp
@@ -0,0 +1,113 @@
+/*******************************************************************************
+ * This file is part of CSDS.
+ * Copyright (c) 2021 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+/* Standard headers */
+#include "boost/python/numpy.hpp"
+
+/* Include config.h */
+#include "config.hpp"
+
+/* Local headers */
+#include "csds_reader.hpp"
+
+namespace bp = boost::python;
+namespace np = boost::python::numpy;
+
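+/**
+ * @brief Python-facing wrapper around the #Reader.
+ *
+ * The underlying #Reader is only allocated in Enter (python's __enter__)
+ * and freed in Exit, so the logfile is only accessible inside a "with"
+ * block on the python side.
+ */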
+class PyReader {
+
+ public:
+  PyReader(std::string basename, int verbose = 0, int number_index = 10,
+           bool restart_init = true, bool use_cache = false)
+      : mBasename(basename),
+        mNumberIndex(number_index),
+        mRestartInit(restart_init),
+        mUseCache(use_cache),
+        mVerbose(verbose),
+        mReader(NULL){};
+
+  /** @brief Set the verbose level (property in python) */
+  void SetVerbose(int verbose) {
+    mVerbose = verbose;
+    if (mReader) {
+      mReader->mVerbose = verbose;
+    }
+  }
+
+  /** @brief Get the verbose level (property in python) */
+  int GetVerbose() { return mVerbose; }
+
+  /** @brief Defines the __enter__ method in python. */
+  static bp::object Enter(boost::python::object self) {
+
+    PyReader &myself = bp::extract<PyReader &>(self);
+
+    myself.mReader =
+        new Reader(myself.mBasename, myself.mVerbose, myself.mNumberIndex,
+                   myself.mRestartInit, myself.mUseCache);
+    return self;
+  };
+
+  /** @brief Defines the __exit__ method in python */
+  bool Exit(ATTR_UNUSED boost::python::object type,
+            ATTR_UNUSED boost::python::object value,
+            ATTR_UNUSED boost::python::object traceback) {
+    delete mReader;
+    mReader = NULL;
+    return false; /* Do not suppress the exception */
+  };
+
+  /** @brief Returns the initial time of the simulation */
+  double GetTimeBegin() {
+    if (mReader == NULL)
+      csds_error("Reader not initialized (done in the 'with' statement).");
+    return mReader->GetTimeBegin();
+  }
+
+  /** @brief Returns the final time of the simulation */
+  double GetTimeEnd() {
+    if (mReader == NULL)
+      csds_error("Reader not initialized (done in the 'with' statement).");
+    return mReader->GetTimeEnd();
+  }
+
+  /** @brief Returns both the initial and final time of the simulation */
+  bp::tuple GetTimeLimits() {
+    if (mReader == NULL)
+      csds_error("Reader not initialized (done in the 'with' statement).");
+    return bp::make_tuple(mReader->GetTimeBegin(), mReader->GetTimeEnd());
+  }
+
+  bp::dict GetData(bp::list &fields, double time, bp::object &part_type,
+                   bp::object &ids);
+  bp::dict UpdateParticles(bp::list &fields, double time);
+  bp::list GetListFields(bp::object &part_type);
+
+  std::string mBasename;
+  int mNumberIndex;
+  bool mRestartInit;
+  bool mUseCache;
+
+ protected:
+  std::vector<field_enum> GetFieldsFromNames(bp::list &fields);
+  std::array<bool, csds_type_count> GetParticleTypesFromObject(
+      bp::object &types);
+
+  int mVerbose;
+  Reader *mReader;
+};
diff --git a/src/csds_quick_sort.cpp b/src/csds_quick_sort.cpp
index 20cc3da135c74a9b552aacc3c179d16404282679..72cd0d85b2c43c89cf23f17aeb751c2139aa26f5 100644
--- a/src/csds_quick_sort.cpp
+++ b/src/csds_quick_sort.cpp
@@ -48,7 +48,7 @@ void quick_sort(struct index_data *data, size_t N) {
   qstack[0].hi = N - 1;
   qpos = 0;
   while (qpos >= 0) {
-    if (qpos >= stack_size) error("Quick sort stack too small");
+    if (qpos >= stack_size) csds_error("Quick sort stack too small");
     lo = qstack[qpos].lo;
     hi = qstack[qpos].hi;
     qpos -= 1;
@@ -101,26 +101,26 @@ void quick_sort(struct index_data *data, size_t N) {
       if (j > (lo + hi) / 2) {
         if (lo < j) {
           qpos += 1;
-          if (qpos >= stack_size) error("Quick sort stack too small");
+          if (qpos >= stack_size) csds_error("Quick sort stack too small");
           qstack[qpos].lo = lo;
           qstack[qpos].hi = j;
         }
         if (i < hi) {
           qpos += 1;
-          if (qpos >= stack_size) error("Quick sort stack too small");
+          if (qpos >= stack_size) csds_error("Quick sort stack too small");
           qstack[qpos].lo = i;
           qstack[qpos].hi = hi;
         }
       } else {
         if (i < hi) {
           qpos += 1;
-          if (qpos >= stack_size) error("Quick sort stack too small");
+          if (qpos >= stack_size) csds_error("Quick sort stack too small");
           qstack[qpos].lo = i;
           qstack[qpos].hi = hi;
         }
         if (lo < j) {
           qpos += 1;
-          if (qpos >= stack_size) error("Quick sort stack too small");
+          if (qpos >= stack_size) csds_error("Quick sort stack too small");
           qstack[qpos].lo = lo;
           qstack[qpos].hi = j;
         }
diff --git a/src/csds_reader.cpp b/src/csds_reader.cpp
index 42e5c10691d2a25c5ee575dfb33bc2cba2ba2efd..f70e8bb69031bf5190982ab02f9d1c879826012c 100644
--- a/src/csds_reader.cpp
+++ b/src/csds_reader.cpp
@@ -19,8 +19,8 @@
 
 /* Include standard library */
 #include <chrono>
-#include <iostream>
 #include <fstream>
+#include <iostream>
 
 /* Include corresponding header */
 #include "csds_reader.hpp"
@@ -41,9 +41,8 @@ using namespace std;
  * @param restart_init Are we restarting the initial reading?
  * @param use_cache Are we using the cache?
  */
-Reader::Reader(const string basename,
-               int verbose, int number_index, bool restart_init,
-               bool use_cache) {
+Reader::Reader(const string basename, int verbose, int number_index,
+               bool restart_init, bool use_cache) {
   if (verbose > 1) message("Initializing the reader.");
 
   /* Set the variable to the default values */
@@ -60,16 +59,13 @@ Reader::Reader(const string basename,
   /* Generate filename */
   string params_filename = this->mBasename;
   /* Remove the rank number */
-  params_filename.erase(params_filename.size() - 5,
-                        params_filename.size());
+  params_filename.erase(params_filename.size() - 5, params_filename.size());
   params_filename += ".yml";
-  csds_parameters_init(&this->mParams, params_filename,
-                       this->mVerbose);
+  csds_parameters_init(&this->mParams, params_filename, this->mVerbose);
 
   /* Initialize the log file. */
   csds_logfile_init_from_file(this->mLog, basename,
-                              /* only_header */ 0,
-                              this->mVerbose);
+                              /* only_header */ 0, this->mVerbose);
 
   /* Initialize the index files and creates them if needed */
   this->InitIndex(number_index, restart_init);
@@ -112,7 +108,7 @@ void Reader::InitIndex(int number_index, int restart) {
 
   /* Ensures that we do not have an unexpected situation */
   if (number_index == 1 || (count == 1 && !restart)) {
-    error(
+    csds_error(
         "The CSDS cannot run with only one index file."
         " Did you forget to restart their generation?");
   }
@@ -123,20 +119,19 @@ void Reader::InitIndex(int number_index, int restart) {
     count = number_index;
   } else if (this->mVerbose > 0 && count != number_index) {
     message("Found " << count << "index files. If you wish"
-            << "to have the correct number of files,"
-            << "please delete the old ones.");
+                     << " to have the correct number of files,"
+                     << " please delete the old ones.");
   }
 
   this->mIndex.n_files = count;
 
   /* Initialize the arrays */
-  if ((this->mIndex.times = (double *)malloc(count * sizeof(double))) ==
-      NULL) {
-    error("Failed to allocate the list of times");
+  if ((this->mIndex.times = (double *)malloc(count * sizeof(double))) == NULL) {
+    csds_error("Failed to allocate the list of times");
   }
   if ((this->mIndex.int_times =
            (integertime_t *)malloc(count * sizeof(integertime_t))) == NULL) {
-    error("Failed to allocate the list of times");
+    csds_error("Failed to allocate the list of times");
   }
 
   /* Get the information contained in the headers */
@@ -144,8 +139,7 @@ void Reader::InitIndex(int number_index, int restart) {
     string filename = this->GetIndexName(i);
 
     /* Read the header */
-    csds_index_read_header(&this->mIndex.index_prev, filename,
-                           this->mVerbose);
+    csds_index_read_header(&this->mIndex.index_prev, filename, this->mVerbose);
 
     /* Save the required information */
     this->mIndex.times[i] = this->mIndex.index_prev.time;
@@ -194,10 +188,10 @@ void Reader::SetTime(double time) {
 
   /* Check if the time is meaningful */
   if (this->mIndex.times[left] > time || this->mIndex.times[right] < time)
-    error("The requested time " << time
-          << " is not within the index limits ["
-          << this->mIndex.times[left] << ", "
-          << this->mIndex.times[right] << "]");
+    csds_error("The requested time "
+               << time << " is not within the index limits ["
+               << this->mIndex.times[left] << ", " << this->mIndex.times[right]
+               << "]");
 
   /* Find the correct index */
   while (left != right) {
@@ -299,7 +293,7 @@ size_t Reader::CountNumberNewParticles(enum part_type part_type) {
 
   /* Check if the binary search is correct. */
   if (data[ret].offset > threshold) {
-    error("The binary search has failed.");
+    csds_error("The binary search has failed.");
   }
 
   /* We need the count, not the index. */
@@ -348,7 +342,7 @@ size_t Reader::CountNumberRemovedParticles(enum part_type part_type) {
 
   /* Check if the binary search is correct. */
   if (data[ret].offset > threshold) {
-    error("The binary search has failed.");
+    csds_error("The binary search has failed.");
   }
 
   /* We need the count, not the index. */
@@ -361,11 +355,13 @@ size_t Reader::CountNumberRemovedParticles(enum part_type part_type) {
  * @param n_parts (out) Number of particles at the time set in the reader.
  * @param read_types Should we read this type of particles?
  */
-void Reader::GetNumberParticles(uint64_t *n_parts,
-                                const int *read_types) {
-  for (int i = 0; i < csds_type_count; i++) {
+void Reader::GetNumberParticles(
+    std::array<uint64_t, csds_type_count> &n_parts,
+    const std::array<bool, csds_type_count> &read_types) {
+
+  for (size_t i = 0; i < n_parts.size(); i++) {
     /* Should we skip this type of particles? */
-    if (read_types[i] == 0) {
+    if (!read_types[i]) {
       n_parts[i] = 0;
       continue;
     }
@@ -373,8 +369,7 @@ void Reader::GetNumberParticles(uint64_t *n_parts,
     /* Count the number of particles present in the last index file. */
     const uint64_t count_prev = this->mIndex.index_prev.nparts[i];
     /* Count the number of particles that have been created since last index. */
-    const uint64_t count_new =
-        this->CountNumberNewParticles((enum part_type)i);
+    const uint64_t count_new = this->CountNumberNewParticles((enum part_type)i);
     /* Count the number of particles that have been removed since last index. */
     const uint64_t count_removed =
         this->CountNumberRemovedParticles((enum part_type)i);
@@ -388,39 +383,39 @@ void Reader::GetNumberParticles(uint64_t *n_parts,
  * @param fields_wanted The fields to convert.
  * @param fields_info_wanted (out) #field_information for the wanted fields
  * (need to be allocated).
- * @param n_fields_wanted Number of elements in fields_wanted,
- * local_fields_wanted and its derivatives.
  * @param type The type of the particle.
  */
-void Reader::GetFieldsWanted(const enum field_enum *fields_wanted,
-    const struct field_information **fields_found, const int n_fields_wanted,
-    enum part_type type) {
+void Reader::GetFieldsWanted(const std::vector<field_enum> &fields_wanted,
+                             std::vector<field_information *> &fields_info,
+                             enum part_type type) {
 
   const struct header *h = &this->mLog->header;
+  std::vector<bool> fields_found(fields_wanted.size());
 
   /* Mark fields_found in order to check that the fields are found. */
-  for (int i = 0; i < n_fields_wanted; i++) {
-    fields_found[i] = NULL;
+  for (size_t i = 0; i < fields_wanted.size(); i++) {
+    fields_found[i] = false;
   }
 
   /* Find the corresponding fields */
-  for (int i = 0; i < n_fields_wanted; i++) {
+  for (size_t i = 0; i < fields_wanted.size(); i++) {
 
     for (int j = 0; j < h->number_fields[type]; j++) {
       /* Copy the structure if the field is found.  */
       if (h->fields[type][j].field.field == fields_wanted[i]) {
-        fields_found[i] = &h->fields[type][j];
+        fields_info[i] = &h->fields[type][j];
+        fields_found[i] = true;
         break;
       }
     }
   }
 
   /* Check that we found the fields */
-  for (int i = 0; i < n_fields_wanted; i++) {
-    if (fields_found[i] == NULL) {
+  for (size_t i = 0; i < fields_wanted.size(); i++) {
+    if (!fields_found[i]) {
       const char *name = field_enum_get_name(fields_wanted[i]);
-      error("Field " << name << " not found in particle type "
-            << part_type_names[type]);
+      csds_error("Field " << name << " not found in particle type "
+                          << part_type_names[type]);
     }
   }
 }
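
GetFieldsWanted now fills a caller-provided vector of field_information pointers instead of a raw pointer array. A minimal caller sketch, assuming a Reader instance named reader; the actual field_enum values are illustrative, since only field_enum_none appears in this diff:

    /* Hypothetical caller: the result vector must be pre-sized to match
       fields_wanted, as ReadAllParticlesSingleType does below. */
    std::vector<field_enum> wanted = {/* e.g. positions, velocities */};
    std::vector<field_information *> info(wanted.size());
    reader.GetFieldsWanted(wanted, info, csds_type_gas);
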
@@ -429,7 +424,6 @@ void Reader::GetFieldsWanted(const enum field_enum *fields_wanted,
  * @brief Read a single particle.
  *
  * @param fields_wanted The list of fields to return.
- * @param n_fields_wanted The number of fields to return
  * @param output The pointers to the output arrays
  * @param offset_from_index The offset of the particle to read
  * @param output_index The index of the particle within the output arrays
@@ -437,8 +431,8 @@ void Reader::GetFieldsWanted(const enum field_enum *fields_wanted,
  * @param number_jumps (out) The number of jumps required to get the particle
  */
 int Reader::ReadParticle(double time,
-                         const struct field_information **fields_wanted,
-                         int n_fields_wanted, void **output,
+                         const std::vector<field_information *> &fields_wanted,
+                         std::vector<CsdsArray> &output,
                          size_t offset_from_index, size_t output_index,
                          enum part_type type, int *number_jumps) {
 
@@ -451,7 +445,7 @@ int Reader::ReadParticle(double time,
 
   /* Check if the offsets are correct. */
   if (offset_from_index > offset_time) {
-    error("Last offset is larger than the requested time.");
+    csds_error("Last offset is larger than the requested time.");
   }
 
   /* Record's header information */
@@ -469,18 +463,18 @@ int Reader::ReadParticle(double time,
   while (offset < offset_time) {
     *number_jumps += 1;
     /* Read the particle. */
-    csds_mapped_file_read_mask(&this->mLog->log, offset, &mask, &h_offset);
+    csds_mapped_file_read_mask(this->mLog->log, offset, &mask, &h_offset);
 
     /* Is the particle removed from the logfile? */
     if (mask & CSDS_SPECIAL_FLAGS_MASK) {
       int data = 0;
       int part_type = 0;
       enum csds_special_flags flag = csds_particle_read_special_flag(
-          offset, &mask, &h_offset, &data, &part_type, &this->mLog->log);
+          offset, &mask, &h_offset, &data, &part_type, this->mLog->log);
 
 #ifdef CSDS_DEBUG_CHECKS
       if (part_type != type) {
-        error("Found particles that do not correspond.");
+        csds_error("Found particles that do not correspond.");
       }
 #endif
 
@@ -492,7 +486,7 @@ int Reader::ReadParticle(double time,
 
     /* Check if there is a next record (avoid infinite loop). */
     if (h_offset == 0) {
-      error("No next record found.");
+      csds_error("No next record found.");
     }
 
     /* Go to the next record. */
@@ -505,16 +499,15 @@ int Reader::ReadParticle(double time,
   offset -= h_offset;
 
   /* Loop over each field. */
-  for (int field = 0; field < n_fields_wanted; field++) {
+  for (size_t field = 0; field < fields_wanted.size(); field++) {
     const struct field_information *current_field = fields_wanted[field];
-    void *current_output =
-        (char *)output[field] + output_index * current_field->field.size;
+    char *current_output = output[field][output_index];
 
     /* Read the field */
     csds_particle_read_field(offset, current_output, current_field,
                              /* derivative */ 0, &mask, &h_offset,
                              h->fields[type], h->number_fields[type],
-                             &this->mLog->log);
+                             this->mLog->log);
 
     /* Deal with the first derivative. */
     int first_found = current_field->first.field != field_enum_none &&
@@ -526,7 +519,7 @@ int Reader::ReadParticle(double time,
       csds_particle_read_field(offset, first_deriv, current_field,
                                /* derivative */ 1, &mask, &h_offset,
                                h->fields[type], h->number_fields[type],
-                               &this->mLog->log);
+                               this->mLog->log);
     }
 
     /* Deal with the second derivative. */
@@ -539,7 +532,7 @@ int Reader::ReadParticle(double time,
       csds_particle_read_field(offset, second_deriv, current_field,
                                /* derivative */ 2, &mask, &h_offset,
                                h->fields[type], h->number_fields[type],
-                               &this->mLog->log);
+                               this->mLog->log);
     }
 
     /* Get the time. */
@@ -547,7 +540,7 @@ int Reader::ReadParticle(double time,
     double time_before = time_array_get_time(&this->mLog->times, offset);
 
     /* Get the mask */
-    csds_mapped_file_read_mask(&this->mLog->log, offset_next, &mask, &h_offset);
+    csds_mapped_file_read_mask(this->mLog->log, offset_next, &mask, &h_offset);
 
     /* Output after the requested time. */
     char output_after[current_field->field.size];
@@ -556,7 +549,7 @@ int Reader::ReadParticle(double time,
     csds_particle_read_field(offset_next, output_after, current_field,
                              /* derivative */ 0, &mask, &h_offset,
                              h->fields[type], h->number_fields[type],
-                             &this->mLog->log);
+                             this->mLog->log);
 
     /* Deal with the first derivative. */
     char first_deriv_after[current_field->first.size];
@@ -569,7 +562,7 @@ int Reader::ReadParticle(double time,
       csds_particle_read_field(offset_next, first_deriv_after, current_field,
                                /*derivative*/ 1, &mask, &h_offset,
                                h->fields[type], h->number_fields[type],
-                               &this->mLog->log);
+                               this->mLog->log);
     }
 
     /* Deal with the second derivative. */
@@ -583,7 +576,7 @@ int Reader::ReadParticle(double time,
       csds_particle_read_field(offset_next, second_deriv_after, current_field,
                                /* derivative */ 2, &mask, &h_offset,
                                h->fields[type], h->number_fields[type],
-                               &this->mLog->log);
+                               this->mLog->log);
     }
 
     /* Get the time. */
@@ -621,28 +614,20 @@ int Reader::ReadParticle(double time,
  *
  * @param time The requested time for the particle.
  * @param fields_wanted The fields requested.
- * @param n_fields_wanted Number of field requested.
- * @param output Pointer to the output array. Size: (n_fields_wanted,
- * sum(n_part)).
+ * @param output Pointer to the output array.
  * @param n_part Number of particles of each type.
  * @param type The particle type
  */
 void Reader::ReadAllParticlesSingleType(
-    double time,
-    const enum field_enum *fields_wanted, const int n_fields_wanted,
-    void **output, const uint64_t *n_part, enum part_type type) {
+    double time, const std::vector<field_enum> &fields_wanted,
+    std::vector<CsdsArray> &output,
+    const std::array<uint64_t, csds_type_count> &n_part, enum part_type type) {
 
   /* Allocate temporary memory. */
-  const struct field_information **fields_info_wanted =
-      (const struct field_information **)malloc(
-          sizeof(struct field_information *) * n_fields_wanted);
-  if (fields_info_wanted == NULL) {
-    error("Failed to allocate the field information.");
-  }
+  std::vector<field_information *> fields_info_wanted(fields_wanted.size());
 
   /* Convert fields into the local array. */
-  this->GetFieldsWanted(fields_wanted, fields_info_wanted,
-                        n_fields_wanted, type);
+  this->GetFieldsWanted(fields_wanted, fields_info_wanted, type);
 
   /* Count the number of previous parts for the shift in output */
   uint64_t prev_npart = 0;
@@ -652,11 +637,9 @@ void Reader::ReadAllParticlesSingleType(
 
   /* Create some information from the index files */
   const size_t size_index = this->mIndex.index_prev.nparts[type];
-  const size_t size_history =
-      this->CountNumberNewParticles(type);
+  const size_t size_history = this->CountNumberNewParticles(type);
 
-  struct index_data *data =
-      csds_index_get_data(&this->mIndex.index_prev, type);
+  struct index_data *data = csds_index_get_data(&this->mIndex.index_prev, type);
   struct index_data *data_created =
       csds_index_get_created_history(&this->mIndex.index_next, type);
 
@@ -667,7 +650,7 @@ void Reader::ReadAllParticlesSingleType(
   size_t total_number_jumps = 0;
   const int local_update_limit = 1000;
   const chrono::high_resolution_clock::time_point init =
-    chrono::high_resolution_clock::now();
+      chrono::high_resolution_clock::now();
 
   /* Read the particles */
 #pragma omp parallel for firstprivate(local_counter) reduction(+:total_number_jumps)
@@ -686,7 +669,7 @@ void Reader::ReadAllParticlesSingleType(
 
       /* Check if we still have some particles available. */
       if (reading_history && local_index == size_history + size_index) {
-        error("The CSDS was not able to find enough particles.");
+        csds_error("The CSDS was not able to find enough particles.");
       }
 
       /* Get the offset */
@@ -697,9 +680,8 @@ void Reader::ReadAllParticlesSingleType(
 
       /* Get the offset of the particle */
       int number_jumps = 0;
-      particle_removed = Reader::ReadParticle(
-          time, fields_info_wanted, n_fields_wanted, output, offset, i,
-          type, &number_jumps);
+      particle_removed = ReadParticle(time, fields_info_wanted, output, offset,
+                                      i, type, &number_jumps);
 
       /* Update the number of jumps */
       if (!particle_removed) {
@@ -731,14 +713,11 @@ void Reader::ReadAllParticlesSingleType(
 
   /* Print the number of jumps if required */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS) {
-    float avg =
-        (float)total_number_jumps / (float)(n_fields_wanted * n_part[type]);
-    message("Average number of jumps for " << part_type_names[type]
-            << ":" << avg);
+    float avg = (float)total_number_jumps /
+                (float)(fields_wanted.size() * n_part[type]);
+    message("Average number of jumps for " << part_type_names[type] << ":"
+                                           << avg);
   }
-
-  /* Free the memory */
-  free(fields_info_wanted);
 }
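
The parallel loop above keeps a firstprivate progress counter per thread and merges the per-thread jump counts through reduction(+:total_number_jumps). A standalone sketch of that OpenMP pattern, independent of the CSDS types (compile with -fopenmp):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const std::size_t n = 1000000;
      std::size_t total_jumps = 0; /* combined across threads by the reduction */
      int local_counter = 0;       /* each thread gets its own copy, initialised to 0 */

    #pragma omp parallel for firstprivate(local_counter) reduction(+ : total_jumps)
      for (std::size_t i = 0; i < n; i++) {
        local_counter++;      /* thread-local bookkeeping, e.g. for progress updates */
        total_jumps += i % 3; /* per-iteration contribution, summed at the end */
      }

      std::printf("total: %zu\n", total_jumps);
      return 0;
    }
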
 
 /**
@@ -746,38 +725,36 @@ void Reader::ReadAllParticlesSingleType(
  *
  * @param time The requested time for the particle.
  * @param fields_wanted The fields requested.
- * @param n_fields_wanted Number of field requested.
- * @param output Pointer to the output array. Size: (n_fields_wanted,
- * sum(n_part)).
+ * @param output Pointer to the output array.
  * @param n_part Number of particles of each type.
  */
-void Reader::ReadAllParticles(double time,
-                              const enum field_enum *fields_wanted,
-                              const int n_fields_wanted, void **output,
-                              const uint64_t *n_part) {
+void Reader::ReadAllParticles(
+    double time, const std::vector<field_enum> &fields_wanted,
+    std::vector<CsdsArray> &output,
+    const std::array<uint64_t, csds_type_count> &n_part) {
 
-  const chrono::high_resolution_clock::time_point init = chrono::high_resolution_clock::now();
+  const chrono::high_resolution_clock::time_point init =
+      chrono::high_resolution_clock::now();
 
   /* Prepare the cache */
   if (this->mUseCache) {
     csds_cache_free(&this->mCache);
-    csds_cache_init(&this->mCache, n_part, fields_wanted, n_fields_wanted,
+    csds_cache_init(&this->mCache, n_part, fields_wanted,
                     this->mParams.cache_over_alloc, &this->mLog->header,
                     this->mParams.cache_init_size, this->mTime.time_offset);
   }
 
   /* Read the particles */
-  for(int i = 0; i < csds_type_count; i++) {
+  for (int i = 0; i < csds_type_count; i++) {
     if (n_part[i] != 0) {
-      this->ReadAllParticlesSingleType(time, fields_wanted,
-                                       n_fields_wanted, output, n_part,
-                                       (part_type) i);
+      this->ReadAllParticlesSingleType(time, fields_wanted, output, n_part,
+                                       (part_type)i);
     }
   }
 
   /* Print the time */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
-    message("took " << GetDeltaTime(init) << "s");
+    message("took " << GetDeltaTime(init) << "ms");
 }
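
Together with GetNumberParticles, the new signatures make the read path explicit about array sizes. A hedged end-to-end sketch: the Reader constructor arguments follow csds_reader.hpp below, while the CsdsArray(count, field) constructor is only inferred from the buffers.emplace_back(1, ...) call later in this file and may differ from the real class:

    Reader reader("simulation", /* verbose */ 1, /* number_index */ 10,
                  /* restart_init */ false, /* use_cache */ false);
    reader.SetTime(0.5);

    /* Select the particle types to read. */
    std::array<bool, csds_type_count> read_types{};
    read_types[csds_type_gas] = true;

    std::array<uint64_t, csds_type_count> n_parts{};
    reader.GetNumberParticles(n_parts, read_types);

    uint64_t n_total = 0;
    for (uint64_t n : n_parts) n_total += n;

    /* One output array per requested field, sized for all selected particles. */
    std::vector<field_enum> wanted = {/* fields of interest */};
    std::vector<CsdsArray> output;
    output.reserve(wanted.size());
    for (field_enum f : wanted) output.emplace_back(n_total, f);

    reader.ReadAllParticles(0.5, wanted, output, n_parts);
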
 
 /**
@@ -785,9 +762,7 @@ void Reader::ReadAllParticles(double time,
  *
  * @param time The requested time for the particle.
  * @param fields_wanted The fields requested.
- * @param n_fields_wanted Number of field requested.
- * @param output Pointer to the output array. Size: (n_fields_wanted,
- * sum(n_part)).
+ * @param output Pointer to the output array.
  * @param n_part Number of particles of each type.
  * Updated with the number of particles found.
  * @param type The particle type
@@ -795,8 +770,10 @@ void Reader::ReadAllParticles(double time,
  * The ids not found are removed from this array.
  */
 void Reader::ReadParticlesFromIdsSingleType(
-    double time, const enum field_enum *fields_wanted, const int n_fields_wanted,
-    void **output, uint64_t *n_part, enum part_type type, long long *ids) {
+    double time, const std::vector<field_enum> &fields_wanted,
+    std::vector<CsdsArray> &output,
+    std::array<uint64_t, csds_type_count> &n_part, enum part_type type,
+    long long *ids) {
 
   /* Count the number of previous parts for the shift in output */
   uint64_t prev_npart = 0;
@@ -805,16 +782,10 @@ void Reader::ReadParticlesFromIdsSingleType(
   }
 
   /* Allocate temporary memory. */
-  const struct field_information **fields_info_wanted =
-      (const struct field_information **)malloc(
-          sizeof(struct field_information *) * n_fields_wanted);
-  if (fields_info_wanted == NULL) {
-    error("Failed to allocate the field information.");
-  }
+  std::vector<field_information *> fields_info_wanted(fields_wanted.size());
 
   /* Convert fields into the local array. */
-  this->GetFieldsWanted(fields_wanted, fields_info_wanted,
-                        n_fields_wanted, type);
+  this->GetFieldsWanted(fields_wanted, fields_info_wanted, type);
 
   /* Read the particles */
   size_t total_number_jumps = 0;
@@ -822,8 +793,8 @@ void Reader::ReadParticlesFromIdsSingleType(
 
     /* Get the offset */
     size_t offset = 0;
-    int found =
-        csds_index_get_offset(&this->mIndex.index_prev, ids[i], type, &offset);
+    int found = csds_index_get_offset(&this->mIndex.index_prev, (int64_t)ids[i],
+                                      type, &offset);
 
     /* Deal with the particles not found */
     if (!found) {
@@ -836,9 +807,9 @@ void Reader::ReadParticlesFromIdsSingleType(
     /* Read the particle */
     size_t output_index = i + prev_npart;
     int number_jumps = 0;
-    int particle_removed = this->ReadParticle(
-        time, fields_info_wanted, n_fields_wanted, output, offset,
-        output_index, type, &number_jumps);
+    int particle_removed =
+        this->ReadParticle(time, fields_info_wanted, output, offset,
+                           output_index, type, &number_jumps);
 
     total_number_jumps += number_jumps;
 
@@ -852,14 +823,11 @@ void Reader::ReadParticlesFromIdsSingleType(
 
   /* Print the number of jumps if required */
   if (this->mVerbose > 1 || this->mVerbose == CSDS_VERBOSE_TIMERS) {
-    float avg =
-        (float)total_number_jumps / (float)(n_fields_wanted * n_part[type]);
-    message("Average number of jumps for " << part_type_names[type]
-            << ": " << avg);
+    float avg = (float)total_number_jumps /
+                (float)(fields_wanted.size() * n_part[type]);
+    message("Average number of jumps for " << part_type_names[type] << ": "
+                                           << avg);
   }
-
-  /* Free the memory */
-  free(fields_info_wanted);
 }
 
 /**
@@ -867,42 +835,40 @@ void Reader::ReadParticlesFromIdsSingleType(
  *
  * @param time The requested time for the particle.
  * @param fields_wanted The fields requested.
- * @param n_fields_wanted Number of field requested.
- * @param output Pointer to the output array. Size: (n_fields_wanted,
- * sum(n_part)).
+ * @param output Pointer to the output array.
  * @param n_part Number of particles of each type.
  * Updated with the number of particles found.
  * @param ids The particles' ids.
  * The ids not found are removed from this array.
  */
 void Reader::ReadParticlesFromIds(
-    double time, const enum field_enum *fields_wanted,
-    const int n_fields_wanted, void **output, uint64_t *n_part,
-    long long **ids) {
+    double time, const std::vector<field_enum> &fields_wanted,
+    std::vector<CsdsArray> &output,
+    std::array<uint64_t, csds_type_count> &n_part,
+    std::array<long long *, csds_type_count> &ids) {
 
   const chrono::high_resolution_clock::time_point init =
-    chrono::high_resolution_clock::now();
+      chrono::high_resolution_clock::now();
 
   /* Prepare the cache */
   if (this->mUseCache) {
     csds_cache_free(&this->mCache);
-    csds_cache_init(&this->mCache, n_part, fields_wanted, n_fields_wanted,
+    csds_cache_init(&this->mCache, n_part, fields_wanted,
                     this->mParams.cache_over_alloc, &this->mLog->header,
                     this->mParams.cache_init_size, this->mTime.time_offset);
   }
 
   /* Read the particles */
-  for(int i = 0; i < csds_type_count; i++) {
+  for (int i = 0; i < csds_type_count; i++) {
     if (n_part[i] != 0) {
-      this->ReadParticlesFromIdsSingleType(
-          time, fields_wanted, n_fields_wanted, output, n_part,
-          (part_type) i, ids[i]);
+      this->ReadParticlesFromIdsSingleType(time, fields_wanted, output, n_part,
+                                           (part_type)i, ids[i]);
     }
   }
 
   /* Print the time */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
-    message("took " << GetDeltaTime(init) << "s");
+    message("took " << GetDeltaTime(init) << "ms");
 }
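
The by-ID variant takes one ID list per particle type and shrinks n_part in place to the number of IDs actually found. A short continuation of the sketch above (same reader, wanted and output; the IDs are hypothetical):

    std::vector<long long> gas_ids = {42, 128, 999};
    std::array<long long *, csds_type_count> ids{};
    ids[csds_type_gas] = gas_ids.data();

    std::array<uint64_t, csds_type_count> n_part{};
    n_part[csds_type_gas] = gas_ids.size();

    /* Missing IDs are removed from the array, as documented above. */
    reader.ReadParticlesFromIds(0.5, wanted, output, n_part, ids);
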
 
 /**
@@ -910,9 +876,7 @@ void Reader::ReadParticlesFromIds(
  *
  * @return The initial time
  */
-double Reader::GetTimeBegin() {
-  return this->mLog->times.records[0].time;
-}
+double Reader::GetTimeBegin() { return this->mLog->times.records[0].time; }
 
 /**
  * @brief Get the simulation final time.
@@ -954,7 +918,8 @@ size_t Reader::GetNextOffsetFromTime(double time) {
  *
  * @return The offset after the record.
  */
-size_t Reader::ReadRecord(void **output, double *time, int *is_particle, size_t offset) {
+size_t Reader::ReadRecord(void **output, double *time, int *is_particle,
+                          size_t offset) {
 
   /* Get a few pointers. */
   const struct header *h = &this->mLog->header;
@@ -963,7 +928,7 @@ size_t Reader::ReadRecord(void **output, double *time, int *is_particle, size_t
   size_t h_offset = 0;
 
   /* Read the record's mask. */
-  csds_mapped_file_read_mask(&this->mLog->log, offset, &mask, &h_offset);
+  csds_mapped_file_read_mask(this->mLog->log, offset, &mask, &h_offset);
 
   *is_particle = !(mask & CSDS_TIMESTAMP_MASK);
   /* The record is a particle. */
@@ -973,7 +938,7 @@ size_t Reader::ReadRecord(void **output, double *time, int *is_particle, size_t
       offset = csds_particle_read_field(
           offset_tmp, output[i], &h->fields[csds_type_gas][i],
           /* derivative */ 0, &mask, &h_offset, h->fields[csds_type_gas],
-          h->number_fields[csds_type_gas], &this->mLog->log);
+          h->number_fields[csds_type_gas], this->mLog->log);
     }
   }
   /* The record is a timestamp. */
@@ -1007,7 +972,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
 
   /* Check the index */
   if (index >= (size_t)cache->capacity[type]) {
-    error("Requesting an index too large");
+    csds_error("Requesting an index too large");
   }
 
   /* Get the time */
@@ -1023,7 +988,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
     /* Get the new offset */
     while (offset_next < offset_time) {
       /* Read the particle. */
-      csds_mapped_file_read_mask(&this->mLog->log, offset_next, &mask,
+      csds_mapped_file_read_mask(this->mLog->log, offset_next, &mask,
                                  &h_offset);
 
       /* Is the particle removed? */
@@ -1031,11 +996,11 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
         int data = 0;
         int part_type = 0;
         enum csds_special_flags flag = csds_particle_read_special_flag(
-            offset_next, &mask, &h_offset, &data, &part_type, &this->mLog->log);
+            offset_next, &mask, &h_offset, &data, &part_type, this->mLog->log);
 
 #ifdef CSDS_DEBUG_CHECKS
         if (part_type != type) {
-          error("Found particles that do not correspond.");
+          csds_error("Found particles that do not correspond.");
         }
 #endif
 
@@ -1047,7 +1012,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
 
       /* Check if there is a next record (avoid infinite loop). */
       if (h_offset == 0) {
-        error("No next record found.");
+        csds_error("No next record found.");
       }
 
       offset_next += h_offset;
@@ -1078,7 +1043,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
       csds_particle_read_field(offset_next, current_output, info,
                                /* derivative */ 0, &mask, &h_offset,
                                h->fields[type], h->number_fields[type],
-                               &this->mLog->log);
+                               this->mLog->log);
       current_output += info->field.size;
 
       /* Read the first derivative */
@@ -1086,7 +1051,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
         csds_particle_read_field(offset_next, current_output, info,
                                  /* derivative */ 1, &mask, &h_offset,
                                  h->fields[type], h->number_fields[type],
-                                 &this->mLog->log);
+                                 this->mLog->log);
         current_output += info->first.size;
       }
 
@@ -1095,7 +1060,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
         csds_particle_read_field(offset_next, current_output, info,
                                  /* derivative */ 2, &mask, &h_offset,
                                  h->fields[type], h->number_fields[type],
-                                 &this->mLog->log);
+                                 this->mLog->log);
       }
     }
   }
@@ -1141,28 +1106,20 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time,
  *
  * @param time The requested time for the particle.
  * @param fields_wanted The fields requested.
- * @param n_fields_wanted Number of field requested.
- * @param output Pointer to the output array. Size: (n_fields_wanted,
- * sum(n_part)).
+ * @param output Pointer to the output array.
  * @param n_part Number of particles of each type.
  * @param type The particle type
  */
 void Reader::UpdateParticlesSingleType(
-    double time, const enum field_enum *fields_wanted,
-    const int n_fields_wanted, void **output, const uint64_t *n_part,
-    enum part_type type) {
+    double time, const std::vector<field_enum> &fields_wanted,
+    std::vector<CsdsArray> &output,
+    const std::array<uint64_t, csds_type_count> &n_part, enum part_type type) {
 
   /* Allocate temporary memory. */
-  const struct field_information **fields_info_wanted =
-      (const struct field_information **)malloc(
-          sizeof(struct field_information *) * n_fields_wanted);
-  if (fields_info_wanted == NULL) {
-    error("Failed to allocate the field information.");
-  }
+  std::vector<field_information *> fields_info_wanted(fields_wanted.size());
 
   /* Convert fields into the local array. */
-  this->GetFieldsWanted(fields_wanted, fields_info_wanted,
-                        n_fields_wanted, type);
+  this->GetFieldsWanted(fields_wanted, fields_info_wanted, type);
 
   /* Count the number of previous parts for the shift in output */
   uint64_t prev_npart = 0;
@@ -1175,11 +1132,11 @@ void Reader::UpdateParticlesSingleType(
   offset_time = this->mLog->times.records[offset_time].offset;
 
   /* Get the field index */
-  int *field_index_in_cache = (int *)malloc(n_fields_wanted * sizeof(int));
+  int *field_index_in_cache = (int *)malloc(fields_wanted.size() * sizeof(int));
   if (field_index_in_cache == NULL) {
-    error("Failed to allocate the field index");
+    csds_error("Failed to allocate the field index");
   }
-  for (int field = 0; field < n_fields_wanted; field++) {
+  for (size_t field = 0; field < fields_wanted.size(); field++) {
     field_index_in_cache[field] =
         csds_cache_get_field_index(&this->mCache, fields_wanted[field], type);
   }
@@ -1190,7 +1147,7 @@ void Reader::UpdateParticlesSingleType(
     size_t ind = this->mCache.current_index[type];
     this->mCache.current_index[type]++;
 
-    for (int field = 0; field < n_fields_wanted; field++) {
+    for (size_t field = 0; field < fields_wanted.size(); field++) {
       /* Get the output */
       const struct field_information *current_field = fields_info_wanted[field];
       char current_output[current_field->field.size];
@@ -1207,7 +1164,7 @@ void Reader::UpdateParticlesSingleType(
       }
 
       /* Write the particle back into the array */
-      void *buffer = (char *)output[field] + part * current_field->field.size;
+      char *buffer = output[field][part];
       memcpy(buffer, current_output, current_field->field.size);
     }
   }
@@ -1219,7 +1176,7 @@ void Reader::UpdateParticlesSingleType(
       &this->mLog->times, this->mIndex.index_prev.time);
   offset_index = this->mLog->times.records[offset_index].offset;
   if (offset_index > this->mCache.last_time_offset) {
-    error("Requesting an update over more than one index file");
+    csds_error("Requesting an update over more than one index file");
   }
   offset_index = time_array_get_index_from_time(&this->mLog->times,
                                                 this->mIndex.index_next.time);
@@ -1260,32 +1217,36 @@ void Reader::UpdateParticlesSingleType(
 
   /* Get the size of the fields */
   int fields_size = 0;
-  for (int field = 0; field < n_fields_wanted; field++) {
+  for (size_t field = 0; field < fields_wanted.size(); field++) {
     fields_size += fields_info_wanted[field]->field.size;
   }
 
+  /* Create a temporary buffer */
+  std::vector<CsdsArray> buffers;
+  buffers.reserve(fields_wanted.size());
+  for (size_t i = 0; i < fields_wanted.size(); i++) {
+    buffers.emplace_back(1, fields_wanted[i]);
+  }
+
   /* Now read all the missing particles */
   for (size_t i = center; i < size_max; i++) {
     /* Stop as soon as we reach the required offset */
     if (index_data[i].offset > offset_time) break;
 
     /* Now read and store the particle in the cache */
-    char buffer[fields_size];
-    char *p_buffer[n_fields_wanted];
-    p_buffer[0] = (char *)&buffer;
-    for (int field = 1; field < n_fields_wanted; field++) {
-      p_buffer[field] =
-          p_buffer[field - 1] + fields_info_wanted[field]->field.size;
-    }
     int number_jumps = 0;
-    int removed = this->ReadParticle(
-        time, fields_info_wanted, n_fields_wanted, (void **)p_buffer,
-        index_data[i].offset, /* output_index */ 0, type, &number_jumps);
+    int removed = this->ReadParticle(time, fields_info_wanted, buffers,
+                                     index_data[i].offset, /* output_index */ 0,
+                                     type, &number_jumps);
 
     /* Deal with the case where the particle is removed */
     if (removed) {
       this->mCache.current_index[type]--;
+      continue;
     }
+
+    /* Write the particle back into the array */
+    // TODO
   }
 
   /* Do the same for the next index file */
@@ -1297,28 +1258,22 @@ void Reader::UpdateParticlesSingleType(
       if (index_data[i].offset > offset_time) break;
 
       /* Now read and store the particle in the cache */
-      char buffer[fields_size];
-      char *p_buffer[n_fields_wanted];
-      p_buffer[0] = (char *)&buffer;
-      for (int field = 1; field < n_fields_wanted; field++) {
-        p_buffer[field] =
-            p_buffer[field - 1] + fields_info_wanted[field]->field.size;
-      }
       int number_jumps = 0;
       int removed = this->ReadParticle(
-          time, fields_info_wanted, n_fields_wanted, (void **)p_buffer,
-          index_data[i].offset, /* output_index */ 0, type, &number_jumps);
+          time, fields_info_wanted, buffers, index_data[i].offset,
+          /* output_index */ 0, type, &number_jumps);
 
       /* Deal with the case where the particle is removed */
       if (removed) {
         this->mCache.current_index[type]--;
       }
+      /* Write the particle back into the array */
+      // TODO
     }
   }
 
   /* Free the memory */
   free(field_index_in_cache);
-  free(fields_info_wanted);
 }
 
 /**
@@ -1326,32 +1281,29 @@ void Reader::UpdateParticlesSingleType(
  *
  * @param time The requested time for the particle.
  * @param fields_wanted The fields requested.
- * @param n_fields_wanted Number of field requested.
- * @param output Pointer to the output array. Size: (n_fields_wanted,
- * sum(n_part)).
+ * @param output Pointer to the output array.
  * @param n_part Number of particles of each type.
  */
-void Reader::UpdateParticles(double time,
-                             const enum field_enum *fields_wanted,
-                             const int n_fields_wanted, void **output,
-                             const uint64_t *n_part) {
+void Reader::UpdateParticles(
+    double time, const std::vector<field_enum> &fields_wanted,
+    std::vector<CsdsArray> &output,
+    const std::array<uint64_t, csds_type_count> &n_part) {
   const chrono::high_resolution_clock::time_point init =
-    chrono::high_resolution_clock::now();
+      chrono::high_resolution_clock::now();
 
   /* Check if the cache is enabled */
   if (!this->mUseCache) {
-    error("Cannot update the particles without cache");
+    csds_error("Cannot update the particles without cache");
   }
 
   /* Prepare the cache */
   csds_cache_reset_current_index(&this->mCache);
 
   /* Update the particles */
-  for(int i = 0; i < csds_type_count; i++) {
+  for (int i = 0; i < csds_type_count; i++) {
     if (n_part[i] != 0) {
-      this->UpdateParticlesSingleType(time, fields_wanted,
-                                      n_fields_wanted, output, n_part,
-                                      (part_type) i);
+      this->UpdateParticlesSingleType(time, fields_wanted, output, n_part,
+                                      (part_type)i);
     }
   }
 
@@ -1360,5 +1312,5 @@ void Reader::UpdateParticles(double time,
 
   /* Print the time */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
-    message("took " << GetDeltaTime(init) << "s");
+    message("took " << GetDeltaTime(init) << "ms");
 }
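
UpdateParticles only works on top of the cache (it errors out otherwise) and, per the check in UpdateParticlesSingleType, cannot advance across more than one index file. A hedged sketch of the cached update path, continuing the read sketch above but with the cache enabled; calling SetTime again before the update is an assumption, not something this patch shows:

    Reader reader("simulation", 1, 10, /* restart_init */ false, /* use_cache */ true);
    reader.SetTime(0.5);
    reader.ReadAllParticles(0.5, wanted, output, n_parts);  /* fills the cache */

    reader.SetTime(0.6);
    reader.UpdateParticles(0.6, wanted, output, n_parts);   /* advance in place */
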
diff --git a/src/csds_reader.hpp b/src/csds_reader.hpp
index dac6b9cb3b9e69ae621b6d69997a951d68b4083c..314b22daa355797a471f11fa01d80d2ca4b86044 100644
--- a/src/csds_reader.hpp
+++ b/src/csds_reader.hpp
@@ -47,8 +47,12 @@
 #ifndef CSDS_READER_H
 #define CSDS_READER_H
 
+/* Standard header */
 #include <string>
+#include <vector>
 
+/* Local headers */
+#include "csds_array.hpp"
 #include "csds_cache.hpp"
 #include "csds_index.hpp"
 #include "csds_logfile.hpp"
@@ -64,75 +68,37 @@
  */
 class Reader {
 
-public:
-  Reader(std::string basename, int verbose, int number_index,
-         bool restart_init, bool use_cache);
+ public:
+  Reader(std::string basename, int verbose, int number_index, bool restart_init,
+         bool use_cache);
 
   ~Reader();
 
-  void InitIndex(int number_index, int current_index);
-
   void SetTime(double time);
 
   double GetTimeBegin();
   double GetTimeEnd();
-  size_t GetNextOffsetFromTime(double time);
-  void GetNumberParticles(uint64_t *n_parts, const int *read_types);
+  void GetNumberParticles(std::array<uint64_t, csds_type_count> &n_parts,
+                          const std::array<bool, csds_type_count> &read_types);
 
   void ReadAllParticles(double time,
-                        const enum field_enum *fields_wanted,
-                        const int n_fields_wanted, void **output,
-                        const uint64_t *n_part);
+                        const std::vector<field_enum> &fields_wanted,
+                        std::vector<CsdsArray> &output,
+                        const std::array<uint64_t, csds_type_count> &n_part);
   void UpdateParticles(double time,
-                       const enum field_enum *fields_wanted,
-                       const int n_fields_wanted, void **output,
-                       const uint64_t *n_part);
+                       const std::vector<field_enum> &fields_wanted,
+                       std::vector<CsdsArray> &output,
+                       const std::array<uint64_t, csds_type_count> &n_part);
   void ReadParticlesFromIds(double time,
-                            const enum field_enum *id_masks_wanted,
-                            const int n_mask_wanted, void **output,
-                            uint64_t *n_part, long long **ids);
-  size_t ReadRecord(void **output,
-                    double *time, int *is_particle, size_t offset);
-  void GenerateIndexFiles(int number_index, int restart);
-  void GetFieldsWanted(const enum field_enum *fields_wanted,
-                       const struct field_information **fields_found, const int n_fields_wanted,
+                            const std::vector<field_enum> &id_masks_wanted,
+                            std::vector<CsdsArray> &output,
+                            std::array<uint64_t, csds_type_count> &n_part,
+                            std::array<long long *, csds_type_count> &ids);
+  void GetFieldsWanted(const std::vector<field_enum> &fields_wanted,
+                       std::vector<field_information *> &fields_found,
                        enum part_type type);
-  size_t CountNumberNewParticles(enum part_type part_type);
-  size_t CountNumberRemovedParticles(enum part_type part_type);
-  int ReadParticle(double time,
-                   const struct field_information **fields_wanted,
-                   int n_fields_wanted, void **output,
-                   size_t offset_from_index, size_t output_index,
-                   enum part_type type, int *number_jumps);
-  void ReadAllParticlesSingleType(
-      double time, const enum field_enum *fields_wanted,
-      const int n_fields_wanted, void **output, const uint64_t *n_part,
-      enum part_type type);
-  void ReadParticlesFromIdsSingleType(
-      double time, const enum field_enum *fields_wanted, const int n_fields_wanted,
-      void **output, uint64_t *n_part, enum part_type type, long long *ids);
-  void UpdateParticlesSingleType(
-      double time, const enum field_enum *fields_wanted,
-      const int n_fields_wanted, void **output, const uint64_t *n_part,
-      enum part_type type);
-  int UpdateSingleParticle(size_t index, enum part_type type, double time,
-                           size_t offset_time, size_t field_index,
-                           void *output);
-
-  size_t GetInitialState(struct csds_hashmap **current_state,
-                         struct time_record *time_record);
-  void WriteIndex(struct csds_hashmap **current_state,
-                  struct index_writer *parts_created,
-                  struct index_writer *parts_removed,
-                  const struct time_record *time, int file_number);
-  size_t GetLastOffsetBefore(const struct index_data *data,
-                             size_t offset_limit);
-  size_t UpdateStateToNextIndex(
-      size_t init_offset, struct time_record time_record,
-      struct csds_hashmap **current_state, struct index_writer *parts_created,
-      struct index_writer *parts_removed);
-
-  std::string GetIndexName(int count);
+  size_t ReadRecord(void **output, double *time, int *is_particle,
+                    size_t offset);
 
   // TODO protect everything
 
@@ -185,6 +151,49 @@ public:
 
   /* The cache for the evolution of particles. */
   struct csds_cache mCache;
+
+ protected:
+  int ReadParticle(double time,
+                   const std::vector<field_information *> &fields_wanted,
+                   std::vector<CsdsArray> &output, size_t offset_from_index,
+                   size_t output_index, enum part_type type, int *number_jumps);
+  void GenerateIndexFiles(int number_index, int restart);
+  void ReadAllParticlesSingleType(
+      double time, const std::vector<field_enum> &fields_wanted,
+      std::vector<CsdsArray> &output,
+      const std::array<uint64_t, csds_type_count> &n_part, enum part_type type);
+  void ReadParticlesFromIdsSingleType(
+      double time, const std::vector<field_enum> &fields_wanted,
+      std::vector<CsdsArray> &output,
+      std::array<uint64_t, csds_type_count> &n_part, enum part_type type,
+      long long *ids);
+  void UpdateParticlesSingleType(
+      double time, const std::vector<field_enum> &fields_wanted,
+      std::vector<CsdsArray> &output,
+      const std::array<uint64_t, csds_type_count> &n_part, enum part_type type);
+  int UpdateSingleParticle(size_t index, enum part_type type, double time,
+                           size_t offset_time, size_t field_index,
+                           void *output);
+
+  size_t GetInitialState(struct csds_hashmap **current_state,
+                         struct time_record *time_record);
+  void WriteIndex(struct csds_hashmap **current_state,
+                  struct index_writer *parts_created,
+                  struct index_writer *parts_removed,
+                  const struct time_record *time, int file_number);
+  size_t GetLastOffsetBefore(const struct index_data *data,
+                             size_t offset_limit);
+  size_t UpdateStateToNextIndex(size_t init_offset,
+                                struct time_record time_record,
+                                struct csds_hashmap **current_state,
+                                struct index_writer *parts_created,
+                                struct index_writer *parts_removed);
+
+  std::string GetIndexName(int count);
+  size_t CountNumberNewParticles(enum part_type part_type);
+  size_t CountNumberRemovedParticles(enum part_type part_type);
+  size_t GetNextOffsetFromTime(double time);
+  void InitIndex(int number_index, int current_index);
 };
 
 #endif  // CSDS_READER_H
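
Moving the single-type readers and the index-generation helpers into a protected section trims the public surface without hiding them from derived classes, which keeps them reachable for testing. A minimal sketch of a hypothetical test fixture re-exposing two of them (inheriting constructors requires C++11):

    /* Hypothetical helper for unit tests; not part of this patch. */
    class TestReader : public Reader {
     public:
      using Reader::Reader;                   /* inherit the public constructor */
      using Reader::CountNumberNewParticles;  /* make protected helpers callable */
      using Reader::GetNextOffsetFromTime;
    };
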
diff --git a/src/csds_reader_generate_index.cpp b/src/csds_reader_generate_index.cpp
index f5e729e1dce501d1fed99b06ed9d6ecc9bdc6cf9..08cc2bace51dbb4bd55145cdc3af54f87db02308 100644
--- a/src/csds_reader_generate_index.cpp
+++ b/src/csds_reader_generate_index.cpp
@@ -18,8 +18,8 @@
  ******************************************************************************/
 
 /* Standard headers */
-#include <iostream>
 #include <fstream>
+#include <iostream>
 
 /* Include corresponding header */
 #include "csds_reader.hpp"
@@ -73,7 +73,7 @@ void index_writer_init(struct index_writer *writer, size_t init_size) {
   writer->data =
       (struct index_data *)malloc(sizeof(struct index_data) * init_size);
   if (writer->data == NULL) {
-    error("Failed to allocate memory for the index_writer.");
+    csds_error("Failed to allocate memory for the index_writer.");
   }
 }
 
@@ -118,7 +118,7 @@ void index_writer_log(struct index_writer *writer, const int64_t id,
 
   /* Ensure that it was correctly allocated */
   if (writer->capacity == 0) {
-    error(
+    csds_error(
         "Trying to add a particle to an empty index_writer."
         " If you are sure that your particle type is implemented, "
         "please increase the value of ReaderOptions:Number*.");
@@ -126,7 +126,7 @@ void index_writer_log(struct index_writer *writer, const int64_t id,
 
 #ifdef CSDS_DEBUG_CHECKS
   if (id < 0) {
-    error("Negative ID for a particle.");
+    csds_error("Negative ID for a particle.");
   }
 #endif
 
@@ -164,28 +164,29 @@ void index_writer_check_implemented(const struct index_writer *writers) {
   if (writers[csds_type_sink].size != 0 ||
       writers[csds_type_black_hole].size != 0 ||
       writers[csds_type_neutrino].size != 0) {
-    error("TODO implement additional particle types");
+    csds_error("TODO implement additional particle types");
   }
 }
 
 /**
  * @brief Write the number of particles and their data into the index file
  */
-void index_writer_write_in_index(const struct index_writer *writers, ofstream &f) {
+void index_writer_write_in_index(const struct index_writer *writers,
+                                 ofstream &f) {
 
   /* Write the number of particles */
   uint64_t N_total[csds_type_count];
   for (int type = 0; type < csds_type_count; type++) {
     N_total[type] = writers[type].size;
   }
-  f.write((char *) N_total, sizeof(uint64_t) * csds_type_count);
+  f.write((char *)N_total, sizeof(uint64_t) * csds_type_count);
 
   /* Write the index data */
   for (int type = 0; type < csds_type_count; type++) {
     if (N_total[type] == 0) continue;
 
     const size_t size = sizeof(struct index_data) * writers[type].size;
-    f.write((const char *) writers[type].data, size);
+    f.write((const char *)writers[type].data, size);
   }
 }
 
@@ -216,31 +217,31 @@ void Reader::WriteIndex(struct csds_hashmap **current_state,
   if (csds_hashmap_count(current_state[csds_type_sink]) != 0 ||
       csds_hashmap_count(current_state[csds_type_black_hole]) != 0 ||
       csds_hashmap_count(current_state[csds_type_neutrino]) != 0)
-    error("Not implemented");
+    csds_error("Not implemented");
   index_writer_check_implemented(parts_created);
   index_writer_check_implemented(parts_removed);
 
   /* Open file */
   std::ofstream f(filename, std::ofstream::out | std::ofstream::binary);
   if (!f.good()) {
-    error("Failed to open the file: " << filename);
+    csds_error("Failed to open the file: " << filename);
   }
 
   /* Write the time */
-  f.write((char *) &time->time, sizeof(double));
-  f.write((char *) &time->int_time, sizeof(integertime_t));
+  f.write((char *)&time->time, sizeof(double));
+  f.write((char *)&time->int_time, sizeof(integertime_t));
 
   /* Write number of particles */
   uint64_t N_total[csds_type_count];
   for (int type = 0; type < csds_type_count; type++) {
     N_total[type] = csds_hashmap_count(current_state[type]);
   }
-  f.write((char *) N_total, sizeof(uint64_t) * csds_type_count);
+  f.write((char *)N_total, sizeof(uint64_t) * csds_type_count);
 
   /* Write if the file is sorted */
   // TODO change it if the array is sorted
   const char sorted = 0;
-  f.write((char *) &sorted, sizeof(char));
+  f.write((char *)&sorted, sizeof(char));
 
   /* Ensure the data to be aligned */
   size_t cur_pos = f.tellp();
@@ -248,7 +249,7 @@ void Reader::WriteIndex(struct csds_hashmap **current_state,
   if (d_align > 0) {
     long int tmp = 0;
     /* Pad with zeros up to the alignment boundary */
-    f.write((const char *) &tmp, d_align);
+    f.write((const char *)&tmp, d_align);
   }
 
   /* Write the current state */
@@ -297,7 +298,7 @@ size_t Reader::GetInitialState(struct csds_hashmap **current_state,
   /* Get the offset after the dump of all the particles
      and the time information of the first time record. */
   if (log->times.size < 2) {
-    error("The time array is not large enough");
+    csds_error("The time array is not large enough");
   }
   const size_t offset_max = log->times.records[1].offset;
   /* Here we cheat a bit by using the 0.
@@ -313,7 +314,7 @@ size_t Reader::GetInitialState(struct csds_hashmap **current_state,
 
   /* Skip the time record */
   mask_type time_mask = 0;
-  csds_mapped_file_read_mask(&log->log, offset_first, &time_mask, NULL);
+  csds_mapped_file_read_mask(log->log, offset_first, &time_mask, NULL);
   const int time_size = header_get_record_size_from_mask(h, time_mask);
   offset_first += time_size + size_record_header;
 
@@ -327,15 +328,16 @@ size_t Reader::GetInitialState(struct csds_hashmap **current_state,
     int data = 0;
 
     enum csds_special_flags flag = csds_particle_read_special_flag(
-        offset, &mask, &prev_offset, &data, &part_type, &log->log);
+        offset, &mask, &prev_offset, &data, &part_type, log->log);
     if (flag != csds_flag_create) {
-      error("Reading a particle from ICs without the created flag.");
+      csds_error("Reading a particle from ICs without the created flag.");
     }
 
     /* TODO Implement missing particle types */
     if (part_type == csds_type_neutrino || part_type == csds_type_sink ||
         part_type == csds_type_black_hole) {
-      error("Particle type not implemented: " << part_type_names[part_type]);
+      csds_error(
+          "Particle type not implemented: " << part_type_names[part_type]);
     }
 
     /* Get the mask for the IDs */
@@ -344,7 +346,7 @@ size_t Reader::GetInitialState(struct csds_hashmap **current_state,
 
     /* Get the particle ID */
     if (!(mask & field_id->field.mask)) {
-      error("The particle ID is missing in the first log");
+      csds_error("The particle ID is missing in the first log");
     }
 
     /* Read the particle ID */
@@ -352,13 +354,13 @@ size_t Reader::GetInitialState(struct csds_hashmap **current_state,
     csds_particle_read_field(offset, &id, field_id,
                              /* derivative */ 0, &mask, &prev_offset,
                              h->fields[part_type], h->number_fields[part_type],
-                             &this->mLog->log);
+                             this->mLog->log);
 
     /* Log the particle */
     struct index_data item = {id, offset};
     void *p = (void *)csds_hashmap_set(current_state[part_type], &item);
     if (p != NULL) {
-      error("Already found a particle with the same ID");
+      csds_error("Already found a particle with the same ID");
     }
 
     /* Increment the offset */
@@ -384,7 +386,7 @@ size_t Reader::GetInitialState(struct csds_hashmap **current_state,
 
   /* Print the time */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
-    message("took " << GetDeltaTime(init) << "s");
+    message("took " << GetDeltaTime(init) << "ms");
 
   /* Go back to normal */
   CSDS_ADVICE_NORMAL(log->log);
@@ -412,7 +414,7 @@ size_t Reader::GetLastOffsetBefore(const struct index_data *data,
   /* Get the full mask */
   mask_type full_mask = 0;
   size_t h_offset = 0;
-  csds_mapped_file_read_mask(&log->log, current_offset, &full_mask, &h_offset);
+  csds_mapped_file_read_mask(log->log, current_offset, &full_mask, &h_offset);
 
   /* Ensures that a special flag is present in the mask */
   full_mask |= CSDS_SPECIAL_FLAGS_MASK;
@@ -427,7 +429,7 @@ size_t Reader::GetLastOffsetBefore(const struct index_data *data,
     /* Get the mask */
     mask_type mask = 0;
     h_offset = 0;
-    csds_mapped_file_read_mask(&log->log, current_offset, &mask, &h_offset);
+    csds_mapped_file_read_mask(log->log, current_offset, &mask, &h_offset);
 
     /* update the offset */
     current_offset += h_offset;
@@ -438,7 +440,7 @@ size_t Reader::GetLastOffsetBefore(const struct index_data *data,
     /* The particle should not have a special flag
        due to the previous loop */
     if (mask & CSDS_SPECIAL_FLAGS_MASK) {
-      error("Found a special flag when updating the particles");
+      csds_error("Found a special flag when updating the particles");
     }
 
     /* Update the last full offset */
@@ -471,10 +473,11 @@ size_t Reader::GetLastOffsetBefore(const struct index_data *data,
  *
  * @return The starting offset for the update.
  */
-size_t Reader::UpdateStateToNextIndex(
-    size_t init_offset,
-    struct time_record time_record, struct csds_hashmap **current_state,
-    struct index_writer *parts_created, struct index_writer *parts_removed) {
+size_t Reader::UpdateStateToNextIndex(size_t init_offset,
+                                      struct time_record time_record,
+                                      struct csds_hashmap **current_state,
+                                      struct index_writer *parts_created,
+                                      struct index_writer *parts_removed) {
   struct csds_logfile *log = this->mLog;
   const struct header *h = &log->header;
   const int size_record_header = CSDS_MASK_SIZE + CSDS_OFFSET_SIZE;
@@ -512,7 +515,7 @@ size_t Reader::UpdateStateToNextIndex(
     int data = 0;
 
     /* Get the mask */
-    csds_mapped_file_read_mask(&log->log, offset, &mask, &h_offset);
+    csds_mapped_file_read_mask(log->log, offset, &mask, &h_offset);
 
     /* Go to the next record */
     const size_t old_offset = offset;
@@ -526,18 +529,18 @@ size_t Reader::UpdateStateToNextIndex(
 
     /* Get the special flag */
     enum csds_special_flags flag = csds_particle_read_special_flag(
-        old_offset, &mask, &h_offset, &data, &part_type, &log->log);
+        old_offset, &mask, &h_offset, &data, &part_type, log->log);
 
 #ifdef CSDS_DEBUG_CHECKS
     if (flag == csds_flag_none) {
-      error(
+      csds_error(
           "A record should not have a mask "
           "for a flag and the flag set to 0");
     }
 #endif
     /* Check if we have a meaningful particle type */
     if (part_type < 0) {
-      error("Found a special flag without a particle type");
+      csds_error("Found a special flag without a particle type");
     }
 
     /* Get the mask for the IDs */
@@ -549,21 +552,21 @@ size_t Reader::UpdateStateToNextIndex(
     csds_particle_read_field(old_offset, &id, field_id,
                              /* derivative */ 0, &mask, &h_offset,
                              h->fields[part_type], h->number_fields[part_type],
-                             &this->mLog->log);
+                             this->mLog->log);
 
     /* Add the particle to the arrays */
     if (flag == csds_flag_change_type || flag == csds_flag_mpi_exit ||
         flag == csds_flag_delete) {
       index_writer_log(&parts_removed[part_type], id, old_offset);
       if (csds_hashmap_delete(current_state[part_type], id) == NULL) {
-        error("Failed to remove a particle");
+        csds_error("Failed to remove a particle");
       };
     } else if (flag == csds_flag_create || flag == csds_flag_mpi_enter) {
       index_writer_log(&parts_created[part_type], id, old_offset);
       struct index_data item = {id, old_offset};
       void *p = (void *)csds_hashmap_set(current_state[part_type], &item);
       if (p != NULL) {
-        error("Already found a particle with the same ID");
+        csds_error("Already found a particle with the same ID");
       }
     }
   }
@@ -578,8 +581,8 @@ size_t Reader::UpdateStateToNextIndex(
 
   /* Print the time */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
-    message("Finding new/removed particles took "
-            << GetDeltaTime(init) << "s");
+    message("Finding new/removed particles took " << GetDeltaTime(init)
+                                                  << "ms");
 
   /* Initialize the total number of particles for the progress bar */
   size_t total_number_particles = 0;
@@ -610,8 +613,8 @@ size_t Reader::UpdateStateToNextIndex(
       }
 
       /* Update the offset */
-      index_data->offset = this->GetLastOffsetBefore(
-          index_data, time_record.offset);
+      index_data->offset =
+          this->GetLastOffsetBefore(index_data, time_record.offset);
 
       /* Are we done or should we print something? */
       if (!(this->mVerbose > 0)) continue;
@@ -641,7 +644,7 @@ size_t Reader::UpdateStateToNextIndex(
 
   /* Print the time */
   if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
-    message("Updating particles took " << GetDeltaTime(init2) << "s");
+    message("Updating particles took " << GetDeltaTime(init2) << "ms");
 
   return offset;
 }
@@ -668,12 +671,12 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
 
   /* Check that the number of index is meaningful */
   if (number_index < 2) {
-    error("The CSDS requires at least 2 index files.");
+    csds_error("The CSDS requires at least 2 index files.");
   }
 
   /* Ensure that the offsets are in the assumed direction */
   if (!header_is_forward(h)) {
-    error("The offset are not in the expected direction");
+    csds_error("The offsets are not in the expected direction");
   }
 
   /* Create the different arrays that will store the information */
@@ -705,7 +708,7 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
           csds_hashmap_new(hashmap_overallocation *
                            this->mParams.approximate_number_particles[i]);
       if (current_state[i] == NULL) {
-        error("Failed to initialize the hashmap");
+        csds_error("Failed to initialize the hashmap");
       }
     }
 
@@ -713,8 +716,8 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
     struct time_record time_record;
     offset = this->GetInitialState(current_state, &time_record);
     /* Write the first index file */
-    this->WriteIndex(current_state, parts_created, parts_removed,
-                     &time_record, /* file_number */ 0);
+    this->WriteIndex(current_state, parts_created, parts_removed, &time_record,
+                     /* file_number */ 0);
 
   }
   /* We are restarting => read state from file */
@@ -729,8 +732,7 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
 
     /* Read it */
     csds_index_read_header(&index, filename, this->mVerbose);
-    csds_index_map_file(&index, filename, /* sorted */ 1,
-                        this->mVerbose);
+    csds_index_map_file(&index, filename, /* sorted */ 1, this->mVerbose);
 
     /* Loop over all the particle types */
     for (int i = 0; i < csds_type_count; i++) {
@@ -738,7 +740,7 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
       current_state[i] =
           csds_hashmap_new(hashmap_overallocation * index.nparts[i]);
       if (current_state[i] == NULL) {
-        error("Failed to initialize the hashmap");
+        csds_error("Failed to initialize the hashmap");
       }
 
       /* Copy the index file into the arrays. */
@@ -746,7 +748,7 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
         struct index_data *data = csds_index_get_data(&index, i);
         void *out = (void *)csds_hashmap_set(current_state[i], data + p);
         if (out != NULL) {
-          error("Already found a particle with the same ID");
+          csds_error("Already found a particle with the same ID");
         }
       }
     }
@@ -765,7 +767,8 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
 
       /* Check if we are reading the correct file */
       if (index_time_record.int_time != index.integer_time) {
-        error("The time in the index file and the expected one do not match");
+        csds_error(
+            "The time in the index file and the expected one do not match");
       }
     }
 
@@ -793,13 +796,12 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
     }
 
     /* Update the state until the next index file. */
-    offset = this->UpdateStateToNextIndex(
-        offset, time_record, current_state, parts_created,
-        parts_removed);
+    offset = this->UpdateStateToNextIndex(offset, time_record, current_state,
+                                          parts_created, parts_removed);
 
     /* Write the index file */
-    this->WriteIndex(current_state, parts_created, parts_removed,
-                     &time_record, file_number);
+    this->WriteIndex(current_state, parts_created, parts_removed, &time_record,
+                     file_number);
   }
 
   /* Free the memory */
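
Every error path in these files now goes through csds_error with stream-style arguments, e.g. csds_error("Failed to open the file: " << filename). A sketch of one way such a macro could be written; the real definition lives in a header that is not part of this diff:

    /* Illustrative only: assembles the stream expression into a message,
       prints the location and aborts. */
    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    #define csds_error(msg)                                            \
      do {                                                             \
        std::ostringstream csds_oss_;                                  \
        csds_oss_ << msg;                                              \
        std::cerr << __FILE__ << ":" << __LINE__ << " " << __func__    \
                  << ": " << csds_oss_.str() << std::endl;             \
        std::abort();                                                  \
      } while (0)
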
diff --git a/src/csds_time.cpp b/src/csds_time.cpp
index 4f749960ca6d82c291105afd1694818510919257..71f0521cb3e78ac0ed178a4485874f9d616cb37e 100644
--- a/src/csds_time.cpp
+++ b/src/csds_time.cpp
@@ -47,7 +47,7 @@ void time_array_ensure_size(struct time_array *t) {
   /* Allocate the new array */
   struct time_record *tmp =
       (struct time_record *)malloc(sizeof(struct time_record) * t->capacity);
-  if (tmp == NULL) error("Failed to allocate the time records.");
+  if (tmp == NULL) csds_error("Failed to allocate the time records.");
 
   /* Copy the memory */
   memcpy(tmp, t->records, sizeof(struct time_record) * t->size);
@@ -91,11 +91,11 @@ void time_array_append(struct time_array *t, const integertime_t int_time,
  *
  * @return The offset after the time record
  */
-size_t time_read(struct time_record *time_record,
-                 size_t offset, struct csds_logfile *logfile) {
+size_t time_read(struct time_record *time_record, size_t offset,
+                 struct csds_logfile *logfile) {
 
   /* Initialize variables. */
-  struct mapped_file *log = &logfile->log;
+  struct mapped_file &log = logfile->log;
 
   mask_type mask = 0;
   size_t prev_offset = 0;
@@ -106,7 +106,7 @@ size_t time_read(struct time_record *time_record,
   offset = csds_mapped_file_read_mask(log, offset, &mask, &prev_offset);
 
   /* check if reading a time record. */
-  if (CSDS_TIMESTAMP_MASK != mask) error("Not a time record.");
+  if (CSDS_TIMESTAMP_MASK != mask) csds_error("Not a time record.");
 
   /* read the record. */
   offset = csds_mapped_file_read_data(
@@ -128,13 +128,13 @@ size_t time_offset_first_record(const struct header *h) {
 
   /* Initialize a few variables. */
   size_t offset = h->offset_first_record;
-  struct mapped_file *map = &h->log->log;
+  struct mapped_file &map = h->log->log;
 
   mask_type mask = 0;
   csds_mapped_file_read_mask(map, offset, &mask, NULL);
 
   if (mask != CSDS_TIMESTAMP_MASK)
-    error("Log file should begin by timestep.");
+    csds_error("Log file should begin by timestep.");
 
   return h->offset_first_record;
 }
@@ -148,8 +148,7 @@ void time_array_init(struct time_array *t, size_t initial_size) {
   /* Allocate the arrays */
   t->records =
       (struct time_record *)malloc(sizeof(struct time_record) * initial_size);
-  if (t->records == NULL)
-    error("Failed to initialize the time records.");
+  if (t->records == NULL) csds_error("Failed to initialize the time records.");
 
   /* Initialize the sizes */
   t->size = 0;
@@ -164,13 +163,13 @@ void time_array_init(struct time_array *t, size_t initial_size) {
  *
  * @return Does the file exist?
  */
-int time_array_get_filename_savefile(std::string filename,
+int time_array_get_filename_savefile(std::string *filename,
                                      std::string basename) {
   /* Generate the name */
-  filename = basename + "_0000.index";
+  *filename = basename + "_0000.index";
 
   /* Check if the file exists. */
-  std::ifstream file(filename);
+  std::ifstream file(*filename);
   bool ret = file.good();
   file.close();
   return ret;
@@ -184,27 +183,26 @@ int time_array_get_filename_savefile(std::string filename,
  */
 void time_array_save(struct time_array *t, string basename) {
   /* Get the filename of the saved file. */
-  std::string filename = basename + "_0000.index";
-  int savefile = time_array_get_filename_savefile(filename, basename);
+  std::string filename;
+  int savefile = time_array_get_filename_savefile(&filename, basename);
 
   if (!savefile) {
-    error("Cannot save the time array without the first index file.");
+    csds_error("Cannot save the time array without the first index file.");
   }
 
   /* Open the file */
   std::ofstream f(filename, std::ofstream::out | std::ofstream::app |
-                  std::ofstream::binary);
+                                std::ofstream::binary);
   if (!f.good()) {
-    error("Failed to open the file: " << filename);
+    csds_error("Failed to open the file: " << filename);
   }
 
   /* Dump the array */
   uint64_t size = t->size;
   f << size;
-  f.write((char *) t->records, sizeof(struct time_record) * t->size);
+  f.write((char *)t->records, sizeof(struct time_record) * t->size);
 }
 
-
 /**
  * @brief Load the array from a file
  *
@@ -240,11 +238,11 @@ void time_array_load(struct time_array *t, struct csds_index *index) {
  *
  * @return Is the time array restored?
  */
-int time_array_restore(struct time_array *t,
-                       const string basename, int verbose) {
+int time_array_restore(struct time_array *t, const string basename,
+                       int verbose) {
   /* Get the filename of the saved file. */
   std::string filename;
-  int savefile = time_array_get_filename_savefile(filename, basename);
+  int savefile = time_array_get_filename_savefile(&filename, basename);
 
   if (!savefile) return 0;
 
@@ -252,8 +250,7 @@ int time_array_restore(struct time_array *t,
   struct csds_index index;
   csds_index_init(&index);
   csds_index_read_header(&index, filename, verbose);
-  csds_index_map_file(&index, filename, /* sorted */ 1,
-                      verbose);
+  csds_index_map_file(&index, filename, /* sorted */ 1, verbose);
 
   /* Check if the index file contains the time array */
   if (csds_index_contains_time_array(&index)) {
@@ -302,7 +299,7 @@ void time_array_populate(struct time_array *t, struct csds_logfile *log,
     time_array_append(t, time.int_time, time.time, offset);
 
     /* get next record. */
-    int test = tools_get_next_record(&log->header, &log->log, &offset,
+    int test = tools_get_next_record(&log->header, log->log, &offset,
                                      log->log.mmap_size);
     if (test == -1) break;
   }
@@ -346,12 +343,12 @@ double time_array_get_time(const struct time_array *t, const size_t offset) {
 size_t time_array_get_index(const struct time_array *t, const size_t offset) {
 
 #ifdef CSDS_DEBUG_CHECKS
-  if (!t) error("NULL pointer.");
+  if (!t) csds_error("NULL pointer.");
 
   if (offset < t->records[0].offset || offset > t->records[t->size - 1].offset)
-    error("Offset outside of range. In the expected order: "
-          << t->records[t->size - 1].offset << " > "
-          << offset << " > " << t->records[0].offset);
+    csds_error("Offset outside of range. In the expected order: "
+               << t->records[t->size - 1].offset << " > " << offset << " > "
+               << t->records[0].offset);
 #endif
 
   /* right will contain the index at the end of the loop */
@@ -381,7 +378,7 @@ size_t time_array_get_index(const struct time_array *t, const size_t offset) {
 #ifdef CSDS_DEBUG_CHECKS
   if (t->records[right].offset > offset ||
       t->records[right + 1].offset <= offset) {
-    error("Found the wrong element");
+    csds_error("Found the wrong element");
   }
 
 #endif
@@ -401,12 +398,12 @@ size_t time_array_get_index_from_time(const struct time_array *t,
                                       const double time) {
 
 #ifdef CSDS_DEBUG_CHECKS
-  if (!t) error("NULL pointer.");
+  if (!t) csds_error("NULL pointer.");
 
   if (time < t->records[0].time || time > t->records[t->size - 1].time)
-    error("Time outside of range. In the expected order: " <<
-          t->records[0].time << " < " << time <<
-          " < " << t->records[t->size - 1].time);
+    csds_error("Time outside of range. In the expected order: "
+               << t->records[0].time << " < " << time << " < "
+               << t->records[t->size - 1].time);
 #endif
 
   /* right will contain the index at the end of the loop */
@@ -434,7 +431,7 @@ size_t time_array_get_index_from_time(const struct time_array *t,
 
 #ifdef CSDS_DEBUG_CHECKS
   if (t->records[right].time > time || t->records[right + 1].time <= time) {
-    error("Found the wrong element");
+    csds_error("Found the wrong element");
   }
 
 #endif
diff --git a/src/csds_tools.cpp b/src/csds_tools.cpp
index 8979fee3e5f967f3551d9c446d4d860d719dd656..a7c1de69330a0f1cf4ff2c87aab0b838d7b84692 100644
--- a/src/csds_tools.cpp
+++ b/src/csds_tools.cpp
@@ -17,8 +17,8 @@
  *
  ******************************************************************************/
 /* Standard headers */
-#include <stdio.h>
 #include <iomanip>
+#include <stdio.h>
 
 /* This file's header */
 #include "csds_tools.hpp"
@@ -39,13 +39,13 @@
  *
  * @return -1 if no next record, otherwise 0
  */
-int tools_get_next_record(const struct header *h, struct mapped_file *map,
+int tools_get_next_record(const struct header *h, struct mapped_file &map,
                           size_t *offset, size_t file_size) {
   if (header_is_forward(h)) return _tools_get_next_record_forward(map, offset);
   if (header_is_backward(h))
     return _tools_get_next_record_backward(h, map, offset, file_size);
   else
-    error("Offsets are corrupted.");
+    csds_error("Offsets are corrupted.");
 }
 
 /**
@@ -57,7 +57,7 @@ int tools_get_next_record(const struct header *h, struct mapped_file *map,
  *
  * @return error code, -1 if no next record
  */
-int _tools_get_next_record_forward(struct mapped_file *map, size_t *offset) {
+int _tools_get_next_record_forward(struct mapped_file &map, size_t *offset) {
   size_t diff_offset = 0;
 
   /* Read the offset. */
@@ -82,10 +82,10 @@ int _tools_get_next_record_forward(struct mapped_file *map, size_t *offset) {
  * @return error code, -1 if no next record
  */
 int _tools_get_next_record_backward(const struct header *h,
-                                    struct mapped_file *map, size_t *offset,
+                                    struct mapped_file &map, size_t *offset,
                                     size_t file_size) {
 #ifndef CSDS_DEBUG_CHECKS
-  error("Should not be used, method too slow");
+  csds_error("Should not be used, method too slow");
 #endif
   size_t current_offset = *offset;
   size_t record_header = CSDS_MASK_SIZE + CSDS_OFFSET_SIZE;
@@ -117,7 +117,7 @@ int _tools_get_next_record_backward(const struct header *h,
  *
  * @return position after the record.
  */
-size_t tools_reverse_offset(const struct header *h, struct mapped_file *map,
+size_t tools_reverse_offset(const struct header *h, struct mapped_file &map,
                             size_t offset) {
   mask_type mask = 0;
   size_t prev_offset = 0;
@@ -138,8 +138,8 @@ size_t tools_reverse_offset(const struct header *h, struct mapped_file *map,
   /* first records do not have a previous partner. */
   if (prev_offset == cur_offset) return after_current_record;
   if (prev_offset > cur_offset)
-    error("Unexpected offset: header " << prev_offset
-           << ", current " << cur_offset);
+    csds_error("Unexpected offset: header " << prev_offset << ", current "
+                                            << cur_offset);
 
   /* modify previous offset. */
   offset = cur_offset - prev_offset + CSDS_MASK_SIZE;
@@ -154,7 +154,7 @@ size_t tools_reverse_offset(const struct header *h, struct mapped_file *map,
   /* Check if we are not mixing timestamp and particles */
   if ((prev_mask != CSDS_TIMESTAMP_MASK && mask == CSDS_TIMESTAMP_MASK) ||
       (prev_mask == CSDS_TIMESTAMP_MASK && mask != CSDS_TIMESTAMP_MASK))
-    error("Unexpected mask: "<< mask << " got " << prev_mask);
+    csds_error("Unexpected mask: " << mask << " got " << prev_mask);
 
 #endif  // CSDS_DEBUG_CHECKS
 
@@ -172,14 +172,13 @@ size_t tools_reverse_offset(const struct header *h, struct mapped_file *map,
  *
  * @return position after the record.
  */
-size_t tools_check_record_consistency(struct csds_logfile *log,
-                                      size_t offset) {
+size_t tools_check_record_consistency(struct csds_logfile *log, size_t offset) {
 #ifndef CSDS_DEBUG_CHECKS
-  error("Should not check in non debug mode.");
+  csds_error("Should not check in non debug mode.");
 #endif
 
   const struct header *h = &log->header;
-  struct mapped_file *map = &log->log;
+  struct mapped_file &map = log->log;
   const size_t init_offset = offset;
 
   mask_type mask;
@@ -202,10 +201,10 @@ size_t tools_check_record_consistency(struct csds_logfile *log,
     pointed_offset += init_offset;
   else if (header_is_backward(h)) {
     if (init_offset < pointed_offset)
-      error("Offset too large for mask: " << mask);
+      csds_error("Offset too large for mask: " << mask);
     pointed_offset = init_offset - pointed_offset;
   } else {
-    error("Offset are corrupted.");
+    csds_error("Offset are corrupted.");
   }
 
   if (pointed_offset == init_offset || pointed_offset == 0) return offset_ret;
@@ -217,7 +216,7 @@ size_t tools_check_record_consistency(struct csds_logfile *log,
   /* check if not mixing timestamp and particles. */
   if ((pointed_mask != CSDS_TIMESTAMP_MASK && mask == CSDS_TIMESTAMP_MASK) ||
       (pointed_mask == CSDS_TIMESTAMP_MASK && mask != CSDS_TIMESTAMP_MASK))
-    error("Error in the offset for mask: " << mask);
+    csds_error("Error in the offset for mask: " << mask);
 
   return offset_ret;
 }
@@ -231,12 +230,12 @@ using namespace std::chrono;
  * @param tic The initial time
  * @param message The message to display before the progress message.
  */
-void tools_print_progress(
-    float percent, const high_resolution_clock::time_point init,
-    const std::string message) {
+void tools_print_progress(float percent,
+                          const high_resolution_clock::time_point init,
+                          const std::string message) {
 
   /* Get the remaining time */
-  const int delta_time = GetDeltaTime(init);
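+  /* Elapsed time in seconds (GetDeltaTime is assumed to return milliseconds) */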
+  const float delta_time = GetDeltaTime(init) / 1000.0;
   int remaining_time = delta_time * (100. - percent) / percent;
 
   /* Compute the time */
@@ -251,12 +250,10 @@ void tools_print_progress(
   std::cout << "% done, Remaining time: ";
 
   /* Write the hour */
-  if (hour != 0)
-    std::cout << hour << "h ";
+  if (hour != 0) std::cout << hour << "h ";
 
   /* Write the minutes */
-  if (min != 0)
-    std::cout << min << "min ";
+  if (min != 0) std::cout << min << "min ";
 
   /* Write the seconds */
   std::cout << sec << "s";
diff --git a/src/csds_tools.hpp b/src/csds_tools.hpp
index e005219a0c8c602dee25fc1374a5b96d8ce8ba22..1b946fc33a2c15c10ddcb6f7089ab7b4649e9d5d 100644
--- a/src/csds_tools.hpp
+++ b/src/csds_tools.hpp
@@ -51,19 +51,18 @@ struct csds_reader_field {
   void *second_deriv;
 };
 
-int tools_get_next_record(const struct header *h, struct mapped_file *map,
+int tools_get_next_record(const struct header *h, struct mapped_file &map,
                           size_t *offset, size_t file_size);
 int _tools_get_next_record_backward(const struct header *h,
-                                    struct mapped_file *map, size_t *offset,
+                                    struct mapped_file &map, size_t *offset,
                                     size_t file_size);
-int _tools_get_next_record_forward(struct mapped_file *map, size_t *offset);
-size_t tools_reverse_offset(const struct header *h, struct mapped_file *map,
+int _tools_get_next_record_forward(struct mapped_file &map, size_t *offset);
+size_t tools_reverse_offset(const struct header *h, struct mapped_file &map,
                             size_t offset);
-size_t tools_check_record_consistency(struct csds_logfile *log,
-                                      size_t offset);
+size_t tools_check_record_consistency(struct csds_logfile *log, size_t offset);
 
-void tools_print_progress(float percentage,
-                          const std::chrono::high_resolution_clock::time_point init,
-                          const std::string message);
+void tools_print_progress(
+    float percentage, const std::chrono::high_resolution_clock::time_point init,
+    const std::string message);
 
 #endif  // CSDS_TOOLS_H
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 17100ee4d0691217ba61c3e16e554957828d3b6e..ddc12046c8da74f2dcde7eefabbea9eb243e4e2c 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -23,11 +23,17 @@ AM_LDFLAGS = ../src/.libs/libcsds.a ../src/.libs/libcsds_writer.a $(PYTHON_LIBS)
 
 # List of programs and scripts to run in the test suite
 TESTS = testLogfileHeader testLogfileReader testTimeArray testQuickSort testVirtualReality
-TESTS += testHashmap testInterpolation testParser testWriter
+TESTS += testHashmap testInterpolation testParser
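+# testWriter is only built and run when the address sanitizer is disabled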
+if !HAVE_SANITIZER
+TESTS += testWriter
+endif
 
 # List of test programs to compile
 check_PROGRAMS = testLogfileHeader testLogfileReader testTimeArray testQuickSort testVirtualReality
-check_PROGRAMS += testHashmap testInterpolation testParser testWriter
+check_PROGRAMS += testHashmap testInterpolation testParser
+if !HAVE_SANITIZER
+check_PROGRAMS += testWriter
+endif
 
 # Rebuild tests when CSDS is updated.
 $(check_PROGRAMS): ../src/.libs/libcsds.a ../src/.libs/libcsds_writer.a
@@ -41,7 +47,9 @@ testVirtualReality_SOURCES = testVirtualReality.cpp
 testHashmap_SOURCES = testHashmap.cpp
 testInterpolation_SOURCES = testInterpolation.cpp
 testParser_SOURCES = testParser.cpp
+if !HAVE_SANITIZER
 testWriter_SOURCES = testWriter.c
+endif
 
 # Files necessary for distribution
 EXTRA_DIST = 
diff --git a/tests/generate_log.hpp b/tests/generate_log.hpp
index 7f47f03fdaf478e0803d5b4389a0ff9682b5787b..1613ccee7f2b44dc9549f3d513219422dae9090d 100644
--- a/tests/generate_log.hpp
+++ b/tests/generate_log.hpp
@@ -242,7 +242,8 @@ void write_particles(struct csds_logfile_writer *log, struct csds_part *parts,
  * @param nparts The number of particles.
  * @param filename The filename of the logfile.
  */
-void generate_log(struct csds_part *parts, size_t nparts, std::string filename) {
+void generate_log(struct csds_part *parts, size_t nparts,
+                  std::string filename) {
 
 #ifdef HAVE_PYTHON
   message(
diff --git a/tests/testHashmap.cpp b/tests/testHashmap.cpp
index 774ee455aaf6432465d992a0742918e041f6a383..c0a08ce320dac7a13f5fd837113f37eda87b9827 100644
--- a/tests/testHashmap.cpp
+++ b/tests/testHashmap.cpp
@@ -42,7 +42,7 @@ static void all(void) {
   struct index_data *vals;
   vals = (struct index_data *)malloc(N * sizeof(struct index_data));
   if (vals == NULL) {
-    error("Failed to allocate the index array");
+    csds_error("Failed to allocate the index array");
   }
   for (int i = 0; i < N; i++) {
     vals[i].id = i;
@@ -52,7 +52,7 @@ static void all(void) {
   struct csds_hashmap *map;
 
   map = csds_hashmap_new(0);
-  if (map == NULL) error("Failed to allocate the hashmap");
+  if (map == NULL) csds_error("Failed to allocate the hashmap");
 
   shuffle(vals, N);
 
@@ -81,19 +81,20 @@ static void all(void) {
   /* Open file */
   std::ofstream f(filename, std::ofstream::out | std::ofstream::binary);
   if (!f.good()) {
-    error("Failed to open the file: " << filename);
+    csds_error("Failed to open the file: " << filename);
   }
   csds_hashmap_write(map, f);
 
   /* Read the particles from the file */
-  std::ifstream input = std::ifstream(filename, std::ofstream::in | std::ofstream::binary);
+  std::ifstream input =
+      std::ifstream(filename, std::ofstream::in | std::ofstream::binary);
   if (!f.good()) {
-    error("Failed to open the file: " << filename);
+    csds_error("Failed to open the file: " << filename);
   }
   struct index_data *test =
       (struct index_data *)malloc(N * sizeof(struct index_data));
-  if (test == NULL) error("Failed to allocate array");
-  input.read((char *) test, sizeof(struct index_data) * N);
+  if (test == NULL) csds_error("Failed to allocate array");
+  input.read((char *)test, sizeof(struct index_data) * N);
 
   /* Test the i/o */
   for (size_t i = 0; i < N; i++) {
@@ -142,7 +143,7 @@ static void benchmarks(void) {
   struct csds_hashmap *map;
   map = csds_hashmap_new(0);
   if (map == NULL) {
-    error("Failed to initialize the hashmap");
+    csds_error("Failed to initialize the hashmap");
   }
 
   shuffle(vals, N);
diff --git a/tests/testInterpolation.cpp b/tests/testInterpolation.cpp
index 4905eb412401d7938dde4eb3375d9dc276833e31..ebd7f8148bb0c976b52bb53d9b296215fb35bb36 100644
--- a/tests/testInterpolation.cpp
+++ b/tests/testInterpolation.cpp
@@ -107,8 +107,8 @@ int main(void) {
     const double dt = 0.5 * (t_end - t_beg) / i;
     const double t1 = t_eval - dt;
     const double t2 = t_eval + dt;
-    message("Interpolating at t=" << t_eval << " from t=["
-            << t1 << ", " << t2 << "]");
+    message("Interpolating at t=" << t_eval << " from t=[" << t1 << ", " << t2
+                                  << "]");
 
     /* Set the state at t1 and t2 */
     double p1[3];
diff --git a/tests/testLogfileHeader.cpp b/tests/testLogfileHeader.cpp
index 330bc4ff38d7d35facff0f865e662a96d796f2e3..b620f40149f8f3be9333117e1dd4d0218baf3835 100644
--- a/tests/testLogfileHeader.cpp
+++ b/tests/testLogfileHeader.cpp
@@ -77,5 +77,8 @@ int main(void) {
   message("Checking offset direction");
   assert(h->offset_direction == csds_offset_backward);
 
+  /* Cleanup */
+  csds_logfile_free(&logfile);
+
   return 0;
 }
diff --git a/tests/testLogfileReader.cpp b/tests/testLogfileReader.cpp
index 778d69da4687f367caec829a69b2252c3a39b1bf..21d27fee16d5a7c59a36e3f11f12da3f041eeb2a 100644
--- a/tests/testLogfileReader.cpp
+++ b/tests/testLogfileReader.cpp
@@ -78,12 +78,10 @@ void check_data(Reader &reader, struct csds_part *parts) {
       header_get_field_from_name(h, "Coordinates", csds_type_gas);
 
   /* Loop over each record. */
-  for (size_t offset =
-           reader.ReadRecord(output, &time, &is_particle,
-                             logfile->header.offset_first_record);
+  for (size_t offset = reader.ReadRecord(output, &time, &is_particle,
+                                         logfile->header.offset_first_record);
        offset < logfile->log.mmap_size;
-       offset = reader.ReadRecord(output, &time, &is_particle,
-                                  offset)) {
+       offset = reader.ReadRecord(output, &time, &is_particle, offset)) {
 
     /* Do the particle case */
     if (is_particle) {
@@ -95,12 +93,12 @@ void check_data(Reader &reader, struct csds_part *parts) {
       */
       const uint64_t current_id = *(uint64_t *)output[field_id->field.position];
       if (previous_id != id_flag && previous_id >= current_id) {
-        error("Wrong particle found");
+        csds_error("Wrong particle found");
         previous_id = current_id;
       }
 
       /* Get the corresponding particle */
-      if (current_id >= number_parts) error("Wrong id: " << current_id);
+      if (current_id >= number_parts) csds_error("Wrong id: " << current_id);
 
       struct csds_part *p = &parts[current_id];
       const double *pos = (double *)output[field_pos->field.position];
@@ -126,10 +124,11 @@ void check_data(Reader &reader, struct csds_part *parts) {
       message("Step: " << step);
       /* Check if we have the current amount of particles in previous step. */
       if (step != 0 && count != get_number_active_particles(step, parts))
-        error(
+        csds_error(
             "The reader did not find the correct number of particles during "
-            "step " << step << ": " << count << " != "
-            << get_number_active_particles(step, parts));
+            "step "
+            << step << ": " << count
+            << " != " << get_number_active_particles(step, parts));
 
       /* Avoid the initial log */
       if (init_log_all_done > 0) {
@@ -177,7 +176,7 @@ int main(void) {
   struct csds_part *parts;
   if ((parts = (struct csds_part *)malloc(sizeof(struct csds_part) *
                                           number_parts)) == NULL)
-    error("Failed to allocate particles array.");
+    csds_error("Failed to allocate particles array.");
 
   /* Write a 'simulation' */
   generate_log(parts, number_parts, filename);
diff --git a/tests/testQuickSort.cpp b/tests/testQuickSort.cpp
index 347241dc0477ac47c878f421839bef898b7c6c1e..1fa2b6585c1d76d74bcae58cb19c7a312a27c5a3 100644
--- a/tests/testQuickSort.cpp
+++ b/tests/testQuickSort.cpp
@@ -51,7 +51,7 @@ void init_array(struct index_data *data) {
 void check_sort(struct index_data *data) {
   for (size_t i = 1; i < N; i++) {
     if (data[i].id < data[i - 1].id) {
-      error("The array is not sorted");
+      csds_error("The array is not sorted");
     }
   }
 }
@@ -72,7 +72,7 @@ int main(void) {
       (struct index_data *)malloc(N * sizeof(struct index_data));
 
   if (data == NULL) {
-    error("Failed to allocate the memory");
+    csds_error("Failed to allocate the memory");
   }
 
   /* Initialize the array */
diff --git a/tests/testTimeArray.cpp b/tests/testTimeArray.cpp
index 88d64e31212be4b8d2fc3598e10ecc132d0249a7..b06bbfcd49874bf11ac10b5b2815978f001776fb 100644
--- a/tests/testTimeArray.cpp
+++ b/tests/testTimeArray.cpp
@@ -31,7 +31,7 @@ int main(void) {
 
   /* Check that we are really testing the reallocation */
   if (NUMBER_OF_ELEMENT < CSDS_TIME_INIT_SIZE) {
-    error("Not testing the reallocation.");
+    csds_error("Not testing the reallocation.");
   }
 
   /* Fix the random seed in order to reproduce the results */
diff --git a/tests/testVirtualReality.cpp b/tests/testVirtualReality.cpp
index 1d31d73ee9f44ce85fe574b79de2ee36982f10a0..706b959dd883c78b829af4cd47c2918769d0c866 100644
--- a/tests/testVirtualReality.cpp
+++ b/tests/testVirtualReality.cpp
@@ -51,7 +51,7 @@ int main(void) {
   struct csds_part *parts;
   if ((parts = (struct csds_part *)malloc(sizeof(struct csds_part) *
                                           number_parts)) == NULL)
-    error("Failed to allocate particles array.");
+    csds_error("Failed to allocate particles array.");
 
   /* Write a 'simulation' */
   generate_log(parts, number_parts, filename);
@@ -73,15 +73,12 @@ int main(void) {
 
   /* Create the variables for the number of particles */
   const int n_type = csds_type_count;
-  uint64_t *n_parts = (uint64_t *)malloc(n_type * sizeof(uint64_t));
-  int *read_types = (int *)malloc(n_type * sizeof(int));
-  if (read_types == NULL || n_parts == NULL) {
-    error("Failed to allocate arrays.");
-  }
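+  /* Fixed-size std::array replaces the malloc'd buffers, so no manual free is needed */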
+  std::array<uint64_t, csds_type_count> n_parts;
+  std::array<bool, csds_type_count> read_types;
 
   /* Set the flags in order to read everything */
   for (int i = 0; i < n_type; i++) {
-    read_types[i] = 1;
+    read_types[i] = true;
   }
 
   /* Get the number of particles */
@@ -92,48 +89,37 @@ int main(void) {
     n_tot += n_parts[i];
   }
 
-  /* Allocate the particles memory */
-  double *pos = (double *)malloc(n_tot * 3 * sizeof(double));
-  long long *ids = (long long *)malloc(n_tot * sizeof(long long));
-
   /* Create the list of fields. */
   const int n_fields = 2;
-  enum field_enum *required_fields =
-      (enum field_enum *)malloc(n_fields * sizeof(enum field_enum));
+  std::vector<field_enum> required_fields(n_fields);
   required_fields[0] = field_enum_coordinates;
   required_fields[1] = field_enum_particles_ids;
 
   /* Create the output */
-  void **output = (void **)malloc(n_fields * sizeof(void *));
-  output[0] = (void *)pos;
-  output[1] = (void *)ids;
+  std::vector<CsdsArray> output;
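+  /* Kept in the same order as required_fields: [0] coordinates, [1] particle IDs */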
+  output.emplace_back(n_tot, field_enum_coordinates);
+  output.emplace_back(n_tot, field_enum_particles_ids);
 
   /* Set the time of the next reading */
   reader.SetTime(begin);
 
   /* Read the next time */
-  reader.ReadAllParticles(begin, required_fields, n_fields,
-                          output, n_parts);
+  reader.ReadAllParticles(begin, required_fields, output, n_parts);
 
   /* Loop over time for a single particle */
   int part_ind = 0;
   for (double t = begin; t < end; t += (end - begin) / number_steps) {
 
-    reader.UpdateParticles(t, required_fields, n_fields,
-                           output, n_parts);
+    reader.UpdateParticles(t, required_fields, output, n_parts);
+
+    double *pos = (double *)output[0][part_ind];
+    long long *ids = (long long *)output[1][part_ind];
 
-    message("Particle " << ids[part_ind] << ": " << pos[3 * part_ind + 0]
-            << " " << pos[3 * part_ind + 1] << ": " << pos[3 * part_ind + 2]
-            << " " << t);
+    message("Particle " << *ids << ": " << pos[0] << " " << pos[1] << ": "
+                        << pos[2] << " " << t);
   }
 
   /* Cleanup the memory */
-  free(required_fields);
-  free(ids);
-  free(pos);
   free(parts);
-  free(n_parts);
-  free(read_types);
-  free(output);
   return 0;
 }