diff --git a/examples/ExternalPointMass/makeIC.py b/examples/ExternalPointMass/makeIC.py
index b1b032e47383d6e757306f09063a8931635ea8e9..37fc46a9243b2a4c42029de4587082f9efb11f43 100644
--- a/examples/ExternalPointMass/makeIC.py
+++ b/examples/ExternalPointMass/makeIC.py
@@ -89,6 +89,14 @@ grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp = file.create_group("/RuntimePars")
 grp.attrs["PeriodicBoundariesOn"] = periodic
 
+#Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = 3.0856776e21
+grp.attrs["Unit mass in cgs (U_M)"] = 1.9885e33
+grp.attrs["Unit time in cgs (U_t)"] = 3.0856776e16
+grp.attrs["Unit current in cgs (U_I)"] = 1.
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.
+
 #Particle group
 #grp0 = file.create_group("/PartType0")
 grp1 = file.create_group("/PartType1")
diff --git a/examples/UniformBox/uniformBox.yml b/examples/UniformBox/uniformBox.yml
index 50afbee02ddc8a801ca77ed4900b7d2e0e3b50b5..069a17fb549775acf918da88221f7b3ed7e80565 100644
--- a/examples/UniformBox/uniformBox.yml
+++ b/examples/UniformBox/uniformBox.yml
@@ -38,3 +38,11 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./uniformBox.hdf5     # The file to read
+
+
+# External potential parameters
+PointMass:
+  position_x:      50.     # location of external point mass in internal units
+  position_y:      50.
+  position_z:      50.
+  mass:            1e10     # mass of external point mass in internal units
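
For context, these parameters are consumed at run time through the same parser
API used in main.c and engine.c. A minimal sketch of how the external potential
could read this block, assuming illustrative struct and field names (the actual
external_potential layout is defined elsewhere in the tree):

    /* Hedged sketch: reads the PointMass block with the existing parser API
     * (parser_get_param_double, as used in engine_init). The struct and its
     * field names are illustrative, not the real external_potential layout. */
    struct point_mass_sketch {
      double x, y, z; /* position of the point mass, internal units */
      double mass;    /* mass of the point mass, internal units */
    };

    static void point_mass_init(const struct swift_params *params,
                                struct point_mass_sketch *pm) {
      pm->x = parser_get_param_double(params, "PointMass:position_x");
      pm->y = parser_get_param_double(params, "PointMass:position_y");
      pm->z = parser_get_param_double(params, "PointMass:position_z");
      pm->mass = parser_get_param_double(params, "PointMass:mass");
    }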
diff --git a/examples/main.c b/examples/main.c
index b7397dfc7a195f7ea1ac5630ab4f0b1ddc8029b1..a592c974feda86fd7c6537017a7b31480f4a1bc2 100644
--- a/examples/main.c
+++ b/examples/main.c
@@ -291,7 +291,7 @@ int main(int argc, char *argv[]) {
   struct swift_params *params = malloc(sizeof(struct swift_params));
   if (params == NULL) error("Error allocating memory for the parameter file.");
   if (myrank == 0) {
-    message("Reading parameters from file '%s'", paramFileName);
+    message("Reading runtime parameters from file '%s'", paramFileName);
     parser_read_file(paramFileName, params);
     // parser_print_params(&params);
     parser_write_params_to_file(params, "used_parameters.yml");
@@ -324,11 +324,11 @@ int main(int argc, char *argv[]) {
   units_init(&us, params, "InternalUnitSystem");
   phys_const_init(&us, &prog_const);
   if (myrank == 0 && verbose > 0) {
-    message("Unit system: U_M = %e g.", us.UnitMass_in_cgs);
-    message("Unit system: U_L = %e cm.", us.UnitLength_in_cgs);
-    message("Unit system: U_t = %e s.", us.UnitTime_in_cgs);
-    message("Unit system: U_I = %e A.", us.UnitCurrent_in_cgs);
-    message("Unit system: U_T = %e K.", us.UnitTemperature_in_cgs);
+    message("Internal unit system: U_M = %e g.", us.UnitMass_in_cgs);
+    message("Internal unit system: U_L = %e cm.", us.UnitLength_in_cgs);
+    message("Internal unit system: U_t = %e s.", us.UnitTime_in_cgs);
+    message("Internal unit system: U_I = %e A.", us.UnitCurrent_in_cgs);
+    message("Internal unit system: U_T = %e K.", us.UnitTemperature_in_cgs);
     phys_const_print(&prog_const);
   }
 
@@ -354,17 +354,17 @@ int main(int argc, char *argv[]) {
   if (myrank == 0) clocks_gettime(&tic);
 #if defined(WITH_MPI)
 #if defined(HAVE_PARALLEL_HDF5)
-  read_ic_parallel(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
-                   &flag_entropy_ICs, myrank, nr_nodes, MPI_COMM_WORLD,
-                   MPI_INFO_NULL, dry_run);
+  read_ic_parallel(ICfileName, &us, dim, &parts, &gparts, &Ngas, &Ngpart,
+                   &periodic, &flag_entropy_ICs, myrank, nr_nodes,
+                   MPI_COMM_WORLD, MPI_INFO_NULL, dry_run);
 #else
-  read_ic_serial(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
+  read_ic_serial(ICfileName, &us, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
                  &flag_entropy_ICs, myrank, nr_nodes, MPI_COMM_WORLD,
                  MPI_INFO_NULL, dry_run);
 #endif
 #else
-  read_ic_single(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
-                 &flag_entropy_ICs, dry_run);
+  read_ic_single(ICfileName, &us, dim, &parts, &gparts, &Ngas, &Ngpart,
+                 &periodic, &flag_entropy_ICs, dry_run);
 #endif
   if (myrank == 0) {
     clocks_gettime(&toc);
@@ -452,7 +452,7 @@ int main(int argc, char *argv[]) {
   if (myrank == 0) clocks_gettime(&tic);
   struct engine e;
   engine_init(&e, &s, params, nr_nodes, myrank, nr_threads, with_aff,
-              engine_policies, talking, &prog_const, &hydro_properties,
+              engine_policies, talking, &us, &prog_const, &hydro_properties,
               &potential);
   if (myrank == 0) {
     clocks_gettime(&toc);
diff --git a/src/Makefile.am b/src/Makefile.am
index a95151cfd6ca83c48c585996a231c212dc34d90c..c8554ce61940650898cfb21e282e0a6d174c79f1 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -54,7 +54,7 @@ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
 # Include files for distribution, not installation.
 nobase_noinst_HEADERS = approx_math.h atomic.h cycle.h error.h inline.h kernel_hydro.h kernel_gravity.h \
 		 vector.h runner_doiact.h runner_doiact_grav.h units.h intrinsics.h minmax.h kick.h \
-		 timestep.h drift.h adiabatic_index.h \
+		 timestep.h drift.h adiabatic_index.h io_properties.h \
 		 gravity.h gravity_io.h \
 		 gravity/Default/gravity.h gravity/Default/gravity_iact.h gravity/Default/gravity_io.h \
 		 gravity/Default/gravity_debug.h gravity/Default/gravity_part.h  \
diff --git a/src/common_io.c b/src/common_io.c
index 971fe6b01c682c489d3444ec1d47d6a902250bb8..7108ff8d9209011cba5e131220d8e33fb1de00cf 100644
--- a/src/common_io.c
+++ b/src/common_io.c
@@ -58,6 +58,7 @@ const char* particle_type_names[NUM_PARTICLE_TYPES] = {
  *way.
  */
 hid_t hdf5Type(enum DATA_TYPE type) {
+
   switch (type) {
     case INT:
       return H5T_NATIVE_INT;
@@ -87,6 +88,7 @@ hid_t hdf5Type(enum DATA_TYPE type) {
  * @brief Returns the memory size of the data type
  */
 size_t sizeOfType(enum DATA_TYPE type) {
+
   switch (type) {
     case INT:
       return sizeof(int);
@@ -112,6 +114,24 @@ size_t sizeOfType(enum DATA_TYPE type) {
   }
 }
 
+/**
+ * @brief Returns 1 if the type is double precision, 0 if single precision.
+ *
+ * Calls #error() if the type is neither FLOAT nor DOUBLE.
+ */
+int isDoublePrecision(enum DATA_TYPE type) {
+
+  switch (type) {
+    case FLOAT:
+      return 0;
+    case DOUBLE:
+      return 1;
+    default:
+      error("Invalid type");
+      return 0;
+  }
+}
+
 /**
  * @brief Reads an attribute from a given HDF5 group.
  *
@@ -273,15 +293,55 @@ void writeAttribute_s(hid_t grp, const char* name, const char* str) {
   writeStringAttribute(grp, name, str, strlen(str));
 }
 
+/**
+ * @brief Reads the Unit System from an IC file.
+ * @param h_file The (opened) HDF5 file from which to read.
+ * @param us The UnitSystem to fill.
+ *
+ * If the 'Units' group does not exist in the ICs, CGS units are assumed.
+ */
+void readUnitSystem(hid_t h_file, struct UnitSystem* us) {
+
+  hid_t h_grp = H5Gopen(h_file, "/Units", H5P_DEFAULT);
+
+  if (h_grp < 0) {
+    message("'Units' group not found in ICs. Assuming CGS unit system.");
+
+    /* Default to CGS */
+    us->UnitMass_in_cgs = 1.;
+    us->UnitLength_in_cgs = 1.;
+    us->UnitTime_in_cgs = 1.;
+    us->UnitCurrent_in_cgs = 1.;
+    us->UnitTemperature_in_cgs = 1.;
+
+    return;
+  }
+
+  /* Ok, read the units group */
+  readAttribute(h_grp, "Unit length in cgs (U_L)", DOUBLE,
+                &us->UnitLength_in_cgs);
+  readAttribute(h_grp, "Unit mass in cgs (U_M)", DOUBLE, &us->UnitMass_in_cgs);
+  readAttribute(h_grp, "Unit time in cgs (U_t)", DOUBLE, &us->UnitTime_in_cgs);
+  readAttribute(h_grp, "Unit current in cgs (U_I)", DOUBLE,
+                &us->UnitCurrent_in_cgs);
+  readAttribute(h_grp, "Unit temperature in cgs (U_T)", DOUBLE,
+                &us->UnitTemperature_in_cgs);
+
+  /* Clean up */
+  H5Gclose(h_grp);
+}
+
 /**
  * @brief Writes the current Unit System
  * @param h_file The (opened) HDF5 file in which to write
- * @param us The UnitSystem used in the run
+ * @param us The UnitSystem to dump
+ * @param groupName The name of the HDF5 group to write to
  */
-void writeUnitSystem(hid_t h_file, struct UnitSystem* us) {
-  hid_t h_grpunit = 0;
+void writeUnitSystem(hid_t h_file, const struct UnitSystem* us,
+                     const char* groupName) {
 
-  h_grpunit = H5Gcreate1(h_file, "/Units", 0);
+  hid_t h_grpunit = 0;
+  h_grpunit = H5Gcreate1(h_file, groupName, 0);
   if (h_grpunit < 0) error("Error while creating Unit System group");
 
   writeAttribute_d(h_grpunit, "Unit mass in cgs (U_M)",
@@ -484,8 +544,9 @@ void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype) {
  *
  * @todo Treat the types in a better way.
  */
-void writeXMFline(FILE* xmfFile, char* fileName, char* partTypeGroupName,
-                  char* name, size_t N, int dim, enum DATA_TYPE type) {
+void writeXMFline(FILE* xmfFile, const char* fileName,
+                  const char* partTypeGroupName, const char* name, size_t N,
+                  int dim, enum DATA_TYPE type) {
   fprintf(xmfFile,
           "<Attribute Name=\"%s\" AttributeType=\"%s\" Center=\"Node\">\n",
           name, dim == 1 ? "Scalar" : "Vector");
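
With the new groupName argument, writeUnitSystem can record more than one unit
system per file. A sketch of the intended call pattern (the group names are
assumptions, mirroring the internal/snapshot split threaded through engine.c
below):

    /* Sketch: dump both unit systems into one snapshot (names assumed). */
    writeUnitSystem(h_file, internal_units, "/InternalCodeUnits");
    writeUnitSystem(h_file, snapshot_units, "/Units");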
diff --git a/src/common_io.h b/src/common_io.h
index fa6811a26671a2ed85c12ada7bb380094f55795d..7aedee0f2624dcff916a8398e244009a87109915 100644
--- a/src/common_io.h
+++ b/src/common_io.h
@@ -45,13 +45,6 @@ enum DATA_TYPE {
   CHAR
 };
 
-/**
- * @brief The two sorts of data present in the GADGET IC files: compulsory to
- *start a run or optional.
- *
- */
-enum DATA_IMPORTANCE { COMPULSORY = 1, OPTIONAL = 0 };
-
 /**
  * @brief The different particle types present in a GADGET IC file
  *
@@ -69,10 +62,12 @@ enum PARTICLE_TYPE {
 extern const char* particle_type_names[];
 
 #define FILENAME_BUFFER_SIZE 150
-#define PARTICLE_GROUP_BUFFER_SIZE 20
+#define FIELD_BUFFER_SIZE 200
+#define PARTICLE_GROUP_BUFFER_SIZE 50
 
 hid_t hdf5Type(enum DATA_TYPE type);
 size_t sizeOfType(enum DATA_TYPE type);
+int isDoublePrecision(enum DATA_TYPE type);
 
 void collect_dm_gparts(const struct gpart* const gparts, size_t Ntot,
                        struct gpart* const dmparts, size_t Ndm);
@@ -99,11 +94,15 @@ void writeXMFoutputfooter(FILE* xmfFile, int outputCount, float time);
 void writeXMFgroupheader(FILE* xmfFile, char* hdfFileName, size_t N,
                          enum PARTICLE_TYPE ptype);
 void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype);
-void writeXMFline(FILE* xmfFile, char* fileName, char* partTypeGroupName,
-                  char* name, size_t N, int dim, enum DATA_TYPE type);
+void writeXMFline(FILE* xmfFile, const char* fileName,
+                  const char* partTypeGroupName, const char* name, size_t N,
+                  int dim, enum DATA_TYPE type);
 
 void writeCodeDescription(hid_t h_file);
-void writeUnitSystem(hid_t h_file, struct UnitSystem* us);
+
+void readUnitSystem(hid_t h_file, struct UnitSystem* us);
+void writeUnitSystem(hid_t h_grp, const struct UnitSystem* us,
+                     const char* groupName);
 
 #endif /* defined HDF5 */
 
diff --git a/src/engine.c b/src/engine.c
index 2d7903f147f3948dd5590ad11811bce91c6d073a..96d72669ca6eb4282e3baa4276fab2e7987bd97a 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -2677,14 +2677,17 @@ void engine_dump_snapshot(struct engine *e) {
 /* Dump... */
 #if defined(WITH_MPI)
 #if defined(HAVE_PARALLEL_HDF5)
-  write_output_parallel(e, e->snapshotBaseName, e->snapshotUnits, e->nodeID,
-                        e->nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
+  write_output_parallel(e, e->snapshotBaseName, e->internalUnits,
+                        e->snapshotUnits, e->nodeID, e->nr_nodes,
+                        MPI_COMM_WORLD, MPI_INFO_NULL);
 #else
-  write_output_serial(e, e->snapshotBaseName, e->snapshotUnits, e->nodeID,
-                      e->nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
+  write_output_serial(e, e->snapshotBaseName, e->internalUnits,
+                      e->snapshotUnits, e->nodeID, e->nr_nodes, MPI_COMM_WORLD,
+                      MPI_INFO_NULL);
 #endif
 #else
-  write_output_single(e, e->snapshotBaseName, e->snapshotUnits);
+  write_output_single(e, e->snapshotBaseName, e->internalUnits,
+                      e->snapshotUnits);
 #endif
 
   clocks_gettime(&time2);
@@ -2759,6 +2762,7 @@ void engine_unpin() {
  * @param with_aff use processor affinity, if supported.
  * @param policy The queuing policy to use.
  * @param verbose Is this #engine talkative ?
+ * @param internal_units The system of units used internally.
  * @param physical_constants The #phys_const used for this run.
  * @param hydro The #hydro_props used for this run.
  * @param potential The properties of the external potential.
@@ -2766,6 +2770,7 @@ void engine_unpin() {
 void engine_init(struct engine *e, struct space *s,
                  const struct swift_params *params, int nr_nodes, int nodeID,
                  int nr_threads, int with_aff, int policy, int verbose,
+                 const struct UnitSystem *internal_units,
                  const struct phys_const *physical_constants,
                  const struct hydro_props *hydro,
                  const struct external_potential *potential) {
@@ -2795,6 +2800,7 @@ void engine_init(struct engine *e, struct space *s,
   e->timeStep = 0.;
   e->timeBase = 0.;
   e->timeBase_inv = 0.;
+  e->internalUnits = internal_units;
   e->timeFirstSnapshot =
       parser_get_param_double(params, "Snapshots:time_first");
   e->deltaTimeSnapshot =
diff --git a/src/engine.h b/src/engine.h
index 8aa95b689e7cd8774cf8ddd57d46a4e838b9095a..9ec42cd3654d012fc42adda021cab7bb5a81dd58 100644
--- a/src/engine.h
+++ b/src/engine.h
@@ -139,6 +139,9 @@ struct engine {
   /* Number of particles updated */
   size_t updates, g_updates;
 
+  /* The internal system of units */
+  const struct UnitSystem *internalUnits;
+
   /* Snapshot information */
   double timeFirstSnapshot;
   double deltaTimeSnapshot;
@@ -212,6 +215,7 @@ void engine_dump_snapshot(struct engine *e);
 void engine_init(struct engine *e, struct space *s,
                  const struct swift_params *params, int nr_nodes, int nodeID,
                  int nr_threads, int with_aff, int policy, int verbose,
+                 const struct UnitSystem *internal_units,
                  const struct phys_const *physical_constants,
                  const struct hydro_props *hydro,
                  const struct external_potential *potential);
diff --git a/src/gravity/Default/gravity_io.h b/src/gravity/Default/gravity_io.h
index 74f364dd97361f0513755bedec83fe7cb277c36b..e0fd15a91b6e20048f6844b03ea1dde40114d7ec 100644
--- a/src/gravity/Default/gravity_io.h
+++ b/src/gravity/Default/gravity_io.h
@@ -17,64 +17,54 @@
  *
  ******************************************************************************/
 
+#include "io_properties.h"
+
 /**
- * @brief Read dark matter particles from HDF5.
- *
- * @param h_grp The HDF5 group in which to read the arrays.
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param gparts The particle array
+ * @brief Specifies which g-particle fields to read from a dataset
  *
+ * @param gparts The g-particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
  */
-__attribute__((always_inline)) INLINE static void darkmatter_read_particles(
-    hid_t h_grp, int N, long long N_total, long long offset,
-    struct gpart* gparts) {
+void darkmatter_read_particles(struct gpart* gparts, struct io_props* list,
+                               int* num_fields) {
+
+  /* Say how much we want to read */
+  *num_fields = 4;
 
-  /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, gparts, N_total, offset, x,
-            COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, gparts, N_total, offset, mass,
-            COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, gparts, N_total, offset, v_full,
-            COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, gparts, N_total, offset,
-            id_or_neg_offset, COMPULSORY);
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, gparts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, gparts, v_full);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                gparts, mass);
+  list[3] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, gparts, id_or_neg_offset);
 }
 
 /**
- * @brief Writes the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to write the arrays.
- * @param fileName The name of the file (unsued in MPI mode).
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param xmfFile The XMF file to write to (unused in MPI mode).
- * @param Ndm The number of DM particles on that MPI rank.
- * @param Ndm_total The total number of g-particles (only used in MPI mode)
- * @param mpi_rank The MPI rank of this node (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param gparts The #gpart array
- * @param us The unit system to use
+ * @brief Specifies which g-particle fields to write to a dataset
  *
+ * @param gparts The g-particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
  */
-__attribute__((always_inline)) INLINE static void darkmatter_write_particles(
-    hid_t h_grp, char* fileName, char* partTypeGroupName, FILE* xmfFile,
-    int Ndm, long long Ndm_total, int mpi_rank, long long offset,
-    struct gpart* gparts, struct UnitSystem* us) {
+void darkmatter_write_particles(struct gpart* gparts, struct io_props* list,
+                                int* num_fields) {
+
+  /* Say how much we want to write */
+  *num_fields = 5;
 
-  /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates", DOUBLE,
-             Ndm, 3, gparts, Ndm_total, mpi_rank, offset, x, us,
-             UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Masses", FLOAT, Ndm,
-             1, gparts, Ndm_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Velocities", FLOAT,
-             Ndm, 3, gparts, Ndm_total, mpi_rank, offset, v_full, us,
-             UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "ParticleIDs",
-             ULONGLONG, Ndm, 1, gparts, Ndm_total, mpi_rank, offset,
-             id_or_neg_offset, us, UNIT_CONV_NO_UNITS);
+  /* List what we want to write */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 gparts, x);
+  list[1] = io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED,
+                                 gparts, v_full);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, gparts, mass);
+  list[3] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, gparts, id_or_neg_offset);
+  list[4] = io_make_output_field("Acceleration", FLOAT, 3,
+                                 UNIT_CONV_ACCELERATION, gparts, a_grav);
 }
diff --git a/src/hydro/Default/hydro_io.h b/src/hydro/Default/hydro_io.h
index 31990de2e053d4c4293288f3eeede84276016df3..a122fc24961644073470f9638bad967a52ef4926 100644
--- a/src/hydro/Default/hydro_io.h
+++ b/src/hydro/Default/hydro_io.h
@@ -17,83 +17,69 @@
  *
  ******************************************************************************/
 
+#include "io_properties.h"
+#include "kernel_hydro.h"
+
 /**
- * @brief Reads the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to read the arrays.
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param parts The particle array
+ * @brief Specifies which particle fields to read from a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
  */
-__attribute__((always_inline)) INLINE static void hydro_read_particles(
-    hid_t h_grp, int N, long long N_total, long long offset,
-    struct part* parts) {
+void hydro_read_particles(struct part* parts, struct io_props* list,
+                          int* num_fields) {
+
+  *num_fields = 8;
 
-  /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total, offset, x,
-            COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total, offset, v,
-            COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total, offset, mass,
-            COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total, offset, h,
-            COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total, offset, u,
-            COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total, offset, id,
-            COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total, offset, a_hydro,
-            OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total, offset, rho,
-            OPTIONAL);
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, parts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                parts, mass);
+  list[3] = io_make_input_field("SmoothingLength", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, h);
+  list[4] = io_make_input_field("InternalEnergy", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_ENERGY, parts, u);
+  list[5] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_input_field("Accelerations", FLOAT, 3, OPTIONAL,
+                                UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_DENSITY, parts, rho);
 }
 
 /**
- * @brief Writes the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to write the arrays.
- * @param fileName The name of the file (unsued in MPI mode).
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param xmfFile The XMF file to write to (unused in MPI mode).
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param mpi_rank The MPI rank of this node (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param parts The particle array
- * @param us The unit system to use
+ * @brief Specifies which particle fields to write to a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
  */
-__attribute__((always_inline)) INLINE static void hydro_write_particles(
-    hid_t h_grp, char* fileName, char* partTypeGroupName, FILE* xmfFile, int N,
-    long long N_total, int mpi_rank, long long offset, struct part* parts,
-    struct UnitSystem* us) {
+void hydro_write_particles(struct part* parts, struct io_props* list,
+                           int* num_fields) {
+
+  *num_fields = 8;
 
-  /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates", DOUBLE,
-             N, 3, parts, N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Velocities", FLOAT,
-             N, 3, parts, N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Masses", FLOAT, N, 1,
-             parts, N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "SmoothingLength",
-             FLOAT, N, 1, parts, N_total, mpi_rank, offset, h, us,
-             UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "InternalEnergy",
-             FLOAT, N, 1, parts, N_total, mpi_rank, offset, u, us,
-             UNIT_CONV_ENERGY_PER_UNIT_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "ParticleIDs",
-             ULONGLONG, N, 1, parts, N_total, mpi_rank, offset, id, us,
-             UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Acceleration", FLOAT,
-             N, 3, parts, N_total, mpi_rank, offset, a_hydro, us,
-             UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Density", FLOAT, N,
-             1, parts, N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
+  /* List what we want to write */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 parts, x);
+  list[1] =
+      io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, v);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass);
+  list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 parts, h);
+  list[4] = io_make_output_field("Entropy", FLOAT, 1,
+                                 UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
+  list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_output_field("Acceleration", FLOAT, 3,
+                                 UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] =
+      io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho);
 }
 
 /**
diff --git a/src/hydro/Gadget2/hydro_io.h b/src/hydro/Gadget2/hydro_io.h
index 9cbdef6dd14b487e991bd84ed6545ffe4282155f..c8db0b3b64cb03e4e013c6dd8daeb16a138b6709 100644
--- a/src/hydro/Gadget2/hydro_io.h
+++ b/src/hydro/Gadget2/hydro_io.h
@@ -17,83 +17,69 @@
  *
  ******************************************************************************/
 
+#include "io_properties.h"
+#include "kernel_hydro.h"
+
 /**
- * @brief Reads the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to read the arrays.
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param parts The particle array
+ * @brief Specifies which particle fields to read from a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
  */
-__attribute__((always_inline)) INLINE static void hydro_read_particles(
-    hid_t h_grp, int N, long long N_total, long long offset,
-    struct part* parts) {
+void hydro_read_particles(struct part* parts, struct io_props* list,
+                          int* num_fields) {
+
+  *num_fields = 8;
 
-  /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total, offset, x,
-            COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total, offset, v,
-            COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total, offset, mass,
-            COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total, offset, h,
-            COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total, offset,
-            entropy, COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total, offset, id,
-            COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total, offset, a_hydro,
-            OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total, offset, rho,
-            OPTIONAL);
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, parts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                parts, mass);
+  list[3] = io_make_input_field("SmoothingLength", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, h);
+  list[4] = io_make_input_field("InternalEnergy", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_ENERGY, parts, entropy);
+  list[5] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_input_field("Accelerations", FLOAT, 3, OPTIONAL,
+                                UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_DENSITY, parts, rho);
 }
 
 /**
- * @brief Writes the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to write the arrays.
- * @param fileName The name of the file (unsued in MPI mode).
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param xmfFile The XMF file to write to (unused in MPI mode).
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param mpi_rank The MPI rank of this node (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param parts The particle array
- * @param us The unit system to use
+ * @brief Specifies which particle fields to write to a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
  */
-__attribute__((always_inline)) INLINE static void hydro_write_particles(
-    hid_t h_grp, char* fileName, char* partTypeGroupName, FILE* xmfFile, int N,
-    long long N_total, int mpi_rank, long long offset, struct part* parts,
-    struct UnitSystem* us) {
+void hydro_write_particles(struct part* parts, struct io_props* list,
+                           int* num_fields) {
+
+  *num_fields = 8;
 
-  /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates", DOUBLE,
-             N, 3, parts, N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Velocities", FLOAT,
-             N, 3, parts, N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Masses", FLOAT, N, 1,
-             parts, N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "SmoothingLength",
-             FLOAT, N, 1, parts, N_total, mpi_rank, offset, h, us,
-             UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "InternalEnergy",
-             FLOAT, N, 1, parts, N_total, mpi_rank, offset, entropy, us,
-             UNIT_CONV_ENTROPY_PER_UNIT_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "ParticleIDs",
-             ULONGLONG, N, 1, parts, N_total, mpi_rank, offset, id, us,
-             UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Acceleration", FLOAT,
-             N, 3, parts, N_total, mpi_rank, offset, a_hydro, us,
-             UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Density", FLOAT, N,
-             1, parts, N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
+  /* List what we want to write */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 parts, x);
+  list[1] =
+      io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, v);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass);
+  list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 parts, h);
+  list[4] = io_make_output_field(
+      "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, parts, entropy);
+  list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_output_field("Acceleration", FLOAT, 3,
+                                 UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] =
+      io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho);
 }
 
 /**
diff --git a/src/hydro/Minimal/hydro_io.h b/src/hydro/Minimal/hydro_io.h
index 40b9a4b0e66bb5e540cb28228fb2fe01bd608b22..6b52481e9e77ffb7d3c35edd0b2da834ff035a87 100644
--- a/src/hydro/Minimal/hydro_io.h
+++ b/src/hydro/Minimal/hydro_io.h
@@ -17,83 +17,69 @@
  *
  ******************************************************************************/
 
+#include "io_properties.h"
+#include "kernel_hydro.h"
+
 /**
- * @brief Reads the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to read the arrays.
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param parts The particle array
+ * @brief Specifies which particle fields to read from a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
  */
-__attribute__((always_inline)) INLINE static void hydro_read_particles(
-    hid_t h_grp, int N, long long N_total, long long offset,
-    struct part* parts) {
+void hydro_read_particles(struct part* parts, struct io_props* list,
+                          int* num_fields) {
+
+  *num_fields = 8;
 
-  /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total, offset, x,
-            COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total, offset, v,
-            COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total, offset, mass,
-            COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total, offset, h,
-            COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total, offset, u,
-            COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total, offset, id,
-            COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total, offset, a_hydro,
-            OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total, offset, rho,
-            OPTIONAL);
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, parts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                parts, mass);
+  list[3] = io_make_input_field("SmoothingLength", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, h);
+  list[4] = io_make_input_field("InternalEnergy", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_ENERGY, parts, u);
+  list[5] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_input_field("Accelerations", FLOAT, 3, OPTIONAL,
+                                UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_DENSITY, parts, rho);
 }
 
 /**
- * @brief Writes the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to write the arrays.
- * @param fileName The name of the file (unsued in MPI mode).
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param xmfFile The XMF file to write to (unused in MPI mode).
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param mpi_rank The MPI rank of this node (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- *mode)
- * @param parts The particle array
- * @param us The unit system to use
+ * @brief Specifies which particle fields to write to a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
  */
-__attribute__((always_inline)) INLINE static void hydro_write_particles(
-    hid_t h_grp, char* fileName, char* partTypeGroupName, FILE* xmfFile, int N,
-    long long N_total, int mpi_rank, long long offset, struct part* parts,
-    struct UnitSystem* us) {
+void hydro_write_particles(struct part* parts, struct io_props* list,
+                           int* num_fields) {
+
+  *num_fields = 8;
 
-  /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates", DOUBLE,
-             N, 3, parts, N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Velocities", FLOAT,
-             N, 3, parts, N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Masses", FLOAT, N, 1,
-             parts, N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "SmoothingLength",
-             FLOAT, N, 1, parts, N_total, mpi_rank, offset, h, us,
-             UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "InternalEnergy",
-             FLOAT, N, 1, parts, N_total, mpi_rank, offset, u, us,
-             UNIT_CONV_ENERGY_PER_UNIT_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "ParticleIDs",
-             ULONGLONG, N, 1, parts, N_total, mpi_rank, offset, id, us,
-             UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Acceleration", FLOAT,
-             N, 3, parts, N_total, mpi_rank, offset, a_hydro, us,
-             UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Density", FLOAT, N,
-             1, parts, N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
+  /* List what we want to write */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 parts, x);
+  list[1] =
+      io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, v);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass);
+  list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 parts, h);
+  list[4] = io_make_output_field("Entropy", FLOAT, 1,
+                                 UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
+  list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_output_field("Acceleration", FLOAT, 3,
+                                 UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] =
+      io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho);
 }
 
 /**
diff --git a/src/io_properties.h b/src/io_properties.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e19420d006ee8043ac418045b6889d885fbd66a
--- /dev/null
+++ b/src/io_properties.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016  Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_IO_PROPERTIES_H
+#define SWIFT_IO_PROPERTIES_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/**
+ * @brief The two sorts of data present in the GADGET IC files: compulsory to
+ * start a run or optional.
+ */
+enum DATA_IMPORTANCE { COMPULSORY = 1, OPTIONAL = 0 };
+
+/**
+ * @brief The properties of a given dataset for i/o
+ */
+struct io_props {
+
+  /* Name */
+  char name[FIELD_BUFFER_SIZE];
+
+  /* Type of the field */
+  enum DATA_TYPE type;
+
+  /* Dimension (1D, 3D, ...) */
+  int dimension;
+
+  /* Is it compulsory? (input only) */
+  enum DATA_IMPORTANCE importance;
+
+  /* Units of the quantity */
+  enum UnitConversionFactor units;
+
+  /* Pointer to the field of the first particle in the array */
+  char* field;
+
+  /* The size in bytes of one particle */
+  size_t partSize;
+};
+
+/**
+ * @brief Constructs an #io_props from its parameters
+ */
+#define io_make_input_field(name, type, dim, importance, units, part, field) \
+  io_make_input_field_(name, type, dim, importance, units,                   \
+                       (char*)(&(part[0]).field), sizeof(part[0]))
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to read
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param importance Is this dataset compulsory?
+ * @param units The units of the dataset
+ * @param field Pointer to the field of the first particle
+ * @param partSize The size in bytes of the particle
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+struct io_props io_make_input_field_(char name[FIELD_BUFFER_SIZE],
+                                     enum DATA_TYPE type, int dimension,
+                                     enum DATA_IMPORTANCE importance,
+                                     enum UnitConversionFactor units,
+                                     char* field, size_t partSize) {
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = importance;
+  r.units = units;
+  r.field = field;
+  r.partSize = partSize;
+
+  return r;
+}
+
+/**
+ * @brief Constructs an #io_props from its parameters
+ */
+#define io_make_output_field(name, type, dim, units, part, field)          \
+  io_make_output_field_(name, type, dim, units, (char*)(&(part[0]).field), \
+                        sizeof(part[0]))
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to write
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param units The units of the dataset
+ * @param field Pointer to the field of the first particle
+ * @param partSize The size in bytes of the particle
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+struct io_props io_make_output_field_(char name[FIELD_BUFFER_SIZE],
+                                      enum DATA_TYPE type, int dimension,
+                                      enum UnitConversionFactor units,
+                                      char* field, size_t partSize) {
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = OPTIONAL; /* unused for output fields */
+  r.units = units;
+  r.field = field;
+  r.partSize = partSize;
+
+  return r;
+}
+
+#endif /* SWIFT_IO_PROPERTIES_H */
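
The two macros capture the address of the requested member in the first array
element together with the size of one element; that pair is all the generic
back-ends need to stride through any particle type. A self-contained sketch
with a hypothetical particle struct:

    /* Hypothetical particle type; only the offset/stride trick is shown. */
    struct demo_part {
      double x[3];
      float mass;
    };

    void demo_fields(struct demo_part *parts, struct io_props *list,
                     int *num_fields) {
      *num_fields = 2;
      list[0] = io_make_output_field("Coordinates", DOUBLE, 3,
                                     UNIT_CONV_LENGTH, parts, x);
      list[1] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS,
                                     parts, mass);
      /* Element i's mass lives at list[1].field + i * list[1].partSize. */
    }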
diff --git a/src/parallel_io.c b/src/parallel_io.c
index cd752822e8038448fc4e6664c4a42b9b71bc7033..47f2e893f9046dc9f517c1d362b57217a520a318 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -40,6 +40,10 @@
 #include "engine.h"
 #include "error.h"
 #include "hydro_properties.h"
+#include "gravity_io.h"
+#include "hydro_io.h"
+#include "io_properties.h"
+#include "kernel_hydro.h"
 #include "part.h"
 #include "units.h"
 
@@ -62,55 +66,54 @@
  *
  * Calls #error() if an error occurs.
  */
-void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
-                      int dim, long long N_total, long long offset,
-                      char* part_c, enum DATA_IMPORTANCE importance) {
-  hid_t h_data = 0, h_err = 0, h_type = 0, h_memspace = 0, h_filespace = 0,
-        h_plist_id = 0;
-  hsize_t shape[2], offsets[2];
-  htri_t exist = 0;
-  void* temp;
-  int i = 0, rank = 0;
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  const size_t partSize = sizeof(struct part);
-  char* temp_c = 0;
+void readArray(hid_t grp, const struct io_props prop, size_t N,
+               long long N_total, long long offset,
+               const struct UnitSystem* internal_units,
+               const struct UnitSystem* ic_units) {
+
+  const size_t typeSize = sizeOfType(prop.type);
+  const size_t copySize = typeSize * prop.dimension;
+  const size_t num_elements = N * prop.dimension;
 
   /* Check whether the dataspace exists or not */
-  exist = H5Lexists(grp, name, 0);
+  const htri_t exist = H5Lexists(grp, prop.name, 0);
   if (exist < 0) {
-    error("Error while checking the existence of data set '%s'.", name);
+    error("Error while checking the existence of data set '%s'.", prop.name);
   } else if (exist == 0) {
-    if (importance == COMPULSORY) {
-      error("Compulsory data set '%s' not present in the file.", name);
+    if (prop.importance == COMPULSORY) {
+      error("Compulsory data set '%s' not present in the file.", prop.name);
     } else {
-      for (i = 0; i < N; ++i) memset(part_c + i * partSize, 0, copySize);
+      for (size_t i = 0; i < N; ++i)
+        memset(prop.field + i * prop.partSize, 0, copySize);
       return;
     }
   }
 
-  /* message( "Reading %s '%s' array...", importance == COMPULSORY ?
-   * "compulsory": "optional  ", name); */
+  /* message("Reading %s '%s' array...", */
+  /*         prop.importance == COMPULSORY ? "compulsory" : "optional  ", */
+  /*         prop.name); */
 
   /* Open data space in file */
-  h_data = H5Dopen2(grp, name, H5P_DEFAULT);
-  if (h_data < 0) error("Error while opening data space '%s'.", name);
+  const hid_t h_data = H5Dopen2(grp, prop.name, H5P_DEFAULT);
+  if (h_data < 0) error("Error while opening data space '%s'.", prop.name);
 
   /* Check data type */
-  h_type = H5Dget_type(h_data);
+  const hid_t h_type = H5Dget_type(h_data);
   if (h_type < 0) error("Unable to retrieve data type from the file");
   /* if (!H5Tequal(h_type, hdf5Type(type))) */
   /*   error("Non-matching types between the code and the file"); */
 
   /* Allocate temporary buffer */
-  temp = malloc(N * dim * typeSize);
+  void* temp = malloc(num_elements * typeSize);
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Prepare information for hyper-slab */
-  if (dim > 1) {
+  hsize_t shape[2], offsets[2];
+  int rank;
+  if (prop.dimension > 1) {
     rank = 2;
     shape[0] = N;
-    shape[1] = dim;
+    shape[1] = prop.dimension;
     offsets[0] = offset;
     offsets[1] = 0;
   } else {
@@ -122,29 +125,45 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
   }
 
   /* Create data space in memory */
-  h_memspace = H5Screate_simple(rank, shape, NULL);
+  const hid_t h_memspace = H5Screate_simple(rank, shape, NULL);
 
   /* Select hyper-slab in file */
-  h_filespace = H5Dget_space(h_data);
+  const hid_t h_filespace = H5Dget_space(h_data);
   H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);
 
   /* Set collective reading properties */
-  h_plist_id = H5Pcreate(H5P_DATASET_XFER);
+  const hid_t h_plist_id = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE);
 
   /* Read HDF5 dataspace in temporary buffer */
   /* Dirty version that happens to work for vectors but should be improved */
   /* Using HDF5 dataspaces would be better */
-  h_err = H5Dread(h_data, hdf5Type(type), h_memspace, h_filespace, h_plist_id,
-                  temp);
+  const hid_t h_err = H5Dread(h_data, hdf5Type(prop.type), h_memspace,
+                              h_filespace, h_plist_id, temp);
   if (h_err < 0) {
-    error("Error while reading data array '%s'.", name);
+    error("Error while reading data array '%s'.", prop.name);
+  }
+
+  /* Unit conversion if necessary */
+  const double factor =
+      units_conversion_factor(ic_units, internal_units, prop.units);
+  if (factor != 1. && exist != 0) {
+
+    /* message("Converting ! factor=%e", factor); */
+
+    if (isDoublePrecision(prop.type)) {
+      double* temp_d = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
+    } else {
+      float* temp_f = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+    }
   }
 
   /* Copy temporary buffer to particle data */
-  temp_c = temp;
-  for (i = 0; i < N; ++i)
-    memcpy(part_c + i * partSize, &temp_c[i * copySize], copySize);
+  char* temp_c = temp;
+  for (size_t i = 0; i < N; ++i)
+    memcpy(prop.field + i * prop.partSize, &temp_c[i * copySize], copySize);
 
   /* Free and close everything */
   free(temp);
@@ -165,66 +184,75 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
  * @param grp The group in which to write.
  * @param fileName The name of the file in which the data is written
  * @param xmfFile The FILE used to write the XMF description
- * @param name The name of the array to write.
- * @param type The #DATA_TYPE of the array.
  * @param N The number of particles to write.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
  * @param N_total Total number of particles across all cores
  * @param offset Offset in the array where this mpi task starts writing
- * @param part_c A (char*) pointer on the first occurrence of the field of
- *interest in the parts array
- * @param us The UnitSystem currently in use
- * @param convFactor The UnitConversionFactor for this array
+ * @param internal_units The #UnitSystem used internally
+ * @param snapshot_units The #UnitSystem used in the snapshots
  *
  * @todo A better version using HDF5 hyper-slabs to write the file directly from
- *the part array
- * will be written once the structures have been stabilized.
+ * the part array will be written once the structures have been stabilized.
  *
- * Calls #error() if an error occurs.
  */
-void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
-                       char* partTypeGroupName, char* name, enum DATA_TYPE type,
-                       int N, int dim, long long N_total, int mpi_rank,
-                       long long offset, char* part_c, size_t partSize,
-                       struct UnitSystem* us,
-                       enum UnitConversionFactor convFactor) {
-  hid_t h_data = 0, h_err = 0, h_memspace = 0, h_filespace = 0, h_plist_id = 0;
-  hsize_t shape[2], shape_total[2], offsets[2];
-  void* temp = 0;
-  int i = 0, rank = 0;
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  char* temp_c = 0;
-  char buffer[150];
-
-  /* message("Writing '%s' array...", name); */
+void writeArray(hid_t grp, char* fileName, FILE* xmfFile,
+                char* partTypeGroupName, const struct io_props props, size_t N,
+                long long N_total, int mpi_rank, long long offset,
+                const struct UnitSystem* internal_units,
+                const struct UnitSystem* snapshot_units) {
+
+  const size_t typeSize = sizeOfType(props.type);
+  const size_t copySize = typeSize * props.dimension;
+  const size_t num_elements = N * props.dimension;
+
+  /* message("Writing '%s' array...", props.name); */
 
   /* Allocate temporary buffer */
-  temp = malloc(N * dim * sizeOfType(type));
+  void* temp = malloc(num_elements * sizeOfType(props.type));
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Copy particle data to temporary buffer */
-  temp_c = temp;
-  for (i = 0; i < N; ++i)
-    memcpy(&temp_c[i * copySize], part_c + i * partSize, copySize);
+  char* temp_c = temp;
+  for (size_t i = 0; i < N; ++i)
+    memcpy(&temp_c[i * copySize], props.field + i * props.partSize, copySize);
+
+  /* Unit conversion if necessary */
+  const double factor =
+      units_conversion_factor(internal_units, snapshot_units, props.units);
+  if (factor != 1.) {
+
+    /* message("Converting ! factor=%e", factor); */
+
+    if (isDoublePrecision(props.type)) {
+      double* temp_d = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
+    } else {
+      float* temp_f = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+    }
+  }
 
   /* Create data space */
-  h_memspace = H5Screate(H5S_SIMPLE);
+  const hid_t h_memspace = H5Screate(H5S_SIMPLE);
   if (h_memspace < 0) {
-    error("Error while creating data space (memory) for field '%s'.", name);
+    error("Error while creating data space (memory) for field '%s'.",
+          props.name);
   }
 
-  h_filespace = H5Screate(H5S_SIMPLE);
+  hid_t h_filespace = H5Screate(H5S_SIMPLE);
   if (h_filespace < 0) {
-    error("Error while creating data space (file) for field '%s'.", name);
+    error("Error while creating data space (file) for field '%s'.", props.name);
   }
 
-  if (dim > 1) {
+  int rank;
+  hsize_t shape[2];
+  hsize_t shape_total[2];
+  hsize_t offsets[2];
+  if (props.dimension > 1) {
     rank = 2;
     shape[0] = N;
-    shape[1] = dim;
+    shape[1] = props.dimension;
     shape_total[0] = N_total;
-    shape_total[1] = dim;
+    shape_total[1] = props.dimension;
     offsets[0] = offset;
     offsets[1] = 0;
   } else {
@@ -238,23 +266,25 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   }
 
   /* Change shape of memory data space */
-  h_err = H5Sset_extent_simple(h_memspace, rank, shape, NULL);
+  hid_t h_err = H5Sset_extent_simple(h_memspace, rank, shape, NULL);
   if (h_err < 0) {
     error("Error while changing data space (memory) shape for field '%s'.",
-          name);
+          props.name);
   }
 
   /* Change shape of file data space */
   h_err = H5Sset_extent_simple(h_filespace, rank, shape_total, NULL);
   if (h_err < 0) {
-    error("Error while changing data space (file) shape for field '%s'.", name);
+    error("Error while changing data space (file) shape for field '%s'.",
+          props.name);
   }
 
   /* Create dataset */
-  h_data = H5Dcreate(grp, name, hdf5Type(type), h_filespace, H5P_DEFAULT,
-                     H5P_DEFAULT, H5P_DEFAULT);
+  const hid_t h_data =
+      H5Dcreate(grp, props.name, hdf5Type(props.type), h_filespace, H5P_DEFAULT,
+                H5P_DEFAULT, H5P_DEFAULT);
   if (h_data < 0) {
-    error("Error while creating dataset '%s'.", name);
+    error("Error while creating dataset '%s'.", props.name);
   }
 
   H5Sclose(h_filespace);
@@ -262,27 +292,30 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);
 
   /* Create property list for collective dataset write.    */
-  h_plist_id = H5Pcreate(H5P_DATASET_XFER);
+  const hid_t h_plist_id = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE);
 
   /* Write temporary buffer to HDF5 dataspace */
-  h_err = H5Dwrite(h_data, hdf5Type(type), h_memspace, h_filespace, h_plist_id,
-                   temp);
+  h_err = H5Dwrite(h_data, hdf5Type(props.type), h_memspace, h_filespace,
+                   h_plist_id, temp);
   if (h_err < 0) {
-    error("Error while writing data array '%s'.", name);
+    error("Error while writing data array '%s'.", props.name);
   }
 
   /* Write XMF description for this data set */
   if (mpi_rank == 0)
-    writeXMFline(xmfFile, fileName, partTypeGroupName, name, N_total, dim,
-                 type);
+    writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N_total,
+                 props.dimension, props.type);
 
   /* Write unit conversion factors for this data set */
-  units_conversion_string(buffer, us, convFactor);
+  char buffer[FIELD_BUFFER_SIZE];
+  units_cgs_conversion_string(buffer, snapshot_units, props.units);
   writeAttribute_d(h_data, "CGS conversion factor",
-                   units_conversion_factor(us, convFactor));
-  writeAttribute_f(h_data, "h-scale exponent", units_h_factor(us, convFactor));
-  writeAttribute_f(h_data, "a-scale exponent", units_a_factor(us, convFactor));
+                   units_cgs_conversion_factor(snapshot_units, props.units));
+  writeAttribute_f(h_data, "h-scale exponent",
+                   units_h_factor(snapshot_units, props.units));
+  writeAttribute_f(h_data, "a-scale exponent",
+                   units_a_factor(snapshot_units, props.units));
   writeAttribute_s(h_data, "Conversion factor", buffer);
 
   /* Free and close everything */
@@ -293,66 +326,21 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   H5Sclose(h_filespace);
 }
 
-/**
- * @brief A helper macro to call the readArrayBackEnd function more easily.
- *
- * @param grp The group from which to read.
- * @param name The name of the array to read.
- * @param type The #DATA_TYPE of the attribute.
- * @param N The number of particles on this MPI task.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part The array of particles to fill
- * @param N_total Total number of particles
- * @param offset Offset in the array where this task starts reading
- * @param field The name of the field (C code name as defined in part.h) to fill
- * @param importance Is the data compulsory or not
- *
- */
-#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
-                  importance)                                            \
-  readArrayBackEnd(grp, name, type, N, dim, N_total, offset,             \
-                   (char*)(&(part[0]).field), importance)
-
-/**
- * @brief A helper macro to call the writeArrayBackEnd function more easily.
- *
- * @param grp The group in which to write.
- * @param fileName The name of the file in which the data is written
- * @param xmfFile The FILE used to write the XMF description
- * @param name The name of the array to write.
- * @param type The #DATA_TYPE of the array.
- * @param N The number of particles to write from this core.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param N_total Total number of particles across all cores
- * @param mpi_rank The MPI task rank calling the function
- * @param offset Offset in the array where this mpi task starts writing
- * @param part A (char*) pointer on the first occurrence of the field of
- *interest
- *in the parts array
- * @param field The name (code name) of the field to read from.
- * @param us The UnitSystem currently in use
- * @param convFactor The UnitConversionFactor for this array
- *
- */
-#define writeArray(grp, fileName, xmfFile, pTypeGroupName, name, type, N, dim, \
-                   part, N_total, mpi_rank, offset, field, us, convFactor)     \
-  writeArrayBackEnd(grp, fileName, xmfFile, pTypeGroupName, name, type, N,     \
-                    dim, N_total, mpi_rank, offset, (char*)(&(part[0]).field), \
-                    sizeof(part[0]), us, convFactor)
-
-/* Import the right hydro definition */
-#include "hydro_io.h"
-/* Import the right gravity definition */
-#include "gravity_io.h"
-
 /**
  * @brief Reads an HDF5 initial condition file (GADGET-3 type) in parallel
  *
  * @param fileName The file to read.
+ * @param internal_units The #UnitSystem used internally
  * @param dim (output) The dimension of the volume read from the file.
  * @param parts (output) The array of #part read from the file.
- * @param N (output) The number of particles read from the file.
+ * @param gparts (output) The array of #gpart read from the file.
+ * @param Ngas (output) The number of #part read from the file on that node.
+ * @param Ngparts (output) The number of #gpart read from the file on that node.
  * @param periodic (output) 1 if the volume is periodic, 0 if not.
+ * @param flag_entropy (output) 1 if the ICs contained Entropy in the
+ * InternalEnergy field
+ * @param mpi_rank The MPI rank of this node
+ * @param mpi_size The number of MPI ranks
+ * @param comm The MPI communicator
+ * @param info The MPI information object
  * @param dry_run If 1, don't read the particles. Only allocates the arrays.
  *
  * Opens the HDF5 file fileName and reads the particles contained
@@ -365,10 +353,11 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
  * Calls #error() if an error occurs.
  *
  */
-void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
-                      struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
-                      int* periodic, int* flag_entropy, int mpi_rank,
-                      int mpi_size, MPI_Comm comm, MPI_Info info, int dry_run) {
+void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units,
+                      double dim[3], struct part** parts, struct gpart** gparts,
+                      size_t* Ngas, size_t* Ngparts, int* periodic,
+                      int* flag_entropy, int mpi_rank, int mpi_size,
+                      MPI_Comm comm, MPI_Info info, int dry_run) {
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
   double boxSize[3] = {0.0, -1.0, -1.0};
@@ -431,6 +420,42 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
   /* Close header */
   H5Gclose(h_grp);
 
+  /* Read the unit system used in the ICs */
+  struct UnitSystem* ic_units = malloc(sizeof(struct UnitSystem));
+  if (ic_units == NULL) error("Unable to allocate memory for IC unit system");
+  readUnitSystem(h_file, ic_units);
+
+  /* Tell the user if a conversion will be needed */
+  if (mpi_rank == 0) {
+    if (units_are_equal(ic_units, internal_units)) {
+
+      message("IC and internal units match. No conversion needed.");
+
+    } else {
+
+      message("Conversion needed from:");
+      message("(ICs) Unit system: U_M =      %e g.", ic_units->UnitMass_in_cgs);
+      message("(ICs) Unit system: U_L =      %e cm.",
+              ic_units->UnitLength_in_cgs);
+      message("(ICs) Unit system: U_t =      %e s.", ic_units->UnitTime_in_cgs);
+      message("(ICs) Unit system: U_I =      %e A.",
+              ic_units->UnitCurrent_in_cgs);
+      message("(ICs) Unit system: U_T =      %e K.",
+              ic_units->UnitTemperature_in_cgs);
+      message("to:");
+      message("(internal) Unit system: U_M = %e g.",
+              internal_units->UnitMass_in_cgs);
+      message("(internal) Unit system: U_L = %e cm.",
+              internal_units->UnitLength_in_cgs);
+      message("(internal) Unit system: U_t = %e s.",
+              internal_units->UnitTime_in_cgs);
+      message("(internal) Unit system: U_I = %e A.",
+              internal_units->UnitCurrent_in_cgs);
+      message("(internal) Unit system: U_T = %e K.",
+              internal_units->UnitTemperature_in_cgs);
+    }
+  }
+
   /* Allocate memory to store SPH particles */
   *Ngas = N[0];
   if (posix_memalign((void*)parts, part_align, (*Ngas) * sizeof(struct part)) !=
@@ -471,25 +496,42 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
       error("Error while opening particle group %s.", partTypeGroupName);
     }
 
+    int num_fields = 0;
+    struct io_props list[100];
+    size_t N = 0;
+
     /* Read particle fields into the particle structure */
     switch (ptype) {
 
       case GAS:
-        if (!dry_run)
-          hydro_read_particles(h_grp, N[ptype], N_total[ptype], offset[ptype],
-                               *parts);
+        N = *Ngas;
+        hydro_read_particles(*parts, list, &num_fields);
         break;
 
       case DM:
-        if (!dry_run)
-          darkmatter_read_particles(h_grp, N[ptype], N_total[ptype],
-                                    offset[ptype], *gparts);
+        N = Ndm;
+        darkmatter_read_particles(*gparts, list, &num_fields);
         break;
 
       default:
         message("Particle Type %d not yet supported. Particles ignored", ptype);
     }
 
+    /* Read everything */
+    if (!dry_run)
+      for (int i = 0; i < num_fields; ++i)
+        readArray(h_grp, list[i], N, N_total[ptype], offset[ptype],
+                  internal_units, ic_units);
+
     /* Close particle group */
     H5Gclose(h_grp);
   }
@@ -502,6 +544,9 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
 
   /* message("Done Reading particles..."); */
 
+  /* Clean up */
+  free(ic_units);
+
   /* Close property handler */
   H5Pclose(h_plist_id);
 
@@ -515,25 +560,27 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
  *
  * @param e The engine containing all the system.
  * @param baseName The common part of the snapshot file name.
- * @param us The UnitSystem used for the conversion of units in the output.
+ * @param internal_units The #UnitSystem used internally
+ * @param snapshot_units The #UnitSystem used in the snapshots
  * @param mpi_rank The MPI rank of this node.
  * @param mpi_size The number of MPI ranks.
  * @param comm The MPI communicator.
  * @param info The MPI information object
  *
  * Creates an HDF5 output file and writes the particles
- *contained
- * in the engine. If such a file already exists, it is
- *erased and replaced
- * by the new one.
+ * contained in the engine. If such a file already exists, it is
+ * erased and replaced by the new one.
  * The companion XMF file is also updated accordingly.
  *
  * Calls #error() if an error occurs.
  *
  */
 void write_output_parallel(struct engine* e, const char* baseName,
-                           struct UnitSystem* us, int mpi_rank, int mpi_size,
-                           MPI_Comm comm, MPI_Info info) {
+                           const struct UnitSystem* internal_units,
+                           const struct UnitSystem* snapshot_units,
+                           int mpi_rank, int mpi_size, MPI_Comm comm,
+                           MPI_Info info) {
+
   hid_t h_file = 0, h_grp = 0;
   const size_t Ngas = e->s->nr_parts;
   const size_t Ntot = e->s->nr_gparts;
@@ -653,8 +700,11 @@ void write_output_parallel(struct engine* e, const char* baseName,
   parser_write_params_to_hdf5(e->parameter_file, h_grp);
   H5Gclose(h_grp);
 
-  /* Print the system of Units */
-  writeUnitSystem(h_file, us);
+  /* Print the system of Units used in the snapshot */
+  writeUnitSystem(h_file, snapshot_units, "Units");
+
+  /* Print the system of Units used internally */
+  writeUnitSystem(h_file, internal_units, "InternalCodeUnits");
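+
+  /* Writing both groups lets a reader interpret the snapshot data ("Units")
+   * and recover the unit system the run used ("InternalCodeUnits"). */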
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) {
@@ -677,14 +727,16 @@ void write_output_parallel(struct engine* e, const char* baseName,
       error("Error while opening particle group %s.", partTypeGroupName);
     }
 
-    /* Read particle fields into the particle structure */
+    int num_fields = 0;
+    struct io_props list[100];
+    size_t N = 0;
+
+    /* Write particle fields from the particle structure */
     switch (ptype) {
 
       case GAS:
-        hydro_write_particles(h_grp, fileName, partTypeGroupName, xmfFile,
-                              N[ptype], N_total[ptype], mpi_rank, offset[ptype],
-                              parts, us);
-
+        N = Ngas;
+        hydro_write_particles(parts, list, &num_fields);
         break;
 
       case DM:
@@ -700,9 +752,8 @@ void write_output_parallel(struct engine* e, const char* baseName,
         collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
 
         /* Write DM particles */
-        darkmatter_write_particles(h_grp, fileName, partTypeGroupName, xmfFile,
-                                   N[ptype], N_total[ptype], mpi_rank,
-                                   offset[ptype], dmparts, us);
+        N = Ndm;
+        darkmatter_write_particles(dmparts, list, &num_fields);
 
-        /* Free temporary array */
-        free(dmparts);
@@ -712,6 +763,15 @@ void write_output_parallel(struct engine* e, const char* baseName,
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Write everything */
+    for (int i = 0; i < num_fields; ++i)
+      writeArray(h_grp, fileName, xmfFile, partTypeGroupName, list[i], N,
+                 N_total[ptype], mpi_rank, offset[ptype], internal_units,
+                 snapshot_units);
+
+    /* Free temporary array */
+    free(dmparts);
+
     /* Close particle group */
     H5Gclose(h_grp);
 
diff --git a/src/parallel_io.h b/src/parallel_io.h
index 709479fab029e862caddb36c4216b85077e96cec..e5b12aa50c30b4d63ccc81835d2d8454e01b3889 100644
--- a/src/parallel_io.h
+++ b/src/parallel_io.h
@@ -34,15 +34,17 @@
 
 #if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)
 
-void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
-                      struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
-                      int* periodic, int* flag_entropy, int mpi_rank,
-                      int mpi_size, MPI_Comm comm, MPI_Info info, int dry_run);
+void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units,
+                      double dim[3], struct part** parts, struct gpart** gparts,
+                      size_t* Ngas, size_t* Ngparts, int* periodic,
+                      int* flag_entropy, int mpi_rank, int mpi_size,
+                      MPI_Comm comm, MPI_Info info, int dry_run);
 
 void write_output_parallel(struct engine* e, const char* baseName,
-                           struct UnitSystem* us, int mpi_rank, int mpi_size,
-                           MPI_Comm comm, MPI_Info info);
-
+                           const struct UnitSystem* internal_units,
+                           const struct UnitSystem* snapshot_units,
+                           int mpi_rank, int mpi_size, MPI_Comm comm,
+                           MPI_Info info);
 #endif
 
 #endif /* SWIFT_PARALLEL_IO_H */
diff --git a/src/physical_constants.c b/src/physical_constants.c
index d00d63df1c1e6a4821ce4ba50dfef9c4e0def9d2..b726678ec1c0f2c85e409da3898dc3b6b842750f 100644
--- a/src/physical_constants.c
+++ b/src/physical_constants.c
@@ -40,65 +40,68 @@ void phys_const_init(struct UnitSystem* us, struct phys_const* internal_const) {
 
   const float dimension_G[5] = {-1, 3, -2, 0, 0};
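+  /* Exponents are ordered {U_M, U_L, U_t, U_I, U_T}, so this array encodes
+   * G ~ M^-1 L^3 t^-2; each CGS value below is divided by the matching
+   * conversion factor to express the constant in internal units. */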
   internal_const->const_newton_G =
-      const_newton_G_cgs / units_general_conversion_factor(us, dimension_G);
+      const_newton_G_cgs / units_general_cgs_conversion_factor(us, dimension_G);
 
   const float dimension_c[5] = {0, 1, -1, 0, 0};
   internal_const->const_speed_light_c =
       const_speed_light_c_cgs /
-      units_general_conversion_factor(us, dimension_c);
+      units_general_cgs_conversion_factor(us, dimension_c);
 
   const float dimension_h[5] = {1, -2, -1, 0, 0};
   internal_const->const_planck_h =
-      const_planck_h_cgs / units_general_conversion_factor(us, dimension_h);
+      const_planck_h_cgs / units_general_cgs_conversion_factor(us, dimension_h);
   internal_const->const_planck_hbar =
-      const_planck_hbar_cgs / units_general_conversion_factor(us, dimension_h);
+      const_planck_hbar_cgs /
+      units_general_cgs_conversion_factor(us, dimension_h);
 
   const float dimension_k[5] = {1, 2, -2, 0, -1};
   internal_const->const_boltzmann_k =
-      const_boltzmann_k_cgs / units_general_conversion_factor(us, dimension_k);
+      const_boltzmann_k_cgs /
+      units_general_cgs_conversion_factor(us, dimension_k);
 
   const float dimension_thomson[5] = {0, 2, 0, 0, 0};
   internal_const->const_thomson_cross_section =
       const_thomson_cross_section_cgs /
-      units_general_conversion_factor(us, dimension_thomson);
+      units_general_cgs_conversion_factor(us, dimension_thomson);
 
   const float dimension_ev[5] = {1, 2, -2, 0, 0};
   internal_const->const_electron_volt =
       const_electron_volt_cgs /
-      units_general_conversion_factor(us, dimension_ev);
+      units_general_cgs_conversion_factor(us, dimension_ev);
 
   const float dimension_charge[5] = {0, 0, -1, 1, 0};
   internal_const->const_electron_charge =
       const_electron_charge_cgs /
-      units_general_conversion_factor(us, dimension_charge);
+      units_general_cgs_conversion_factor(us, dimension_charge);
 
   const float dimension_mass[5] = {1, 0, 0, 0, 0};
   internal_const->const_electron_mass =
       const_electron_mass_cgs /
-      units_general_conversion_factor(us, dimension_mass);
+      units_general_cgs_conversion_factor(us, dimension_mass);
   internal_const->const_proton_mass =
       const_proton_mass_cgs /
-      units_general_conversion_factor(us, dimension_mass);
+      units_general_cgs_conversion_factor(us, dimension_mass);
   internal_const->const_solar_mass =
       const_solar_mass_cgs /
-      units_general_conversion_factor(us, dimension_mass);
+      units_general_cgs_conversion_factor(us, dimension_mass);
   internal_const->const_earth_mass =
       const_earth_mass_cgs /
-      units_general_conversion_factor(us, dimension_mass);
+      units_general_cgs_conversion_factor(us, dimension_mass);
 
   const float dimension_time[5] = {0, 0, 1, 0, 0};
   internal_const->const_year =
-      const_year_cgs / units_general_conversion_factor(us, dimension_time);
+      const_year_cgs / units_general_cgs_conversion_factor(us, dimension_time);
 
   const float dimension_length[5] = {0, 1, 0, 0, 0};
   internal_const->const_astronomical_unit =
       const_astronomical_unit_cgs /
-      units_general_conversion_factor(us, dimension_length);
+      units_general_cgs_conversion_factor(us, dimension_length);
   internal_const->const_parsec =
-      const_parsec_cgs / units_general_conversion_factor(us, dimension_length);
+      const_parsec_cgs /
+      units_general_cgs_conversion_factor(us, dimension_length);
   internal_const->const_light_year =
       const_light_year_cgs /
-      units_general_conversion_factor(us, dimension_length);
+      units_general_cgs_conversion_factor(us, dimension_length);
 }
 
 void phys_const_print(struct phys_const* internal_const) {
diff --git a/src/serial_io.c b/src/serial_io.c
index ece269cc37ba009074a2745da1069099b355a686..7bb829174c04545913a7ea560af1b4993526a0c5 100644
--- a/src/serial_io.c
+++ b/src/serial_io.c
@@ -40,6 +40,10 @@
 #include "engine.h"
 #include "error.h"
 #include "hydro_properties.h"
+#include "gravity_io.h"
+#include "hydro_io.h"
+#include "io_properties.h"
+#include "kernel_hydro.h"
 #include "part.h"
 #include "units.h"
 
@@ -65,28 +69,25 @@
- *the part array
- * will be written once the structures have been stabilized.
+ * the part array will be written once the structures have been stabilized.
  */
-void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
-                      int dim, long long N_total, long long offset,
-                      char* part_c, size_t partSize,
-                      enum DATA_IMPORTANCE importance) {
-  hid_t h_data = 0, h_err = 0, h_type = 0, h_memspace = 0, h_filespace = 0;
-  hsize_t shape[2], offsets[2];
-  htri_t exist = 0;
-  void* temp;
-  int i = 0, rank = 0;
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  char* temp_c = 0;
+void readArray(hid_t grp, const struct io_props props, size_t N,
+               long long N_total, long long offset,
+               const struct UnitSystem* internal_units,
+               const struct UnitSystem* ic_units) {
+
+  const size_t typeSize = sizeOfType(props.type);
+  const size_t copySize = typeSize * props.dimension;
+  const size_t num_elements = N * props.dimension;
 
   /* Check whether the dataspace exists or not */
-  exist = H5Lexists(grp, name, 0);
+  const htri_t exist = H5Lexists(grp, props.name, 0);
   if (exist < 0) {
-    error("Error while checking the existence of data set '%s'.", name);
+    error("Error while checking the existence of data set '%s'.", props.name);
   } else if (exist == 0) {
-    if (importance == COMPULSORY) {
-      error("Compulsory data set '%s' not present in the file.", name);
+    if (props.importance == COMPULSORY) {
+      error("Compulsory data set '%s' not present in the file.", props.name);
     } else {
-      for (i = 0; i < N; ++i) memset(part_c + i * partSize, 0, copySize);
+      for (size_t i = 0; i < N; ++i)
+        memset(props.field + i * props.partSize, 0, copySize);
       return;
     }
   }
@@ -96,24 +97,26 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
   /* fflush(stdout); */
 
   /* Open data space */
-  h_data = H5Dopen(grp, name, H5P_DEFAULT);
-  if (h_data < 0) error("Error while opening data space '%s'.", name);
+  const hid_t h_data = H5Dopen(grp, props.name, H5P_DEFAULT);
+  if (h_data < 0) error("Error while opening data space '%s'.", props.name);
 
   /* Check data type */
-  h_type = H5Dget_type(h_data);
+  const hid_t h_type = H5Dget_type(h_data);
   if (h_type < 0) error("Unable to retrieve data type from the file");
   /* if (!H5Tequal(h_type, hdf5Type(type))) */
   /*   error("Non-matching types between the code and the file"); */
 
   /* Allocate temporary buffer */
-  temp = malloc(N * dim * typeSize);
+  void* temp = malloc(num_elements * typeSize);
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Prepare information for hyper-slab */
-  if (dim > 1) {
+  hsize_t shape[2], offsets[2];
+  int rank;
+  if (props.dimension > 1) {
     rank = 2;
     shape[0] = N;
-    shape[1] = dim;
+    shape[1] = props.dimension;
     offsets[0] = offset;
     offsets[1] = 0;
   } else {
@@ -125,39 +128,41 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
   }
 
   /* Create data space in memory */
-  h_memspace = H5Screate_simple(rank, shape, NULL);
+  const hid_t h_memspace = H5Screate_simple(rank, shape, NULL);
 
   /* Select hyper-slab in file */
-  h_filespace = H5Dget_space(h_data);
+  const hid_t h_filespace = H5Dget_space(h_data);
   H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);
 
-  /* int rank_memspace = H5Sget_simple_extent_ndims(h_memspace); */
-  /* int rank_filespace = H5Sget_simple_extent_ndims(h_filespace); */
-
-  /* message("Memspace rank: %d", rank_memspace); */
-  /* message("Filespace rank: %d", rank_filespace); */
-  /* fflush(stdout); */
-
-  /* hsize_t dims_memspace[2], max_dims_memspace[2]; */
-  /* hsize_t dims_filespace[2], max_dims_filespace[2]; */
-
-  /* H5Sget_simple_extent_dims(h_memspace, dims_memspace, max_dims_memspace); */
-  /* H5Sget_simple_extent_dims(h_filespace, dims_filespace, max_dims_filespace);
-   */
-
   /* Read HDF5 dataspace in temporary buffer */
   /* Dirty version that happens to work for vectors but should be improved */
   /* Using HDF5 dataspaces would be better */
-  h_err = H5Dread(h_data, hdf5Type(type), h_memspace, h_filespace, H5P_DEFAULT,
-                  temp);
+  const hid_t h_err = H5Dread(h_data, hdf5Type(props.type), h_memspace,
+                              h_filespace, H5P_DEFAULT, temp);
   if (h_err < 0) {
-    error("Error while reading data array '%s'.", name);
+    error("Error while reading data array '%s'.", props.name);
+  }
+
+  /* Unit conversion if necessary */
+  const double factor =
+      units_conversion_factor(ic_units, internal_units, props.units);
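+  /* Here the factor takes raw values from IC units to internal units, e.g.
+   * IC masses in 1e10 M_sun with internal masses in M_sun give a factor of
+   * 1e10 (in effect the ratio of the two systems' CGS factors). */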
+  if (factor != 1. && exist != 0) {
+
+    /* message("Converting ! factor=%e", factor); */
+
+    if (isDoublePrecision(props.type)) {
+      double* temp_d = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
+    } else {
+      float* temp_f = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+    }
   }
 
   /* Copy temporary buffer to particle data */
-  temp_c = temp;
-  for (i = 0; i < N; ++i)
-    memcpy(part_c + i * partSize, &temp_c[i * copySize], copySize);
+  char* temp_c = temp;
+  for (size_t i = 0; i < N; ++i)
+    memcpy(props.field + i * props.partSize, &temp_c[i * copySize], copySize);
 
   /* Free and close everything */
   free(temp);
@@ -172,27 +177,25 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
  *-----------------------------------------------------------------------------*/
 
 void prepareArray(hid_t grp, char* fileName, FILE* xmfFile,
-                  char* partTypeGroupName, char* name, enum DATA_TYPE type,
-                  long long N_total, int dim, struct UnitSystem* us,
-                  enum UnitConversionFactor convFactor) {
-  hid_t h_data = 0, h_err = 0, h_space = 0, h_prop = 0;
-  int rank = 0;
-  hsize_t shape[2];
-  hsize_t chunk_shape[2];
-  char buffer[150];
+                  char* partTypeGroupName, const struct io_props props,
+                  long long N_total, const struct UnitSystem* internal_units,
+                  const struct UnitSystem* snapshot_units) {
 
   /* Create data space */
-  h_space = H5Screate(H5S_SIMPLE);
+  const hid_t h_space = H5Screate(H5S_SIMPLE);
   if (h_space < 0) {
-    error("Error while creating data space for field '%s'.", name);
+    error("Error while creating data space for field '%s'.", props.name);
   }
 
-  if (dim > 1) {
+  int rank = 0;
+  hsize_t shape[2];
+  hsize_t chunk_shape[2];
+  if (props.dimension > 1) {
     rank = 2;
     shape[0] = N_total;
-    shape[1] = dim;
+    shape[1] = props.dimension;
     chunk_shape[0] = 1 << 16; /* Just a guess...*/
-    chunk_shape[1] = dim;
+    chunk_shape[1] = props.dimension;
   } else {
     rank = 1;
     shape[0] = N_total;
@@ -205,45 +208,51 @@ void prepareArray(hid_t grp, char* fileName, FILE* xmfFile,
   if (chunk_shape[0] > N_total) chunk_shape[0] = N_total;
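+  /* (HDF5 refuses to create a fixed-size dataset whose chunk is larger than
+   *  the dataset itself, hence the clamp.) */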
 
   /* Change shape of data space */
-  h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
+  hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
   if (h_err < 0) {
-    error("Error while changing data space shape for field '%s'.", name);
+    error("Error while changing data space shape for field '%s'.", props.name);
   }
 
   /* Dataset properties */
-  h_prop = H5Pcreate(H5P_DATASET_CREATE);
+  const hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);
 
   /* Set chunk size */
   h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
   if (h_err < 0) {
     error("Error while setting chunk size (%lld, %lld) for field '%s'.",
-          chunk_shape[0], chunk_shape[1], name);
+          chunk_shape[0], chunk_shape[1], props.name);
   }
 
   /* Impose data compression */
   h_err = H5Pset_deflate(h_prop, 4);
   if (h_err < 0) {
-    error("Error while setting compression options for field '%s'.", name);
+    error("Error while setting compression options for field '%s'.",
+          props.name);
   }
 
   /* Create dataset */
-  h_data = H5Dcreate(grp, name, hdf5Type(type), h_space, H5P_DEFAULT, h_prop,
-                     H5P_DEFAULT);
+  const hid_t h_data = H5Dcreate(grp, props.name, hdf5Type(props.type), h_space,
+                                 H5P_DEFAULT, h_prop, H5P_DEFAULT);
   if (h_data < 0) {
-    error("Error while creating dataspace '%s'.", name);
+    error("Error while creating dataspace '%s'.", props.name);
   }
 
   /* Write XMF description for this data set */
-  writeXMFline(xmfFile, fileName, partTypeGroupName, name, N_total, dim, type);
+  writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N_total,
+               props.dimension, props.type);
 
   /* Write unit conversion factors for this data set */
-  units_conversion_string(buffer, us, convFactor);
+  char buffer[FIELD_BUFFER_SIZE];
+  units_cgs_conversion_string(buffer, snapshot_units, props.units);
   writeAttribute_d(h_data, "CGS conversion factor",
-                   units_conversion_factor(us, convFactor));
-  writeAttribute_f(h_data, "h-scale exponent", units_h_factor(us, convFactor));
-  writeAttribute_f(h_data, "a-scale exponent", units_a_factor(us, convFactor));
+                   units_cgs_conversion_factor(snapshot_units, props.units));
+  writeAttribute_f(h_data, "h-scale exponent",
+                   units_h_factor(snapshot_units, props.units));
+  writeAttribute_f(h_data, "a-scale exponent",
+                   units_a_factor(snapshot_units, props.units));
   writeAttribute_s(h_data, "Conversion factor", buffer);
 
+  /* Close everything */
   H5Pclose(h_prop);
   H5Dclose(h_data);
   H5Sclose(h_space);
@@ -267,42 +276,56 @@ void prepareArray(hid_t grp, char* fileName, FILE* xmfFile,
- * @param us The UnitSystem currently in use
- * @param convFactor The UnitConversionFactor for this arrayo
+ * @param internal_units The #UnitSystem used internally
+ * @param snapshot_units The #UnitSystem used in the snapshots
  */
-void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
-                       char* partTypeGroupName, char* name, enum DATA_TYPE type,
-                       int N, int dim, long long N_total, int mpi_rank,
-                       long long offset, char* part_c, size_t partSize,
-                       struct UnitSystem* us,
-                       enum UnitConversionFactor convFactor) {
-
-  hid_t h_data = 0, h_err = 0, h_memspace = 0, h_filespace = 0;
-  hsize_t shape[2], offsets[2];
-  void* temp = 0;
-  int i = 0, rank = 0;
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  char* temp_c = 0;
+void writeArray(hid_t grp, char* fileName, FILE* xmfFile,
+                char* partTypeGroupName, const struct io_props props, size_t N,
+                long long N_total, int mpi_rank, long long offset,
+                const struct UnitSystem* internal_units,
+                const struct UnitSystem* snapshot_units) {
+
+  const size_t typeSize = sizeOfType(props.type);
+  const size_t copySize = typeSize * props.dimension;
+  const size_t num_elements = N * props.dimension;
 
-  /* message("Writing '%s' array...", name); */
+  /* message("Writing '%s' array...", props.name); */
 
   /* Prepare the arrays in the file */
   if (mpi_rank == 0)
-    prepareArray(grp, fileName, xmfFile, partTypeGroupName, name, type, N_total,
-                 dim, us, convFactor);
+    prepareArray(grp, fileName, xmfFile, partTypeGroupName, props, N_total,
+                 internal_units, snapshot_units);
 
   /* Allocate temporary buffer */
-  temp = malloc(N * dim * sizeOfType(type));
+  void* temp = malloc(num_elements * sizeOfType(props.type));
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Copy particle data to temporary buffer */
-  temp_c = temp;
-  for (i = 0; i < N; ++i)
-    memcpy(&temp_c[i * copySize], part_c + i * partSize, copySize);
+  char* temp_c = temp;
+  for (size_t i = 0; i < N; ++i)
+    memcpy(&temp_c[i * copySize], props.field + i * props.partSize, copySize);
+
+  /* Unit conversion if necessary */
+  const double factor =
+      units_conversion_factor(internal_units, snapshot_units, props.units);
+  if (factor != 1.) {
+
+    /* message("Converting ! factor=%e", factor); */
+
+    if (isDoublePrecision(props.type)) {
+      double* temp_d = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
+    } else {
+      float* temp_f = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+    }
+  }
 
   /* Construct information for the hyper-slab */
-  if (dim > 1) {
+  int rank;
+  hsize_t shape[2];
+  hsize_t offsets[2];
+  if (props.dimension > 1) {
     rank = 2;
     shape[0] = N;
-    shape[1] = dim;
+    shape[1] = props.dimension;
     offsets[0] = offset;
     offsets[1] = 0;
   } else {
@@ -314,28 +337,29 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   }
 
   /* Create data space in memory */
-  h_memspace = H5Screate(H5S_SIMPLE);
+  const hid_t h_memspace = H5Screate(H5S_SIMPLE);
   if (h_memspace < 0)
-    error("Error while creating data space (memory) for field '%s'.", name);
+    error("Error while creating data space (memory) for field '%s'.",
+          props.name);
 
   /* Change shape of memory data space */
-  h_err = H5Sset_extent_simple(h_memspace, rank, shape, NULL);
+  hid_t h_err = H5Sset_extent_simple(h_memspace, rank, shape, NULL);
   if (h_err < 0)
     error("Error while changing data space (memory) shape for field '%s'.",
-          name);
+          props.name);
 
   /* Open pre-existing data set */
-  h_data = H5Dopen(grp, name, H5P_DEFAULT);
-  if (h_data < 0) error("Error while opening dataset '%s'.", name);
+  const hid_t h_data = H5Dopen(grp, props.name, H5P_DEFAULT);
+  if (h_data < 0) error("Error while opening dataset '%s'.", props.name);
 
   /* Select data space in that data set */
-  h_filespace = H5Dget_space(h_data);
+  const hid_t h_filespace = H5Dget_space(h_data);
   H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);
 
   /* Write temporary buffer to HDF5 dataspace */
-  h_err = H5Dwrite(h_data, hdf5Type(type), h_memspace, h_filespace, H5P_DEFAULT,
-                   temp);
-  if (h_err < 0) error("Error while writing data array '%s'.", name);
+  h_err = H5Dwrite(h_data, hdf5Type(props.type), h_memspace, h_filespace,
+                   H5P_DEFAULT, temp);
+  if (h_err < 0) error("Error while writing data array '%s'.", props.name);
 
   /* Free and close everything */
   free(temp);
@@ -344,70 +368,19 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   H5Sclose(h_filespace);
 }
 
-/**
- * @brief A helper macro to call the readArrayBackEnd function more easily.
- *
- * @param grp The group from which to read.
- * @param name The name of the array to read.
- * @param type The #DATA_TYPE of the attribute.
- * @param N The number of particles.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part The array of particles to fill
- * @param N_total Total number of particles
- * @param offset Offset in the array where this task starts reading
- * @param field The name of the field (C code name as defined in part.h) to fill
- * @param importance Is the data compulsory or not
- *
- */
-#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
-                  importance)                                            \
-  readArrayBackEnd(grp, name, type, N, dim, N_total, offset,             \
-                   (char*)(&(part[0]).field), sizeof(part[0]), importance)
-
-/**
- * @brief A helper macro to call the readArrayBackEnd function more easily.
- *
- * @param grp The group in which to write.
- * @param fileName Unused parameter in non-MPI mode
- * @param xmfFile Unused parameter in non-MPI mode
- * @param name The name of the array to write.
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param type The #DATA_TYPE of the array.
- * @param N The number of particles to write.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part A (char*) pointer on the first occurrence of the field of
- *interest in the parts array
- * @param N_total Unused parameter in non-MPI mode
- * @param mpi_rank Unused parameter in non-MPI mode
- * @param offset Unused parameter in non-MPI mode
- * @param field The name (code name) of the field to read from.
- * @param us The UnitSystem currently in use
- * @param convFactor The UnitConversionFactor for this array
- *
- */
-#define writeArray(grp, fileName, xmfFile, partTypeGroupName, name, type, N,   \
-                   dim, part, N_total, mpi_rank, offset, field, us,            \
-                   convFactor)                                                 \
-  writeArrayBackEnd(grp, fileName, xmfFile, partTypeGroupName, name, type, N,  \
-                    dim, N_total, mpi_rank, offset, (char*)(&(part[0]).field), \
-                    sizeof(part[0]), us, convFactor)
-
-/* Import the right hydro definition */
-#include "hydro_io.h"
-/* Import the right gravity definition */
-#include "gravity_io.h"
-
 /**
  * @brief Reads an HDF5 initial condition file (GADGET-3 type)
  *
  * @param fileName The file to read.
+ * @param internal_units The #UnitSystem used internally
  * @param dim (output) The dimension of the volume read from the file.
  * @param parts (output) The array of #part (gas particles) read from the file.
  * @param gparts (output) The array of #gpart read from the file.
  * @param Ngas (output) The number of #part read from the file on that node.
  * @param Ngparts (output) The number of #gpart read from the file on that node.
  * @param periodic (output) 1 if the volume is periodic, 0 if not.
+ * @param flag_entropy (output) 1 if the ICs contained Entropy in the
+ * InternalEnergy field
  * @param mpi_rank The MPI rank of this node
  * @param mpi_size The number of MPI ranks
  * @param comm The MPI communicator
@@ -422,10 +395,11 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
  * @todo Read snapshots distributed in more than one file.
  *
  */
-void read_ic_serial(char* fileName, double dim[3], struct part** parts,
-                    struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
-                    int* periodic, int* flag_entropy, int mpi_rank,
-                    int mpi_size, MPI_Comm comm, MPI_Info info, int dry_run) {
+void read_ic_serial(char* fileName, const struct UnitSystem* internal_units,
+                    double dim[3], struct part** parts, struct gpart** gparts,
+                    size_t* Ngas, size_t* Ngparts, int* periodic,
+                    int* flag_entropy, int mpi_rank, int mpi_size,
+                    MPI_Comm comm, MPI_Info info, int dry_run) {
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
   double boxSize[3] = {0.0, -1.0, -1.0};
@@ -435,6 +409,7 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
   size_t N[NUM_PARTICLE_TYPES] = {0};
   long long N_total[NUM_PARTICLE_TYPES] = {0};
   long long offset[NUM_PARTICLE_TYPES] = {0};
+  struct UnitSystem* ic_units = malloc(sizeof(struct UnitSystem));
+  if (ic_units == NULL) error("Unable to allocate memory for IC unit system");
 
   /* First read some information about the content */
   if (mpi_rank == 0) {
@@ -484,6 +459,38 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
     /* Close header */
     H5Gclose(h_grp);
 
+    /* Read the unit system used in the ICs */
+    readUnitSystem(h_file, ic_units);
+
+    if (units_are_equal(ic_units, internal_units)) {
+
+      message("IC and internal units match. No conversion needed.");
+
+    } else {
+
+      message("Conversion needed from:");
+      message("(ICs) Unit system: U_M =      %e g.", ic_units->UnitMass_in_cgs);
+      message("(ICs) Unit system: U_L =      %e cm.",
+              ic_units->UnitLength_in_cgs);
+      message("(ICs) Unit system: U_t =      %e s.", ic_units->UnitTime_in_cgs);
+      message("(ICs) Unit system: U_I =      %e A.",
+              ic_units->UnitCurrent_in_cgs);
+      message("(ICs) Unit system: U_T =      %e K.",
+              ic_units->UnitTemperature_in_cgs);
+      message("to:");
+      message("(internal) Unit system: U_M = %e g.",
+              internal_units->UnitMass_in_cgs);
+      message("(internal) Unit system: U_L = %e cm.",
+              internal_units->UnitLength_in_cgs);
+      message("(internal) Unit system: U_t = %e s.",
+              internal_units->UnitTime_in_cgs);
+      message("(internal) Unit system: U_I = %e A.",
+              internal_units->UnitCurrent_in_cgs);
+      message("(internal) Unit system: U_T = %e K.",
+              internal_units->UnitTemperature_in_cgs);
+    }
+
     /* Close file */
     H5Fclose(h_file);
   }
@@ -493,6 +500,7 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
   MPI_Bcast(periodic, 1, MPI_INT, 0, comm);
   MPI_Bcast(&N_total, NUM_PARTICLE_TYPES, MPI_LONG_LONG, 0, comm);
   MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm);
+  MPI_Bcast(ic_units, sizeof(struct UnitSystem), MPI_BYTE, 0, comm);
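+  /* (A byte-wise broadcast is safe here only because UnitSystem is plain
+   *  data: scalar members, no pointers.) */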
 
   /* Divide the particles among the tasks. */
   for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) {
@@ -548,19 +556,29 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
           error("Error while opening particle group %s.", partTypeGroupName);
         }
 
+        int num_fields = 0;
+        struct io_props list[100];
+        size_t N = 0;
+
         /* Read particle fields into the particle structure */
         switch (ptype) {
 
           case GAS:
-            if (!dry_run)
-              hydro_read_particles(h_grp, N[ptype], N_total[ptype],
-                                   offset[ptype], *parts);
+            N = *Ngas;
+            hydro_read_particles(*parts, list, &num_fields);
             break;
 
           case DM:
-            if (!dry_run)
-              darkmatter_read_particles(h_grp, N[ptype], N_total[ptype],
-                                        offset[ptype], *gparts);
+            N = Ndm;
+            darkmatter_read_particles(*gparts, list, &num_fields);
             break;
 
           default:
@@ -568,6 +586,12 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
                     ptype);
         }
 
+        /* Read everything */
+        if (!dry_run)
+          for (int i = 0; i < num_fields; ++i)
+            readArray(h_grp, list[i], N, N_total[ptype], offset[ptype],
+                      internal_units, ic_units);
+
         /* Close particle group */
         H5Gclose(h_grp);
       }
@@ -580,6 +604,9 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
     MPI_Barrier(comm);
   }
 
+  /* Clean up */
+  free(ic_units);
+
   /* Prepare the DM particles */
   if (!dry_run) prepare_dm_gparts(*gparts, Ndm);
 
@@ -594,7 +621,8 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
  *
  * @param e The engine containing all the system.
  * @param baseName The common part of the snapshot file name.
- * @param us The UnitSystem used for the conversion of units in the output.
+ * @param internal_units The #UnitSystem used internally
+ * @param snapshot_units The #UnitSystem used in the snapshots
  * @param mpi_rank The MPI rank of this node.
  * @param mpi_size The number of MPI ranks.
  * @param comm The MPI communicator.
@@ -609,8 +637,10 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
  *
  */
 void write_output_serial(struct engine* e, const char* baseName,
-                         struct UnitSystem* us, int mpi_rank, int mpi_size,
-                         MPI_Comm comm, MPI_Info info) {
+                         const struct UnitSystem* internal_units,
+                         const struct UnitSystem* snapshot_units, int mpi_rank,
+                         int mpi_size, MPI_Comm comm, MPI_Info info) {
+
   hid_t h_file = 0, h_grp = 0;
   const size_t Ngas = e->s->nr_parts;
   const size_t Ntot = e->s->nr_gparts;
@@ -728,8 +758,11 @@ void write_output_serial(struct engine* e, const char* baseName,
     parser_write_params_to_hdf5(e->parameter_file, h_grp);
     H5Gclose(h_grp);
 
-    /* Print the system of Units */
-    writeUnitSystem(h_file, us);
+    /* Print the system of Units used in the snapshot */
+    writeUnitSystem(h_file, snapshot_units, "Units");
+
+    /* Print the system of Units used internally */
+    writeUnitSystem(h_file, internal_units, "InternalCodeUnits");
 
     /* Loop over all particle types */
     for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) {
@@ -785,14 +818,16 @@ void write_output_serial(struct engine* e, const char* baseName,
           error("Error while opening particle group %s.", partTypeGroupName);
         }
 
-        /* Read particle fields into the particle structure */
+        int num_fields = 0;
+        struct io_props list[100];
+        size_t N = 0;
+
+        /* Write particle fields from the particle structure */
         switch (ptype) {
 
           case GAS:
-            hydro_write_particles(h_grp, fileName, partTypeGroupName, xmfFile,
-                                  N[ptype], N_total[ptype], mpi_rank,
-                                  offset[ptype], parts, us);
-
+            N = Ngas;
+            hydro_write_particles(parts, list, &num_fields);
             break;
 
           case DM:
@@ -806,18 +841,24 @@ void write_output_serial(struct engine* e, const char* baseName,
             collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
 
             /* Write DM particles */
-            darkmatter_write_particles(h_grp, fileName, partTypeGroupName,
-                                       xmfFile, N[ptype], N_total[ptype],
-                                       mpi_rank, offset[ptype], dmparts, us);
+            N = Ndm;
+            darkmatter_write_particles(dmparts, list, &num_fields);
 
-            /* Free temporary array */
-            free(dmparts);
             break;
 
           default:
             error("Particle Type %d not yet supported. Aborting", ptype);
         }
 
+        /* Write everything */
+        for (int i = 0; i < num_fields; ++i)
+          writeArray(h_grp, fileName, xmfFile, partTypeGroupName, list[i], N,
+                     N_total[ptype], mpi_rank, offset[ptype], internal_units,
+                     snapshot_units);
+
+        /* Free temporary array */
+        free(dmparts);
+
         /* Close particle group */
         H5Gclose(h_grp);
 
diff --git a/src/serial_io.h b/src/serial_io.h
index fa88d5cb87edfba2c101b6096b8a57b04b7178cd..a2226e5cd9848ff2515b15111af43ccc67275a28 100644
--- a/src/serial_io.h
+++ b/src/serial_io.h
@@ -34,14 +34,16 @@
 
 #if defined(HAVE_HDF5) && defined(WITH_MPI) && !defined(HAVE_PARALLEL_HDF5)
 
-void read_ic_serial(char* fileName, double dim[3], struct part** parts,
-                    struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
-                    int* periodic, int* flag_entropy, int mpi_rank,
-                    int mpi_size, MPI_Comm comm, MPI_Info info, int dry_run);
+void read_ic_serial(char* fileName, const struct UnitSystem* internal_units,
+                    double dim[3], struct part** parts, struct gpart** gparts,
+                    size_t* Ngas, size_t* Ngparts, int* periodic,
+                    int* flag_entropy, int mpi_rank, int mpi_size,
+                    MPI_Comm comm, MPI_Info info, int dry_run);
 
 void write_output_serial(struct engine* e, const char* baseName,
-                         struct UnitSystem* us, int mpi_rank, int mpi_size,
-                         MPI_Comm comm, MPI_Info info);
+                         const struct UnitSystem* internal_units,
+                         const struct UnitSystem* snapshot_units, int mpi_rank,
+                         int mpi_size, MPI_Comm comm, MPI_Info info);
 
 #endif
 
diff --git a/src/single_io.c b/src/single_io.c
index ae67445b2d4e34d561611a0fa6ca3322d02a55ed..de7b96812365e136a078645511328338f8c4e8e3 100644
--- a/src/single_io.c
+++ b/src/single_io.c
@@ -39,6 +39,10 @@
 #include "engine.h"
 #include "error.h"
 #include "hydro_properties.h"
+#include "gravity_io.h"
+#include "hydro_io.h"
+#include "io_properties.h"
+#include "kernel_hydro.h"
 #include "part.h"
 #include "units.h"
 
@@ -49,80 +53,91 @@
 /**
  * @brief Reads a data array from a given HDF5 group.
  *
- * @param grp The group from which to read.
- * @param name The name of the array to read.
- * @param type The #DATA_TYPE of the attribute.
+ * @param h_grp The group from which to read.
+ * @param prop The #io_props of the field to read
  * @param N The number of particles.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part_c A (char*) pointer on the first occurrence of the field of
- *interest in the parts array
- * @param partSize The size in bytes of the particle structure.
- * @param importance If COMPULSORY, the data must be present in the IC file. If
- *OPTIONAL, the array will be zeroed when the data is not present.
+ * @param internal_units The #UnitSystem used internally
+ * @param ic_units The #UnitSystem used in the ICs
  *
  * @todo A better version using HDF5 hyper-slabs to read the file directly into
- *the part array
- * will be written once the structures have been stabilized.
+ * the part array will be written once the structures have been stabilized.
  */
-void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
-                      int dim, char* part_c, size_t partSize,
-                      enum DATA_IMPORTANCE importance) {
-  hid_t h_data = 0, h_err = 0, h_type = 0;
-  htri_t exist = 0;
-  void* temp;
-  int i = 0;
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  char* temp_c = 0;
+void readArray(hid_t h_grp, const struct io_props prop, size_t N,
+               const struct UnitSystem* internal_units,
+               const struct UnitSystem* ic_units) {
+
+  const size_t typeSize = sizeOfType(prop.type);
+  const size_t copySize = typeSize * prop.dimension;
+  const size_t num_elements = N * prop.dimension;
 
   /* Check whether the dataspace exists or not */
-  exist = H5Lexists(grp, name, 0);
+  const htri_t exist = H5Lexists(h_grp, prop.name, 0);
   if (exist < 0) {
-    error("Error while checking the existence of data set '%s'.", name);
+    error("Error while checking the existence of data set '%s'.", prop.name);
   } else if (exist == 0) {
-    if (importance == COMPULSORY) {
-      error("Compulsory data set '%s' not present in the file.", name);
+    if (prop.importance == COMPULSORY) {
+      error("Compulsory data set '%s' not present in the file.", prop.name);
     } else {
       /* message("Optional data set '%s' not present. Zeroing this particle
-       * field...", name);	   */
+       * prop...", name);	   */
 
-      for (i = 0; i < N; ++i) memset(part_c + i * partSize, 0, copySize);
+      for (size_t i = 0; i < N; ++i)
+        memset(prop.field + i * prop.partSize, 0, copySize);
 
       return;
     }
   }
 
-  /* message( "Reading %s '%s' array...", importance == COMPULSORY ?
-   * "compulsory": "optional  ", name); */
+  /* message("Reading %s '%s' array...", */
+  /*         prop.importance == COMPULSORY ? "compulsory" : "optional  ", */
+  /*         prop.name); */
 
   /* Open data space */
-  h_data = H5Dopen(grp, name, H5P_DEFAULT);
+  const hid_t h_data = H5Dopen(h_grp, prop.name, H5P_DEFAULT);
   if (h_data < 0) {
-    error("Error while opening data space '%s'.", name);
+    error("Error while opening data space '%s'.", prop.name);
   }
 
   /* Check data type */
-  h_type = H5Dget_type(h_data);
+  const hid_t h_type = H5Dget_type(h_data);
   if (h_type < 0) error("Unable to retrieve data type from the file");
   // if (!H5Tequal(h_type, hdf5Type(type)))
   //  error("Non-matching types between the code and the file");
 
   /* Allocate temporary buffer */
-  temp = malloc(N * dim * typeSize);
+  void* temp = malloc(num_elements * typeSize);
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Read HDF5 dataspace in temporary buffer */
   /* Dirty version that happens to work for vectors but should be improved */
   /* Using HDF5 dataspaces would be better */
-  h_err = H5Dread(h_data, hdf5Type(type), H5S_ALL, H5S_ALL, H5P_DEFAULT, temp);
+  const hid_t h_err =
+      H5Dread(h_data, hdf5Type(prop.type), H5S_ALL, H5S_ALL, H5P_DEFAULT, temp);
   if (h_err < 0) {
-    error("Error while reading data array '%s'.", name);
+    error("Error while reading data array '%s'.", prop.name);
+  }
+
+  /* Unit conversion if necessary */
+  const double factor =
+      units_conversion_factor(ic_units, internal_units, prop.units);
+  if (factor != 1. && exist != 0) {
+
+    /* message("Converting ! factor=%e", factor); */
+
+    if (isDoublePrecision(prop.type)) {
+      double* temp_d = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
+    } else {
+      float* temp_f = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+    }
   }
 
   /* Copy temporary buffer to particle data */
-  temp_c = temp;
-  for (i = 0; i < N; ++i)
-    memcpy(part_c + i * partSize, &temp_c[i * copySize], copySize);
+  char* temp_c = temp;
+  for (size_t i = 0; i < N; ++i)
+    memcpy(prop.field + i * prop.partSize, &temp_c[i * copySize], copySize);
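+  /* (prop.field points at this field inside particle 0; stepping by
+   * prop.partSize lands on the same field of each following particle, so the
+   * packed temp buffer is scattered back into the array-of-structs layout.) */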
 
   /* Free and close everything */
   free(temp);
@@ -141,59 +156,66 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
  * @param fileName The name of the file in which the data is written
  * @param xmfFile The FILE used to write the XMF description
  * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param name The name of the array to write.
- * @param type The #DATA_TYPE of the array.
+ * the HDF5 file.
+ * @param props The #io_props of the field to write
  * @param N The number of particles to write.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part_c A (char*) pointer on the first occurrence of the field of
- *interest in the parts array.
- * @param partSize The size in bytes of the particle structure.
- * @param us The UnitSystem currently in use
- * @param convFactor The UnitConversionFactor for this array
+ * @param internal_units The #UnitSystem used internally
+ * @param snapshot_units The #UnitSystem used in the snapshots
  *
  * @todo A better version using HDF5 hyper-slabs to write the file directly from
- *the part array
- * will be written once the structures have been stabilized.
+ * the part array will be written once the structures have been stabilized.
  */
-void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
-                       char* partTypeGroupName, char* name, enum DATA_TYPE type,
-                       int N, int dim, char* part_c, size_t partSize,
-                       struct UnitSystem* us,
-                       enum UnitConversionFactor convFactor) {
-  hid_t h_data = 0, h_err = 0, h_space = 0, h_prop = 0;
-  void* temp = 0;
-  int i = 0, rank = 0;
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  char* temp_c = 0;
-  hsize_t shape[2];
-  hsize_t chunk_shape[2];
-  char buffer[FILENAME_BUFFER_SIZE];
+void writeArray(hid_t grp, char* fileName, FILE* xmfFile,
+                char* partTypeGroupName, const struct io_props props, size_t N,
+                const struct UnitSystem* internal_units,
+                const struct UnitSystem* snapshot_units) {
+
+  const size_t typeSize = sizeOfType(props.type);
+  const size_t copySize = typeSize * props.dimension;
+  const size_t num_elements = N * props.dimension;
 
-  /* message("Writing '%s' array...", name); */
+  /* message("Writing '%s' array...", props.name); */
 
   /* Allocate temporary buffer */
-  temp = malloc(N * dim * sizeOfType(type));
+  void* temp = malloc(num_elements * sizeOfType(props.type));
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Copy particle data to temporary buffer */
-  temp_c = temp;
-  for (i = 0; i < N; ++i)
-    memcpy(&temp_c[i * copySize], part_c + i * partSize, copySize);
+  char* temp_c = temp;
+  for (size_t i = 0; i < N; ++i)
+    memcpy(&temp_c[i * copySize], props.field + i * props.partSize, copySize);
+
+  /* Unit conversion if necessary */
+  const double factor =
+      units_conversion_factor(internal_units, snapshot_units, props.units);
+  if (factor != 1.) {
+
+    /* message("Converting ! factor=%e", factor); */
+
+    if (isDoublePrecision(props.type)) {
+      double* temp_d = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
+    } else {
+      float* temp_f = temp;
+      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+    }
+  }
 
   /* Create data space */
-  h_space = H5Screate(H5S_SIMPLE);
+  const hid_t h_space = H5Screate(H5S_SIMPLE);
+  int rank;
+  hsize_t shape[2];
+  hsize_t chunk_shape[2];
   if (h_space < 0) {
-    error("Error while creating data space for field '%s'.", name);
+    error("Error while creating data space for field '%s'.", props.name);
   }
 
-  if (dim > 1) {
+  if (props.dimension > 1) {
     rank = 2;
     shape[0] = N;
-    shape[1] = dim;
+    shape[1] = props.dimension;
     chunk_shape[0] = 1 << 16; /* Just a guess...*/
-    chunk_shape[1] = dim;
+    chunk_shape[1] = props.dimension;
   } else {
     rank = 1;
     shape[0] = N;
@@ -206,49 +228,55 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   if (chunk_shape[0] > N) chunk_shape[0] = N;
 
   /* Change shape of data space */
-  h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
+  hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
   if (h_err < 0) {
-    error("Error while changing data space shape for field '%s'.", name);
+    error("Error while changing data space shape for field '%s'.", props.name);
   }
 
   /* Dataset properties */
-  h_prop = H5Pcreate(H5P_DATASET_CREATE);
+  const hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);
 
   /* Set chunk size */
   h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
   if (h_err < 0) {
     error("Error while setting chunk size (%lld, %lld) for field '%s'.",
-          chunk_shape[0], chunk_shape[1], name);
+          chunk_shape[0], chunk_shape[1], props.name);
   }
 
   /* Impose data compression */
   h_err = H5Pset_deflate(h_prop, 4);
   if (h_err < 0) {
-    error("Error while setting compression options for field '%s'.", name);
+    error("Error while setting compression options for field '%s'.",
+          props.name);
   }
 
   /* Create dataset */
-  h_data = H5Dcreate(grp, name, hdf5Type(type), h_space, H5P_DEFAULT, h_prop,
-                     H5P_DEFAULT);
+  const hid_t h_data = H5Dcreate(grp, props.name, hdf5Type(props.type), h_space,
+                                 H5P_DEFAULT, h_prop, H5P_DEFAULT);
   if (h_data < 0) {
-    error("Error while creating dataspace '%s'.", name);
+    error("Error while creating dataspace '%s'.", props.name);
   }
 
   /* Write temporary buffer to HDF5 dataspace */
-  h_err = H5Dwrite(h_data, hdf5Type(type), h_space, H5S_ALL, H5P_DEFAULT, temp);
+  h_err = H5Dwrite(h_data, hdf5Type(props.type), h_space, H5S_ALL, H5P_DEFAULT,
+                   temp);
   if (h_err < 0) {
-    error("Error while writing data array '%s'.", name);
+    error("Error while writing data array '%s'.", props.name);
   }
 
   /* Write XMF description for this data set */
-  writeXMFline(xmfFile, fileName, partTypeGroupName, name, N, dim, type);
+  writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N,
+               props.dimension, props.type);
 
   /* Write unit conversion factors for this data set */
-  units_conversion_string(buffer, us, convFactor);
+  char buffer[FIELD_BUFFER_SIZE];
+  units_cgs_conversion_string(buffer, snapshot_units, props.units);
   writeAttribute_d(h_data, "CGS conversion factor",
-                   units_conversion_factor(us, convFactor));
-  writeAttribute_f(h_data, "h-scale exponent", units_h_factor(us, convFactor));
-  writeAttribute_f(h_data, "a-scale exponent", units_a_factor(us, convFactor));
+                   units_cgs_conversion_factor(snapshot_units, props.units));
+  writeAttribute_f(h_data, "h-scale exponent",
+                   units_h_factor(snapshot_units, props.units));
+  writeAttribute_f(h_data, "a-scale exponent",
+                   units_a_factor(snapshot_units, props.units));
   writeAttribute_s(h_data, "Conversion factor", buffer);
 
   /* Free and close everything */
@@ -258,71 +286,19 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   H5Sclose(h_space);
 }
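
The strided copy above is the heart of writeArray: gather a field out of the array-of-structs into a contiguous buffer, rescale it in place, then hand it to HDF5. A self-contained toy sketch of those two steps follows (the struct, sizes, and factor are made up for illustration; readArray does the mirror image, rescaling and then scattering back into the particles with a factor computed from ic_units to internal_units):

    #include <stdio.h>
    #include <string.h>

    struct toy_part { long long id; double x[3]; };

    int main(void) {
      struct toy_part parts[2] = {{1, {1., 2., 3.}}, {2, {4., 5., 6.}}};
      const size_t partSize = sizeof(struct toy_part); /* props.partSize */
      const size_t copySize = 3 * sizeof(double);      /* dimension * typeSize */
      const char* field = (const char*)&parts[0].x;    /* props.field */

      /* Gather the field into a contiguous buffer... */
      double temp[6];
      char* temp_c = (char*)temp;
      for (size_t i = 0; i < 2; ++i)
        memcpy(&temp_c[i * copySize], field + i * partSize, copySize);

      /* ...then rescale in place, e.g. internal kpc -> snapshot Mpc. */
      const double factor = 1e-3;
      for (size_t i = 0; i < 6; ++i) temp[i] *= factor;

      printf("%g %g\n", temp[0], temp[5]); /* prints 0.001 0.006 */
      return 0;
    }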
 
-/**
- * @brief A helper macro to call the readArrayBackEnd function more easily.
- *
- * @param grp The group from which to read.
- * @param name The name of the array to read.
- * @param type The #DATA_TYPE of the attribute.
- * @param N The number of particles.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part The array of particles to fill
- * @param N_total Unused parameter in non-MPI mode
- * @param offset Unused parameter in non-MPI mode
- * @param field The name of the field (C code name as defined in part.h) to fill
- * @param importance Is the data compulsory or not
- *
- */
-#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
-                  importance)                                            \
-  readArrayBackEnd(grp, name, type, N, dim, (char*)(&(part[0]).field),   \
-                   sizeof(part[0]), importance)
-
-/**
- * @brief A helper macro to call the readArrayBackEnd function more easily.
- *
- * @param grp The group in which to write.
- * @param fileName The name of the file in which the data is written
- * @param xmfFile The FILE used to write the XMF description
- * @param name The name of the array to write.
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param type The #DATA_TYPE of the array.
- * @param N The number of particles to write.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part A (char*) pointer on the first occurrence of the field of
- * interest in the parts array
- * @param N_total Unused parameter in non-MPI mode
- * @param mpi_rank Unused parameter in non-MPI mode
- * @param offset Unused parameter in non-MPI mode
- * @param field The name (code name) of the field to read from.
- * @param us The UnitSystem currently in use
- * @param convFactor The UnitConversionFactor for this array
- *
- */
-#define writeArray(grp, fileName, xmfFile, partTypeGroupName, name, type, N,  \
-                   dim, part, N_total, mpi_rank, offset, field, us,           \
-                   convFactor)                                                \
-  writeArrayBackEnd(grp, fileName, xmfFile, partTypeGroupName, name, type, N, \
-                    dim, (char*)(&(part[0]).field), sizeof(part[0]), us,      \
-                    convFactor)
-
-/* Import the right hydro definition */
-#include "hydro_io.h"
-/* Import the right gravity definition */
-#include "gravity_io.h"
-
 /**
  * @brief Reads an HDF5 initial condition file (GADGET-3 type)
  *
  * @param fileName The file to read.
+ * @param internal_units The #UnitSystem used internally
  * @param dim (output) The dimension of the volume.
  * @param parts (output) Array of Gas particles.
  * @param gparts (output) Array of #gpart particles.
  * @param Ngas (output) number of Gas particles read.
  * @param Ngparts (output) The number of #gpart read.
  * @param periodic (output) 1 if the volume is periodic, 0 if not.
- * @param flag_entropy 1 if the ICs contained Entropy in the InternalEnergy
+ * @param flag_entropy (output) 1 if the ICs contained Entropy in the
+ * InternalEnergy
  * field
 * @param dry_run If 1, don't read the particles. Only allocate the arrays.
  *
@@ -334,9 +310,11 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
  * @todo Read snapshots distributed in more than one file.
  *
  */
-void read_ic_single(char* fileName, double dim[3], struct part** parts,
-                    struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
-                    int* periodic, int* flag_entropy, int dry_run) {
+void read_ic_single(char* fileName, const struct UnitSystem* internal_units,
+                    double dim[3], struct part** parts, struct gpart** gparts,
+                    size_t* Ngas, size_t* Ngparts, int* periodic,
+                    int* flag_entropy, int dry_run) {
+
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
   double boxSize[3] = {0.0, -1.0, -1.0};
@@ -389,6 +367,40 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
   /* Close header */
   H5Gclose(h_grp);
 
+  /* Read the unit system used in the ICs */
+  struct UnitSystem* ic_units = malloc(sizeof(struct UnitSystem));
+  if (ic_units == NULL) error("Unable to allocate memory for IC unit system");
+  readUnitSystem(h_file, ic_units);
+
+  /* Tell the user if a conversion will be needed */
+  if (units_are_equal(ic_units, internal_units)) {
+
+    message("IC and internal units match. No conversion needed.");
+
+  } else {
+
+    message("Conversion needed from:");
+    message("(ICs) Unit system: U_M =      %e g.", ic_units->UnitMass_in_cgs);
+    message("(ICs) Unit system: U_L =      %e cm.",
+            ic_units->UnitLength_in_cgs);
+    message("(ICs) Unit system: U_t =      %e s.", ic_units->UnitTime_in_cgs);
+    message("(ICs) Unit system: U_I =      %e A.",
+            ic_units->UnitCurrent_in_cgs);
+    message("(ICs) Unit system: U_T =      %e K.",
+            ic_units->UnitTemperature_in_cgs);
+    message("to:");
+    message("(internal) Unit system: U_M = %e g.",
+            internal_units->UnitMass_in_cgs);
+    message("(internal) Unit system: U_L = %e cm.",
+            internal_units->UnitLength_in_cgs);
+    message("(internal) Unit system: U_t = %e s.",
+            internal_units->UnitTime_in_cgs);
+    message("(internal) Unit system: U_I = %e A.",
+            internal_units->UnitCurrent_in_cgs);
+    message("(internal) Unit system: U_T = %e K.",
+            internal_units->UnitTemperature_in_cgs);
+  }
+
   /* Allocate memory to store SPH particles */
   *Ngas = N[0];
   if (posix_memalign((void*)parts, part_align, *Ngas * sizeof(struct part)) !=
@@ -425,23 +437,32 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
       error("Error while opening particle group %s.", partTypeGroupName);
     }
 
-    /* message("Group %s found - reading...", partTypeGroupName); */
+    int num_fields = 0;
+    struct io_props list[100];
+    size_t N = 0;
 
-    /* Read particle fields into the particle structure */
+    /* Read particle fields into the structure */
     switch (ptype) {
 
       case GAS:
-        if (!dry_run) hydro_read_particles(h_grp, *Ngas, *Ngas, 0, *parts);
+        N = *Ngas;
+        hydro_read_particles(*parts, list, &num_fields);
         break;
 
       case DM:
-        if (!dry_run) darkmatter_read_particles(h_grp, Ndm, Ndm, 0, *gparts);
+        N = Ndm;
+        darkmatter_read_particles(*gparts, list, &num_fields);
         break;
 
       default:
         message("Particle Type %d not yet supported. Particles ignored", ptype);
     }
 
+    /* Read everything */
+    if (!dry_run)
+      for (int i = 0; i < num_fields; ++i)
+        readArray(h_grp, list[i], N, internal_units, ic_units);
+
     /* Close particle group */
     H5Gclose(h_grp);
   }
@@ -454,6 +475,9 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
 
   /* message("Done Reading particles..."); */
 
+  /* Clean up */
+  free(ic_units);
+
   /* Close file */
   H5Fclose(h_file);
 }
@@ -463,7 +487,8 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
  *
  * @param e The engine containing all the system.
  * @param baseName The common part of the snapshot file name.
- * @param us The UnitSystem used for the conversion of units in the output.
+ * @param internal_units The #UnitSystem used internally
+ * @param snapshot_units The #UnitSystem used in the snapshots
  *
  * Creates an HDF5 output file and writes the particles contained
  * in the engine. If such a file already exists, it is erased and replaced
@@ -474,7 +499,8 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
  *
  */
 void write_output_single(struct engine* e, const char* baseName,
-                         struct UnitSystem* us) {
+                         const struct UnitSystem* internal_units,
+                         const struct UnitSystem* snapshot_units) {
 
   hid_t h_file = 0, h_grp = 0;
   const size_t Ngas = e->s->nr_parts;
@@ -578,8 +604,11 @@ void write_output_single(struct engine* e, const char* baseName,
   parser_write_params_to_hdf5(e->parameter_file, h_grp);
   H5Gclose(h_grp);
 
-  /* Print the system of Units */
-  writeUnitSystem(h_file, us);
+  /* Print the system of Units used in the snapshot */
+  writeUnitSystem(h_file, snapshot_units, "Units");
+
+  /* Print the system of Units used internally */
+  writeUnitSystem(h_file, internal_units, "InternalCodeUnits");
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) {
@@ -600,14 +629,16 @@ void write_output_single(struct engine* e, const char* baseName,
       error("Error while creating particle group.\n");
     }
 
-    /* message("Writing particle arrays..."); */
+    int num_fields = 0;
+    struct io_props list[100];
+    size_t N = 0;
 
     /* Write particle fields from the particle structure */
     switch (ptype) {
 
       case GAS:
-        hydro_write_particles(h_grp, fileName, partTypeGroupName, xmfFile, Ngas,
-                              Ngas, 0, 0, parts, us);
+        N = Ngas;
+        hydro_write_particles(parts, list, &num_fields);
         break;
 
       case DM:
@@ -621,17 +652,22 @@ void write_output_single(struct engine* e, const char* baseName,
         collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
 
         /* Write DM particles */
-        darkmatter_write_particles(h_grp, fileName, partTypeGroupName, xmfFile,
-                                   Ndm, Ndm, 0, 0, dmparts, us);
-
-        /* Free temporary array */
-        free(dmparts);
+        N = Ndm;
+        darkmatter_write_particles(dmparts, list, &num_fields);
         break;
 
       default:
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Write everything */
+    for (int i = 0; i < num_fields; ++i)
+      writeArray(h_grp, fileName, xmfFile, partTypeGroupName, list[i], N,
+                 internal_units, snapshot_units);
+
+    /* Free temporary array */
+    free(dmparts);
+
     /* Close particle group */
     H5Gclose(h_grp);
 
diff --git a/src/single_io.h b/src/single_io.h
index 4936aa3eb35a5c76a5a8f242c9cc79bbc86161a0..51a30a7bc6af7f3aaf5708a3d2df14982e026e3e 100644
--- a/src/single_io.h
+++ b/src/single_io.h
@@ -29,12 +29,14 @@
 
 #if defined(HAVE_HDF5) && !defined(WITH_MPI)
 
-void read_ic_single(char* fileName, double dim[3], struct part** parts,
-                    struct gpart** gparts, size_t* Ngas, size_t* Ndm,
-                    int* periodic, int* flag_entropy, int dry_run);
+void read_ic_single(char* fileName, const struct UnitSystem* internal_units,
+                    double dim[3], struct part** parts, struct gpart** gparts,
+                    size_t* Ngas, size_t* Ndm, int* periodic, int* flag_entropy,
+                    int dry_run);
 
 void write_output_single(struct engine* e, const char* baseName,
-                         struct UnitSystem* us);
+                         const struct UnitSystem* internal_units,
+                         const struct UnitSystem* snapshot_units);
 
 #endif
 
diff --git a/src/units.c b/src/units.c
index dd9576adc4bb18b859cff03c674f31b721409a19..0284ebfb993c9c31d399a6ae45ec2b8d543e2a34 100644
--- a/src/units.c
+++ b/src/units.c
@@ -91,10 +91,10 @@ double units_get_base_unit(const struct UnitSystem* us,
 }
 
 /**
- * @brief Returns the base unit symbol
+ * @brief Returns the base unit symbol used internally
  * @param baseUnit The base unit
  */
-const char* units_get_base_unit_symbol(enum BaseUnits baseUnit) {
+const char* units_get_base_unit_internal_symbol(enum BaseUnits baseUnit) {
   switch (baseUnit) {
     case UNIT_MASS:
       return "U_M";
@@ -116,7 +116,7 @@ const char* units_get_base_unit_symbol(enum BaseUnits baseUnit) {
  * @brief Returns the base unit symbol in the cgs system
  * @param baseUnit The base unit
  */
-const char* units_get_base_unit_CGS_symbol(enum BaseUnits baseUnit) {
+const char* units_get_base_unit_cgs_symbol(enum BaseUnits baseUnit) {
   switch (baseUnit) {
     case UNIT_MASS:
       return "g";
@@ -276,13 +276,13 @@ void units_get_base_unit_exponants_array(float baseUnitsExp[5],
  * @param us The system of units in use
  * @param unit The unit to convert
  */
-double units_conversion_factor(const struct UnitSystem* us,
-                               enum UnitConversionFactor unit) {
+double units_cgs_conversion_factor(const struct UnitSystem* us,
+                                   enum UnitConversionFactor unit) {
   float baseUnitsExp[5] = {0.f};
 
   units_get_base_unit_exponants_array(baseUnitsExp, unit);
 
-  return units_general_conversion_factor(us, baseUnitsExp);
+  return units_general_cgs_conversion_factor(us, baseUnitsExp);
 }
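
The cgs factor is the product of the base-unit sizes raised to the exponents of the quantity, which is what units_general_cgs_conversion_factor evaluates. A worked sketch for a mass density (exponents M: +1, L: -3), assuming hypothetical internal units of 1e10 solar masses and 1 Mpc:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double U_M = 1.9885e43;    /* 1e10 Msun in g */
      const double U_L = 3.0856776e24; /* 1 Mpc in cm   */

      /* Same product of powers that units_general_cgs_conversion_factor()
       * computes, here for a mass density: {M: 1, L: -3, t: 0, I: 0, T: 0}. */
      const double factor = pow(U_M, 1.) * pow(U_L, -3.);

      printf("1 internal density unit = %e g cm^-3\n", factor);
      return 0;
    }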
 
 /**
@@ -317,13 +317,13 @@ float units_a_factor(const struct UnitSystem* us,
  * @brief Returns a string containing the exponents of the base units making up
  * the conversion factors
  */
-void units_conversion_string(char* buffer, const struct UnitSystem* us,
-                             enum UnitConversionFactor unit) {
+void units_cgs_conversion_string(char* buffer, const struct UnitSystem* us,
+                                 enum UnitConversionFactor unit) {
   float baseUnitsExp[5] = {0.f};
 
   units_get_base_unit_exponants_array(baseUnitsExp, unit);
 
-  units_general_conversion_string(buffer, us, baseUnitsExp);
+  units_general_cgs_conversion_string(buffer, us, baseUnitsExp);
 }
 
 /**
@@ -333,8 +333,8 @@ void units_conversion_string(char* buffer, const struct UnitSystem* us,
 * @param baseUnitsExponants The exponent of each base unit required to form
  * the desired quantity. See conversionFactor() for a working example
  */
-double units_general_conversion_factor(const struct UnitSystem* us,
-                                       const float baseUnitsExponants[5]) {
+double units_general_cgs_conversion_factor(const struct UnitSystem* us,
+                                           const float baseUnitsExponants[5]) {
   double factor = 1.;
   int i;
 
@@ -387,8 +387,9 @@ float units_general_a_factor(const struct UnitSystem* us,
 * @param baseUnitsExponants The exponent of each base unit required to form
  * the desired quantity. See conversionFactor() for a working example
  */
-void units_general_conversion_string(char* buffer, const struct UnitSystem* us,
-                                     const float baseUnitsExponants[5]) {
+void units_general_cgs_conversion_string(char* buffer,
+                                         const struct UnitSystem* us,
+                                         const float baseUnitsExponants[5]) {
   char temp[14];
   double a_exp = units_general_a_factor(us, baseUnitsExponants);
   double h_exp = units_general_h_factor(us, baseUnitsExponants);
@@ -431,12 +432,12 @@ void units_general_conversion_string(char* buffer, const struct UnitSystem* us,
       if (baseUnitsExponants[i] == 0.)
         sprintf(temp, " ");
       else if (baseUnitsExponants[i] == 1.)
-        sprintf(temp, "%s ", units_get_base_unit_symbol(i));
+        sprintf(temp, "%s ", units_get_base_unit_internal_symbol(i));
       else if (remainder(baseUnitsExponants[i], 1.) == 0)
-        sprintf(temp, "%s^%d ", units_get_base_unit_symbol(i),
+        sprintf(temp, "%s^%d ", units_get_base_unit_internal_symbol(i),
                 (int)baseUnitsExponants[i]);
       else
-        sprintf(temp, "%s^%7.4f ", units_get_base_unit_symbol(i),
+        sprintf(temp, "%s^%7.4f ", units_get_base_unit_internal_symbol(i),
                 baseUnitsExponants[i]);
       strncat(buffer, temp, 12);
     }
@@ -449,12 +450,12 @@ void units_general_conversion_string(char* buffer, const struct UnitSystem* us,
       if (baseUnitsExponants[i] == 0.)
         continue;
       else if (baseUnitsExponants[i] == 1.)
-        sprintf(temp, "%s ", units_get_base_unit_CGS_symbol(i));
+        sprintf(temp, "%s ", units_get_base_unit_cgs_symbol(i));
       else if (remainder(baseUnitsExponants[i], 1.) == 0)
-        sprintf(temp, "%s^%d ", units_get_base_unit_CGS_symbol(i),
+        sprintf(temp, "%s^%d ", units_get_base_unit_cgs_symbol(i),
                 (int)baseUnitsExponants[i]);
       else
-        sprintf(temp, "%s^%7.4f ", units_get_base_unit_CGS_symbol(i),
+        sprintf(temp, "%s^%7.4f ", units_get_base_unit_cgs_symbol(i),
                 baseUnitsExponants[i]);
       strncat(buffer, temp, 12);
     }
@@ -462,3 +463,61 @@ void units_general_conversion_string(char* buffer, const struct UnitSystem* us,
 
   strncat(buffer, "]", 2);
 }
+
+/**
+ * @brief Are the two unit systems equal?
+ *
+ * @param a The first #UnitSystem
+ * @param b The second #UnitSystem
+ * @return 1 if the systems are the same, 0 otherwise
+ */
+int units_are_equal(const struct UnitSystem* a, const struct UnitSystem* b) {
+
+  if (a->UnitMass_in_cgs != b->UnitMass_in_cgs) return 0;
+  if (a->UnitLength_in_cgs != b->UnitLength_in_cgs) return 0;
+  if (a->UnitTime_in_cgs != b->UnitTime_in_cgs) return 0;
+  if (a->UnitCurrent_in_cgs != b->UnitCurrent_in_cgs) return 0;
+  if (a->UnitTemperature_in_cgs != b->UnitTemperature_in_cgs) return 0;
+
+  return 1;
+}
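+
Note the exact (==) comparison: the unit systems must match bit-for-bit for the conversion to be skipped, which is the safe choice since the factors multiply straight into the particle data. A toy re-creation with a local stand-in struct (members limited to the five cgs values compared above; the numbers describe a hypothetical kpc/solar-mass system):

    #include <stdio.h>

    struct toy_units { double M, L, t, I, T; };

    static int toy_equal(const struct toy_units* a, const struct toy_units* b) {
      return a->M == b->M && a->L == b->L && a->t == b->t &&
             a->I == b->I && a->T == b->T;
    }

    int main(void) {
      const struct toy_units kpc = {1.9885e33, 3.0856776e21, 3.0856776e16,
                                    1., 1.};
      struct toy_units mpc = kpc;
      mpc.L = 3.0856776e24; /* Mpc instead of kpc */
      printf("%d %d\n", toy_equal(&kpc, &kpc),
             toy_equal(&kpc, &mpc)); /* prints 1 0 */
      return 0;
    }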
+
+/**
+ * @brief Return the unit conversion factor between two systems
+ *
+ * @param from The #UnitSystem we are converting from
+ * @param to The #UnitSystem we are converting to
+ * @param baseUnitsExponants The exponent of each base unit required to form
+ * the desired quantity. See conversionFactor() for a working example
+ */
+double units_general_conversion_factor(const struct UnitSystem* from,
+                                       const struct UnitSystem* to,
+                                       const float baseUnitsExponants[5]) {
+
+  const double from_cgs =
+      units_general_cgs_conversion_factor(from, baseUnitsExponants);
+  const double to_cgs =
+      units_general_cgs_conversion_factor(to, baseUnitsExponants);
+
+  return from_cgs / to_cgs;
+}
+
+/**
+ * @brief Return the unit conversion factor between two systems
+ *
+ * @param from The #UnitSystem we are converting from
+ * @param to The #UnitSystem we are converting to
+ * @param unit The unit we are converting
+ *
+ * @return The conversion factor
+ */
+double units_conversion_factor(const struct UnitSystem* from,
+                               const struct UnitSystem* to,
+                               enum UnitConversionFactor unit) {
+
+  float baseUnitsExp[5] = {0.f};
+
+  units_get_base_unit_exponants_array(baseUnitsExp, unit);
+
+  return units_general_conversion_factor(from, to, baseUnitsExp);
+}
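
Because both sides go through cgs, converting between any two systems collapses to a ratio of cgs factors. A worked sketch for a pure length (exponent 1), assuming hypothetical kpc-based ICs and Mpc-based internal units:

    #include <stdio.h>

    int main(void) {
      const double ic_U_L = 3.0856776e21;       /* 1 kpc in cm */
      const double internal_U_L = 3.0856776e24; /* 1 Mpc in cm */

      /* from_cgs / to_cgs, as in units_general_conversion_factor() */
      const double factor = ic_U_L / internal_U_L;

      /* A coordinate of 500 (kpc) in the ICs becomes 0.5 (Mpc) internally. */
      printf("500 -> %g\n", 500. * factor); /* prints 500 -> 0.5 */
      return 0;
    }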
diff --git a/src/units.h b/src/units.h
index 24e37e177480d7f84e41df1b73e2036aa00b7220..29b4563163b4fe224c881b1c1055f2cbfbcc95a1 100644
--- a/src/units.h
+++ b/src/units.h
@@ -94,13 +94,14 @@ enum UnitConversionFactor {
 
 void units_init(struct UnitSystem*, const struct swift_params*,
                 const char* category);
+int units_are_equal(const struct UnitSystem* a, const struct UnitSystem* b);
+
+/* Base units */
 double units_get_base_unit(const struct UnitSystem*, enum BaseUnits);
-const char* units_get_base_unit_symbol(enum BaseUnits);
-const char* units_get_base_unit_CGS_symbol(enum BaseUnits);
-double units_general_conversion_factor(const struct UnitSystem* us,
-                                       const float baseUnitsExponants[5]);
-double units_conversion_factor(const struct UnitSystem* us,
-                               enum UnitConversionFactor unit);
+const char* units_get_base_unit_internal_symbol(enum BaseUnits);
+const char* units_get_base_unit_cgs_symbol(enum BaseUnits);
+
+/* Cosmology factors */
 float units_general_h_factor(const struct UnitSystem* us,
                              const float baseUnitsExponants[5]);
 float units_h_factor(const struct UnitSystem* us,
@@ -109,9 +110,24 @@ float units_general_a_factor(const struct UnitSystem* us,
                              const float baseUnitsExponants[5]);
 float units_a_factor(const struct UnitSystem* us,
                      enum UnitConversionFactor unit);
-void units_general_conversion_string(char* buffer, const struct UnitSystem* us,
-                                     const float baseUnitsExponants[5]);
-void units_conversion_string(char* buffer, const struct UnitSystem* us,
-                             enum UnitConversionFactor unit);
+
+/* Conversion to CGS */
+double units_general_cgs_conversion_factor(const struct UnitSystem* us,
+                                           const float baseUnitsExponants[5]);
+double units_cgs_conversion_factor(const struct UnitSystem* us,
+                                   enum UnitConversionFactor unit);
+void units_general_cgs_conversion_string(char* buffer,
+                                         const struct UnitSystem* us,
+                                         const float baseUnitsExponants[5]);
+void units_cgs_conversion_string(char* buffer, const struct UnitSystem* us,
+                                 enum UnitConversionFactor unit);
+
+/* Conversion between systems */
+double units_general_conversion_factor(const struct UnitSystem* from,
+                                       const struct UnitSystem* to,
+                                       const float baseUnitsExponants[5]);
+double units_conversion_factor(const struct UnitSystem* from,
+                               const struct UnitSystem* to,
+                               enum UnitConversionFactor unit);
 
 #endif /* SWIFT_UNITS_H */