diff --git a/src/parallel_io.c b/src/parallel_io.c
index d17b7e99e5ec0bb97d87eb1dc50e4a5d40a37f01..03b13aa63f65ad2c0dbe27110ed5cd276ca50b65 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -240,19 +240,103 @@ void readArray(hid_t grp, struct io_props props, size_t N, long long N_total,
  * Routines writing an output file
  *-----------------------------------------------------------------------------*/
 
+/**
+ * @brief Prepares an (empty) array in the snapshot and writes its meta-data.
+ *
+ * @param e The #engine we are writing from.
+ * @param grp The HDF5 group to write to.
+ * @param fileName The name of the file we are writing to.
+ * @param xmfFile The (opened) XMF file we are appending to.
+ * @param partTypeGroupName The name of the group we are writing to.
+ * @param props The #io_props of the field to write.
+ * @param N_total The total number of particles to write in this array.
+ * @param snapshot_units The units used for the data in this snapshot.
+ */
+void prepareArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
+                  char* partTypeGroupName, struct io_props props,
+                  long long N_total, const struct unit_system* snapshot_units) {
+
+  /* Create data space */
+  const hid_t h_space = H5Screate(H5S_SIMPLE);
+  if (h_space < 0)
+    error("Error while creating data space for field '%s'.", props.name);
+
+  int rank = 0;
+  hsize_t shape[2];
+  hsize_t chunk_shape[2];
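+
+  /* Vector quantities are stored as an (N_total x dimension) 2D array,
+   * scalar quantities as a plain 1D array of length N_total. */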
+  if (props.dimension > 1) {
+    rank = 2;
+    shape[0] = N_total;
+    shape[1] = props.dimension;
+    chunk_shape[0] = 1 << 16; /* Just a guess...*/
+    chunk_shape[1] = props.dimension;
+  } else {
+    rank = 1;
+    shape[0] = N_total;
+    shape[1] = 0;
+    chunk_shape[0] = 1 << 16; /* Just a guess...*/
+    chunk_shape[1] = 0;
+  }
+
+  /* Make sure the chunks are not larger than the dataset */
+  if (chunk_shape[0] > (hsize_t)N_total) chunk_shape[0] = N_total;
+
+  /* Change shape of data space */
+  hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
+  if (h_err < 0)
+    error("Error while changing data space shape for field '%s'.", props.name);
+
+  /* Create property list for collective dataset write.    */
+  const hid_t h_plist_id = H5Pcreate(H5P_DATASET_XFER);
+  H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE);
+
+  /* Chunked storage is currently disabled: the dataset below is created
+   * with the default (contiguous) layout. To re-enable chunking, create a
+   * H5P_DATASET_CREATE property list, call H5Pset_chunk() on it with
+   * chunk_shape and pass it to H5Dcreate(). */
+
+  /* Create dataset */
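+  /* The dataset created below is deliberately left empty: the particle data
+   * is written later, in parallel, by writeArray() once every rank has
+   * re-opened the file through MPI-IO. */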
+  const hid_t h_data =
+      H5Dcreate(grp, props.name, io_hdf5_type(props.type), h_space, H5P_DEFAULT,
+                H5P_DEFAULT, H5P_DEFAULT);
+  if (h_data < 0) error("Error while creating dataset '%s'.", props.name);
+
+  /* Write unit conversion factors for this data set */
+  char buffer[FIELD_BUFFER_SIZE];
+  units_cgs_conversion_string(buffer, snapshot_units, props.units);
+  io_write_attribute_d(
+      h_data, "CGS conversion factor",
+      units_cgs_conversion_factor(snapshot_units, props.units));
+  io_write_attribute_f(h_data, "h-scale exponent",
+                       units_h_factor(snapshot_units, props.units));
+  io_write_attribute_f(h_data, "a-scale exponent",
+                       units_a_factor(snapshot_units, props.units));
+  io_write_attribute_s(h_data, "Conversion factor", buffer);
+
+  /* Add a line to the XMF */
+  xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
+                 props.dimension, props.type);
+
+  /* Close everything */
+  H5Pclose(h_plist_id);
+  H5Dclose(h_data);
+  H5Sclose(h_space);
+}
+
 /**
  * @brief Writes a chunk of data in an open HDF5 dataset
  *
  * @param e The #engine we are writing from.
  * @param h_data The HDF5 dataset to write to.
- * @param h_plist_id the parallel HDF5 properties.
- * @param props The #io_props of the field to read.
+ * @param props The #io_props of the field to write.
  * @param N The number of particles to write.
  * @param offset Offset in the array where this mpi task starts writing.
  * @param internal_units The #unit_system used internally.
  * @param snapshot_units The #unit_system used in the snapshots.
  */
-void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
+void writeArray_chunk(struct engine* e, hid_t h_data,
                       const struct io_props props, size_t N, long long offset,
                       const struct unit_system* internal_units,
                       const struct unit_system* snapshot_units) {
@@ -338,7 +422,7 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
 
   /* Write temporary buffer to HDF5 dataspace */
   h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_memspace, h_filespace,
-                   h_plist_id, temp);
+                   H5P_DEFAULT, temp);
   if (h_err < 0) {
     error("Error while writing data array '%s'.", props.name);
   }
@@ -367,7 +451,6 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
  * @param e The #engine we are writing from.
  * @param grp The group in which to write.
  * @param fileName The name of the file in which the data is written.
- * @param xmfFile The FILE used to write the XMF description.
  * @param partTypeGroupName The name of the group containing the particles in
  * the HDF5 file.
  * @param props The #io_props of the field to read
@@ -378,7 +461,7 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
  * @param internal_units The #unit_system used internally.
  * @param snapshot_units The #unit_system used in the snapshots.
  */
-void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
+void writeArray(struct engine* e, hid_t grp, char* fileName,
                 char* partTypeGroupName, struct io_props props, size_t N,
                 long long N_total, int mpi_rank, long long offset,
                 const struct unit_system* internal_units,
@@ -390,62 +473,9 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
   const ticks tic = getticks();
 #endif
 
-  /* Work out properties of the array in the file */
-  int rank;
-  hsize_t shape_total[2];
-  hsize_t chunk_shape[2];
-  if (props.dimension > 1) {
-    rank = 2;
-    shape_total[0] = N_total;
-    shape_total[1] = props.dimension;
-    chunk_shape[0] = 1 << 16; /* Just a guess...*/
-    chunk_shape[1] = props.dimension;
-  } else {
-    rank = 1;
-    shape_total[0] = N_total;
-    shape_total[1] = 0;
-    chunk_shape[0] = 1 << 16; /* Just a guess...*/
-    chunk_shape[1] = 0;
-  }
-
-  /* Make sure the chunks are not larger than the dataset */
-  if (chunk_shape[0] > (hsize_t)N_total) chunk_shape[0] = N_total;
-
-  /* Create the space in the file */
-  hid_t h_filespace = H5Screate(H5S_SIMPLE);
-  if (h_filespace < 0) {
-    error("Error while creating data space (file) for field '%s'.", props.name);
-  }
-
-  /* Change shape of file data space */
-  hid_t h_err = H5Sset_extent_simple(h_filespace, rank, shape_total, NULL);
-  if (h_err < 0) {
-    error("Error while changing data space (file) shape for field '%s'.",
-          props.name);
-  }
-
-  /* Dataset properties */
-  const hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);
-
-  /* Set chunk size */
-  /* h_err = H5Pset_chunk(h_prop, rank, chunk_shape); */
-  /* if (h_err < 0) { */
-  /*   error("Error while setting chunk size (%llu, %llu) for field '%s'.", */
-  /*         chunk_shape[0], chunk_shape[1], props.name); */
-  /* } */
-
-  /* Create dataset */
-  const hid_t h_data = H5Dcreate(grp, props.name, io_hdf5_type(props.type),
-                                 h_filespace, H5P_DEFAULT, h_prop, H5P_DEFAULT);
-  if (h_data < 0) {
-    error("Error while creating dataset '%s'.", props.name);
-  }
-
-  H5Sclose(h_filespace);
-
-  /* Create property list for collective dataset write.    */
-  const hid_t h_plist_id = H5Pcreate(H5P_DATASET_XFER);
-  H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE);
+  /* Open dataset */
+  const hid_t h_data = H5Dopen(grp, props.name, H5P_DEFAULT);
+  if (h_data < 0) error("Error while opening dataset '%s'.", props.name);
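+
+  /* Note: the dataset (shape, attributes, XMF entry) was created beforehand
+   * by prepareArray() on rank 0; here we only fill it with this rank's slice
+   * of the data. */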
 
   /* Given the limitations of ROM-IO we will need to write the data in chunk of
      HDF5_PARALLEL_IO_MAX_BYTES bytes per node until all the nodes are done. */
@@ -458,8 +488,8 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
 
     /* Write the first chunk */
     const size_t this_chunk = (N > max_chunk_size) ? max_chunk_size : N;
-    writeArray_chunk(e, h_data, h_plist_id, props, this_chunk, offset,
-                     internal_units, snapshot_units);
+    writeArray_chunk(e, h_data, props, this_chunk, offset, internal_units,
+                     snapshot_units);
 
     /* Compute how many items are left */
     if (N > max_chunk_size) {
@@ -482,27 +512,8 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
       message("Need to redo one iteration for array '%s'", props.name);
   }
 
-  /* Write XMF description for this data set */
-  if (mpi_rank == 0)
-    xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
-                   props.dimension, props.type);
-
-  /* Write unit conversion factors for this data set */
-  char buffer[FIELD_BUFFER_SIZE];
-  units_cgs_conversion_string(buffer, snapshot_units, props.units);
-  io_write_attribute_d(
-      h_data, "CGS conversion factor",
-      units_cgs_conversion_factor(snapshot_units, props.units));
-  io_write_attribute_f(h_data, "h-scale exponent",
-                       units_h_factor(snapshot_units, props.units));
-  io_write_attribute_f(h_data, "a-scale exponent",
-                       units_a_factor(snapshot_units, props.units));
-  io_write_attribute_s(h_data, "Conversion factor", buffer);
-
   /* Close everything */
-  H5Pclose(h_prop);
   H5Dclose(h_data);
-  H5Pclose(h_plist_id);
 
 #ifdef IO_SPEED_MEASUREMENT
   MPI_Barrier(MPI_COMM_WORLD);
@@ -797,128 +808,47 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
 }
 
 /**
- * @brief Writes an HDF5 output file (GADGET-3 type) with
- *its XMF descriptor
- *
- * @param e The engine containing all the system.
- * @param baseName The common part of the snapshot file name.
- * @param internal_units The #unit_system used internally
- * @param snapshot_units The #unit_system used in the snapshots
- * @param mpi_rank The MPI rank of this node.
- * @param mpi_size The number of MPI ranks.
- * @param comm The MPI communicator.
- * @param info The MPI information object
- *
- * Creates an HDF5 output file and writes the particles
- * contained in the engine. If such a file already exists, it is
- * erased and replaced by the new one.
- * The companion XMF file is also updated accordingly.
- *
- * Calls #error() if an error occurs.
+ * @brief Prepares a file for a parallel write.
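+ *
+ * Creates the HDF5 file and its structure (groups, empty datasets, unit
+ * attributes) as well as the corresponding XMF entries. No particle data is
+ * written here; that is done later by all the ranks in parallel.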
  *
+ * @param e The #engine.
+ * @param baseName The base name of the snapshots.
+ * @param N_total The total number of particles of each type to write.
+ * @param internal_units The #unit_system used internally.
+ * @param snapshot_units The #unit_system used in the snapshots.
  */
-void write_output_parallel(struct engine* e, const char* baseName,
-                           const struct unit_system* internal_units,
-                           const struct unit_system* snapshot_units,
-                           int mpi_rank, int mpi_size, MPI_Comm comm,
-                           MPI_Info info) {
+void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
+                  const struct unit_system* internal_units,
+                  const struct unit_system* snapshot_units) {
 
-  hid_t h_file = 0, h_grp = 0;
-  const size_t Ngas = e->s->nr_parts;
-  const size_t Nstars = e->s->nr_sparts;
-  const size_t Ntot = e->s->nr_gparts;
-  int periodic = e->s->periodic;
-  int numFiles = 1;
   struct part* parts = e->s->parts;
   struct gpart* gparts = e->s->gparts;
-  struct gpart* dmparts = NULL;
   struct spart* sparts = e->s->sparts;
   FILE* xmfFile = 0;
-
-  /* Number of unassociated gparts */
-  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
-
-  /* File name */
-  char fileName[FILENAME_BUFFER_SIZE];
-  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
-           e->snapshotOutputCount);
+  int periodic = e->s->periodic;
+  int numFiles = 1;
 
   /* First time, we need to create the XMF file */
-  if (e->snapshotOutputCount == 0 && mpi_rank == 0) xmf_create_file(baseName);
+  if (e->snapshotOutputCount == 0) xmf_create_file(baseName);
 
   /* Prepare the XMF file for the new entry */
-  if (mpi_rank == 0) xmfFile = xmf_prepare_file(baseName);
-
-  /* Prepare some file-access properties */
-  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
-
-  /* Set some MPI-IO parameters */
-  // MPI_Info_set(info, "IBM_largeblock_io", "true");
-  MPI_Info_set(info, "romio_cb_write", "enable");
-  MPI_Info_set(info, "romio_ds_write", "disable");
-
-  /* Activate parallel i/o */
-  hid_t h_err = H5Pset_fapl_mpio(plist_id, comm, info);
-  if (h_err < 0) error("Error setting parallel i/o");
-
-  /* Align on 4k pages. */
-  h_err = H5Pset_alignment(plist_id, 1024, 4096);
-  if (h_err < 0) error("Error setting Hdf5 alignment");
-
-  /* Disable meta-data cache eviction */
-  H5AC_cache_config_t mdc_config;
-  mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
-  h_err = H5Pget_mdc_config(plist_id, &mdc_config);
-  if (h_err < 0) error("Error getting the MDC config");
+  xmfFile = xmf_prepare_file(baseName);
 
-  mdc_config.evictions_enabled = 0; /* false */
-  mdc_config.incr_mode = H5C_incr__off;
-  mdc_config.decr_mode = H5C_decr__off;
-  mdc_config.flash_incr_mode = H5C_flash_incr__off;
-  h_err = H5Pset_mdc_config(plist_id, &mdc_config);
-  if (h_err < 0) error("Error setting the MDC config");
-
-/* Use parallel meta-data writes */
-#if H5_VERSION_GE(1, 10, 0)
-  h_err = H5Pset_all_coll_metadata_ops(plist_id, 1);
-  if (h_err < 0) error("Error setting collective meta-data on all ops");
-  h_err = H5Pset_coll_metadata_write(plist_id, 1);
-  if (h_err < 0) error("Error setting collective meta-data writes");
-#endif
+  /* HDF5 File name */
+  char fileName[FILENAME_BUFFER_SIZE];
+  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+           e->snapshotOutputCount);
 
   /* Open HDF5 file with the chosen parameters */
-  h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
-  if (h_file < 0) {
-    error("Error while opening file '%s'.", fileName);
-  }
-
-  /* Compute offset in the file and total number of particles */
-  size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
-  long long N_total[swift_type_count] = {0};
-  long long offset[swift_type_count] = {0};
-  MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm);
-  for (int ptype = 0; ptype < swift_type_count; ++ptype)
-    N_total[ptype] = offset[ptype] + N[ptype];
-
-  /* The last rank now has the correct N_total. Let's
-   * broadcast from there */
-  MPI_Bcast(&N_total, 6, MPI_LONG_LONG_INT, mpi_size - 1, comm);
-
-  /* Now everybody konws its offset and the total number of
-   * particles of each type */
+  hid_t h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+  if (h_file < 0) error("Error while creating file '%s'.", fileName);
 
   /* Write the part of the XMF file corresponding to this
    * specific output */
-  if (mpi_rank == 0) xmf_write_outputheader(xmfFile, fileName, e->time);
-
-#ifdef IO_SPEED_MEASUREMENT
-  MPI_Barrier(MPI_COMM_WORLD);
-  ticks tic = getticks();
-#endif
+  xmf_write_outputheader(xmfFile, fileName, e->time);
 
   /* Open header to write simulation properties */
   /* message("Writing runtime parameters..."); */
-  h_grp =
+  hid_t h_grp =
       H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
   if (h_grp < 0) error("Error while creating runtime parameters group\n");
 
@@ -1008,10 +938,200 @@ void write_output_parallel(struct engine* e, const char* baseName,
   /* Print the system of Units used internally */
   io_write_unit_system(h_file, internal_units, "InternalCodeUnits");
 
+  /* Loop over all particle types */
+  for (int ptype = 0; ptype < swift_type_count; ptype++) {
+
+    /* Don't do anything if no particle of this kind */
+    if (N_total[ptype] == 0) continue;
+
+    /* Add the global information for that particle type to
+     * the XMF meta-file */
+    xmf_write_groupheader(xmfFile, fileName, N_total[ptype],
+                          (enum part_type)ptype);
+
+    /* Create the particle group in the file */
+    char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
+    snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
+             ptype);
+    h_grp = H5Gcreate(h_file, partTypeGroupName, H5P_DEFAULT, H5P_DEFAULT,
+                      H5P_DEFAULT);
+    if (h_grp < 0)
+      error("Error while opening particle group %s.", partTypeGroupName);
+
+    int num_fields = 0;
+    struct io_props list[100];
+
+    /* Write particle fields from the particle structure */
+    switch (ptype) {
+
+      case swift_type_gas:
+        hydro_write_particles(parts, list, &num_fields);
+        num_fields += chemistry_write_particles(parts, list + num_fields);
+        break;
+
+      case swift_type_dark_matter:
+        darkmatter_write_particles(gparts, list, &num_fields);
+        break;
+
+      case swift_type_star:
+        star_write_particles(sparts, list, &num_fields);
+        break;
+
+      default:
+        error("Particle Type %d not yet supported. Aborting", ptype);
+    }
+
+    /* Create the (empty) datasets and XMF entries for all the fields */
+    for (int i = 0; i < num_fields; ++i)
+      prepareArray(e, h_grp, fileName, xmfFile, partTypeGroupName, list[i],
+                   N_total[ptype], snapshot_units);
+
+    /* Close particle group */
+    H5Gclose(h_grp);
+
+    /* Close this particle group in the XMF file as well */
+    xmf_write_groupfooter(xmfFile, (enum part_type)ptype);
+  }
+
+  /* Write XMF file descriptor */
+  xmf_write_outputfooter(xmfFile, e->snapshotOutputCount, e->time);
+
+  /* Close the file for now */
+  H5Fclose(h_file);
+}
+
+/**
+ * @brief Writes an HDF5 output file (GADGET-3 type) with
+ * its XMF descriptor.
+ *
+ * @param e The engine containing all the system.
+ * @param baseName The common part of the snapshot file name.
+ * @param internal_units The #unit_system used internally
+ * @param snapshot_units The #unit_system used in the snapshots
+ * @param mpi_rank The MPI rank of this node.
+ * @param mpi_size The number of MPI ranks.
+ * @param comm The MPI communicator.
+ * @param info The MPI information object
+ *
+ * Creates an HDF5 output file and writes the particles
+ * contained in the engine. If such a file already exists, it is
+ * erased and replaced by the new one.
+ * The companion XMF file is also updated accordingly.
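+ *
+ * The file and its structure (groups, empty datasets, attributes, XMF
+ * entries) are first created by rank 0 alone in prepare_file(); every rank
+ * then re-opens the file through MPI-IO and fills the datasets chunk by
+ * chunk.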
+ *
+ * Calls #error() if an error occurs.
+ *
+ */
+void write_output_parallel(struct engine* e, const char* baseName,
+                           const struct unit_system* internal_units,
+                           const struct unit_system* snapshot_units,
+                           int mpi_rank, int mpi_size, MPI_Comm comm,
+                           MPI_Info info) {
+
+  const size_t Ngas = e->s->nr_parts;
+  const size_t Nstars = e->s->nr_sparts;
+  const size_t Ntot = e->s->nr_gparts;
+  struct part* parts = e->s->parts;
+  struct gpart* gparts = e->s->gparts;
+  struct gpart* dmparts = NULL;
+  struct spart* sparts = e->s->sparts;
+
+  /* Number of unassociated gparts */
+  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
+
+  /* Compute offset in the file and total number of particles */
+  size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
+  long long N_total[swift_type_count] = {0};
+  long long offset[swift_type_count] = {0};
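+
+  /* Example: with three ranks holding 10, 20 and 30 gas particles, the
+   * exclusive scan gives gas offsets 0, 10 and 30; the last rank hence ends
+   * up with the correct total (60) and broadcasts it below. */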
+  MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm);
+  for (int ptype = 0; ptype < swift_type_count; ++ptype)
+    N_total[ptype] = offset[ptype] + N[ptype];
+
+  /* The last rank now has the correct N_total. Let's
+   * broadcast from there */
+  MPI_Bcast(&N_total, 6, MPI_LONG_LONG_INT, mpi_size - 1, comm);
+
+  /* Now everybody knows its offset and the total number of
+   * particles of each type */
+
 #ifdef IO_SPEED_MEASUREMENT
+  ticks tic = getticks();
+#endif
+
+  /* Rank 0 prepares the file */
+  if (mpi_rank == 0)
+    prepare_file(e, baseName, N_total, internal_units, snapshot_units);
+
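+  /* All ranks must wait for rank 0 to finish writing the file structure
+   * before re-opening the file in parallel below. */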
   MPI_Barrier(MPI_COMM_WORLD);
+
+#ifdef IO_SPEED_MEASUREMENT
   if (engine_rank == 0)
-    message("Writing HDF5 header took %.3f %s.",
+    message("Preparing file on rank 0 took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+#endif
+
+  /* HDF5 File name */
+  char fileName[FILENAME_BUFFER_SIZE];
+  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+           e->snapshotOutputCount);
+
+  /* Prepare some file-access properties */
+  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+
+  /* Set some MPI-IO parameters */
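+  /* ("cb" = ROMIO collective buffering, "ds" = data sieving) */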
+  // MPI_Info_set(info, "IBM_largeblock_io", "true");
+  MPI_Info_set(info, "romio_cb_write", "enable");
+  MPI_Info_set(info, "romio_ds_write", "disable");
+
+  /* Activate parallel i/o */
+  hid_t h_err = H5Pset_fapl_mpio(plist_id, comm, info);
+  if (h_err < 0) error("Error setting parallel i/o");
+
+  /* Align on 4k pages. */
+  h_err = H5Pset_alignment(plist_id, 1024, 4096);
+  if (h_err < 0) error("Error setting Hdf5 alignment");
+
+  /* Disable meta-data cache eviction */
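+  /* (avoids evicting dirty meta-data entries in the middle of the dump,
+   * which would otherwise turn into many small, slow writes) */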
+  H5AC_cache_config_t mdc_config;
+  mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+  h_err = H5Pget_mdc_config(plist_id, &mdc_config);
+  if (h_err < 0) error("Error getting the MDC config");
+
+  mdc_config.evictions_enabled = 0; /* false */
+  mdc_config.incr_mode = H5C_incr__off;
+  mdc_config.decr_mode = H5C_decr__off;
+  mdc_config.flash_incr_mode = H5C_flash_incr__off;
+  h_err = H5Pset_mdc_config(plist_id, &mdc_config);
+  if (h_err < 0) error("Error setting the MDC config");
+
+/* Use parallel meta-data writes */
+#if H5_VERSION_GE(1, 10, 0)
+  h_err = H5Pset_all_coll_metadata_ops(plist_id, 1);
+  if (h_err < 0) error("Error setting collective meta-data on all ops");
+  h_err = H5Pset_coll_metadata_write(plist_id, 1);
+  if (h_err < 0) error("Error setting collective meta-data writes");
+#endif
+
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Setting parallel HDF5 access properties took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+#endif
+
+  /* Re-open the file prepared by rank 0, now with parallel access */
+  hid_t h_file = H5Fopen(fileName, H5F_ACC_RDWR, plist_id);
+  if (h_file < 0) {
+    error("Error while opening file '%s'.", fileName);
+  }
+
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Opening HDF5 file  took %.3f %s.",
             clocks_from_ticks(getticks() - tic), clocks_getunit());
 
   tic = getticks();
@@ -1056,21 +1176,13 @@ void write_output_parallel(struct engine* e, const char* baseName,
     /* Don't do anything if no particle of this kind */
     if (N_total[ptype] == 0) continue;
 
-    /* Add the global information for that particle type to
-     * the XMF meta-file */
-    if (mpi_rank == 0)
-      xmf_write_groupheader(xmfFile, fileName, N_total[ptype],
-                            (enum part_type)ptype);
-
     /* Open the particle group in the file */
     char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
     snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
              ptype);
-    h_grp = H5Gcreate(h_file, partTypeGroupName, H5P_DEFAULT, H5P_DEFAULT,
-                      H5P_DEFAULT);
-    if (h_grp < 0) {
+    hid_t h_grp = H5Gopen(h_file, partTypeGroupName, H5P_DEFAULT);
+    if (h_grp < 0)
       error("Error while opening particle group %s.", partTypeGroupName);
-    }
 
     int num_fields = 0;
     struct io_props list[100];
@@ -1113,9 +1225,9 @@ void write_output_parallel(struct engine* e, const char* baseName,
 
     /* Write everything */
     for (int i = 0; i < num_fields; ++i)
-      writeArray(e, h_grp, fileName, xmfFile, partTypeGroupName, list[i],
-                 Nparticles, N_total[ptype], mpi_rank, offset[ptype],
-                 internal_units, snapshot_units);
+      writeArray(e, h_grp, fileName, partTypeGroupName, list[i], Nparticles,
+                 N_total[ptype], mpi_rank, offset[ptype], internal_units,
+                 snapshot_units);
 
     /* Free temporary array */
     if (dmparts) {
@@ -1139,16 +1251,6 @@ void write_output_parallel(struct engine* e, const char* baseName,
 
     tic = getticks();
 #endif
-
-    /* Close this particle group in the XMF file as well */
-    if (mpi_rank == 0) xmf_write_groupfooter(xmfFile, (enum part_type)ptype);
-
-#ifdef IO_SPEED_MEASUREMENT
-    MPI_Barrier(MPI_COMM_WORLD);
-    if (engine_rank == 0)
-      message("Writing XMF group footer took %.3f %s.",
-              clocks_from_ticks(getticks() - tic), clocks_getunit());
-#endif
   }
 
 #ifdef IO_SPEED_MEASUREMENT
@@ -1156,19 +1258,6 @@ void write_output_parallel(struct engine* e, const char* baseName,
   tic = getticks();
 #endif
 
-  /* Write LXMF file descriptor */
-  if (mpi_rank == 0)
-    xmf_write_outputfooter(xmfFile, e->snapshotOutputCount, e->time);
-
-#ifdef IO_SPEED_MEASUREMENT
-  MPI_Barrier(MPI_COMM_WORLD);
-  if (engine_rank == 0)
-    message("Writing XMF output footer took %.3f %s.",
-            clocks_from_ticks(getticks() - tic), clocks_getunit());
-
-  tic = getticks();
-#endif
-
   /* message("Done writing particles..."); */
 
   /* Close property descriptor */