diff --git a/doc/RTD/source/Snapshots/index.rst b/doc/RTD/source/Snapshots/index.rst
index 3d808a4d46b1e3d8eafd5b3eff527be89be1f7f1..6c9e1fd4117b7fe7f98ef35012f89aa836888ef0 100644
--- a/doc/RTD/source/Snapshots/index.rst
+++ b/doc/RTD/source/Snapshots/index.rst
@@ -73,7 +73,7 @@ time-line. This is the smallest time-step size that the code can use. This field
 is zero in non-cosmological runs. Similarly, the field ``TimeBase_dt`` contains
 the smallest time-step size (in internal units) that the code can take. This
 would be the increase in time a particle in the time-bin one would have. Note
-that in cosmological runs this quantity evolves with redhsift as the (logarithm
+that in cosmological runs this quantity evolves with redshift as the (logarithm
 of the) scale-factor is used on the integer time-line.
 
 The field ``SelectOutput`` will contain the name of the
@@ -87,6 +87,30 @@ written to the snapshot. Note, however, that when sub-sampling the fields
 of particles actually written (i.e. after sub-sampling), not the total number of
 particles in the run.
 
+The field ``CanHaveTypes`` indicates, for each particle type, whether that type
+can be expected in snapshots of this run. For instance, in a simulation with
+star formation switched on, the code may not have formed any stars yet but may
+do so by the time of future snapshots. This allows reading tools to distinguish
+particle types (and their fields) that will never appear in a given simulation
+from ones that may be present in other outputs.
+
 Meta-data about the code and run
 --------------------------------
 
diff --git a/examples/main.c b/examples/main.c
index 37c1dd099c5714d29108881c758b8335ca69ca96..3541dd4b7fa2f67f15911868c85d9e20bdb2859d 100644
--- a/examples/main.c
+++ b/examples/main.c
@@ -1342,8 +1342,8 @@ int main(int argc, char *argv[]) {
                sparts, bparts, Ngas, Ngpart, Nsink, Nspart, Nbpart, Nnupart,
                periodic, replicate, remap_ids, generate_gas_in_ics, with_hydro,
                with_self_gravity, with_star_formation, with_sink,
-               with_DM_background_particles, with_neutrinos, talking, dry_run,
-               nr_nodes);
+               with_DM_particles, with_DM_background_particles, with_neutrinos,
+               talking, dry_run, nr_nodes);
 
     /* Initialise the line of sight properties. */
     if (with_line_of_sight) los_init(s.dim, &los_properties, params);
diff --git a/examples/main_fof.c b/examples/main_fof.c
index 00351305d7cb578812e5675175c61d2fe11fe29c..02c2a1461efe0b338d5e447d08c73c68a8b1b3a4 100644
--- a/examples/main_fof.c
+++ b/examples/main_fof.c
@@ -565,8 +565,8 @@ int main(int argc, char *argv[]) {
              Nnupart, periodic, replicate, /*remap_ids=*/0,
              /*generate_gas_in_ics=*/0, /*hydro=*/N_total[0] > 0, /*gravity=*/1,
              /*with_star_formation=*/0, /*sink=*/N_total[swift_type_sink],
-             with_DM_background_particles, with_neutrinos, talking,
-             /*dry_run=*/0, nr_nodes);
+             with_DM_particles, with_DM_background_particles, with_neutrinos,
+             talking, /*dry_run=*/0, nr_nodes);
 
   if (myrank == 0) {
     clocks_gettime(&toc);
diff --git a/src/common_io.h b/src/common_io.h
index 0c77f0a69d3003a2a7b1c6d58e46c5f6441675b0..ca411f810ba8bc6b3ae91cf71a9610dcca699f82 100644
--- a/src/common_io.h
+++ b/src/common_io.h
@@ -116,6 +116,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                            const int snap_num,
                            const long long global_counts[swift_type_count],
                            const long long global_offsets[swift_type_count],
+                           const int to_write[swift_type_count],
                            const int num_fields[swift_type_count],
                            const struct unit_system* internal_units,
                            const struct unit_system* snapshot_units);
diff --git a/src/common_io_cells.c b/src/common_io_cells.c
index 589778eea7fca1bdd4668c69fe8481ac9a3e2abc..642d0aa62951f320e6221436b7723d051bf7cdef 100644
--- a/src/common_io_cells.c
+++ b/src/common_io_cells.c
@@ -227,6 +227,16 @@ void io_write_array(hid_t h_grp, const int n, const int dim, const void* array,
     error("Error while setting check-sum filter on %s %s data space.", name,
           array_content);
 
+  /* Impose the SHUFFLE filter (to improve compression) */
+  h_err = H5Pset_shuffle(h_prop);
+  if (h_err < 0)
+    error("Error while setting shuffling options for field '%s'.", name);
+
+  /* Impose GZIP compression */
+  h_err = H5Pset_deflate(h_prop, 4);
+  if (h_err < 0)
+    error("Error while setting compression options for field '%s'.", name);
+
   /* Write */
   hid_t h_data =
       H5Dcreate(h_grp, name, h_type, h_space, H5P_DEFAULT, h_prop, H5P_DEFAULT);
@@ -256,6 +266,8 @@ void io_write_array(hid_t h_grp, const int n, const int dim, const void* array,
  * @param global_counts The total number of particles across all nodes.
  * @param global_offsets The offsets of this node into the global list of
  * particles.
+ * @param to_write Whether a given particle type should be written to the cell
+ * info.
  * @param numFields The number of fields to write for each particle type.
  * @param internal_units The internal unit system.
  * @param snapshot_units The snapshot unit system.
@@ -269,6 +281,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                            const int snap_num,
                            const long long global_counts[swift_type_count],
                            const long long global_offsets[swift_type_count],
+                           const int to_write[swift_type_count],
                            const int num_fields[swift_type_count],
                            const struct unit_system* internal_units,
                            const struct unit_system* snapshot_units) {
@@ -611,7 +624,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
         H5Gcreate(h_grp, "MaxPositions", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
     if (h_grp_counts < 0) error("Error while creating counts sub-group");
 
-    if (global_counts[swift_type_gas] > 0 && num_fields[swift_type_gas] > 0) {
+    if (to_write[swift_type_gas] > 0 && num_fields[swift_type_gas] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType0",
                      "files");
       io_write_array(h_grp_offsets, nr_cells, /*dim=*/1, offset_part, LONGLONG,
@@ -624,7 +637,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "PartType0", "max_pos");
     }
 
-    if (global_counts[swift_type_dark_matter] > 0 &&
+    if (to_write[swift_type_dark_matter] > 0 &&
         num_fields[swift_type_dark_matter] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType1",
                      "files");
@@ -638,7 +651,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "PartType1", "max_pos");
     }
 
-    if (global_counts[swift_type_dark_matter_background] > 0 &&
+    if (to_write[swift_type_dark_matter_background] > 0 &&
         num_fields[swift_type_dark_matter_background] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType2",
                      "files");
@@ -652,7 +665,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      max_gpart_background_pos, DOUBLE, "PartType2", "max_pos");
     }
 
-    if (global_counts[swift_type_sink] > 0 && num_fields[swift_type_sink] > 0) {
+    if (to_write[swift_type_sink] > 0 && num_fields[swift_type_sink] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType3",
                      "files");
       io_write_array(h_grp_offsets, nr_cells, /*dim=*/1, offset_sink, LONGLONG,
@@ -665,8 +678,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "PartType3", "max_pos");
     }
 
-    if (global_counts[swift_type_stars] > 0 &&
-        num_fields[swift_type_stars] > 0) {
+    if (to_write[swift_type_stars] > 0 && num_fields[swift_type_stars] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType4",
                      "files");
       io_write_array(h_grp_offsets, nr_cells, /*dim=*/1, offset_spart, LONGLONG,
@@ -679,7 +691,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "PartType4", "max_pos");
     }
 
-    if (global_counts[swift_type_black_hole] > 0 &&
+    if (to_write[swift_type_black_hole] > 0 &&
         num_fields[swift_type_black_hole] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType5",
                      "files");
@@ -693,7 +705,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "PartType5", "max_pos");
     }
 
-    if (global_counts[swift_type_neutrino] > 0 &&
+    if (to_write[swift_type_neutrino] > 0 &&
         num_fields[swift_type_neutrino] > 0) {
       io_write_array(h_grp_files, nr_cells, /*dim=*/1, files, INT, "PartType6",
                      "files");
diff --git a/src/distributed_io.c b/src/distributed_io.c
index 2510eb156d10a48feac0880680c34f833fcac108..3f74c30b51f5f8bbbda9c383b24c58a99f7324f6 100644
--- a/src/distributed_io.c
+++ b/src/distributed_io.c
@@ -458,6 +458,7 @@ void write_virtual_file(struct engine* e, const char* fileName_base,
                         const char* xmfFileName,
                         const long long N_total[swift_type_count],
                         const long long* N_counts, const int num_ranks,
+                        const int to_write[swift_type_count],
                         const int numFields[swift_type_count],
                         char current_selection_name[FIELD_BUFFER_SIZE],
                         const struct unit_system* internal_units,
@@ -582,6 +583,7 @@ void write_virtual_file(struct engine* e, const char* fileName_base,
   io_write_attribute_i(h_grp, "ThisFile", 0);
   io_write_attribute_s(h_grp, "SelectOutput", current_selection_name);
   io_write_attribute_i(h_grp, "Virtual", 1);
+  io_write_attribute(h_grp, "CanHaveTypes", INT, to_write, swift_type_count);
 
   if (subsample_any) {
     io_write_attribute_s(h_grp, "OutputType", "SubSampled");
@@ -603,9 +605,10 @@ void write_virtual_file(struct engine* e, const char* fileName_base,
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if there are (a) no particles of this kind, or (b)
-     * if we have disabled every field of this particle type. */
-    if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
+    /* Don't do anything if
+     * (a) there are no particles of this kind in this run, or
+     * (b) we have disabled every field of this particle type. */
+    if (!to_write[ptype] || numFields[ptype] == 0) continue;
 
     /* Add the global information for that particle type to
      * the XMF meta-file */
@@ -761,8 +764,13 @@ void write_output_distributed(struct engine* e,
   const int with_temperature = e->policy & engine_policy_temperature;
   const int with_fof = e->policy & engine_policy_fof;
   const int with_DM_background = e->s->with_DM_background;
+  const int with_DM = e->s->with_DM;
   const int with_neutrinos = e->s->with_neutrinos;
   const int with_rt = e->policy & engine_policy_rt;
+  const int with_hydro = (e->policy & engine_policy_hydro) ? 1 : 0;
+  const int with_stars = (e->policy & engine_policy_stars) ? 1 : 0;
+  const int with_black_hole = (e->policy & engine_policy_black_holes) ? 1 : 0;
+  const int with_sink = (e->policy & engine_policy_sinks) ? 1 : 0;
 #ifdef HAVE_VELOCIRAPTOR
   const int with_stf = (e->policy & engine_policy_structure_finding) &&
                        (e->s->gpart_group_data != NULL);
@@ -941,6 +949,15 @@ void write_output_distributed(struct engine* e,
   MPI_Gather(N, swift_type_count, MPI_LONG_LONG_INT, N_counts, swift_type_count,
              MPI_LONG_LONG_INT, 0, comm);
 
+  /* List what fields to write.
+   * Note that we want to write a 0-size dataset for some species in case
+   * future snapshots contain them (e.g. star formation). */
+  const int to_write[swift_type_count] = {
+      with_hydro, with_DM,         with_DM_background, with_sink,
+      with_stars, with_black_hole, with_neutrinos
+
+  };
+
   /* Use a single Lustre stripe with a rank-based OST offset? */
   if (e->snapshot_lustre_OST_count != 0) {
     char string[1200];
@@ -1048,6 +1065,7 @@ void write_output_distributed(struct engine* e,
   io_write_attribute_i(h_grp, "ThisFile", mpi_rank);
   io_write_attribute_s(h_grp, "SelectOutput", current_selection_name);
   io_write_attribute_i(h_grp, "Virtual", 0);
+  io_write_attribute(h_grp, "CanHaveTypes", INT, to_write, swift_type_count);
 
   if (subsample_any) {
     io_write_attribute_s(h_grp, "OutputType", "SubSampled");
@@ -1079,15 +1097,16 @@ void write_output_distributed(struct engine* e,
                         e->s->nr_cells, e->s->width, mpi_rank,
                         /*distributed=*/1, subsample, subsample_fraction,
                         e->snapshot_output_count, N_total, global_offsets,
-                        numFields, internal_units, snapshot_units);
+                        to_write, numFields, internal_units, snapshot_units);
   H5Gclose(h_grp);
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if there are (a) no particles of this kind, or (b)
-     * if we have disabled every field of this particle type. */
-    if (N[ptype] == 0 || numFields[ptype] == 0) continue;
+    /* Don't do anything if
+     * (a) there are no particles of this kind in this run, or
+     * (b) we have disabled every field of this particle type. */
+    if (!to_write[ptype] || numFields[ptype] == 0) continue;
 
     /* Open the particle group in the file */
     char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
@@ -1431,7 +1450,7 @@ void write_output_distributed(struct engine* e,
   /* Write the virtual meta-file */
   if (mpi_rank == 0)
     write_virtual_file(e, fileName_base, xmfFileName, N_total, N_counts,
-                       mpi_size, numFields, current_selection_name,
+                       mpi_size, to_write, numFields, current_selection_name,
                        internal_units, snapshot_units, subsample_any,
                        subsample_fraction);
 
@@ -1470,7 +1489,7 @@ void write_output_distributed(struct engine* e,
                         e->s->nr_cells, e->s->width, mpi_rank,
                         /*distributed=*/0, subsample, subsample_fraction,
                         e->snapshot_output_count, N_total, global_offsets,
-                        numFields, internal_units, snapshot_units);
+                        to_write, numFields, internal_units, snapshot_units);
 
   /* Close everything */
   if (mpi_rank == 0) {
diff --git a/src/line_of_sight.c b/src/line_of_sight.c
index 9f07bbf12f298d1b74262283ea1481dbab095bc0..99a530fc1dc4e61d2f08e1a2cf7699dc134899ce 100644
--- a/src/line_of_sight.c
+++ b/src/line_of_sight.c
@@ -534,6 +534,8 @@ void write_hdf5_header(hid_t h_file, const struct engine *e,
                      swift_type_count);
   io_write_attribute_i(h_grp, "NumFilesPerSnapshot", 1);
   io_write_attribute_i(h_grp, "ThisFile", 0);
+  io_write_attribute_s(h_grp, "SelectOutput", "Default");
+  io_write_attribute_i(h_grp, "Virtual", 0);
   io_write_attribute_s(h_grp, "OutputType", "LineOfSight");
 
   /* Close group */
diff --git a/src/parallel_io.c b/src/parallel_io.c
index faa4f209c523f8d83ae224fd02cb01dd8967047a..61d47674523173acb62fd84595da042782f55d92 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -392,7 +392,8 @@ void read_array_parallel(hid_t grp, struct io_props props, size_t N,
  */
 void prepare_array_parallel(
     struct engine* e, hid_t grp, const char* fileName, FILE* xmfFile,
-    char* partTypeGroupName, struct io_props props, long long N_total,
+    const char* partTypeGroupName, const struct io_props props,
+    const long long N_total,
     const enum lossy_compression_schemes lossy_compression,
     const struct unit_system* snapshot_units) {
 
@@ -519,9 +520,9 @@ void prepare_array_parallel(
  * @param internal_units The #unit_system used internally.
  * @param snapshot_units The #unit_system used in the snapshots.
  */
-void write_array_parallel_chunk(struct engine* e, hid_t h_data,
-                                const struct io_props props, size_t N,
-                                long long offset,
+void write_array_parallel_chunk(const struct engine* e, hid_t h_data,
+                                const struct io_props props, const size_t N,
+                                const long long offset,
                                 const struct unit_system* internal_units,
                                 const struct unit_system* snapshot_units) {
 
@@ -646,9 +647,10 @@ void write_array_parallel_chunk(struct engine* e, hid_t h_data,
  * @param internal_units The #unit_system used internally.
  * @param snapshot_units The #unit_system used in the snapshots.
  */
-void write_array_parallel(struct engine* e, hid_t grp, char* fileName,
-                          char* partTypeGroupName, struct io_props props,
-                          size_t N, long long N_total, int mpi_rank,
+void write_array_parallel(const struct engine* e, hid_t grp,
+                          const char* fileName, char* partTypeGroupName,
+                          struct io_props props, size_t N,
+                          const long long N_total, const int mpi_rank,
                           long long offset,
                           const struct unit_system* internal_units,
                           const struct unit_system* snapshot_units) {
@@ -761,12 +763,14 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
                       struct bpart** bparts, size_t* Ngas, size_t* Ngparts,
                       size_t* Ngparts_background, size_t* Nnuparts,
                       size_t* Nsinks, size_t* Nstars, size_t* Nblackholes,
-                      int* flag_entropy, int with_hydro, int with_gravity,
-                      int with_sink, int with_stars, int with_black_holes,
-                      int with_cosmology, int cleanup_h, int cleanup_sqrt_a,
-                      double h, double a, int mpi_rank, int mpi_size,
-                      MPI_Comm comm, MPI_Info info, int n_threads, int dry_run,
-                      int remap_ids, struct ic_info* ics_metadata) {
+                      int* flag_entropy, const int with_hydro,
+                      const int with_gravity, const int with_sink,
+                      const int with_stars, const int with_black_holes,
+                      const int with_cosmology, const int cleanup_h,
+                      const int cleanup_sqrt_a, const double h, const double a,
+                      const int mpi_rank, const int mpi_size, MPI_Comm comm,
+                      MPI_Info info, const int n_threads, const int dry_run,
+                      const int remap_ids, struct ic_info* ics_metadata) {
 
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
@@ -1138,6 +1142,7 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
  * @param e The #engine.
  * @param fileName The file name to write to.
  * @param N_total The total number of particles of each type to write.
+ * @param to_write Whether or not specific particle types must be written.
  * @param numFields The number of fields to write for each particle type.
  * @param internal_units The #unit_system used internally.
  * @param snapshot_units The #unit_system used in the snapshots.
@@ -1145,9 +1150,11 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
  * @param subsample_fraction The subsampling fraction of each particle type.
  */
 void prepare_file(struct engine* e, const char* fileName,
-                  const char* xmfFileName, long long N_total[swift_type_count],
+                  const char* xmfFileName,
+                  const long long N_total[swift_type_count],
+                  const int to_write[swift_type_count],
                   const int numFields[swift_type_count],
-                  char current_selection_name[FIELD_BUFFER_SIZE],
+                  const char current_selection_name[FIELD_BUFFER_SIZE],
                   const struct unit_system* internal_units,
                   const struct unit_system* snapshot_units,
                   const int subsample_any,
@@ -1265,6 +1272,7 @@ void prepare_file(struct engine* e, const char* fileName,
   io_write_attribute_i(h_grp, "ThisFile", 0);
   io_write_attribute_s(h_grp, "SelectOutput", current_selection_name);
   io_write_attribute_i(h_grp, "Virtual", 0);
+  io_write_attribute(h_grp, "CanHaveTypes", INT, to_write, swift_type_count);
 
   if (subsample_any) {
     io_write_attribute_s(h_grp, "OutputType", "SubSampled");
@@ -1286,9 +1294,10 @@ void prepare_file(struct engine* e, const char* fileName,
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if there are (a) no particles of this kind, or (b)
-     * if we have disabled every field of this particle type. */
-    if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
+    /* Don't do anything if
+     * (a) there are no particles of this kind in this run, or
+     * (b) we have disabled every field of this particle type. */
+    if (!to_write[ptype] || numFields[ptype] == 0) continue;
 
     /* Add the global information for that particle type to
      * the XMF meta-file */
@@ -1424,8 +1433,8 @@ void prepare_file(struct engine* e, const char* fileName,
 void write_output_parallel(struct engine* e,
                            const struct unit_system* internal_units,
                            const struct unit_system* snapshot_units,
-                           int mpi_rank, int mpi_size, MPI_Comm comm,
-                           MPI_Info info) {
+                           const int mpi_rank, const int mpi_size,
+                           MPI_Comm comm, MPI_Info info) {
 
   const struct part* parts = e->s->parts;
   const struct xpart* xparts = e->s->xparts;
@@ -1440,7 +1449,12 @@ void write_output_parallel(struct engine* e,
   const int with_temperature = e->policy & engine_policy_temperature;
   const int with_fof = e->policy & engine_policy_fof;
   const int with_DM_background = e->s->with_DM_background;
+  const int with_DM = e->s->with_DM;
   const int with_neutrinos = e->s->with_neutrinos;
+  const int with_hydro = (e->policy & engine_policy_hydro) ? 1 : 0;
+  const int with_stars = (e->policy & engine_policy_stars) ? 1 : 0;
+  const int with_black_hole = (e->policy & engine_policy_black_holes) ? 1 : 0;
+  const int with_sink = (e->policy & engine_policy_sinks) ? 1 : 0;
 #ifdef HAVE_VELOCIRAPTOR
   const int with_stf = (e->policy & engine_policy_structure_finding) &&
                        (e->s->gpart_group_data != NULL);
@@ -1592,9 +1606,18 @@ void write_output_parallel(struct engine* e,
   /* Now everybody konws its offset and the total number of
    * particles of each type */
 
+  /* List what fields to write.
+   * Note that we want to write a 0-size dataset for some species in case
+   * future snapshots contain them (e.g. star formation). */
+  const int to_write[swift_type_count] = {
+      with_hydro, with_DM,         with_DM_background, with_sink,
+      with_stars, with_black_hole, with_neutrinos
+
+  };
+
   /* Rank 0 prepares the file */
   if (mpi_rank == 0)
-    prepare_file(e, fileName, xmfFileName, N_total, numFields,
+    prepare_file(e, fileName, xmfFileName, N_total, to_write, numFields,
                  current_selection_name, internal_units, snapshot_units,
                  subsample_any, subsample_fraction);
 
@@ -1627,8 +1650,8 @@ void write_output_parallel(struct engine* e,
   io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->dim, e->s->cells_top,
                         e->s->nr_cells, e->s->width, mpi_rank,
                         /*distributed=*/0, subsample, subsample_fraction,
-                        e->snapshot_output_count, N_total, offset, numFields,
-                        internal_units, snapshot_units);
+                        e->snapshot_output_count, N_total, offset, to_write,
+                        numFields, internal_units, snapshot_units);
 
   /* Close everything */
   if (mpi_rank == 0) {
@@ -1698,7 +1721,12 @@ void write_output_parallel(struct engine* e,
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if no particle of this kind */
+    /* Don't do anything if
+     * (a) there are no particles of this kind in this snapshot, or
+     * (b) we have disabled every field of this particle type.
+     *
+     * Note: the datasets have already been created, so it is safe to skip a
+     * type here even if its to_write[] entry was set but no particles exist. */
     if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
 
     /* Open the particle group in the file */
diff --git a/src/parallel_io.h b/src/parallel_io.h
index d6a3ab8eac67b316c452c26c0d00df8ed118bf24..d86c3058c74afb6304f1845e32e2e5d9b670a4e6 100644
--- a/src/parallel_io.h
+++ b/src/parallel_io.h
@@ -40,12 +40,14 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
                       struct bpart** bparts, size_t* Ngas, size_t* Ngparts,
                       size_t* Ngparts_background, size_t* Nnuparts,
                       size_t* Nsinks, size_t* Nsparts, size_t* Nbparts,
-                      int* flag_entropy, int with_hydro, int with_gravity,
-                      int with_sinks, int with_stars, int with_black_holes,
-                      int with_cosmology, int cleanup_h, int cleanup_sqrt_a,
-                      double h, double a, int mpi_rank, int mpi_size,
-                      MPI_Comm comm, MPI_Info info, int nr_threads, int dry_run,
-                      int remap_ids, struct ic_info* ics_metadata);
+                      int* flag_entropy, const int with_hydro,
+                      const int with_gravity, const int with_sinks,
+                      const int with_stars, const int with_black_holes,
+                      const int with_cosmology, const int cleanup_h,
+                      const int cleanup_sqrt_a, const double h, const double a,
+                      const int mpi_rank, const int mpi_size, MPI_Comm comm,
+                      MPI_Info info, const int nr_threads, const int dry_run,
+                      const int remap_ids, struct ic_info* ics_metadata);
 
 void write_output_parallel(struct engine* e,
                            const struct unit_system* internal_units,
diff --git a/src/serial_io.c b/src/serial_io.c
index 53156f0966a2c332012ab1c489640cc9784b2331..cdb335b12ea06727cea1567ce6c1ed017b63f2d6 100644
--- a/src/serial_io.c
+++ b/src/serial_io.c
@@ -247,9 +247,9 @@ void read_array_serial(hid_t grp, const struct io_props props, size_t N,
 }
 
 void prepare_array_serial(
-    const struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
-    char* partTypeGroupName, const struct io_props props,
-    unsigned long long N_total,
+    const struct engine* e, hid_t grp, const char* fileName, FILE* xmfFile,
+    const char* partTypeGroupName, const struct io_props props,
+    const unsigned long long N_total,
     const enum lossy_compression_schemes lossy_compression,
     const struct unit_system* internal_units,
     const struct unit_system* snapshot_units) {
@@ -293,11 +293,13 @@ void prepare_array_serial(
   /* Dataset properties */
   hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);
 
-  /* Set chunk size */
-  h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
-  if (h_err < 0)
-    error("Error while setting chunk size (%llu, %llu) for field '%s'.",
-          chunk_shape[0], chunk_shape[1], props.name);
+  /* Set chunk size if we have some particles */
+  if (N_total > 0) {
+    h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
+    if (h_err < 0)
+      error("Error while setting chunk size (%llu, %llu) for field '%s'.",
+            chunk_shape[0], chunk_shape[1], props.name);
+  }
 
   /* Are we imposing some form of lossy compression filter? */
   char comp_buffer[32] = "None";
@@ -305,8 +307,8 @@ void prepare_array_serial(
     set_hdf5_lossy_compression(&h_prop, &h_type, lossy_compression, props.name,
                                comp_buffer);
 
-  /* Impose data compression */
-  if (e->snapshot_compression > 0) {
+  /* Impose GZIP and shuffle data compression */
+  if (e->snapshot_compression > 0 && N_total > 0) {
     h_err = H5Pset_shuffle(h_prop);
     if (h_err < 0)
       error("Error while setting shuffling options for field '%s'.",
@@ -319,9 +321,11 @@ void prepare_array_serial(
   }
 
   /* Impose check-sum to verify data corruption */
-  h_err = H5Pset_fletcher32(h_prop);
-  if (h_err < 0)
-    error("Error while setting checksum options for field '%s'.", props.name);
+  if (N_total > 0) {
+    h_err = H5Pset_fletcher32(h_prop);
+    if (h_err < 0)
+      error("Error while setting checksum options for field '%s'.", props.name);
+  }
 
   /* Create dataset */
   const hid_t h_data = H5Dcreate(grp, props.name, h_type, h_space, H5P_DEFAULT,
@@ -398,9 +402,10 @@ void prepare_array_serial(
  * the part array will be written once the structures have been stabilized.
  */
 void write_array_serial(const struct engine* e, hid_t grp, char* fileName,
-                        FILE* xmfFile, char* partTypeGroupName,
-                        const struct io_props props, size_t N,
-                        long long N_total, int mpi_rank, long long offset,
+                        FILE* xmfFile, const char* partTypeGroupName,
+                        const struct io_props props, const size_t N,
+                        const long long N_total, const int mpi_rank,
+                        const long long offset,
                         const enum lossy_compression_schemes lossy_compression,
                         const struct unit_system* internal_units,
                         const struct unit_system* snapshot_units) {
@@ -531,12 +536,14 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
                     struct bpart** bparts, size_t* Ngas, size_t* Ngparts,
                     size_t* Ngparts_background, size_t* Nnuparts,
                     size_t* Nsinks, size_t* Nstars, size_t* Nblackholes,
-                    int* flag_entropy, int with_hydro, int with_gravity,
-                    int with_sink, int with_stars, int with_black_holes,
-                    int with_cosmology, int cleanup_h, int cleanup_sqrt_a,
-                    double h, double a, int mpi_rank, int mpi_size,
-                    MPI_Comm comm, MPI_Info info, int n_threads, int dry_run,
-                    int remap_ids, struct ic_info* ics_metadata) {
+                    int* flag_entropy, const int with_hydro,
+                    const int with_gravity, const int with_sink,
+                    const int with_stars, const int with_black_holes,
+                    const int with_cosmology, const int cleanup_h,
+                    const int cleanup_sqrt_a, double h, double a,
+                    const int mpi_rank, int mpi_size, MPI_Comm comm,
+                    MPI_Info info, const int n_threads, const int dry_run,
+                    const int remap_ids, struct ic_info* ics_metadata) {
 
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
@@ -951,8 +958,9 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
  */
 void write_output_serial(struct engine* e,
                          const struct unit_system* internal_units,
-                         const struct unit_system* snapshot_units, int mpi_rank,
-                         int mpi_size, MPI_Comm comm, MPI_Info info) {
+                         const struct unit_system* snapshot_units,
+                         const int mpi_rank, const int mpi_size, MPI_Comm comm,
+                         MPI_Info info) {
 
   hid_t h_file = 0, h_grp = 0;
   int numFiles = 1;
@@ -968,8 +976,13 @@ void write_output_serial(struct engine* e,
   const int with_cooling = e->policy & engine_policy_cooling;
   const int with_temperature = e->policy & engine_policy_temperature;
   const int with_fof = e->policy & engine_policy_fof;
+  const int with_DM = e->s->with_DM;
   const int with_DM_background = e->s->with_DM_background;
   const int with_neutrinos = e->s->with_neutrinos;
+  const int with_hydro = (e->policy & engine_policy_hydro) ? 1 : 0;
+  const int with_stars = (e->policy & engine_policy_stars) ? 1 : 0;
+  const int with_black_hole = (e->policy & engine_policy_black_holes) ? 1 : 0;
+  const int with_sink = (e->policy & engine_policy_sinks) ? 1 : 0;
 #ifdef HAVE_VELOCIRAPTOR
   const int with_stf = (e->policy & engine_policy_structure_finding) &&
                        (e->s->gpart_group_data != NULL);
@@ -1118,6 +1131,15 @@ void write_output_serial(struct engine* e,
   /* The last rank now has the correct N_total. Let's broadcast from there */
   MPI_Bcast(N_total, swift_type_count, MPI_LONG_LONG_INT, mpi_size - 1, comm);
 
+  /* List what fields to write.
+   * Note that we want to write a 0-size dataset for some species in case
+   * future snapshots contain them (e.g. star formation). */
+  const int to_write[swift_type_count] = {
+      with_hydro, with_DM,         with_DM_background, with_sink,
+      with_stars, with_black_hole, with_neutrinos
+
+  };
+
   /* Now everybody knows its offset and the total number of particles of each
    * type */
 
@@ -1219,6 +1241,7 @@ void write_output_serial(struct engine* e,
     io_write_attribute_i(h_grp, "ThisFile", 0);
     io_write_attribute_s(h_grp, "SelectOutput", current_selection_name);
     io_write_attribute_i(h_grp, "Virtual", 0);
+    io_write_attribute(h_grp, "CanHaveTypes", INT, to_write, swift_type_count);
 
     if (subsample_any) {
       io_write_attribute_s(h_grp, "OutputType", "SubSampled");
@@ -1240,9 +1263,10 @@ void write_output_serial(struct engine* e,
     /* Loop over all particle types */
     for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-      /* Don't do anything if there are (a) no particles of this kind, or (b)
-       * if we have disabled every field of this particle type. */
-      if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
+      /* Don't do anything if
+       * (a) there are no particles of this kind in this run, or
+       * (b) we have disabled every field of this particle type. */
+      if (!to_write[ptype] || numFields[ptype] == 0) continue;
 
       /* Open the particle group in the file */
       char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
@@ -1290,8 +1314,8 @@ void write_output_serial(struct engine* e,
   io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->dim, e->s->cells_top,
                         e->s->nr_cells, e->s->width, mpi_rank,
                         /*distributed=*/0, subsample, subsample_fraction,
-                        e->snapshot_output_count, N_total, offset, numFields,
-                        internal_units, snapshot_units);
+                        e->snapshot_output_count, N_total, offset, to_write,
+                        numFields, internal_units, snapshot_units);
 
   /* Close everything */
   if (mpi_rank == 0) {
@@ -1312,9 +1336,10 @@ void write_output_serial(struct engine* e,
       /* Loop over all particle types */
       for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-        /* Don't do anything if there are (a) no particles of this kind, or (b)
-         * if we have disabled every field of this particle type. */
-        if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
+        /* Don't do anything if
+         * (a) there are no particles of this kind in this run, or
+         * (b) we have disabled every field of this particle type. */
+        if (!to_write[ptype] || numFields[ptype] == 0) continue;
 
         /* Add the global information for that particle type to the XMF
          * meta-file */
diff --git a/src/serial_io.h b/src/serial_io.h
index 86edacf3b1ab3dd9b89545e4c8627f9ec7f435ba..2d3577883beca90d890570cb37f8c58b74ee2b5f 100644
--- a/src/serial_io.h
+++ b/src/serial_io.h
@@ -42,17 +42,20 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
                     struct bpart** bparts, size_t* Ngas, size_t* Ngparts,
                     size_t* Ngparts_background, size_t* Nnuparts,
                     size_t* Nsinks, size_t* Nstars, size_t* Nblackholes,
-                    int* flag_entropy, int with_hydro, int with_gravity,
-                    int with_sinks, int with_stars, int with_black_holes,
-                    int with_cosmology, int cleanup_h, int cleanup_sqrt_a,
-                    double h, double a, int mpi_rank, int mpi_size,
-                    MPI_Comm comm, MPI_Info info, int n_threads, int dry_run,
-                    int remap_ids, struct ic_info* ics_metadata);
+                    int* flag_entropy, const int with_hydro,
+                    const int with_gravity, const int with_sinks,
+                    const int with_stars, const int with_black_holes,
+                    const int with_cosmology, const int cleanup_h,
+                    const int cleanup_sqrt_a, const double h, const double a,
+                    const int mpi_rank, int mpi_size, MPI_Comm comm,
+                    MPI_Info info, const int n_threads, const int dry_run,
+                    const int remap_ids, struct ic_info* ics_metadata);
 
 void write_output_serial(struct engine* e,
                          const struct unit_system* internal_units,
-                         const struct unit_system* snapshot_units, int mpi_rank,
-                         int mpi_size, MPI_Comm comm, MPI_Info info);
+                         const struct unit_system* snapshot_units,
+                         const int mpi_rank, const int mpi_size, MPI_Comm comm,
+                         MPI_Info info);
 
 #endif /* HAVE_HDF5 && WITH_MPI && !HAVE_PARALLEL_HDF5 */
 
diff --git a/src/single_io.c b/src/single_io.c
index 2be4892ae27698d4f6f942aeebe3d4840a55cde3..43694bb419fa264c6141047d28533e751ff568a7 100644
--- a/src/single_io.c
+++ b/src/single_io.c
@@ -235,9 +235,9 @@ void read_array_single(hid_t h_grp, const struct io_props props, size_t N,
  * @todo A better version using HDF5 hyper-slabs to write the file directly from
  * the part array will be written once the structures have been stabilized.
  */
-void write_array_single(const struct engine* e, hid_t grp, char* fileName,
-                        FILE* xmfFile, char* partTypeGroupName,
-                        const struct io_props props, size_t N,
+void write_array_single(const struct engine* e, hid_t grp, const char* fileName,
+                        FILE* xmfFile, const char* partTypeGroupName,
+                        const struct io_props props, const size_t N,
                         const enum lossy_compression_schemes lossy_compression,
                         const struct unit_system* internal_units,
                         const struct unit_system* snapshot_units) {
@@ -296,11 +296,13 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName,
   /* Dataset properties */
   hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);
 
-  /* Set chunk size */
-  h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
-  if (h_err < 0)
-    error("Error while setting chunk size (%llu, %llu) for field '%s'.",
-          chunk_shape[0], chunk_shape[1], props.name);
+  /* Set chunk size if we have some particles */
+  if (N > 0) {
+    h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
+    if (h_err < 0)
+      error("Error while setting chunk size (%llu, %llu) for field '%s'.",
+            chunk_shape[0], chunk_shape[1], props.name);
+  }
 
   /* Are we imposing some form of lossy compression filter? */
   char comp_buffer[32] = "None";
@@ -308,8 +310,8 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName,
     set_hdf5_lossy_compression(&h_prop, &h_type, lossy_compression, props.name,
                                comp_buffer);
 
-  /* Impose GZIP data compression */
-  if (e->snapshot_compression > 0) {
+  /* Impose GZIP and shuffle data compression */
+  if (e->snapshot_compression > 0 && N > 0) {
     h_err = H5Pset_shuffle(h_prop);
     if (h_err < 0)
       error("Error while setting shuffling options for field '%s'.",
@@ -322,9 +324,11 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName,
   }
 
   /* Impose check-sum to verify data corruption */
-  h_err = H5Pset_fletcher32(h_prop);
-  if (h_err < 0)
-    error("Error while setting checksum options for field '%s'.", props.name);
+  if (N > 0) {
+    h_err = H5Pset_fletcher32(h_prop);
+    if (h_err < 0)
+      error("Error while setting checksum options for field '%s'.", props.name);
+  }
 
   /* Create dataset */
   const hid_t h_data = H5Dcreate(grp, props.name, h_type, h_space, H5P_DEFAULT,
@@ -428,18 +432,17 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName,
  * @warning Can not read snapshot distributed over more than 1 file !!!
  * @todo Read snapshots distributed in more than one file.
  */
-void read_ic_single(const char* fileName,
-                    const struct unit_system* internal_units, double dim[3],
-                    struct part** parts, struct gpart** gparts,
-                    struct sink** sinks, struct spart** sparts,
-                    struct bpart** bparts, size_t* Ngas, size_t* Ngparts,
-                    size_t* Ngparts_background, size_t* Nnuparts,
-                    size_t* Nsinks, size_t* Nstars, size_t* Nblackholes,
-                    int* flag_entropy, int with_hydro, int with_gravity,
-                    int with_sink, int with_stars, int with_black_holes,
-                    int with_cosmology, int cleanup_h, int cleanup_sqrt_a,
-                    double h, double a, int n_threads, int dry_run,
-                    int remap_ids, struct ic_info* ics_metadata) {
+void read_ic_single(
+    const char* fileName, const struct unit_system* internal_units,
+    double dim[3], struct part** parts, struct gpart** gparts,
+    struct sink** sinks, struct spart** sparts, struct bpart** bparts,
+    size_t* Ngas, size_t* Ngparts, size_t* Ngparts_background, size_t* Nnuparts,
+    size_t* Nsinks, size_t* Nstars, size_t* Nblackholes, int* flag_entropy,
+    const int with_hydro, const int with_gravity, const int with_sink,
+    const int with_stars, const int with_black_holes, const int with_cosmology,
+    const int cleanup_h, const int cleanup_sqrt_a, const double h,
+    const double a, const int n_threads, const int dry_run, const int remap_ids,
+    struct ic_info* ics_metadata) {
 
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
@@ -821,7 +824,12 @@ void write_output_single(struct engine* e,
   const int with_temperature = e->policy & engine_policy_temperature;
   const int with_fof = e->policy & engine_policy_fof;
   const int with_DM_background = e->s->with_DM_background;
+  const int with_DM = e->s->with_DM;
   const int with_neutrinos = e->s->with_neutrinos;
+  const int with_hydro = (e->policy & engine_policy_hydro) ? 1 : 0;
+  const int with_stars = (e->policy & engine_policy_stars) ? 1 : 0;
+  const int with_black_hole = (e->policy & engine_policy_black_holes) ? 1 : 0;
+  const int with_sink = (e->policy & engine_policy_sinks) ? 1 : 0;
 #ifdef HAVE_VELOCIRAPTOR
   const int with_stf = (e->policy & engine_policy_structure_finding) &&
                        (e->s->gpart_group_data != NULL);
@@ -956,12 +964,21 @@ void write_output_single(struct engine* e,
   }
 
   /* Format things in a Gadget-friendly array */
-  long long N_total[swift_type_count] = {
+  const long long N_total[swift_type_count] = {
       (long long)Ngas_written,   (long long)Ndm_written,
       (long long)Ndm_background, (long long)Nsinks_written,
       (long long)Nstars_written, (long long)Nblackholes_written,
       (long long)Ndm_neutrino};
 
+  /* List what fields to write.
+   * Note that we want to write a 0-size dataset for some species in case
+   * future snapshots contain them (e.g. star formation). */
+  const int to_write[swift_type_count] = {
+      with_hydro, with_DM,         with_DM_background, with_sink,
+      with_stars, with_black_hole, with_neutrinos
+
+  };
+
   /* Open file */
   /* message("Opening file '%s'.", fileName); */
   h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -1053,6 +1070,7 @@ void write_output_single(struct engine* e,
   io_write_attribute_i(h_grp, "ThisFile", 0);
   io_write_attribute_s(h_grp, "SelectOutput", current_selection_name);
   io_write_attribute_i(h_grp, "Virtual", 0);
+  io_write_attribute(h_grp, "CanHaveTypes", INT, to_write, swift_type_count);
 
   if (subsample_any) {
     io_write_attribute_s(h_grp, "OutputType", "SubSampled");
@@ -1081,15 +1099,16 @@ void write_output_single(struct engine* e,
                         e->s->nr_cells, e->s->width, e->nodeID,
                         /*distributed=*/0, subsample, subsample_fraction,
                         e->snapshot_output_count, N_total, global_offsets,
-                        numFields, internal_units, snapshot_units);
+                        to_write, numFields, internal_units, snapshot_units);
   H5Gclose(h_grp);
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if there are (a) no particles of this kind, or (b)
-     * if we have disabled every field of this particle type. */
-    if (numParticles[ptype] == 0 || numFields[ptype] == 0) continue;
+    /* Don't do anything if
+     * (a) there are no particles of this kind in this run, or
+     * (b) we have disabled every field of this particle type. */
+    if (!to_write[ptype] || numFields[ptype] == 0) continue;
 
     /* Add the global information for that particle type to the XMF meta-file */
     xmf_write_groupheader(xmfFile, fileName, /*distributed=*/0,
diff --git a/src/single_io.h b/src/single_io.h
index 3867f9e85a6cd5358132fbcca9f91e21384cd653..96407208dc52ee6433857a1893a2bee31910d65f 100644
--- a/src/single_io.h
+++ b/src/single_io.h
@@ -31,18 +31,17 @@
 struct engine;
 struct unit_system;
 
-void read_ic_single(const char* fileName,
-                    const struct unit_system* internal_units, double dim[3],
-                    struct part** parts, struct gpart** gparts,
-                    struct sink** sinks, struct spart** sparts,
-                    struct bpart** bparts, size_t* Ngas, size_t* Ndm,
-                    size_t* Ndm_background, size_t* Nnuparts, size_t* Nsinks,
-                    size_t* Nstars, size_t* Nblackholes, int* flag_entropy,
-                    int with_hydro, int with_gravity, int with_sinks,
-                    int with_stars, int with_black_holes, int with_cosmology,
-                    int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                    int nr_threads, int dry_run, int remap_ids,
-                    struct ic_info* ics_metadata);
+void read_ic_single(
+    const char* fileName, const struct unit_system* internal_units,
+    double dim[3], struct part** parts, struct gpart** gparts,
+    struct sink** sinks, struct spart** sparts, struct bpart** bparts,
+    size_t* Ngas, size_t* Ndm, size_t* Ndm_background, size_t* Nnuparts,
+    size_t* Nsinks, size_t* Nstars, size_t* Nblackholes, int* flag_entropy,
+    const int with_hydro, const int with_gravity, const int with_sinks,
+    const int with_stars, const int with_black_holes, const int with_cosmology,
+    const int cleanup_h, const int cleanup_sqrt_a, const double h,
+    const double a, const int nr_threads, const int dry_run,
+    const int remap_ids, struct ic_info* ics_metadata);
 
 void write_output_single(struct engine* e,
                          const struct unit_system* internal_units,
diff --git a/src/space.c b/src/space.c
index d2b023d0a717d21a6bab765e90269806b376acfa..00e162aa930b1d5b6c60aff3282199f54fcc9272 100644
--- a/src/space.c
+++ b/src/space.c
@@ -1116,8 +1116,8 @@ void space_init(struct space *s, struct swift_params *params,
                 size_t Nspart, size_t Nbpart, size_t Nnupart, int periodic,
                 int replicate, int remap_ids, int generate_gas_in_ics,
                 int hydro, int self_gravity, int star_formation, int with_sink,
-                int DM_background, int neutrinos, int verbose, int dry_run,
-                int nr_nodes) {
+                int with_DM, int with_DM_background, int neutrinos, int verbose,
+                int dry_run, int nr_nodes) {
 
   /* Clean-up everything */
   bzero(s, sizeof(struct space));
@@ -1131,7 +1131,8 @@ void space_init(struct space *s, struct swift_params *params,
   s->with_hydro = hydro;
   s->with_star_formation = star_formation;
   s->with_sink = with_sink;
-  s->with_DM_background = DM_background;
+  s->with_DM = with_DM;
+  s->with_DM_background = with_DM_background;
   s->with_neutrinos = neutrinos;
   s->nr_parts = Npart;
   s->nr_gparts = Ngpart;
@@ -1192,7 +1193,7 @@ void space_init(struct space *s, struct swift_params *params,
 
   /* Are we generating gas from the DM-only ICs? */
   if (generate_gas_in_ics) {
-    space_generate_gas(s, cosmo, hydro_properties, periodic, DM_background,
+    space_generate_gas(s, cosmo, hydro_properties, periodic, with_DM_background,
                        neutrinos, dim, verbose);
     parts = s->parts;
     gparts = s->gparts;
@@ -1211,7 +1212,7 @@ void space_init(struct space *s, struct swift_params *params,
     error("Value of 'InitialConditions:replicate' (%d) is too small",
           replicate);
   if (replicate > 1) {
-    if (DM_background)
+    if (with_DM_background)
       error("Can't replicate the space if background DM particles are in use.");
     if (neutrinos)
       error("Can't replicate the space if neutrino DM particles are in use.");
diff --git a/src/space.h b/src/space.h
index 8cd32e4e733eba9452f8c69a7faef2bc68c3d52c..60d2b99903e8bec3eadd088e2029d691b51bf2b2 100644
--- a/src/space.h
+++ b/src/space.h
@@ -113,6 +113,9 @@ struct space {
   /*! Are we doing star formation through sink particles? */
   int with_sink;
 
+  /*! Are we running with some regular DM particles? */
+  int with_DM;
+
   /*! Are we running with some DM background particles? */
   int with_DM_background;
 
@@ -358,8 +361,8 @@ void space_init(struct space *s, struct swift_params *params,
                 size_t Nspart, size_t Nbpart, size_t Nnupart, int periodic,
                 int replicate, int remap_ids, int generate_gas_in_ics,
                 int hydro, int gravity, int star_formation, int with_sink,
-                int DM_background, int neutrinos, int verbose, int dry_run,
-                int nr_nodes);
+                int with_DM, int with_DM_background, int neutrinos, int verbose,
+                int dry_run, int nr_nodes);
 void space_sanitize(struct space *s);
 void space_map_cells_pre(struct space *s, int full,
                          void (*fun)(struct cell *c, void *data), void *data);