diff --git a/doc/RTD/source/ParameterFiles/output_selection.rst b/doc/RTD/source/ParameterFiles/output_selection.rst
index 41ae9cbb4f366b99e36da1dcc043cbd01485f593..069869b26fa7341e037fbd292dad1d3c3b50bade 100644
--- a/doc/RTD/source/ParameterFiles/output_selection.rst
+++ b/doc/RTD/source/ParameterFiles/output_selection.rst
@@ -68,6 +68,9 @@ CGS. Entries in the file look like:
     SmoothingLengths_Gas: on  # Co-moving smoothing lengths (FWHM of the kernel) of the particles : a U_L  [ cm ]
     ...
 
+For cosmological simulations, users can optionally add the ``--cosmology`` flag
+to generate the field names appropriate for such a run.
+
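+For example, assuming the standard ``swift`` executable, the cosmological
+version of this file can be generated with a command along these lines::
+
+  ./swift -o output.yml --cosmology
+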
 Users can select the particle fields to output in snapshot using a (separate)
 YAML parameter file. By default, you can define a section `Default` at the
 top level of this file (in the exact same way as the file dumped by using the
@@ -83,9 +86,9 @@ options:
     select_output_on: 1
     select_output: your_select_output_yaml.yml
 
-This field is mostly used to remove unnecessary output by listing them with
-0's. A classic use-case for this feature is a DM-only simulation (pure
-n-body) where all particles have the same mass. Outputting the mass field in
+This facility is mostly used to remove unnecessary output by setting the
+unwanted fields to "off". A classic use-case is a DM-only simulation (pure
+N-body) where all particles have the same mass. Outputting the mass field in
 the snapshots results in extra i/o time and unnecessary waste of disk space.
 The corresponding section of the YAML file would look like:
 
@@ -97,6 +100,21 @@ The corresponding section of the YAML file would look like:
 Entries can simply be copied from the ``output.yml`` generated by the
 ``-o`` runtime flag. 
 
+For convenience, there is also the option to set a default output status for
+all fields of a particular particle type. This can be used, for example, to
+skip an entire particle type in certain snapshots (see below for how to define
+per-snapshot output policies). This is achieved with the special ``Standard``
+field for each particle type:
+
+.. code:: YAML
+
+   BlackHolesOnly:
+     Standard_Gas: off
+     Standard_DM: off
+     Standard_DMBackground: off
+     Standard_Stars: off
+     Standard_BH: on  # Not strictly necessary, on is already the default
+
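+Individual fields can still be switched back on after such a type-wide
+default has been set, since per-field entries take precedence over the
+``Standard`` value. As a sketch, using field names from the generated
+``output.yml``:
+
+.. code:: YAML
+
+   BlackHolesOnly:
+     Standard_Gas: off
+     Coordinates_Gas: on   # written despite the type-wide "off" default
+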
 
 Combining Output Lists and Output Selection
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/examples/main.c b/examples/main.c
index 67daf5db0808d3cd1791bd122a8f39c660eaabb8..79d917690921e199832bfaa2cab8d896f7580777 100644
--- a/examples/main.c
+++ b/examples/main.c
@@ -1161,8 +1161,7 @@ int main(int argc, char *argv[]) {
     /* Verify that the fields to dump actually exist - this must be done after
      * space_init so we know whether or not we have gas particles. */
     if (myrank == 0)
-      io_check_output_fields(output_options->select_output, N_total,
-                             with_cosmology);
+      io_check_output_fields(output_options, N_total, with_cosmology);
 
     /* Say a few nice things about the space we just created. */
     if (myrank == 0) {
diff --git a/src/common_io.c b/src/common_io.c
index a877cb9fec1f72a94157d0cf67c4f673a1c2b7f9..4cf4c5c5bba6b59dd81563dcc3bd24dd30d214bf 100644
--- a/src/common_io.c
+++ b/src/common_io.c
@@ -884,6 +884,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                            const int distributed,
                            const long long global_counts[swift_type_count],
                            const long long global_offsets[swift_type_count],
+                           const int num_fields[swift_type_count],
                            const struct unit_system* internal_units,
                            const struct unit_system* snapshot_units) {
 
@@ -1108,7 +1109,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
         H5Gcreate(h_grp, "Counts", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
     if (h_grp_counts < 0) error("Error while creating counts sub-group");
 
-    if (global_counts[swift_type_gas] > 0) {
+    if (global_counts[swift_type_gas] > 0 && num_fields[swift_type_gas] > 0) {
       io_write_array(h_grp_files, nr_cells, files, INT, "PartType0", "files");
       io_write_array(h_grp_offsets, nr_cells, offset_part, LONGLONG,
                      "PartType0", "offsets");
@@ -1116,7 +1117,8 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "counts");
     }
 
-    if (global_counts[swift_type_dark_matter] > 0) {
+    if (global_counts[swift_type_dark_matter] > 0 &&
+        num_fields[swift_type_dark_matter] > 0) {
       io_write_array(h_grp_files, nr_cells, files, INT, "PartType1", "files");
       io_write_array(h_grp_offsets, nr_cells, offset_gpart, LONGLONG,
                      "PartType1", "offsets");
@@ -1124,7 +1126,8 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "counts");
     }
 
-    if (global_counts[swift_type_dark_matter_background] > 0) {
+    if (global_counts[swift_type_dark_matter_background] > 0 &&
+        num_fields[swift_type_dark_matter_background] > 0) {
       io_write_array(h_grp_files, nr_cells, files, INT, "PartType2", "files");
       io_write_array(h_grp_offsets, nr_cells, offset_background_gpart, LONGLONG,
                      "PartType2", "offsets");
@@ -1132,7 +1135,8 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "PartType2", "counts");
     }
 
-    if (global_counts[swift_type_stars] > 0) {
+    if (global_counts[swift_type_stars] > 0 &&
+        num_fields[swift_type_stars] > 0) {
       io_write_array(h_grp_files, nr_cells, files, INT, "PartType4", "files");
       io_write_array(h_grp_offsets, nr_cells, offset_spart, LONGLONG,
                      "PartType4", "offsets");
@@ -1140,7 +1144,8 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                      "counts");
     }
 
-    if (global_counts[swift_type_black_hole] > 0) {
+    if (global_counts[swift_type_black_hole] > 0 &&
+        num_fields[swift_type_black_hole] > 0) {
       io_write_array(h_grp_files, nr_cells, files, INT, "PartType5", "files");
       io_write_array(h_grp_offsets, nr_cells, offset_bpart, LONGLONG,
                      "PartType5", "offsets");
@@ -2269,152 +2274,168 @@ void io_collect_gparts_background_to_write(
 }
 
 /**
- * @brief Verify the io parameter file
+ * @brief Verify that the output selection file is valid.
  *
- * @param params The #swift_params instance corresponding to the select_output
- *               file.
+ * @param output_options The #output_options instance corresponding to the
+ *                       select_output file.
  * @param N_total The total number of each particle type.
- * @param with_cosmolgy Ran with cosmology?
+ * @param with_cosmology Ran with cosmology?
  */
-void io_check_output_fields(struct swift_params* params,
+void io_check_output_fields(struct output_options* output_options,
                             const long long N_total[swift_type_count],
-                            int with_cosmology) {
+                            const int with_cosmology) {
 
-  /* Loop over each section */
+  const int MAX_NUM_PTYPE_FIELDS = 100;
+
+  /* Parameter struct for the output options */
+  struct swift_params* params = output_options->select_output;
+
+  /* Get all possible outputs per particle type */
+  int ptype_num_fields_total[swift_type_count] = {0};
+  struct io_props field_list[swift_type_count][MAX_NUM_PTYPE_FIELDS];
+
+  for (int ptype = 0; ptype < swift_type_count; ptype++)
+    ptype_num_fields_total[ptype] =
+        get_ptype_fields(ptype, field_list[ptype], with_cosmology);
+
+  /* Check whether we have a `Default` section */
+  int have_default = 0;
+
+  /* Loop over each section, i.e. different class of output */
   for (int section_id = 0; section_id < params->sectionCount; section_id++) {
+
+    /* Get the name of current (selection) section, without a trailing colon */
     char section_name[FIELD_BUFFER_SIZE];
-    sprintf(section_name, "%s", params->section[section_id].name);
+    strcpy(section_name, params->section[section_id].name);
+    section_name[strlen(section_name) - 1] = 0;
+
+    /* Is this the `Default` section? */
+    if (strcmp(section_name, select_output_header_default_name) == 0)
+      have_default = 1;
+
+    /* How many fields should each ptype write by default? */
+    int ptype_num_fields_to_write[swift_type_count];
+
+    /* What is the default writing status for each ptype (on/off)? */
+    int ptype_default_write_status[swift_type_count];
+
+    /* Initialise section-specific writing counters for each particle type.
+     * If default is 'write', then we start from the total to deduct any fields
+     * that are switched off. If the default is 'off', we have to start from
+     * zero and then count upwards for each field that is switched back on. */
+    for (int ptype = 0; ptype < swift_type_count; ptype++) {
+
+      /* Internally also verifies that the default level is allowed */
+      const enum compression_levels compression_level_current_default =
+          output_options_get_ptype_default(params, section_name,
+                                           (enum part_type)ptype);
+
+      if (compression_level_current_default == compression_do_not_write) {
+        ptype_default_write_status[ptype] = 0;
+        ptype_num_fields_to_write[ptype] = 0;
+      } else {
+        ptype_default_write_status[ptype] = 1;
+        ptype_num_fields_to_write[ptype] = ptype_num_fields_total[ptype];
+      }
+
+    } /* ends loop over particle types */
 
     /* Loop over each parameter */
     for (int param_id = 0; param_id < params->paramCount; param_id++) {
 
+      /* Full name of the parameter to check */
       const char* param_name = params->data[param_id].name;
 
-      char comparison_section_name[FIELD_BUFFER_SIZE];
-
-      /* Skip if wrong section */
-      sprintf(comparison_section_name, "%s", "SelectOutput:");
-      if (strstr(param_name, comparison_section_name) != NULL) {
+      /* Check whether the file still contains the old, no longer supported
+       * top-level 'SelectOutput' section */
+      if (strstr(param_name, "SelectOutput:") != NULL) {
         error(
             "Output selection files no longer require the use of top level "
             "SelectOutput; see the documentation for changes.");
-        continue;
       }
 
-      /* Skip if top-level section */
-      sprintf(comparison_section_name, "%s", section_name);
-      if (strstr(param_name, comparison_section_name) == NULL) continue;
-
-      /* Loop over all particle types to check the fields */
-      int found = 0;
-      for (int ptype = 0; ptype < swift_type_count; ptype++) {
-
-        /* Skip if wrong particle type */
-        sprintf(comparison_section_name, "_%s", part_type_names[ptype]);
-        if (strstr(param_name, section_name) == NULL) continue;
-
-        int num_fields = 0;
-        struct io_props list[100];
-
-        /* Don't do anything if no particle of this kind */
-        if (N_total[ptype] == 0) continue;
-
-        /* Gather particle fields from the particle structures */
-        switch (ptype) {
-
-          case swift_type_gas:
-            hydro_write_particles(NULL, NULL, list, &num_fields);
-            num_fields += chemistry_write_particles(NULL, list + num_fields);
-            num_fields +=
-                cooling_write_particles(NULL, NULL, list + num_fields, NULL);
-            num_fields += tracers_write_particles(NULL, NULL, list + num_fields,
-                                                  with_cosmology);
-            num_fields +=
-                star_formation_write_particles(NULL, NULL, list + num_fields);
-            num_fields += fof_write_parts(NULL, NULL, list + num_fields);
-            num_fields +=
-                velociraptor_write_parts(NULL, NULL, list + num_fields);
-            break;
-
-          case swift_type_dark_matter:
-            darkmatter_write_particles(NULL, list, &num_fields);
-            num_fields += fof_write_gparts(NULL, list + num_fields);
-            num_fields += velociraptor_write_gparts(NULL, list + num_fields);
-            break;
-
-          case swift_type_dark_matter_background:
-            darkmatter_write_particles(NULL, list, &num_fields);
-            num_fields += fof_write_gparts(NULL, list + num_fields);
-            num_fields += velociraptor_write_gparts(NULL, list + num_fields);
-            break;
-
-          case swift_type_stars:
-            stars_write_particles(NULL, list, &num_fields, with_cosmology);
-            num_fields += chemistry_write_sparticles(NULL, list + num_fields);
-            num_fields += tracers_write_sparticles(NULL, list + num_fields,
-                                                   with_cosmology);
-            num_fields +=
-                star_formation_write_sparticles(NULL, list + num_fields);
-            num_fields += fof_write_sparts(NULL, list + num_fields);
-            num_fields += velociraptor_write_sparts(NULL, list + num_fields);
-            break;
-
-          case swift_type_black_hole:
-            black_holes_write_particles(NULL, list, &num_fields,
-                                        with_cosmology);
-            num_fields += chemistry_write_bparticles(NULL, list + num_fields);
-            num_fields += fof_write_bparts(NULL, list + num_fields);
-            num_fields += velociraptor_write_bparts(NULL, list + num_fields);
-            break;
-
-          default:
-            error("Particle Type %d not yet supported. Aborting", ptype);
-        }
-
-        /* For this particle type, loop over each possible output field */
-        for (int field_id = 0; field_id < num_fields; field_id++) {
-          char field_name[PARSER_MAX_LINE_SIZE];
-          /* Note that section_name includes a : */
-          sprintf(field_name, "%s%.*s_%s", section_name, FIELD_BUFFER_SIZE,
-                  list[field_id].name, part_type_names[ptype]);
-
-          if (strcmp(param_name, field_name) == 0) {
-            found = 1;
-
-            /* Perform a correctness check on the _value_ of that
-             * parameter */
-            char field_value[FIELD_BUFFER_SIZE];
-            parser_get_param_string(params, field_name, &field_value[0]);
-
-            int value_is_valid = 0;
-
-            for (int allowed_value_index = 0;
-                 allowed_value_index < compression_level_count;
-                 allowed_value_index++) {
-              if (strcmp(field_value,
-                         compression_level_names[allowed_value_index]) == 0) {
-                value_is_valid = 1;
-                break;
-              }
-            }
-
-            if (value_is_valid) {
-              /* Found value and it is correct, so move to the next one. */
-              break;
-            } else {
-              error("Choice of output selection parameter %s:%s is invalid.",
-                    field_name, field_value);
-            }
-          }
-        }
+      /* Skip if the parameter belongs to another output class or is a
+       * 'Standard' parameter */
+      if (strstr(param_name, section_name) == NULL) continue;
+      if (strstr(param_name, ":Standard_") != NULL) continue;
+
+      /* Get the particle type for current parameter
+       * (raises an error if it could not determine it) */
+      const int param_ptype = get_param_ptype(param_name);
+
+      /* Issue a warning if this parameter does not pertain to any of the
+       * known fields from this ptype. */
+      int field_id = 0;
+      char field_name[PARSER_MAX_LINE_SIZE];
+      for (field_id = 0; field_id < ptype_num_fields_total[param_ptype];
+           field_id++) {
+
+        sprintf(field_name, "%s:%.*s_%s", section_name, FIELD_BUFFER_SIZE,
+                field_list[param_ptype][field_id].name,
+                part_type_names[param_ptype]);
+
+        if (strcmp(param_name, field_name) == 0) break;
       }
-      if (!found)
+
+      if (field_id == ptype_num_fields_total[param_ptype])
         message(
             "WARNING: Trying to change behaviour of field '%s' (read from "
-            "'%s') that does not exist. This may because you are not running "
-            "with all of the physics that you compiled the code with.",
+            "'%s') that does not exist. This may be because you are not "
+            "running with all of the physics that you compiled the code with.",
             param_name, params->fileName);
+
+      /* Perform a correctness check on the _value_ of the parameter */
+      char field_value[FIELD_BUFFER_SIZE];
+      parser_get_param_string(params, param_name, field_value);
+
+      int value_id = 0;
+      for (value_id = 0; value_id < compression_level_count; value_id++)
+        if (strcmp(field_value, compression_level_names[value_id]) == 0) break;
+
+      if (value_id == compression_level_count)
+        error("Choice of output selection parameter %s ('%s') is invalid.",
+              field_name, field_value);
+
+      /* Adjust number of fields to be written for param_ptype, if this field's
+       * status is different from default */
+      const int is_on =
+          strcmp(field_value,
+                 compression_level_names[compression_do_not_write]) != 0;
+
+      if (is_on && !ptype_default_write_status[param_ptype]) {
+        /* Field should be written even though the default for this ptype is
+         * off: increase the field count */
+        ptype_num_fields_to_write[param_ptype] += 1;
+      }
+      if (!is_on && ptype_default_write_status[param_ptype]) {
+        /* Field should not be written even though the default for this ptype
+         * is on: decrease the field count */
+        ptype_num_fields_to_write[param_ptype] -= 1;
+      }
+    } /* ends loop over parameters */
+
+    /* Second loop over ptypes, to store the number of fields to write */
+    for (int ptype = 0; ptype < swift_type_count; ptype++) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Sanity check: is the number of fields to write non-negative? */
+      if (ptype_num_fields_to_write[ptype] < 0)
+        error(
+            "We seem to have subtracted too many fields for particle "
+            "type %d in output class %s (total to write is %d)",
+            ptype, section_name, ptype_num_fields_to_write[ptype]);
+#endif
+      output_options->num_fields_to_write[section_id][ptype] =
+          ptype_num_fields_to_write[ptype];
     }
+  } /* Ends loop over sections, for different output classes */
+
+  /* Add the field numbers for the `Default` output class, in case it was
+   * not given explicitly in the selection file */
+  if (!have_default) {
+    const int default_id = output_options->select_output->sectionCount;
+    for (int ptype = 0; ptype < swift_type_count; ptype++)
+      output_options->num_fields_to_write[default_id][ptype] =
+          ptype_num_fields_total[ptype];
   }
 }
 
@@ -2437,57 +2458,8 @@ void io_write_output_field_parameter(const char* filename, int with_cosmology) {
   fprintf(file, "Default:\n");
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    int num_fields = 0;
     struct io_props list[100];
-
-    /* Write particle fields from the particle structure */
-    switch (ptype) {
-
-      case swift_type_gas:
-        hydro_write_particles(NULL, NULL, list, &num_fields);
-        num_fields += chemistry_write_particles(NULL, list + num_fields);
-        num_fields +=
-            cooling_write_particles(NULL, NULL, list + num_fields, NULL);
-        num_fields += tracers_write_particles(NULL, NULL, list + num_fields,
-                                              with_cosmology);
-        num_fields +=
-            star_formation_write_particles(NULL, NULL, list + num_fields);
-        num_fields += fof_write_parts(NULL, NULL, list + num_fields);
-        num_fields += velociraptor_write_parts(NULL, NULL, list + num_fields);
-        break;
-
-      case swift_type_dark_matter:
-        darkmatter_write_particles(NULL, list, &num_fields);
-        num_fields += fof_write_gparts(NULL, list + num_fields);
-        num_fields += velociraptor_write_gparts(NULL, list + num_fields);
-        break;
-
-      case swift_type_dark_matter_background:
-        darkmatter_write_particles(NULL, list, &num_fields);
-        num_fields += fof_write_gparts(NULL, list + num_fields);
-        num_fields += velociraptor_write_gparts(NULL, list + num_fields);
-        break;
-
-      case swift_type_stars:
-        stars_write_particles(NULL, list, &num_fields, with_cosmology);
-        num_fields += chemistry_write_sparticles(NULL, list + num_fields);
-        num_fields +=
-            tracers_write_sparticles(NULL, list + num_fields, with_cosmology);
-        num_fields += star_formation_write_sparticles(NULL, list + num_fields);
-        num_fields += fof_write_sparts(NULL, list + num_fields);
-        num_fields += velociraptor_write_sparts(NULL, list + num_fields);
-        break;
-
-      case swift_type_black_hole:
-        black_holes_write_particles(NULL, list, &num_fields, with_cosmology);
-        num_fields += chemistry_write_bparticles(NULL, list + num_fields);
-        num_fields += fof_write_bparts(NULL, list + num_fields);
-        num_fields += velociraptor_write_bparts(NULL, list + num_fields);
-        break;
-
-      default:
-        break;
-    }
+    int num_fields = get_ptype_fields(ptype, list, with_cosmology);
 
     if (num_fields == 0) continue;
 
@@ -2587,3 +2559,97 @@ void io_get_snapshot_filename(char filename[1024], char xmf_filename[1024],
     sprintf(xmf_filename, "%s.xmf", basename);
   }
 }
+
+/**
+ * @brief Return the number and names of all output fields of a given ptype.
+ *
+ * @param ptype The index of the particle type under consideration.
+ * @param list An io_props list that will hold the individual fields.
+ * @param with_cosmology Use cosmological name variant?
+ *
+ * @return The total number of fields that can be written for the ptype.
+ */
+int get_ptype_fields(const int ptype, struct io_props* list,
+                     const int with_cosmology) {
+
+  int num_fields = 0;
+
+  switch (ptype) {
+
+    case swift_type_gas:
+      hydro_write_particles(NULL, NULL, list, &num_fields);
+      num_fields += chemistry_write_particles(NULL, list + num_fields);
+      num_fields +=
+          cooling_write_particles(NULL, NULL, list + num_fields, NULL);
+      num_fields += tracers_write_particles(NULL, NULL, list + num_fields,
+                                            with_cosmology);
+      num_fields +=
+          star_formation_write_particles(NULL, NULL, list + num_fields);
+      num_fields += fof_write_parts(NULL, NULL, list + num_fields);
+      num_fields += velociraptor_write_parts(NULL, NULL, list + num_fields);
+      break;
+
+    case swift_type_dark_matter:
+      darkmatter_write_particles(NULL, list, &num_fields);
+      num_fields += fof_write_gparts(NULL, list + num_fields);
+      num_fields += velociraptor_write_gparts(NULL, list + num_fields);
+      break;
+
+    case swift_type_dark_matter_background:
+      darkmatter_write_particles(NULL, list, &num_fields);
+      num_fields += fof_write_gparts(NULL, list + num_fields);
+      num_fields += velociraptor_write_gparts(NULL, list + num_fields);
+      break;
+
+    case 3:
+      /* Particle type 3 has no output fields */
+      break;
+
+    case swift_type_stars:
+      stars_write_particles(NULL, list, &num_fields, with_cosmology);
+      num_fields += chemistry_write_sparticles(NULL, list + num_fields);
+      num_fields +=
+          tracers_write_sparticles(NULL, list + num_fields, with_cosmology);
+      num_fields += star_formation_write_sparticles(NULL, list + num_fields);
+      num_fields += fof_write_sparts(NULL, list + num_fields);
+      num_fields += velociraptor_write_sparts(NULL, list + num_fields);
+      break;
+
+    case swift_type_black_hole:
+      black_holes_write_particles(NULL, list, &num_fields, with_cosmology);
+      num_fields += chemistry_write_bparticles(NULL, list + num_fields);
+      num_fields += fof_write_bparts(NULL, list + num_fields);
+      num_fields += velociraptor_write_bparts(NULL, list + num_fields);
+      break;
+
+    default:
+      error("Particle Type %d not yet supported. Aborting", ptype);
+  }
+
+  return num_fields;
+}
+
+/**
+ * @brief Return the particle type code of a select_output parameter.
+ *
+ * The type is inferred from the particle type name at the end of the
+ * parameter name (e.g. "..._Gas").
+ *
+ * @param name The name of the parameter under consideration.
+ *
+ * @return The (integer) particle type of the parameter.
+ */
+int get_param_ptype(const char* name) {
+
+  const int name_len = strlen(name);
+
+  for (int ptype = 0; ptype < swift_type_count; ptype++) {
+    const int ptype_name_len = strlen(part_type_names[ptype]);
+    if (name_len >= ptype_name_len &&
+        strcmp(&name[name_len - ptype_name_len], part_type_names[ptype]) == 0)
+      return ptype;
+  }
+
+  /* If we get here, we could not match the name, so something's gone wrong. */
+  error("Could not determine the particle type for parameter '%s'.", name);
+
+  /* We can never get here, but the compiler may complain if we don't return
+   * an int after promising to do so... */
+  return -1;
+}
diff --git a/src/common_io.h b/src/common_io.h
index 9d9512b18688ba12f0b2a19dd94e8b21ea59a6a8..2fe5db82cbe2d701dd2d777286e8072593a0c666 100644
--- a/src/common_io.h
+++ b/src/common_io.h
@@ -21,11 +21,10 @@
 #define SWIFT_COMMON_IO_H
 
 /* Config parameters. */
-#include "../config.h"
+#include "config.h"
 
 /* Local includes. */
 #include "part_type.h"
-#include "units.h"
 
 #define FIELD_BUFFER_SIZE 64
 #define DESCRIPTION_BUFFER_SIZE 512
@@ -44,6 +43,8 @@ struct xpart;
 struct io_props;
 struct engine;
 struct threadpool;
+struct output_options;
+struct unit_system;
 
 /**
  * @brief The different types of data used in the GADGET IC files.
@@ -66,6 +67,9 @@ enum IO_DATA_TYPE {
 
 #if defined(HAVE_HDF5)
 
+/* Library header */
+#include <hdf5.h>
+
 hid_t io_hdf5_type(enum IO_DATA_TYPE type);
 
 hsize_t io_get_number_element_in_attribute(hid_t attr);
@@ -104,6 +108,7 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3], const double dim[3],
                            const int distributed,
                            const long long global_counts[swift_type_count],
                            const long long global_offsets[swift_type_count],
+                           const int num_fields[swift_type_count],
                            const struct unit_system* internal_units,
                            const struct unit_system* snapshot_units);
 
@@ -118,7 +123,7 @@ void io_copy_temp_buffer(void* temp, const struct engine* e,
                          const struct unit_system* internal_units,
                          const struct unit_system* snapshot_units);
 
-#endif /* defined HDF5 */
+#endif /* HAVE_HDF5 */
 
 size_t io_sizeof_type(enum IO_DATA_TYPE type);
 int io_is_double_precision(enum IO_DATA_TYPE type);
@@ -167,8 +172,9 @@ void io_duplicate_black_holes_gparts(struct threadpool* tp,
                                      struct gpart* const gparts, size_t Nstars,
                                      size_t Ndm);
 
-void io_check_output_fields(struct swift_params* params,
-                            const long long N_total[3], int with_cosmology);
+void io_check_output_fields(struct output_options* output_options,
+                            const long long N_total[swift_type_count],
+                            const int with_cosmology);
 
 void io_write_output_field_parameter(const char* filename, int with_cosmology);
 
@@ -180,4 +186,8 @@ void io_get_snapshot_filename(char filename[1024], char xmf_filename[1024],
                               const int stf_count, const int snap_count,
                               const char* subdir, const char* basename);
 
+int get_ptype_fields(const int ptype, struct io_props* list,
+                     const int with_cosmology);
+int get_param_ptype(const char* name);
+
 #endif /* SWIFT_COMMON_IO_H */
diff --git a/src/distributed_io.c b/src/distributed_io.c
index 84a1002329edb09c11a6de3b9b72e8a03554925f..56d5b2731a38d7364cd5bcd08c377292a2177984 100644
--- a/src/distributed_io.c
+++ b/src/distributed_io.c
@@ -383,14 +383,21 @@ void write_output_distributed(struct engine* e,
   strftime(snapshot_date, 64, "%T %F %Z", timeinfo);
   io_write_attribute_s(h_grp, "Snapshot date", snapshot_date);
 
-  /* GADGET-2 legacy values */
-  /* Number of particles of each type */
+  /* GADGET-2 legacy values: Number of particles of each type */
   unsigned int numParticles[swift_type_count] = {0};
   unsigned int numParticlesHighWord[swift_type_count] = {0};
+
+  /* Total number of fields to write per ptype */
+  int numFields[swift_type_count] = {0};
+
   for (int ptype = 0; ptype < swift_type_count; ++ptype) {
     numParticles[ptype] = (unsigned int)N_total[ptype];
     numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
+
+    numFields[ptype] = output_options_get_num_fields_to_write(
+        output_options, current_selection_name, ptype);
   }
+
   io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N, swift_type_count);
   io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
                      swift_type_count);
@@ -424,15 +431,16 @@ void write_output_distributed(struct engine* e,
   /* Write the location of the particles in the arrays */
   io_write_cell_offsets(h_grp, e->s->cdim, e->s->dim, e->s->pos_dithering,
                         e->s->cells_top, e->s->nr_cells, e->s->width, mpi_rank,
-                        /*distributed=*/1, N_total, global_offsets,
+                        /*distributed=*/1, N_total, global_offsets, numFields,
                         internal_units, snapshot_units);
   H5Gclose(h_grp);
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if no particle of this kind */
-    if (numParticles[ptype] == 0) continue;
+    /* Don't do anything if there are (a) no particles of this kind, or (b)
+     * if we have disabled every field of this particle type. */
+    if (numParticles[ptype] == 0 || numFields[ptype] == 0) continue;
 
     /* Open the particle group in the file */
     char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
@@ -452,6 +460,7 @@ void write_output_distributed(struct engine* e,
 
     /* Write the number of particles as an attribute */
     io_write_attribute_l(h_grp, "NumberOfParticles", N[ptype]);
+    io_write_attribute_i(h_grp, "NumberOfFields", numFields[ptype]);
 
     int num_fields = 0;
     struct io_props list[100];
@@ -717,19 +726,33 @@ void write_output_distributed(struct engine* e,
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
-    /* Write everything that is not cancelled */
+    /* Did the user specify a non-standard default for the entire particle
+     * type? */
+    const enum compression_levels compression_level_current_default =
+        output_options_get_ptype_default(output_options->select_output,
+                                         current_selection_name,
+                                         (enum part_type)ptype);
 
+    /* Write everything that is not cancelled */
+    int num_fields_written = 0;
     for (int i = 0; i < num_fields; ++i) {
 
       /* Did the user cancel this field? */
       const int should_write = output_options_should_write_field(
           output_options, current_selection_name, list[i].name,
-          (enum part_type)ptype);
+          (enum part_type)ptype, compression_level_current_default);
 
-      if (should_write)
+      if (should_write) {
         write_distributed_array(e, h_grp, fileName, partTypeGroupName, list[i],
                                 Nparticles, internal_units, snapshot_units);
+        num_fields_written++;
+      }
     }
+#ifdef SWIFT_DEBUG_CHECKS
+    if (num_fields_written != numFields[ptype])
+      error("Wrote %d fields for particle type %s, but expected to write %d.",
+            num_fields_written, part_type_names[ptype], numFields[ptype]);
+#endif
 
     /* Free temporary arrays */
     if (parts_written) swift_free("parts_written", parts_written);
diff --git a/src/hydro_parameters.h b/src/hydro_parameters.h
index 852e510c1a46fb72bf1f949dd3dea753aeabe75a..ddc9366c74c5bda58985424d51a8616c9ddc416a 100644
--- a/src/hydro_parameters.h
+++ b/src/hydro_parameters.h
@@ -28,6 +28,9 @@
 /* Config parameters. */
 #include "../config.h"
 
+/* Local includes */
+#include "parser.h"
+
 /* Import the right hydro header */
 #if defined(MINIMAL_SPH)
 #include "./hydro/Minimal/hydro_parameters.h"
diff --git a/src/io_properties.h b/src/io_properties.h
index 450b5b6621a05393c80c3aa7f0550588b0ede617..dc84c0ed1aadd03e4e3b4f02602732062395af3f 100644
--- a/src/io_properties.h
+++ b/src/io_properties.h
@@ -27,6 +27,7 @@
 #include "error.h"
 #include "inline.h"
 #include "part.h"
+#include "units.h"
 
 /* Standard includes. */
 #include <string.h>
diff --git a/src/output_list.h b/src/output_list.h
index 7f8cfc14a5a651896a25bbde80ac4a0ecd1a46cb..4b9a6aa77d40880430dab920cef53f7ba66da93b 100644
--- a/src/output_list.h
+++ b/src/output_list.h
@@ -20,16 +20,19 @@
 #define SWIFT_OUTPUT_LIST_H
 
 /* Config parameters. */
-#include "../config.h"
+#include "config.h"
 
-/* Local includes */
+/* Local headers */
 #include "common_io.h"
-#include "cosmology.h"
-
-#define OUTPUT_LIST_MAX_NUM_OF_SELECT_OUTPUT_STYLES 8
+#include "timeline.h"
 
+/* Pre-declarations */
+struct cosmology;
 struct engine;
 
+/*! Maximal number of output lists */
+#define OUTPUT_LIST_MAX_NUM_OF_SELECT_OUTPUT_STYLES 8
+
 /**
  * @brief the different output_list type
  */
diff --git a/src/output_options.c b/src/output_options.c
index 23938223767213c3e3ff37c92c991cf91072c2dc..128fae598589573974a1aa5d1d610a98196ac015 100644
--- a/src/output_options.c
+++ b/src/output_options.c
@@ -20,6 +20,7 @@
 
 /* Some standard headers. */
 #include <stdlib.h>
+#include <string.h>
 
 /* MPI headers. */
 #ifdef WITH_MPI
@@ -30,9 +31,9 @@
 #include "output_options.h"
 
 /* Local headers. */
+#include "common_io.h"
+#include "error.h"
 #include "parser.h"
-#include "part_type.h"
-#include "swift.h"
 
 /* Compression level names. */
 const char* compression_level_names[compression_level_count] = {
@@ -125,13 +126,17 @@ void output_options_struct_restore(struct output_options* output_options,
  * @param field_name pointer to a char array containing the name of the
  *        relevant field.
  * @param part_type integer particle type
+ * @param compression_level_current_default The default output strategy
+ *        based on the snapshot_type and part_type.
  *
  * @return should_write integer determining whether this field should be
  *         written
  **/
-int output_options_should_write_field(struct output_options* output_options,
-                                      char* snapshot_type, char* field_name,
-                                      enum part_type part_type) {
+int output_options_should_write_field(
+    const struct output_options* output_options, const char* snapshot_type,
+    const char* field_name, const enum part_type part_type,
+    const enum compression_levels compression_level_current_default) {
+
   /* Full name for the field path */
   char field[PARSER_MAX_LINE_SIZE];
   sprintf(field, "%.*s:%.*s_%s", FIELD_BUFFER_SIZE, snapshot_type,
@@ -140,7 +145,7 @@ int output_options_should_write_field(struct output_options* output_options,
   char compression_level[FIELD_BUFFER_SIZE];
   parser_get_opt_param_string(
       output_options->select_output, field, compression_level,
-      compression_level_names[compression_level_default]);
+      compression_level_names[compression_level_current_default]);
 
   int should_write = strcmp(compression_level_names[compression_do_not_write],
                             compression_level);
@@ -154,3 +159,102 @@ int output_options_should_write_field(struct output_options* output_options,
 
   return should_write;
 }
+
+/**
+ * @brief Return the default output strategy of a given particle type.
+ *
+ * This can only be "on" or "off". No lossy compression strategy can be
+ * applied at the level of an entire particle type.
+ *
+ * @param output_params The parsed select output file.
+ * @param snapshot_type The type of snapshot we are writing.
+ * @param part_type The #part_type we are considering.
+ *
+ * @return The default output status of this particle type, as a
+ *         #compression_levels entry (write losslessly or do not write).
+ */
+enum compression_levels output_options_get_ptype_default(
+    struct swift_params* output_params, const char* snapshot_type,
+    const enum part_type part_type) {
+
+  /* Full name for the default path */
+  char field[PARSER_MAX_LINE_SIZE];
+  sprintf(field, "%.*s:Standard_%s", FIELD_BUFFER_SIZE, snapshot_type,
+          part_type_names[part_type]);
+
+  char compression_level[FIELD_BUFFER_SIZE];
+  parser_get_opt_param_string(
+      output_params, field, compression_level,
+      compression_level_names[compression_level_default]);
+
+  /* Need to find out which of the entries this corresponds to... */
+  int level_index;
+  for (level_index = 0; level_index < compression_level_count; level_index++) {
+    if (!strcmp(compression_level_names[level_index], compression_level)) break;
+  }
+
+  /* Make sure that the supplied default option is either on or off, not a
+   * compression strategy (these should only be set on a per-field basis) */
+  if (!(level_index == compression_do_not_write ||
+        level_index == compression_write_lossless))
+    error(
+        "A lossy default compression strategy was specified for snapshot "
+        "type %s and particle type %d. This is not allowed; lossy "
+        "compression must be set on a field-by-field basis.",
+        snapshot_type, part_type);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check whether we could translate the level string to a known entry. */
+  if (level_index >= compression_level_count)
+    error(
+        "Could not resolve compression level \"%s\" as default compression "
+        "level of particle type %s in snapshot type %s.",
+        compression_level, part_type_names[part_type], snapshot_type);
+
+  message(
+      "Determined default compression level of %s in snapshot type %s "
+      "as \"%s\", corresponding to level code %d",
+      part_type_names[part_type], snapshot_type, compression_level,
+      level_index);
+#endif
+
+  return (enum compression_levels)level_index;
+}
+
+/**
+ * @brief Return the number of fields to be written for a ptype.
+ *
+ * @param output_options The output_options struct.
+ * @param selection_name The current output selection name.
+ * @param ptype The particle type index.
+ *
+ * @return The number of fields to be written for this selection and ptype.
+ */
+int output_options_get_num_fields_to_write(
+    const struct output_options* output_options, const char* selection_name,
+    const int ptype) {
+
+  /* Get the ID of the output selection in the structure */
+  int selection_id =
+      parser_get_section_id(output_options->select_output, selection_name);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* The only situation where we might legitimately not find the selection
+   * name is if it is the default. Everything else means trouble. */
+  if (strcmp(selection_name, select_output_header_default_name) &&
+      selection_id < 0)
+    error(
+        "Output selection '%s' could not be located in output_options "
+        "structure. Please investigate.",
+        selection_name);
+
+  /* While we're at it, make sure the selection ID is not impossibly high */
+  if (selection_id >= output_options->select_output->sectionCount)
+    error(
+        "Output selection '%s' was apparently located in index %d of the "
+        "output_options structure, but this only has %d sections.",
+        selection_name, selection_id,
+        output_options->select_output->sectionCount);
+#endif
+
+  /* Special treatment for absent `Default` section */
+  if (selection_id < 0)
+    selection_id = output_options->select_output->sectionCount;
+
+  return output_options->num_fields_to_write[selection_id][ptype];
+}
diff --git a/src/output_options.h b/src/output_options.h
index 02ba151b1103fe09a50740c99a693ded0c4eec1c..72c23239a1c3338e0546dd446f06a80c93a95f0c 100644
--- a/src/output_options.h
+++ b/src/output_options.h
@@ -19,11 +19,14 @@
 #ifndef SWIFT_OUTPUT_OPTIONS_H
 #define SWIFT_OUTPUT_OPTIONS_H
 
-#include "config.h"
-#include "parser.h"
+/* Local headers. */
+#include "output_list.h"
 #include "part_type.h"
+#include "restart.h"
 
-/* Compression level names */
+/**
+ * @brief Compression levels for snapshot fields
+ */
 enum compression_levels {
   compression_do_not_write = 0,
   compression_write_lossless,
@@ -34,10 +37,10 @@ enum compression_levels {
   compression_level_count,
 };
 
-/* Default value for SelectOutput */
+/*! Default value for SelectOutput */
 #define compression_level_default compression_write_lossless
 
-/* Default name for the SelectOutput header */
+/*! Default name for the SelectOutput header */
 #define select_output_header_default_name "Default"
 
 /**
@@ -54,7 +57,11 @@ struct output_options {
   /*! Select output file, parsed */
   struct swift_params* select_output;
 
-  /* Pass-through struct for now but may need more later. */
+  /*! Number of fields to write for each output selection and particle type.
+   * We need one more entry than the maximal number of output styles, in case
+   * the Default output style is used but not explicitly specified. */
+  int num_fields_to_write[OUTPUT_LIST_MAX_NUM_OF_SELECT_OUTPUT_STYLES + 1]
+                         [swift_type_count];
 };
 
 /* Create and destroy */
@@ -69,8 +76,17 @@ void output_options_struct_restore(struct output_options* output_options,
                                    FILE* stream);
 
 /* Logic functions */
-int output_options_should_write_field(struct output_options* output_options,
-                                      char* snapshot_type, char* field_name,
-                                      enum part_type part_type);
+int output_options_should_write_field(
+    const struct output_options* output_options, const char* snapshot_type,
+    const char* field_name, const enum part_type part_type,
+    const enum compression_levels comp_level_current_default);
+
+enum compression_levels output_options_get_ptype_default(
+    struct swift_params* output_params, const char* snapshot_type,
+    const enum part_type part_type);
+
+int output_options_get_num_fields_to_write(
+    const struct output_options* output_options, const char* selection_name,
+    const int ptype);
 
-#endif
\ No newline at end of file
+#endif
diff --git a/src/parallel_io.c b/src/parallel_io.c
index 308486da23ff6c9e9f0e89aa021578e756b8f9c8..0a90921848005389d66de7f5cfa14e124232c4f5 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -1044,11 +1044,14 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
  *
  * @param e The #engine.
  * @param N_total The total number of particles of each type to write.
+ * @param numFields The number of fields to write for each particle type.
+ * @param current_selection_name The name of the current output selection.
  * @param internal_units The #unit_system used internally.
  * @param snapshot_units The #unit_system used in the snapshots.
  */
 void prepare_file(struct engine* e, const char* fileName,
-                  const char* xmfFileName, long long N_total[6],
+                  const char* xmfFileName, long long N_total[swift_type_count],
+                  const int numFields[swift_type_count],
+                  char current_selection_name[FIELD_BUFFER_SIZE],
                   const struct unit_system* internal_units,
                   const struct unit_system* snapshot_units) {
 
@@ -1058,7 +1061,6 @@ void prepare_file(struct engine* e, const char* fileName,
   const struct spart* sparts = e->s->sparts;
   const struct bpart* bparts = e->s->bparts;
   struct output_options* output_options = e->output_options;
-  struct output_list* output_list = e->output_list_snapshots;
   const int with_cosmology = e->policy & engine_policy_cosmology;
   const int with_cooling = e->policy & engine_policy_cooling;
   const int with_temperature = e->policy & engine_policy_temperature;
@@ -1103,16 +1105,6 @@ void prepare_file(struct engine* e, const char* fileName,
                          e->s->dim[1] * factor_length,
                          e->s->dim[2] * factor_length};
 
-  /* Determine if we are writing a reduced snapshot, and if so which
-   * output selection type to use */
-  char current_selection_name[FIELD_BUFFER_SIZE] =
-      select_output_header_default_name;
-  if (output_list) {
-    /* Users could have specified a different Select Output scheme for each
-     * snapshot. */
-    output_list_get_current_select_output(output_list, current_selection_name);
-  }
-
   /* Print the relevant information and print status */
   io_write_attribute(h_grp, "BoxSize", DOUBLE, dim, 3);
   io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
@@ -1164,8 +1156,9 @@ void prepare_file(struct engine* e, const char* fileName,
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if no particle of this kind */
-    if (N_total[ptype] == 0) continue;
+    /* Don't do anything if there are (a) no particles of this kind, or (b)
+     * if we have disabled every field of this particle type. */
+    if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
 
     /* Add the global information for that particle type to
      * the XMF meta-file */
@@ -1191,6 +1184,7 @@ void prepare_file(struct engine* e, const char* fileName,
 
     /* Write the number of particles as an attribute */
     io_write_attribute_l(h_grp, "NumberOfParticles", N_total[ptype]);
+    io_write_attribute_i(h_grp, "NumberOfFields", numFields[ptype]);
 
     int num_fields = 0;
     struct io_props list[100];
@@ -1270,19 +1264,33 @@ void prepare_file(struct engine* e, const char* fileName,
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
-    /* Prepare everything that is not cancelled */
+    /* Did the user specify a non-standard default for the entire particle
+     * type? */
+    const enum compression_levels compression_level_current_default =
+        output_options_get_ptype_default(output_options->select_output,
+                                         current_selection_name,
+                                         (enum part_type)ptype);
 
+    /* Prepare everything that is not cancelled */
+    int num_fields_written = 0;
     for (int i = 0; i < num_fields; ++i) {
 
       /* Did the user cancel this field? */
       const int should_write = output_options_should_write_field(
           output_options, current_selection_name, list[i].name,
-          (enum part_type)ptype);
+          (enum part_type)ptype, compression_level_current_default);
 
-      if (should_write)
+      if (should_write) {
         prepare_array_parallel(e, h_grp, fileName, xmfFile, partTypeGroupName,
                                list[i], N_total[ptype], snapshot_units);
+        num_fields_written++;
+      }
     }
+#ifdef SWIFT_DEBUG_CHECKS
+    if (num_fields_written != numFields[ptype])
+      error("Wrote %d fields for particle type %s, but expected to write %d.",
+            num_fields_written, part_type_names[ptype], numFields[ptype]);
+#endif
 
     /* Close particle group */
     H5Gclose(h_grp);
@@ -1399,10 +1407,25 @@ void write_output_parallel(struct engine* e,
                            e->snapshot_output_count, e->snapshot_subdir,
                            e->snapshot_base_name);
 
+  char current_selection_name[FIELD_BUFFER_SIZE] =
+      select_output_header_default_name;
+  if (output_list) {
+    /* Users could have specified a different Select Output scheme for each
+     * snapshot. */
+    output_list_get_current_select_output(output_list, current_selection_name);
+  }
+
+  /* Total number of fields to write per ptype */
+  int numFields[swift_type_count] = {0};
+  for (int ptype = 0; ptype < swift_type_count; ++ptype) {
+    numFields[ptype] = output_options_get_num_fields_to_write(
+        output_options, current_selection_name, ptype);
+  }
+
   /* Rank 0 prepares the file */
   if (mpi_rank == 0)
-    prepare_file(e, fileName, xmfFileName, N_total, internal_units,
-                 snapshot_units);
+    prepare_file(e, fileName, xmfFileName, N_total, numFields,
+                 current_selection_name, internal_units, snapshot_units);
 
   MPI_Barrier(MPI_COMM_WORLD);
 
@@ -1432,8 +1455,8 @@ void write_output_parallel(struct engine* e,
   /* Write the location of the particles in the arrays */
   io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->dim, e->s->pos_dithering,
                         e->s->cells_top, e->s->nr_cells, e->s->width, mpi_rank,
-                        /*distributed=*/0, N_total, offset, internal_units,
-                        snapshot_units);
+                        /*distributed=*/0, N_total, offset, numFields,
+                        internal_units, snapshot_units);
 
   /* Close everything */
   if (mpi_rank == 0) {
@@ -1781,21 +1804,20 @@ void write_output_parallel(struct engine* e,
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Did the user specify a non-standard default for the entire particle
+     * type? */
+    const enum compression_levels compression_level_current_default =
+        output_options_get_ptype_default(output_options->select_output,
+                                         current_selection_name,
+                                         (enum part_type)ptype);
+
     /* Write everything that is not cancelled */
-    char current_selection_name[FIELD_BUFFER_SIZE] =
-        select_output_header_default_name;
-    if (output_list) {
-      /* Users could have specified a different Select Output scheme for each
-       * snapshot. */
-      output_list_get_current_select_output(output_list,
-                                            current_selection_name);
-    }
     for (int i = 0; i < num_fields; ++i) {
 
       /* Did the user cancel this field? */
       const int should_write = output_options_should_write_field(
           output_options, current_selection_name, list[i].name,
-          (enum part_type)ptype);
+          (enum part_type)ptype, compression_level_current_default);
 
       if (should_write)
         write_array_parallel(e, h_grp, fileName, partTypeGroupName, list[i],
diff --git a/src/parser.c b/src/parser.c
index 01c0c87eaa7ddc8d66cb8d75063a2790b6b5d56b..6fd0aa561d2be92c437f71eeb3f28568a4e97d81 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -753,7 +753,7 @@ void parser_get_opt_param_string(struct swift_params *params, const char *name,
       if (params->data[i].is_default && strcmp(def, retParam) != 0)
         error(
             "Tried parsing %s again but cannot parse a parameter with "
-            "two different default value ('%s' != '%s')",
+            "two different default values ('%s' != '%s')",
             name, def, retParam);
       /* this parameter has been used */
       params->data[i].used = 1;
@@ -1270,3 +1270,23 @@ void parser_struct_restore(const struct swift_params *params, FILE *stream) {
   restart_read_blocks((void *)params, sizeof(struct swift_params), 1, stream,
                       NULL, "parameters");
 }
+
+/**
+ * @brief Return the index of a given section name in a swift_params struct.
+ *
+ * If the section could not be found, -1 is returned.
+ *
+ * @param params The swift_params struct in which to locate the section.
+ * @param name The section name to locate.
+ */
+int parser_get_section_id(const struct swift_params *params, const char *name) {
+  for (int section_id = 0; section_id < params->sectionCount; section_id++) {
+    /* Get the name of current section, *without* a trailing colon */
+    char section_name[PARSER_MAX_LINE_SIZE];
+    strcpy(section_name, params->section[section_id].name);
+    section_name[strlen(section_name) - 1] = 0;
+
+    if (strcmp(section_name, name) == 0) return section_id;
+  }
+  return -1;
+}
diff --git a/src/parser.h b/src/parser.h
index d5445f374cd4b1e0808c188e6bcae6a159728695..77021c90ba5009da9a34b2887a6a8528ae749060 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -117,4 +117,7 @@ void parser_write_params_to_hdf5(const struct swift_params *params, hid_t grp,
 void parser_struct_dump(const struct swift_params *params, FILE *stream);
 void parser_struct_restore(const struct swift_params *params, FILE *stream);
 
+/* Lookup functions */
+int parser_get_section_id(const struct swift_params *params, const char *name);
+
 #endif /* SWIFT_PARSER_H */
diff --git a/src/serial_io.c b/src/serial_io.c
index 9dd51f952b12bcf570cdea1563d6bd3c6f3d67d4..cf7280248de45782987fc4fa84ce9b4ec887d480 100644
--- a/src/serial_io.c
+++ b/src/serial_io.c
@@ -933,6 +933,13 @@ void write_output_serial(struct engine* e,
     output_list_get_current_select_output(output_list, current_selection_name);
   }
 
+  /* Total number of fields to write per ptype */
+  int numFields[swift_type_count] = {0};
+  for (int ptype = 0; ptype < swift_type_count; ++ptype) {
+    numFields[ptype] = output_options_get_num_fields_to_write(
+        output_options, current_selection_name, ptype);
+  }
+
   /* Compute offset in the file and total number of particles */
   size_t N[swift_type_count] = {Ngas_written,   Ndm_written,
                                 Ndm_background, 0,
@@ -998,14 +1005,14 @@ void write_output_serial(struct engine* e,
     strftime(snapshot_date, 64, "%T %F %Z", timeinfo);
     io_write_attribute_s(h_grp, "Snapshot date", snapshot_date);
 
-    /* GADGET-2 legacy values */
-    /* Number of particles of each type */
+    /* GADGET-2 legacy values: Number of particles of each type */
     unsigned int numParticles[swift_type_count] = {0};
     unsigned int numParticlesHighWord[swift_type_count] = {0};
     for (int ptype = 0; ptype < swift_type_count; ++ptype) {
       numParticles[ptype] = (unsigned int)N_total[ptype];
       numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
     }
+
     io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total,
                        swift_type_count);
     io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
@@ -1032,8 +1039,9 @@ void write_output_serial(struct engine* e,
     /* Loop over all particle types */
     for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-      /* Don't do anything if no particle of this kind */
-      if (N_total[ptype] == 0) continue;
+      /* Don't do anything if there are (a) no particles of this kind, or (b)
+       * if we have disabled every field of this particle type. */
+      if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
 
       /* Open the particle group in the file */
       char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
@@ -1053,6 +1061,7 @@ void write_output_serial(struct engine* e,
 
       /* Write the number of particles as an attribute */
       io_write_attribute_l(h_grp, "NumberOfParticles", N_total[ptype]);
+      io_write_attribute_i(h_grp, "NumberOfFields", numFields[ptype]);
 
       /* Close particle group */
       H5Gclose(h_grp);
@@ -1080,8 +1089,8 @@ void write_output_serial(struct engine* e,
   /* Write the location of the particles in the arrays */
   io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->dim, e->s->pos_dithering,
                         e->s->cells_top, e->s->nr_cells, e->s->width, mpi_rank,
-                        /*distributed=*/0, N_total, offset, internal_units,
-                        snapshot_units);
+                        /*distributed=*/0, N_total, offset, numFields,
+                        internal_units, snapshot_units);
 
   /* Close everything */
   if (mpi_rank == 0) {
@@ -1102,8 +1111,9 @@ void write_output_serial(struct engine* e,
       /* Loop over all particle types */
       for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-        /* Don't do anything if no particle of this kind */
-        if (N_total[ptype] == 0) continue;
+        /* Don't do anything if there are (a) no particles of this kind, or (b)
+         * if we have disabled every field of this particle type. */
+        if (N_total[ptype] == 0 || numFields[ptype] == 0) continue;
 
         /* Add the global information for that particle type to the XMF
          * meta-file */
@@ -1400,20 +1410,35 @@ void write_output_serial(struct engine* e,
             error("Particle Type %d not yet supported. Aborting", ptype);
         }
 
-        /* Write everything that is not cancelled */
+        /* Did the user specify a non-standard default for the entire particle
+         * type? */
+        const enum compression_levels compression_level_current_default =
+            output_options_get_ptype_default(output_options->select_output,
+                                             current_selection_name,
+                                             (enum part_type)ptype);
 
+        /* Write everything that is not cancelled */
+        int num_fields_written = 0;
         for (int i = 0; i < num_fields; ++i) {
 
           /* Did the user cancel this field? */
           const int should_write = output_options_should_write_field(
               output_options, current_selection_name, list[i].name,
-              (enum part_type)ptype);
+              (enum part_type)ptype, compression_level_current_default);
 
-          if (should_write)
+          if (should_write) {
             write_array_serial(e, h_grp, fileName, xmfFile, partTypeGroupName,
                                list[i], Nparticles, N_total[ptype], mpi_rank,
                                offset[ptype], internal_units, snapshot_units);
+            num_fields_written++;
+          }
         }
+#ifdef SWIFT_DEBUG_CHECKS
+        if (num_fields_written != numFields[ptype])
+          error(
+              "Wrote %d fields for particle type %s, but expected to write %d.",
+              num_fields_written, part_type_names[ptype], numFields[ptype]);
+#endif
 
         /* Free temporary array */
         if (parts_written) swift_free("parts_written", parts_written);
diff --git a/src/single_io.c b/src/single_io.c
index 7319b5e3b2b589f2a45bc5b1a260672bfe8638f7..f69db4ff86d4fcb6b630d103ce00933124b21764 100644
--- a/src/single_io.c
+++ b/src/single_io.c
@@ -842,14 +842,21 @@ void write_output_single(struct engine* e,
   strftime(snapshot_date, 64, "%T %F %Z", timeinfo);
   io_write_attribute_s(h_grp, "Snapshot date", snapshot_date);
 
-  /* GADGET-2 legacy values */
-  /* Number of particles of each type */
+  /* GADGET-2 legacy values: number of particles of each type */
   unsigned int numParticles[swift_type_count] = {0};
   unsigned int numParticlesHighWord[swift_type_count] = {0};
+
+  /* Total number of fields to write per ptype */
+  int numFields[swift_type_count] = {0};
+
   for (int ptype = 0; ptype < swift_type_count; ++ptype) {
     numParticles[ptype] = (unsigned int)N_total[ptype];
     numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
+
+    numFields[ptype] = output_options_get_num_fields_to_write(
+        output_options, current_selection_name, ptype);
   }
+
   io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total,
                      swift_type_count);
   io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
@@ -881,15 +888,16 @@ void write_output_single(struct engine* e,
   /* Write the location of the particles in the arrays */
   io_write_cell_offsets(h_grp, e->s->cdim, e->s->dim, e->s->pos_dithering,
                         e->s->cells_top, e->s->nr_cells, e->s->width, e->nodeID,
-                        /*distributed=*/0, N_total, global_offsets,
+                        /*distributed=*/0, N_total, global_offsets, numFields,
                         internal_units, snapshot_units);
   H5Gclose(h_grp);
 
   /* Loop over all particle types */
   for (int ptype = 0; ptype < swift_type_count; ptype++) {
 
-    /* Don't do anything if no particle of this kind */
-    if (numParticles[ptype] == 0) continue;
+    /* Don't do anything if there are (a) no particles of this kind, or (b)
+     * if we have disabled every field of this particle type. */
+    if (numParticles[ptype] == 0 || numFields[ptype] == 0) continue;
 
     /* Add the global information for that particle type to the XMF meta-file */
     xmf_write_groupheader(xmfFile, fileName, numParticles[ptype],
@@ -911,8 +919,9 @@ void write_output_single(struct engine* e,
                                  H5P_DEFAULT, H5P_DEFAULT);
     if (h_err < 0) error("Error while creating alias for particle group.\n");
 
-    /* Write the number of particles as an attribute */
+    /* Write the number of particles and fields as attributes */
     io_write_attribute_l(h_grp, "NumberOfParticles", numParticles[ptype]);
+    io_write_attribute_i(h_grp, "NumberOfFields", numFields[ptype]);
 
     int num_fields = 0;
     struct io_props list[100];
@@ -1182,20 +1191,33 @@ void write_output_single(struct engine* e,
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
-    /* Write everything that is not cancelled */
+    /* Did the user specify a non-standard default for the entire particle
+     * type? */
+    const enum compression_levels compression_level_current_default =
+        output_options_get_ptype_default(output_options->select_output,
+                                         current_selection_name,
+                                         (enum part_type)ptype);
 
+    /* Write everything that is not cancelled */
+    int num_fields_written = 0;
     for (int i = 0; i < num_fields; ++i) {
 
       /* Did the user cancel this field? */
       const int should_write = output_options_should_write_field(
           output_options, current_selection_name, list[i].name,
-          (enum part_type)ptype);
+          (enum part_type)ptype, compression_level_current_default);
 
       if (should_write) {
         write_array_single(e, h_grp, fileName, xmfFile, partTypeGroupName,
                            list[i], N, internal_units, snapshot_units);
+        num_fields_written++;
       }
     }
+#ifdef SWIFT_DEBUG_CHECKS
+    if (num_fields_written != numFields[ptype])
+      error("Wrote %d fields for particle type %s, but expected to write %d.",
+            num_fields_written, part_type_names[ptype], numFields[ptype]);
+#endif
 
     /* Free temporary arrays */
     if (parts_written) swift_free("parts_written", parts_written);
@@ -1211,7 +1233,7 @@ void write_output_single(struct engine* e,
 
     /* Close this particle group in the XMF file as well */
     xmf_write_groupfooter(xmfFile, (enum part_type)ptype);
-  }
+  } /* ends loop over particle types */
 
   /* Write LXMF file descriptor */
   xmf_write_outputfooter(xmfFile, e->snapshot_output_count, e->time);