diff --git a/configure.ac b/configure.ac
index faa1626a69a23f0d6803af13179793ff3540e116..5bf19116e44f0936655fcd679806c594c05d717d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -52,7 +52,7 @@ AC_DEFINE([_GLIBCXX_INCLUDE_NEXT_C_HEADERS],1,[Hack for min() and max() using g+
 # Enable POSIX and platform extension preprocessor macros.
 AC_USE_SYSTEM_EXTENSIONS
 
-# Check for compiler version and vendor.
+# Check for C compiler version and vendor.
 AX_COMPILER_VENDOR
 AX_COMPILER_VERSION
 
@@ -1397,6 +1397,34 @@ if test "$enable_velociraptor_orphans" = "yes"; then
    AC_DEFINE([HAVE_VELOCIRAPTOR_ORPHANS], 1, [Orphan particles should be written out])
 fi
 
+# Check if lightcone output is on.
+AC_ARG_ENABLE([lightcone],
+   [AS_HELP_STRING([--enable-lightcone],
+     [Activate lightcone outputs.],
+   )],
+   [enable_lightcone="$enableval"],
+   [enable_lightcone="no"]
+)
+if test "$enable_lightcone" = "yes"; then
+   # Check for healpix for lightcone maps. May require cfitsio
+   # This sets CHEALPIX_LIBS and CHEALPIX_CFLAGS and #defines HAVE_CHEALPIX.
+   # It also adds a --with-cfitsio flag in case cfitsio is installed in a
+   # different location from healpix.
+   GV_FIND_LIBRARY([cfitsio], [CFITSIO], [cfitsio], [cfitsio], [ffclos])
+   TMP_LIBS=${LIBS}
+   LIBS="${CFITSIO_LIBS} ${LIBS}"
+   GV_FIND_LIBRARY([chealpix], [CHEALPIX], [chealpix], [chealpix], [ang2vec])
+   LIBS=${TMP_LIBS}
+   have_chealpix=${USE_CHEALPIX}
+   CHEALPIX_LIBS="${CHEALPIX_LIBS} ${CFITSIO_LIBS}"
+   AC_DEFINE([WITH_LIGHTCONE], 1, [Enable lightcone outputs])
+   if test "$have_chealpix" != "yes"; then
+      AC_MSG_ERROR([Lightcone output requires the HEALPix C API. Please configure with --with-chealpix.])
+   fi
+else
+   have_chealpix="no"
+fi
+
 # Check for floating-point execeptions
 AC_CHECK_FUNC(feenableexcept, AC_DEFINE([HAVE_FE_ENABLE_EXCEPT],[1],
     [Defined if the floating-point exception can be enabled using non-standard GNU functions.]))
@@ -1614,6 +1642,7 @@ with_subgrid_stars=none
 with_subgrid_star_formation=none
 with_subgrid_feedback=none
 with_subgrid_sink=none
+with_subgrid_extra_io=none
 
 case "$with_subgrid" in
    yes)
@@ -1630,6 +1659,7 @@ case "$with_subgrid" in
 	with_subgrid_feedback=GEAR
 	with_subgrid_black_holes=none
 	with_subgrid_sink=none
+	with_subgrid_extra_io=none
 	enable_fof=no
    ;;
    QLA)
@@ -1642,6 +1672,7 @@ case "$with_subgrid" in
 	with_subgrid_feedback=none
 	with_subgrid_black_holes=none
 	with_subgrid_sink=none
+	with_subgrid_extra_io=none
 	enable_fof=no
    ;;
    QLA-EAGLE)
@@ -1666,6 +1697,7 @@ case "$with_subgrid" in
 	with_subgrid_feedback=EAGLE
 	with_subgrid_black_holes=EAGLE
 	with_subgrid_sink=none
+	with_subgrid_extra_io=none
 	enable_fof=yes
    ;;
    EAGLE-XL)
@@ -1678,6 +1710,7 @@ case "$with_subgrid" in
 	with_subgrid_feedback=EAGLE
 	with_subgrid_black_holes=EAGLE
 	with_subgrid_sink=none
+	with_subgrid_extra_io=EAGLE
 	enable_fof=yes
    ;;
    *)
@@ -2169,6 +2202,35 @@ case "$with_tracers" in
    ;;
 esac
 
+# Extra fields added to snapshots at i/o time
+AC_ARG_WITH([extra_io],
+   [AS_HELP_STRING([--with-extra-io=<function>],
+      [extra i/o fields @<:@none, EAGLE default: none@:>@]
+   )],
+   [with_extra_io="$withval"],
+   [with_extra_io="none"]
+)
+
+if test "$with_subgrid" != "none"; then
+   if test "$with_extra_io" != "none"; then
+      AC_MSG_ERROR([Cannot provide with-subgrid and with-extra-io together])
+   else
+      with_extra_io="$with_subgrid_extra_io"
+   fi
+fi
+
+case "$with_extra_io" in
+   none)
+      AC_DEFINE([EXTRA_IO_NONE], [1], [No extra_io function])
+   ;;
+   EAGLE)
+      AC_DEFINE([EXTRA_IO_EAGLE], [1], [Extra i/o fields taken from the EAGLE model])
+   ;;
+   *)
+      AC_MSG_ERROR([Unknown extra-io choice: $with_extra_io])
+   ;;
+esac
+
 # Stellar model.
 AC_ARG_WITH([stars],
    [AS_HELP_STRING([--with-stars=<model>],
@@ -2592,6 +2654,9 @@ DX_DOXYGEN_FEATURE(OFF)
 DX_INIT_DOXYGEN(SWIFT, doc/Doxyfile, doc/)
 AM_CONDITIONAL([HAVE_DOXYGEN], [test "$ac_cv_path_ac_pt_DX_DOXYGEN" != ""])
 
+# Check if using EAGLE extra I/O
+AM_CONDITIONAL([HAVEEAGLEEXTRAIO], [test "${with_extra_io}" = "EAGLE"])
+
 # Check if using QLA cooling
 AM_CONDITIONAL([HAVEQLACOOLING], [test "$with_cooling" = "QLA"])
 AM_CONDITIONAL([HAVEQLAEAGLECOOLING], [test "$with_cooling" = "QLA-EAGLE"])
@@ -2693,6 +2758,7 @@ AC_MSG_RESULT([
     - MPI               : $have_mpi_fftw
     - ARM               : $have_arm_fftw
    GSL enabled          : $have_gsl
+   HEALPix C enabled    : $have_chealpix
    libNUMA enabled      : $have_numa
    GRACKLE enabled      : $have_grackle
    Special allocators   : $have_special_allocator
@@ -2700,6 +2766,7 @@ AC_MSG_RESULT([
    Pthread barriers     : $have_pthread_barrier
    VELOCIraptor enabled : $have_velociraptor
    FoF activated:       : $enable_fof
+   Lightcones enabled   : $enable_lightcone
 
    Hydro scheme       : $with_hydro
    Dimensionality     : $with_dimension
@@ -2725,8 +2792,9 @@ AC_MSG_RESULT([
    Star feedback model  : $with_feedback_name
    Sink particle model  : $with_sink
    Black holes model    : $with_black_holes
-   Radiative transfer   : $with_rt
-
+   Radiative transfer   : $with_rt$rt_extra_msg
+   Extra i/o            : $with_extra_io
+
    Atomic operations in tasks  : $enable_atomics_within_tasks
    Individual timers           : $enable_timers
    Task debugging              : $enable_task_debugging
diff --git a/doc/RTD/source/GettingStarted/compiling_code.rst b/doc/RTD/source/GettingStarted/compiling_code.rst
index 297a2393db00498efc1590b1a02d6c3d95b8fc23..dfad12360772fad728dcd05adc1532d0f578913b 100644
--- a/doc/RTD/source/GettingStarted/compiling_code.rst
+++ b/doc/RTD/source/GettingStarted/compiling_code.rst
@@ -93,6 +93,16 @@ GRACKLE
 ~~~~~~~
 GRACKLE cooling is implemented in SWIFT. If you wish to take advantage of it, you will need it installed.
 
+HEALPix C library
+~~~~~~~~~~~~~~~~~~~
+
+This is required for making light cone HEALPix maps. Note that by default
+HEALPix builds a static library, which cannot be used to build the SWIFT
+shared library. Either HEALPix must be built as a shared library, or -fPIC
+must be added to the C compiler flags when HEALPix is being configured.
+
+CFITSIO
+~~~~~~~
+
+This may be required as a dependency of HEALPix.
+
 
 Initial Setup
 -------------
diff --git a/doc/RTD/source/LightCones/adding_outputs.rst b/doc/RTD/source/LightCones/adding_outputs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a8e554fe9e1ff78cb9690c5676bf71ce40dd53c6
--- /dev/null
+++ b/doc/RTD/source/LightCones/adding_outputs.rst
@@ -0,0 +1,20 @@
+.. Light Cones
+   John Helly 29th April 2021
+
+.. _lightcone_adding_outputs_label:
+
+Adding New Types of Output
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+New particle properties can be added to the particle light cones as follows:
+
+* Add a field to the ``lightcone_<type>_data`` struct in ``lightcone_particle_io.h`` to store the new quantity
+* Modify the ``lightcone_store_<type>`` function in ``lightcone_particle_io.c`` to set the new struct field from the particle data
+* In ``lightcone_io_make_output_fields()``, add a call to ``lightcone_io_make_output_field()`` to define the new output
+
+Here, ``<type>`` is the particle type: gas, dark_matter, stars, black_hole or neutrino.
+
+To add a new type of HEALPix map:
+
+* Add a function to compute the quantity in ``lightcone_map_types.c``. See ``lightcone_map_total_mass()`` for an example.
+* Add a new entry to the ``lightcone_map_types`` array in ``lightcone_map_types.h``. This should specify the name of the new map type, a pointer to the function to compute the quantity, and the units of the quantity. The last entry in the array is not used and must have a NULL function pointer to act as an end marker.
diff --git a/doc/RTD/source/LightCones/algorithm_description.rst b/doc/RTD/source/LightCones/algorithm_description.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ceb4c9834ac0b420dcdc8d3c17b105d1790c78f4
--- /dev/null
+++ b/doc/RTD/source/LightCones/algorithm_description.rst
@@ -0,0 +1,27 @@
+.. Light Cones
+   John Helly 29th April 2021
+
+.. _lightcone_algorithm_description_label:
+
+Light Cone Output Algorithm
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In cosmological simulations it is possible to specify the location of
+an observer in the simulation box and have SWIFT output information
+about particles in the simulation as they cross the observer's past
+light cone.
+
+Whenever a particle is drifted the code checks if any periodic copy of
+the particle crosses the lightcone during the drift, and if so that
+copy of the particle is buffered for output. As an optimization, at the
+start of each time step the code computes which periodic copies of the
+simulation box could contribute to the light cone and only those copies
+are searched. When drifting the particles in a particular cell the list of
+replications is further narrowed down using the spatial extent of the
+cell.
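+
+Schematically, the crossing test for one periodic copy of a particle looks
+like the following sketch (an illustration with invented names, not the
+actual SWIFT implementation):
+
+.. code:: python
+
+  import numpy as np
+
+  def crosses_lightcone(x_old, x_new, a_old, a_new, observer, shift, r_lc):
+      # r_lc(a) is the comoving distance from the observer to the past
+      # light cone at expansion factor a; it shrinks to zero as a -> 1.
+      d_old = np.linalg.norm(x_old + shift - observer)
+      d_new = np.linalg.norm(x_new + shift - observer)
+      # The copy crosses during the drift if it is inside the past light
+      # cone at the start of the drift and outside it at the end.
+      return d_old <= r_lc(a_old) and d_new > r_lc(a_new)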
+
+Particles can be output directly to HDF5 files or accumulated to HEALPix
+maps corresponding to spherical shells centred on the observer.
diff --git a/doc/RTD/source/LightCones/index.rst b/doc/RTD/source/LightCones/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..54ec0194b51efe7938ffae82f2b34f768c63f829
--- /dev/null
+++ b/doc/RTD/source/LightCones/index.rst
@@ -0,0 +1,20 @@
+.. Light Cones
+   John Helly 29th April 2021
+
+.. _Light_Cones_label:
+
+Light Cone Outputs
+==================
+
+This section describes the light cone outputs
+and related parameters.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   algorithm_description
+   lightcone_particle_output
+   lightcone_healpix_maps
+   running_with_lightcones
+   adding_outputs
diff --git a/doc/RTD/source/LightCones/lightcone_healpix_maps.rst b/doc/RTD/source/LightCones/lightcone_healpix_maps.rst
new file mode 100644
index 0000000000000000000000000000000000000000..42a63741fac6ff88015fe517aa8905ba081891d8
--- /dev/null
+++ b/doc/RTD/source/LightCones/lightcone_healpix_maps.rst
@@ -0,0 +1,36 @@
+.. Light Cones
+   John Helly 29th April 2021
+
+.. _lightcone_healpix_maps_label:
+
+Light Cone HEALPix Maps
+~~~~~~~~~~~~~~~~~~~~~~~
+
+SWIFT can accumulate particle properties to HEALPix maps as they
+cross the observer's past light cone. Each map corresponds to a
+spherical shell centred on the observer. When a particle crosses
+the lightcone its distance from the observer is calculated and the
+particle's contribution is added to a buffer so that at the end of
+the time step it can be added to the corresponding HEALPix map.
+
+Maps can be generated for multiple concentric shells and multiple
+quantities can be accumulated for each shell. The HEALPix map for a
+shell is allocated and zeroed out when the simulation first reaches
+a redshift where particles could contribute to that map. The map is
+written out and deallocated when the simulation advances to a point
+where there can be no further contributions. In MPI runs the pixel
+data for the maps are distributed across all MPI ranks.
+
+Updates to the maps are buffered in order to avoid the need for
+communication during the time step. At the end of the step, if any
+MPI rank has a large number of updates buffered, all pending
+updates are applied to the pixel data.
+
+For gas particles, the HEALPix maps are smoothed using a projected
+version of the same kernel used for the hydro calculations. Other
+particle types are not smoothed.
+
+The code writes one output file for each spherical shell. In MPI mode
+all ranks write to the same file using parallel HDF5. If maps of
+multiple quantities are being made they will be written to a single
+file as separate 1D datasets with one element per pixel.
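+
+As an illustration, a finished map can be read back and displayed with
+``healpy``. This sketch uses the ``read_lightcone.py`` helper shipped with
+the SmallCosmoVolume_lightcone example and assumes a map named ``TotalMass``
+and a lightcone written to the ``lightcones`` directory:
+
+.. code:: python
+
+  import healpy as hp
+  import matplotlib.pyplot as plt
+
+  import read_lightcone as rl
+
+  # Read and combine the TotalMass pixel data for the innermost shell
+  total_mass = rl.read_map("./lightcones/", "lightcone0", shell_nr=0,
+                           map_name="TotalMass")
+  hp.mollview(total_mass + 1, norm="log", title="Projected mass")
+  plt.show()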
diff --git a/doc/RTD/source/LightCones/lightcone_particle_output.rst b/doc/RTD/source/LightCones/lightcone_particle_output.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e4c2ec6349e75e56cafffc61351151cbc35bb698
--- /dev/null
+++ b/doc/RTD/source/LightCones/lightcone_particle_output.rst
@@ -0,0 +1,30 @@
+.. Light Cones
+   John Helly 29th June 2021
+
+.. _lightcone_particle_output_label:
+
+Light Cone Particle Output
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SWIFT can output particles to HDF5 output files (similar to the
+snapshots) as they cross the observer's light cone. During each time
+step, any particles which cross the light cone are added to a buffer.
+If this buffer is large at the end of the step then its contents
+are written to an output file. In MPI runs each MPI rank writes its
+own output file and decides independently when to flush its particle
+buffer.
+
+A new output file is started whenever restart files are written. This
+allows the code to automatically continue from the point of the restart
+dump if the run is interrupted. Any files written after the restart
+dump will be overwritten when the simulation is resumed, preventing
+duplication of particles in the light cone output.
+
+The output files have names of the form ``basename_XXXX.Y.hdf5``, where
+XXXX numbers the files written by a single MPI rank and Y is the index
+of the MPI rank.
+
+The output files contain one HDF5 group for each particle type. Within
+each group there are datasets corresponding to particle properties in
+a similar format to the snapshots.
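+
+As an illustration, the particle data can be read back with the
+``read_lightcone.py`` helper shipped with the SmallCosmoVolume_lightcone
+example (the group name ``Gas`` and the property names below are
+assumptions based on the snapshot format):
+
+.. code:: python
+
+  import read_lightcone as rl
+
+  # Concatenate the gas particles from all files written by all MPI ranks
+  data = rl.read_particles("./lightcones/", "lightcone0", "Gas",
+                           ["Coordinates", "ParticleIDs"])
+  print(data["Coordinates"].shape)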
+
diff --git a/doc/RTD/source/LightCones/running_with_lightcones.rst b/doc/RTD/source/LightCones/running_with_lightcones.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1df3c5b7a8a9e0897d934f4eec8f2130aa9b838f
--- /dev/null
+++ b/doc/RTD/source/LightCones/running_with_lightcones.rst
@@ -0,0 +1,20 @@
+.. Light Cones
+   John Helly 29th April 2021
+
+.. _lightcone_running_label:
+
+Running SWIFT with Light Cone Output
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To produce light cone particle output SWIFT must be configured
+with ``--enable-lightcone``. Additionally, making HEALPix maps
+requires the HEALPix C library. If using MPI then parallel HDF5
+is also required.
+
+One lightcone is produced for each ``LightconeX`` section in the
+parameter file, where X is in the range 0-7. This allows generation
+of up to 8 light cones. See :ref:`Parameters_light_cone` for details.
+
+SWIFT must be run with the ``--lightcone`` flag to activate light
+cone outputs, otherwise the Lightcone sections in the parameter file
+are ignored.
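+
+A complete worked example, including suitable configure options and a full
+parameter file, is provided in
+``examples/SmallCosmoVolume/SmallCosmoVolume_lightcone``.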
diff --git a/doc/RTD/source/ParameterFiles/parameter_description.rst b/doc/RTD/source/ParameterFiles/parameter_description.rst
index 3dfb42fc1ed544c0e45dbdbd0d2aecc76de54ecf..28f8b908de911f5054fb0b46a43977d0cb1eb739 100644
--- a/doc/RTD/source/ParameterFiles/parameter_description.rst
+++ b/doc/RTD/source/ParameterFiles/parameter_description.rst
@@ -975,6 +975,182 @@ be processed by the ``SpecWizard`` tool
      range_when_shooting_down_y: 100. # Range along the y-axis of LoS along y
      range_when_shooting_down_z: 100. # Range along the z-axis of LoS along z
 
+
+.. _Parameters_light_cone:
+
+Light Cone Outputs
+---------------------
+
+One or more light cone outputs can be configured by including ``LightconeX`` sections
+in the parameter file, where X is in the range 0-7. It is also possible to include a
+``LightconeCommon`` section for parameters which are the same for all lightcones. The
+parameters for each light cone are:
+
+* Switch to enable or disable a lightcone: ``enabled``
+
+This should be set to 1 to enable the corresponding lightcone or 0 to disable it.
+Has no effect if specified in the LightconeCommon section.
+
+* Directory in which to write light cone output: ``subdir``
+
+All light cone output files will be written in the specified directory.
+
+* Base name for particle and HEALPix map outputs: ``basename``.
+
+Particles will be written to files ``<basename>_XXXX.Y.hdf5``, where XXXX numbers the files
+written by a single MPI rank and Y is the MPI rank index. HEALPix maps are written to files
+with names ``<basename>.shell_X.hdf5``, where X is the index of the shell. The basename must
+be unique for each light cone so it cannot be specified in the LightconeCommon section.
+
+See :ref:`lightcone_adding_outputs_label` for information on adding new output quantities.
+
+* Location of the observer in the simulation box, in internal units: ``observer_position``
+
+* Size of the in-memory chunks used to store particles and map updates: ``buffer_chunk_size``
+
+During each time step buffered particles and HEALPix map updates are stored in a linked
+list of chunks of ``buffer_chunk_size`` elements. Additional chunks are allocated as needed.
+The map update process is parallelized over chunks so the chunks should be small enough that
+each MPI rank typically has more chunks than threads.
+
+* Maximum amount of map update data (in MB) to send on each iteration: ``max_map_update_send_size_mb``
+
+Flushing the map update buffer involves sending the updates to the MPI ranks with the affected
+pixel data. Sending all updates at once can consume a large amount of memory so this parameter
+allows updates to be applied over multiple iterations to reduce peak memory usage.
+
+* Redshift range to output each particle type: ``z_range_for_<type>``
+
+A two element array with the minimum and maximum redshift at which particles of type ``<type>``
+will be output as they cross the lightcone. ``<type>`` can be Gas, DM, DMBackground, Stars, BH
+or Neutrino. If this parameter is not present for a particular type then that type will not
+be output.
+
+* The number of buffered particles which triggers a write to disk: ``max_particles_buffered``
+
+If an MPI rank has at least ``max_particles_buffered`` particles which have crossed the lightcone,
+it will write them to disk at the end of the current time step.
+
+* Size of chunks in the particle output file: ``hdf5_chunk_size``
+
+This sets the HDF5 chunk size. Particle outputs must be chunked because the number of particles
+which will be written out is not known when the file is created.
+
+* Whether to use lossy compression in the particle outputs: ``particles_lossy_compression``
+
+If this is 1 then the HDF5 lossy compression filter named in the definition of each particle
+output field will be enabled. If this is 0 lossy compression is not applied.
+
+* Whether to use lossless compression in the particle outputs: ``particles_gzip_level``
+
+If this is non-zero the HDF5 deflate filter will be applied to lightcone particle output with
+the compression level set to the specified value. 
+
+* HEALPix map resolution: ``nside``
+
+* Name of the file with shell radii: ``radius_file``
+
+This specifies the name of a file with the inner and outer radii of the shells used to make
+HEALPix maps. It should be a text file with a one line header and then two comma separated columns
+of numbers with the inner and outer radii. The units are determined by the header. The header must
+be one of the following:
+
+``# Minimum comoving distance, Maximum comoving distance``,
+``# Minimum redshift, Maximum redshift``, or
+``# Maximum expansion factor, Minimum expansion factor``.
+
+Comoving distances are in internal units. The shells must be in ascending
+order of radius and must not overlap.
+
+* Number of pending HEALPix map updates before the buffers are flushed: ``max_updates_buffered``
+
+In MPI mode applying updates to the HEALPix maps requires communication and forces synchronisation
+of all MPI ranks, so it is not done every time step. If any MPI rank has at least
+``max_updates_buffered`` pending updates at the end of a time step, then all ranks will apply
+their updates to the HEALPix maps.
+
+* Which types of HEALPix maps to create: ``map_names_file``
+
+This is the name of a file which specifies what quantities should be accumulated to HEALPix maps.
+The file contains two columns: the first column is the name of the map type and the second is the
+name of the compression filter to apply to it. See ``io_compression.c`` for the list of compression
+filter names. Set the filter name to ``on`` to disable compression.
+The possible map types are defined in the ``lightcone_map_types`` array in ``lightcone_map_types.h``.
+See :ref:`lightcone_adding_outputs_label` if you'd like to add a new map type.
+
+* Whether to distribute HEALPix maps over multiple files: ``distributed_maps``
+
+If this is 0 then the code uses HDF5 collective writes to write each map to a single file. If this
+is 1 then each MPI rank writes its part of the HEALPix map to a separate file.
+
+* Whether to use lossless compression in the HEALPix map outputs: ``maps_gzip_level``
+
+If this is non-zero the HDF5 deflate filter will be applied to the lightcone map output with
+the compression level set to the specified value. 
+
+The following shows a full set of light cone parameters for the case where we're making two
+light cones which only differ in the location of the observer:
+
+.. code:: YAML
+
+  LightconeCommon:
+
+    # Common parameters
+    subdir:            lightcones
+    buffer_chunk_size:      100000
+    max_particles_buffered: 1000000
+    hdf5_chunk_size:        10000
+ 
+    # Redshift ranges for particle types
+    z_range_for_Gas:           [0.0, 0.05]
+    z_range_for_DM:            [0.0, 0.05]
+    z_range_for_DMBackground:  [0.0, 0.05]
+    z_range_for_Stars:         [0.0, 0.05]
+    z_range_for_BH:            [0.0, 0.05]
+    z_range_for_Neutrino:      [0.0, 0.05]
+    
+    # Healpix map parameters
+    nside:                512
+    radius_file:          ./shell_radii.txt
+    max_updates_buffered: 100000
+    map_names_file:       map_names.txt
+    max_map_update_send_size_mb: 1.0
+    distributed_maps:     0
+
+    # Compression options
+    particles_lossy_compression: 0
+    particles_gzip_level:        6
+    maps_gzip_level:             6
+
+  Lightcone0:
+
+    enabled:  1
+    basename: lightcone0
+    observer_position: [35.5, 78.12, 12.45]
+
+  Lightcone1:
+
+    enabled:  1
+    basename: lightcone1
+    observer_position: [74.2, 10.80, 53.59]
+  
+
+An example of the radius file::
+
+  # Minimum comoving distance, Maximum comoving distance
+  0.0,   50.0
+  50.0,  100.0
+  150.0, 200.0
+  200.0, 400.0
+  400.0, 1000.0
+
+An example of the map names file::
+
+  TotalMass         on
+  SmoothedGasMass   on
+  UnsmoothedGasMass on
+  DarkMatterMass    on
+
+
 .. _Parameters_eos:
 
 Equation of State (EoS)
diff --git a/doc/RTD/source/index.rst b/doc/RTD/source/index.rst
index b079710d7df407740eae2d0771d8b250ea0b5c07..3f87971a56e9fc7d8581c65982067f5c9b9a24fc 100644
--- a/doc/RTD/source/index.rst
+++ b/doc/RTD/source/index.rst
@@ -27,6 +27,7 @@ difference is the parameter file that will need to be adapted for SWIFT.
    FriendsOfFriends/index
    VELOCIraptorInterface/index
    LineOfSights/index
+   LightCones/index
    EquationOfState/index
    ExternalPotentials/index
    Neutrinos/index
diff --git a/examples/Cooling/CoolingRates/Makefile.am b/examples/Cooling/CoolingRates/Makefile.am
index 7fa7d5f6cad1f3a8c5512722d5afe3c994e1619f..81353af35b26b53aacfc29550bdbe5d536867f69 100644
--- a/examples/Cooling/CoolingRates/Makefile.am
+++ b/examples/Cooling/CoolingRates/Makefile.am
@@ -15,12 +15,12 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 # Add the source directory and the non-standard paths to the included library headers to CFLAGS
-AM_CFLAGS = -I$(top_srcdir)/src -I$(top_builddir)/examples $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(OPENMP_CFLAGS)
+AM_CFLAGS = -I$(top_srcdir)/src -I$(top_builddir)/examples $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(OPENMP_CFLAGS) $(CHEALPIX_CFLAGS)
 
 AM_LDFLAGS = $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS)
 
 # Extra libraries.
-EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(VELOCIRAPTOR_LIBS) $(GSL_LIBS)
+EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(VELOCIRAPTOR_LIBS) $(GSL_LIBS) $(CHEALPIX_LIBS)
 
 # Programs.
 bin_PROGRAMS = cooling_rates
diff --git a/examples/Makefile.am b/examples/Makefile.am
index f707edcadb6be63f4cf116bd721d73659bbe653a..b66494dff5ec9d0077b5a383ff6f2488d4b7e6dd 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -20,13 +20,15 @@ MYFLAGS =
 
 # Add the source directory and the non-standard paths to the included library headers to CFLAGS
 AM_CFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/argparse $(HDF5_CPPFLAGS) \
-	$(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(GRACKLE_INCS) $(OPENMP_CFLAGS)
+	$(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(GRACKLE_INCS) $(OPENMP_CFLAGS) \
+	$(CHEALPIX_CFLAGS)
 
 AM_LDFLAGS = $(HDF5_LDFLAGS)
 
 # Extra libraries.
 EXTRA_LIBS = $(GSL_LIBS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) \
-	$(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS)
+	$(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) \
+	$(CHEALPIX_LIBS)
 
 # MPI libraries.
 MPI_LIBS = $(PARMETIS_LIBS) $(METIS_LIBS) $(MPI_THREAD_LIBS) $(FFTW_MPI_LIBS)
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml
index 0731480156788a424491d7a824ca415eace48576..1f93830629811b557705427204520b8c8db9ce53 100644
--- a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml
@@ -41,6 +41,7 @@ SPH:
 
 # Parameters governing the snapshots
 Snapshots:
+  subdir:              snapshots
   basename:            snap
   delta_time:          1.02
   scale_factor_first:  0.02
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/README b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/README
new file mode 100644
index 0000000000000000000000000000000000000000..39e3b7dee1c3450ced4a05c23d93e7d68c0cd72d
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/README
@@ -0,0 +1,26 @@
+Small LCDM cosmological simulation generated by C. Power. Cosmology
+is WMAP9 and the box is 100Mpc/h in size with 64^3 particles.
+We use a softening length of 1/25th of the mean inter-particle separation.
+
+The ICs have been generated to run with Gadget-2 so we need to switch
+on the options to cancel the h-factors and a-factors at reading time.
+We generate gas from the ICs using SWIFT's internal mechanism and set the
+temperature to the expected gas temperature at this redshift.
+
+This example is intended to be run with the EAGLE-XL model in order to
+produce lightcone outputs including gas, stars and black holes. Note
+that the resulting output will not be at all realistic: the mass
+resolution is extremely poor, and the minimum overdensity for star
+formation has to be reduced to allow any star formation.
+
+To configure the code appropriately, the following flags should be included:
+
+./configure \
+    --with-hydro=sphenix \
+    --with-subgrid=EAGLE-XL \
+    --with-kernel=wendland-C2 \
+    --with-chealpix \
+    --enable-lightcone
+
+MD5 checksum of the ICs:
+08736c3101fd738e22f5159f78e6022b  small_cosmo_volume.hdf5
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getColibreCoolingTables.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getColibreCoolingTables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..20a3ee7257b1a9064c268fe1ac20640ad654e98c
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getColibreCoolingTables.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/CoolingTables/COLIBRE/UV_dust1_CR1_G1_shield1.hdf5
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getCoolingTables.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getCoolingTables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ecd581fd3dd44a13af1218d7dee6af72a25a324a
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getCoolingTables.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/CoolingTables/EAGLE/coolingtables.tar.gz
+tar -xvzf coolingtables.tar.gz
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getEaglePhotometryTable.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getEaglePhotometryTable.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ee9c3b422f19518612416da0913b162fd4a120ff
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getEaglePhotometryTable.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/YieldTables/EAGLE/photometry.tar.gz
+tar -xf photometry.tar.gz
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getEagleYieldTable.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getEagleYieldTable.sh
new file mode 100755
index 0000000000000000000000000000000000000000..26eef020cab82acee2c80e88089df1790b281eab
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getEagleYieldTable.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/YieldTables/EAGLE/yieldtables.tar.gz
+tar -xf yieldtables.tar.gz
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getIC.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3b8136cc5aca00a25792655c6c505cfeeb0f2bc9
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getIC.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/small_cosmo_volume.hdf5
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getXrayTables.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getXrayTables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..84fdc6d8785f0e746f2c7f363d3e17b58a5e40ac
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/getXrayTables.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/CoolingTables/COLIBRE/X_Ray_tables.13072021.hdf5
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/map_types.txt b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/map_types.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fef8b673f4f1ee1c690193451bb02aee4fb23523
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/map_types.txt
@@ -0,0 +1,16 @@
+TotalMass                        on
+DarkMatterMass                   on
+SmoothedGasMass                  on
+UnsmoothedGasMass                on
+StellarMass                      on
+BlackHoleMass                    on
+StarFormationRate                on
+XrayErositaLowIntrinsicPhotons   on
+XrayErositaLowIntrinsicEnergies  on
+XrayErositaHighIntrinsicPhotons  on
+XrayErositaHighIntrinsicEnergies on
+XrayROSATIntrinsicPhotons        on
+XrayROSATIntrinsicEnergies       on
+ComptonY                         on
+DopplerB                         on
+DM                               on
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plotRhoT.py b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plotRhoT.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f02213ec2a66700d28ad5f8e57e00c30f3019d7
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plotRhoT.py
@@ -0,0 +1,163 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+# Computes the temperature evolution of the gas in a cosmological box
+
+# Physical constants needed for internal energy to temperature conversion
+k_in_J_K = 1.38064852e-23
+mH_in_kg = 1.6737236e-27
+
+import matplotlib
+
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+import sys
+import os.path
+
+# Plot parameters
+params = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 9,
+    "legend.fontsize": 9,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "text.usetex": True,
+    "figure.figsize": (3.15, 3.15),
+    "figure.subplot.left": 0.15,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.13,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.15,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 2.0,
+    "text.latex.unicode": True,
+}
+rcParams.update(params)
+rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+snap = int(sys.argv[1])
+
+# Read the simulation data
+sim = h5py.File("snap_%04d.hdf5" % snap, "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+time = sim["/Header"].attrs["Time"][0]
+z = sim["/Cosmology"].attrs["Redshift"][0]
+a = sim["/Cosmology"].attrs["Scale-factor"][0]
+scheme = sim["/HydroScheme"].attrs["Scheme"][0]
+kernel = sim["/HydroScheme"].attrs["Kernel function"][0]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"][0]
+eta = sim["/HydroScheme"].attrs["Kernel eta"][0]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"][0]
+H_mass_fraction = sim["/HydroScheme"].attrs["Hydrogen mass fraction"][0]
+H_transition_temp = sim["/HydroScheme"].attrs[
+    "Hydrogen ionization transition temperature"
+][0]
+T_initial = sim["/HydroScheme"].attrs["Initial temperature"][0]
+T_minimal = sim["/HydroScheme"].attrs["Minimal temperature"][0]
+git = sim["Code"].attrs["Git Revision"]
+
+# Cosmological parameters
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"][0]
+gas_gamma = sim["/HydroScheme"].attrs["Adiabatic index"][0]
+
+unit_length_in_cgs = sim["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = sim["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = sim["/Units"].attrs["Unit time in cgs (U_t)"]
+
+unit_length_in_si = 0.01 * unit_length_in_cgs
+unit_mass_in_si = 0.001 * unit_mass_in_cgs
+unit_time_in_si = unit_time_in_cgs
+
+# Primordial mean molecular weight as a function of temperature
+def mu(T, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    if T > T_trans:
+        return 4.0 / (8.0 - 5.0 * (1.0 - H_frac))
+    else:
+        return 4.0 / (1.0 + 3.0 * H_frac)
+
+
+# Temperature of some primordial gas with a given internal energy
+def T(u, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    T_over_mu = (gas_gamma - 1.0) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = T_over_mu > (T_trans + 1) / mu(T_trans + 1, H_frac, T_trans)
+    if np.sum(mask_ionized) > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans * 10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = T_over_mu < (T_trans - 1) / mu((T_trans - 1), H_frac, T_trans)
+    if np.sum(mask_neutral) > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+
+    return ret
+
+
+rho = sim["/PartType0/Densities"][:]
+u = sim["/PartType0/InternalEnergies"][:]
+
+# Compute the temperature
+u *= unit_length_in_si ** 2 / unit_time_in_si ** 2
+u /= a ** (3 * (gas_gamma - 1.0))
+Temp = T(u)
+
+# Compute the physical density
+rho *= unit_mass_in_cgs / unit_length_in_cgs ** 3
+rho /= a ** 3
+rho /= mH_in_kg
+
+# Life is better in log-space
+log_T = np.log10(Temp)
+log_rho = np.log10(rho)
+
+
+# Make a 2D histogram
+log_rho_min = -6
+log_rho_max = 3
+log_T_min = 1
+log_T_max = 8
+
+bins_x = np.linspace(log_rho_min, log_rho_max, 54)
+bins_y = np.linspace(log_T_min, log_T_max, 54)
+H, _, _ = histogram2d(log_rho, log_T, bins=[bins_x, bins_y], density=True)
+
+
+# Plot the interesting quantities
+figure()
+
+pcolormesh(bins_x, bins_y, np.log10(H).T)
+
+text(-5, 8.0, "$z=%.2f$" % z)
+
+xticks(
+    [-5, -4, -3, -2, -1, 0, 1, 2, 3],
+    ["", "$10^{-4}$", "", "$10^{-2}$", "", "$10^0$", "", "$10^2$", ""],
+)
+yticks(
+    [2, 3, 4, 5, 6, 7, 8], ["$10^{2}$", "", "$10^{4}$", "", "$10^{6}$", "", "$10^8$"]
+)
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=2)
+xlim(-5.2, 3.2)
+ylim(1, 8.5)
+
+savefig("rhoT_%04d.png" % snap, dpi=200)
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plotTempEvolution.py b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plotTempEvolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e8cf9ea1082372d8e395c352f908c7ce693d99f
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plotTempEvolution.py
@@ -0,0 +1,195 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+# Computes the temperature evolution of the gas in a cosmological box
+
+# Physical constants needed for internal energy to temperature conversion
+k_in_J_K = 1.38064852e-23
+mH_in_kg = 1.6737236e-27
+
+# Number of snapshots generated
+n_snapshots = 200
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+import os.path
+
+# Plot parameters
+params = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 9,
+    "legend.fontsize": 9,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "text.usetex": True,
+    "figure.figsize": (3.15, 3.15),
+    "figure.subplot.left": 0.14,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.12,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.15,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 2.0,
+    "text.latex.unicode": True,
+}
+rcParams.update(params)
+rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+# Read the simulation data
+sim = h5py.File("snap_0000.hdf5", "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+time = sim["/Header"].attrs["Time"][0]
+scheme = sim["/HydroScheme"].attrs["Scheme"][0]
+kernel = sim["/HydroScheme"].attrs["Kernel function"][0]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"][0]
+eta = sim["/HydroScheme"].attrs["Kernel eta"][0]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"][0]
+H_mass_fraction = sim["/HydroScheme"].attrs["Hydrogen mass fraction"][0]
+H_transition_temp = sim["/HydroScheme"].attrs["Hydrogen ionization transition temperature"][0]
+T_initial = sim["/HydroScheme"].attrs["Initial temperature"][0]
+T_minimal = sim["/HydroScheme"].attrs["Minimal temperature"][0]
+git = sim["Code"].attrs["Git Revision"]
+cooling_model = sim["/SubgridScheme"].attrs["Cooling Model"]
+
+if cooling_model == "Constant Lambda":
+    Lambda = sim["/SubgridScheme"].attrs["Lambda/n_H^2 [cgs]"][0]   
+    
+# Cosmological parameters
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"][0]
+gas_gamma = sim["/HydroScheme"].attrs["Adiabatic index"][0]
+
+unit_length_in_cgs = sim["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = sim["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = sim["/Units"].attrs["Unit time in cgs (U_t)"]
+
+unit_length_in_si = 0.01 * unit_length_in_cgs
+unit_mass_in_si = 0.001 * unit_mass_in_cgs
+unit_time_in_si = unit_time_in_cgs
+
+# Primordial mean molecular weight as a function of temperature
+def mu(T, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    if T > T_trans:
+        return 4. / (8. - 5. * (1. - H_frac))
+    else:
+        return 4. / (1. + 3. * H_frac)
+    
+# Temperature of some primordial gas with a given internal energy
+def T(u, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    T_over_mu = (gas_gamma - 1.) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = (T_over_mu > (T_trans+1) / mu(T_trans+1, H_frac, T_trans))
+    if np.sum(mask_ionized)  > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans*10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = (T_over_mu < (T_trans-1) / mu((T_trans-1), H_frac, T_trans))
+    if np.sum(mask_neutral)  > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+        
+    return ret
+
+z = np.zeros(n_snapshots)
+a = np.zeros(n_snapshots)
+T_mean = np.zeros(n_snapshots)
+T_std = np.zeros(n_snapshots)
+T_log_mean = np.zeros(n_snapshots)
+T_log_std = np.zeros(n_snapshots)
+T_median = np.zeros(n_snapshots)
+T_min = np.zeros(n_snapshots)
+T_max = np.zeros(n_snapshots)
+
+# Loop over all the snapshots
+for i in range(n_snapshots):
+    sim = h5py.File("snap_%04d.hdf5"%i, "r")
+
+    z[i] = sim["/Cosmology"].attrs["Redshift"][0]
+    a[i] = sim["/Cosmology"].attrs["Scale-factor"][0]
+
+    u = sim["/PartType0/InternalEnergies"][:]
+
+    # Compute the temperature
+    u *= (unit_length_in_si**2 / unit_time_in_si**2)
+    u /= a[i]**(3 * (gas_gamma - 1.))
+    Temp = T(u)
+
+    # Gather statistics
+    T_median[i] = np.median(Temp)
+    T_mean[i] = Temp.mean()
+    T_std[i] = Temp.std()
+    T_log_mean[i] = np.log10(Temp).mean()
+    T_log_std[i] = np.log10(Temp).std()
+    T_min[i] = Temp.min()
+    T_max[i] = Temp.max()
+
+# CMB evolution
+a_evol = np.logspace(-3, 0, 60)
+T_cmb = (1. / a_evol)**2 * 2.72
+
+# Plot the interesting quantities
+figure()
+subplot(111, xscale="log", yscale="log")
+
+fill_between(a, T_mean-T_std, T_mean+T_std, color='C0', alpha=0.1)
+plot(a, T_max, ls='-.', color='C0', lw=1., label="${\\rm max}~T$")
+plot(a, T_min, ls=':', color='C0', lw=1., label="${\\rm min}~T$")
+plot(a, T_mean, color='C0', label="${\\rm mean}~T$", lw=1.5)
+fill_between(a, 10**(T_log_mean-T_log_std), 10**(T_log_mean+T_log_std), color='C1', alpha=0.1)
+plot(a, 10**T_log_mean, color='C1', label="${\\rm mean}~{\\rm log} T$", lw=1.5)
+plot(a, T_median, color='C2', label="${\\rm median}~T$", lw=1.5)
+
+legend(loc="upper left", frameon=False, handlelength=1.5)
+
+# Cooling model
+if cooling_model == "Constant Lambda":
+    text(1e-2, 6e4, "$\Lambda_{\\rm const}/n_{\\rm H}^2 = %.1f\\times10^{%d}~[\\rm{cgs}]$"%(Lambda/10.**(int(log10(Lambda))), log10(Lambda)), fontsize=7)
+elif cooling_model == "EAGLE":
+    text(1e-2, 6e4, "EAGLE (Wiersma et al. 2009)")
+elif cooling_model == b"Grackle":
+    text(1e-2, 6e4, "Grackle (Smith et al. 2016)")
+else:
+    text(1e-2, 6e4, "No cooling")
+    
+# Expected lines
+plot([1e-10, 1e10], [H_transition_temp, H_transition_temp], 'k--', lw=0.5, alpha=0.7)
+text(2.5e-2, H_transition_temp*1.07, "$T_{\\rm HII\\rightarrow HI}$", va="bottom", alpha=0.7, fontsize=8)
+plot([1e-10, 1e10], [T_minimal, T_minimal], 'k--', lw=0.5, alpha=0.7)
+text(1e-2, T_minimal*0.8, "$T_{\\rm min}$", va="top", alpha=0.7, fontsize=8)
+plot(a_evol, T_cmb, 'k--', lw=0.5, alpha=0.7)
+text(a_evol[20], T_cmb[20]*0.55, "$(1+z)^2\\times T_{\\rm CMB,0}$", rotation=-34, alpha=0.7, fontsize=8, va="top", bbox=dict(facecolor='w', edgecolor='none', pad=1.0, alpha=0.9))
+
+
+redshift_ticks = np.array([0., 1., 2., 5., 10., 20., 50., 100.])
+redshift_labels = ["$0$", "$1$", "$2$", "$5$", "$10$", "$20$", "$50$", "$100$"]
+a_ticks = 1. / (redshift_ticks + 1.)
+
+xticks(a_ticks, redshift_labels)
+minorticks_off()
+
+xlabel("${\\rm Redshift}~z$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=0)
+xlim(9e-3, 1.1)
+ylim(5, 2.5e7)
+
+savefig("Temperature_evolution.png", dpi=200)
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plot_healpix_map.py b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plot_healpix_map.py
new file mode 100644
index 0000000000000000000000000000000000000000..15b010b2a198bdc1730b7f57829c7d3cd8c75dec
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/plot_healpix_map.py
@@ -0,0 +1,25 @@
+#!/bin/env python
+
+import h5py
+import numpy as np
+import healpy as hp
+import matplotlib.pyplot as plt
+
+import read_lightcone as rl
+
+plt.subplot(2,2,1)
+totalmass_map = rl.read_map("./lightcones/", "lightcone0", shell_nr=0, map_name="TotalMass")
+hp.mollview(totalmass_map+1, norm="log", title="Projected mass", hold=True)
+
+plt.subplot(2,2,2)
+gasmass_map = rl.read_map("./lightcones/", "lightcone0", shell_nr=0, map_name="SmoothedGasMass")
+hp.mollview(gasmass_map+1, norm="log", title="Gas mass", hold=True)
+
+plt.subplot(2,2,3)
+stellarmass_map = rl.read_map("./lightcones/", "lightcone0", shell_nr=0, map_name="StellarMass")
+hp.mollview(stellarmass_map+1, norm="log", title="Stellar mass", hold=True)
+
+plt.subplot(2,2,4)
+xray_map = rl.read_map("./lightcones/", "lightcone0", shell_nr=0, map_name="XrayROSATIntrinsicPhotons")
+hp.mollview(xray_map+1e50, norm="log", title="ROSAT photons", hold=True)
+
+plt.suptitle("SmallCosmoVolume lightcone")
+plt.show()
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/read_lightcone.py b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/read_lightcone.py
new file mode 100644
index 0000000000000000000000000000000000000000..60b25c62009de88f1462495451ff068300ca954c
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/read_lightcone.py
@@ -0,0 +1,66 @@
+#!/bin/env python
+
+import h5py
+import numpy as np
+import healpy as hp
+
+
+def read_map(basedir, basename, shell_nr, map_name, return_sum=False):
+    """
+    Read the specified healpix map for a lightcone shell
+    """
+
+    # Open the index file to determine number of files to read
+    fname = "%s/%s_index.hdf5" % (basedir, basename)
+    with h5py.File(fname, "r") as infile:
+        nr_files_per_shell = infile["Lightcone"].attrs["nr_files_per_shell"][0]
+
+    # Read the pixel data
+    data = []
+    for file_nr in range(nr_files_per_shell):
+        fname = ("%s/%s_shells/shell_%d/%s.shell_%d.%d.hdf5" % 
+                 (basedir, basename, shell_nr, basename, shell_nr, file_nr))
+        with h5py.File(fname, "r") as infile:
+            data.append(infile[map_name][...])
+            if file_nr == 0 and return_sum:
+                expected_sum = infile[map_name].attrs["expected_sum"]
+
+    data = np.concatenate(data)
+
+    if return_sum:
+        return data, expected_sum
+    else:
+        return data
+
+
+def read_particles(basedir, basename, part_type, properties):
+    """
+    Read particle data from a lightcone
+    """
+
+    # Open the index file to determine number of files to read
+    fname = "%s/%s_index.hdf5" % (basedir, basename)
+    with h5py.File(fname, "r") as infile:
+        final_file_on_rank = infile["Lightcone"].attrs["final_particle_file_on_rank"]
+        nr_mpi_ranks = infile["Lightcone"].attrs["nr_mpi_ranks"][0]
+
+    # Make a dict to store the result
+    data = {prop_name : [] for prop_name in properties}
+
+    # Loop over MPI ranks
+    for rank_nr in range(nr_mpi_ranks):
+        # Loop over files written by this rank
+        for file_nr in range(final_file_on_rank[rank_nr]+1):
+            fname = "%s/%s_particles/%s_%04d.%d.hdf5" % (basedir, basename, basename, file_nr, rank_nr)
+            with h5py.File(fname, "r") as infile:
+                for prop_name in properties:
+                    if part_type in infile:
+                        data[prop_name].append(infile[part_type][prop_name][...])
+
+    # Combine arrays from files
+    for prop_name in properties:
+        data[prop_name] = np.concatenate(data[prop_name])
+
+    return data
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/run.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bd87ae715d751f83b92496ad8cdb2dfd930b0316
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/run.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e small_cosmo_volume.hdf5 ]
+then
+    echo "Fetching initial conditions for the small cosmological volume example..."
+    ./getIC.sh
+fi
+
+if [ ! -e UV_dust1_CR1_G1_shield1.hdf5 ]
+then
+    echo "Fetching cooling tables for the small cosmological volume example..."
+    ./getColibreCoolingTables.sh
+fi
+
+if [ ! -e photometry ]
+then
+    echo "Fetching photometry tables for the small cosmological volume example..."
+    ./getEaglePhotometryTable.sh
+fi
+
+if [ ! -e yieldtables ]
+then
+    echo "Fetching yield tables for the small cosmological volume example..."
+    ./getEagleYieldTable.sh
+fi
+
+if [ ! -e X_Ray_tables.13072021.hdf5 ]
+then
+    echo "Fetching X ray tables for the small cosmological volume example..."
+    ./getXrayTables.sh
+fi
+
+
+# Run SWIFT
+../../swift --cosmology --eagle --lightcone --pin --threads=8 \
+    small_cosmo_volume.yml 2>&1 | tee output.log
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/shell_redshifts.txt b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/shell_redshifts.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2922d99dc924b2a2bf9e2dec1979c8fcfea792a2
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/shell_redshifts.txt
@@ -0,0 +1,3 @@
+# Minimum redshift, Maximum redshift
+0.0,  0.05
+0.05, 0.25
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/small_cosmo_volume.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/small_cosmo_volume.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9bf9901583337abacdba2322ec33ac3c4eb7ef50
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_lightcone/small_cosmo_volume.yml
@@ -0,0 +1,289 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98841e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+Cosmology:                      # WMAP9 cosmology
+  Omega_cdm:      0.2305
+  Omega_lambda:   0.724
+  Omega_b:        0.0455
+  h:              0.703
+  a_begin:        0.019607843	# z_ini = 50.
+  a_end:          1.0		# z_end = 0.
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-8
+  dt_max:     1e-2 
+
+# Parameters governing the snapshots
+Snapshots:
+  subdir:              snapshots
+  basename:            snap
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.01
+  scale_factor_first:  0.02
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:          0.025
+  MAC:          adaptive
+  theta_cr:     0.7
+  epsilon_fmm:  0.001
+  comoving_DM_softening:         0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  max_physical_DM_softening:     0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  comoving_baryon_softening:     0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  max_physical_baryon_softening: 0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  mesh_side_length:       64
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:                    1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:                       0.01     # Minimal smoothing length in units of softening.
+  h_max:                             5.0      # Maximal smoothing length in co-moving internal units.
+  CFL_condition:                     0.2      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:               100.0    # (internal units)
+  initial_temperature:               268.7    # (internal units)
+  particle_splitting:                1        # Particle splitting is ON
+  particle_splitting_mass_threshold: 23.07    # (internal units, ~ 4x initial gas particle mass)
+
+# Parameters of the stars neighbour search
+Stars:
+  resolution_eta:        1.1642   # Target smoothing length in units of the mean inter-particle separation
+  h_tolerance:           7e-3
+  luminosity_filename:   ./photometry
+
+# Parameters for the Friends-Of-Friends algorithm
+FOF:
+  basename:                        fof_output  # Filename for the FOF outputs.
+  min_group_size:                  32          # The minimum no. of particles required for a group.
+  linking_length_ratio:            0.2         # Linking length in units of the main inter-particle separation.
+  seed_black_holes_enabled:        1           # Enable seeding of black holes in FoF groups
+  black_hole_seed_halo_mass_Msun:  1.0e10      # Minimal halo mass in which to seed a black hole (in solar masses).
+  scale_factor_first:              0.05        # Scale-factor of first FoF black hole seeding calls.
+  delta_time:                      1.00751     # Scale-factor ratio between consecutive FoF black hole seeding calls.
+
+Scheduler:
+  max_top_level_cells: 8
+  cell_split_size:     50
+
+Restarts:
+  onexit:       1
+  delta_hours:  6.0
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  small_cosmo_volume.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
+  generate_gas_in_ics:         1    # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths:   1    # Since we generate gas, make use of the (expensive) cleaning-up procedure.
+
+# Impose primoridal metallicity
+EAGLEChemistry:
+  init_abundance_metal:     0.
+  init_abundance_Hydrogen:  0.752
+  init_abundance_Helium:    0.248
+  init_abundance_Carbon:    0.0
+  init_abundance_Nitrogen:  0.0
+  init_abundance_Oxygen:    0.0
+  init_abundance_Neon:      0.0
+  init_abundance_Magnesium: 0.0
+  init_abundance_Silicon:   0.0
+  init_abundance_Iron:      0.0
+
+# EAGLE cooling parameters
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               7.5                 # Planck 2018
+  H_reion_eV_p_H:          2.0
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+# COLIBRE cooling parameters
+COLIBRECooling:
+  dir_name:                ./UV_dust1_CR1_G1_shield1.hdf5 # Location of the cooling tables
+  H_reion_z:               7.5               # Redshift of Hydrogen re-ionization (Planck 2018)
+  H_reion_eV_p_H:          2.0
+  He_reion_z_centre:       3.5               # Redshift of the centre of the Helium re-ionization Gaussian
+  He_reion_z_sigma:        0.5               # Spread in redshift of the  Helium re-ionization Gaussian
+  He_reion_eV_p_H:         2.0               # Energy inject by Helium re-ionization in electron-volt per Hydrogen atom
+  delta_logTEOS_subgrid_properties: 0.3      # delta log T above the EOS below which the subgrid properties use Teq assumption
+  rapid_cooling_threshold:          0.333333 # Switch to rapid cooling regime for dt / t_cool above this threshold.
+
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  SF_threshold:                      Subgrid      # Zdep (Schaye 2004) or Subgrid
+  SF_model:                          PressureLaw  # PressureLaw (Schaye et al. 2008) or SchmidtLaw
+  KS_normalisation:                  1.515e-4     # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4          # The exponent of the Kennicutt-Schmidt law.
+  min_over_density:                  100.0        # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e8          # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0          # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  EOS_entropy_margin_dex:            0.3          # When using Z-based SF threshold, logarithm base 10 of the maximal entropy above the EOS at which stars can form.
+  threshold_norm_H_p_cm3:            0.1          # When using Z-based SF threshold, normalisation of the metal-dependent density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002        # When using Z-based SF threshold, reference metallicity (metal mass fraction) for the metal-dependent threshold for star formation.
+  threshold_slope:                   -0.64        # When using Z-based SF threshold, slope of the metal-dependent star formation threshold.
+  threshold_max_density_H_p_cm3:     10.0         # When using Z-based SF threshold, maximal density of the metal-dependent density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_temperature1_K:          1000         # When using subgrid-based SF threshold, subgrid temperature below which gas is star-forming.
+  threshold_temperature2_K:          31622        # When using subgrid-based SF threshold, subgrid temperature below which gas is star-forming if also above the density limit.
+  threshold_number_density_H_p_cm3:  10           # When using subgrid-based SF threshold, subgrid number density above which gas is star-forming if also below the second temperature limit.
+
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 1e-4      # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        800       # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        10.        # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin. (NOTE: This is below the min T and hence this floor does nothing)
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
+
+# EAGLE feedback model
+EAGLEFeedback:
+  use_SNII_feedback:                    1               # Global switch for SNII thermal (stochastic) feedback.
+  use_SNIa_feedback:                    1               # Global switch for SNIa thermal (continuous) feedback.
+  use_AGB_enrichment:                   1               # Global switch for enrichment from AGB stars.
+  use_SNII_enrichment:                  1               # Global switch for enrichment from SNII stars.
+  use_SNIa_enrichment:                  1               # Global switch for enrichment from SNIa stars.
+  filename:                             ./yieldtables/  # Path to the directory containing the EAGLE yield tables.
+  IMF_min_mass_Msun:                    0.1             # Minimal stellar mass considered for the Chabrier IMF in solar masses.
+  IMF_max_mass_Msun:                  100.0             # Maximal stellar mass considered for the Chabrier IMF in solar masses.
+  SNII_min_mass_Msun:                   8.0             # Minimal mass considered for SNII stars in solar masses.
+  SNII_max_mass_Msun:                 100.0             # Maximal mass considered for SNII stars in solar masses.
+  SNII_feedback_model:                  MinimumDistance # Feedback modes: Random, Isotropic, MinimumDistance, MinimumDensity
+  SNII_sampled_delay:                   1               # Sample the SNII lifetimes to do feedback.
+  SNII_delta_T_K:                       3.16228e7       # Change in temperature to apply to the gas particle in a SNII thermal feedback event in Kelvin.
+  SNII_energy_erg:                      1.0e51          # Energy of one SNII explosion in ergs.
+  SNII_energy_fraction_function:        Independent     # Type of functional form to use for scaling the energy fraction with density and metallicity ('EAGLE', 'Separable', or 'Independent').
+  SNII_energy_fraction_min:             0.5             # Minimal fraction of energy applied in a SNII feedback event.
+  SNII_energy_fraction_max:             1.0             # Maximal fraction of energy applied in a SNII feedback event.
+  SNII_energy_fraction_delta_E_n:       6.0             # Maximal energy increase due to high density (only used if SNII_energy_fraction_function is 'Independent').
+  SNII_energy_fraction_Z_0:             0.0012663729    # Pivot point for the metallicity dependence of the SNII energy fraction (metal mass fraction).
+  SNII_energy_fraction_n_0_H_p_cm3:     1.4588          # Pivot point for the birth density dependence of the SNII energy fraction in cm^-3.
+  SNII_energy_fraction_n_Z:             0.8686          # Power-law for the metallicity dependence of the SNII energy fraction.
+  SNII_energy_fraction_n_n:             0.8686          # Power-law for the birth density dependence of the SNII energy fraction.
+  SNII_energy_fraction_use_birth_density: 0             # Are we using the density at birth to compute f_E or at feedback time?
+  SNII_energy_fraction_use_birth_metallicity: 0         # Are we using the metallicity at birth to compute f_E or at feedback time?
+  SNIa_DTD:                             Exponential     # Functional form of the SNIa delay time distribution.
+  SNIa_DTD_delay_Gyr:                   0.04            # Stellar age after which SNIa start in Gyr (40 Myr corresponds to stars ~ 8 Msun).
+  SNIa_DTD_exp_timescale_Gyr:           2.0             # Time-scale of the exponential decay of the SNIa rates in Gyr.
+  SNIa_DTD_exp_norm_p_Msun:             0.002           # Normalisation of the SNIa rates in inverse solar masses.
+  SNIa_energy_erg:                     1.0e51           # Energy of one SNIa explosion in ergs.
+  AGB_ejecta_velocity_km_p_s:          10.0             # Velocity of the AGB ejecta in km/s.
+  stellar_evolution_age_cut_Gyr:        0.1             # Stellar age in Gyr above which the enrichment is down-sampled.
+  stellar_evolution_sampling_rate:       10             # Number of time-steps in-between two enrichment events for a star above the age threshold.
+  SNII_yield_factor_Hydrogen:           1.0             # (Optional) Correction factor to apply to the Hydrogen yield from the SNII channel.
+  SNII_yield_factor_Helium:             1.0             # (Optional) Correction factor to apply to the Helium yield from the SNII channel.
+  SNII_yield_factor_Carbon:             0.5             # (Optional) Correction factor to apply to the Carbon yield from the SNII channel.
+  SNII_yield_factor_Nitrogen:           1.0             # (Optional) Correction factor to apply to the Nitrogen yield from the SNII channel.
+  SNII_yield_factor_Oxygen:             1.0             # (Optional) Correction factor to apply to the Oxygen yield from the SNII channel.
+  SNII_yield_factor_Neon:               1.0             # (Optional) Correction factor to apply to the Neon yield from the SNII channel.
+  SNII_yield_factor_Magnesium:          2.0             # (Optional) Correction factor to apply to the Magnesium yield from the SNII channel.
+  SNII_yield_factor_Silicon:            1.0             # (Optional) Correction factor to apply to the Silicon yield from the SNII channel.
+  SNII_yield_factor_Iron:               0.5             # (Optional) Correction factor to apply to the Iron yield from the SNII channel.
+
+# EAGLE AGN model
+EAGLEAGN:
+  subgrid_seed_mass_Msun:             1.0e4           # Black hole subgrid mass at creation time in solar masses.
+  use_multi_phase_bondi:              0               # Compute Bondi rates per neighbour particle?
+  use_subgrid_bondi:                  0               # Compute Bondi rates using the subgrid extrapolation of the gas properties around the BH?
+  with_angmom_limiter:                0               # Are we applying the Rosas-Guevara et al. (2015) viscous time-scale reduction term?
+  viscous_alpha:                      1e6             # Normalisation constant of the viscous time-scale in the accretion reduction term
+  with_boost_factor:                  0               # Are we using the model from Booth & Schaye (2009)?
+  boost_alpha_only:                   0               # If using the boost factor, are we using a constant boost only?
+  boost_alpha:                        1.              # Lowest value for the accretion efficiency for the Booth & Schaye 2009 accretion model.
+  boost_beta:                         2.              # Slope of the power law for the Booth & Schaye 2009 model, set beta to zero for constant alpha models.
+  boost_n_h_star_H_p_cm3:             0.1             # Normalization of the power law for the Booth & Schaye 2009 model in cgs (cm^-3).
+  with_fixed_T_near_EoS:              0               # Are we using a fixed temperature to compute the sound-speed of gas on the entropy floor in the Bondi-Hoyle accretion term?
+  fixed_T_above_EoS_dex:              0.3             # Distance above the entropy floor for which we use a fixed sound-speed.
+  fixed_T_near_EoS_K:                 8000            # Fixed temperature assumed to compute the sound-speed of gas on the entropy floor in the Bondi-Hoyle accretion term.
+  radiative_efficiency:               0.1             # Fraction of the accreted mass that gets radiated.
+  use_nibbling:                       1               # Continuously transfer small amounts of mass from all gas neighbours to a black hole [1] or stochastically swallow whole gas particles [0]?
+  min_gas_mass_for_nibbling_Msun:     7.2e6           # Minimum mass for a gas particle to be nibbled from [M_Sun]. Only used if use_nibbling is 1.
+  max_eddington_fraction:             1.              # Maximal allowed accretion rate in units of the Eddington rate.
+  eddington_fraction_for_recording:   0.1             # Record the last time BHs reached an Eddington ratio above this threshold.
+  coupling_efficiency:                0.1             # Fraction of the radiated energy that couples to the gas in feedback events.
+  AGN_feedback_model:                 MinimumDistance # Feedback modes: Random, Isotropic, MinimumDistance, MinimumDensity
+  AGN_use_deterministic_feedback:     1               # Deterministic (reservoir) [1] or stochastic [0] AGN feedback?
+  use_variable_delta_T:               1               # Switch to enable adaptive calculation of AGN dT [1], rather than using a constant value [0].
+  AGN_with_locally_adaptive_delta_T:  1               # Switch to enable additional dependence of AGN dT on local gas density and temperature (only used if use_variable_delta_T is 1).
+  AGN_delta_T_mass_norm:              3e8             # Normalisation temperature of AGN dT scaling with BH subgrid mass [K] (only used if use_variable_delta_T is 1).
+  AGN_delta_T_mass_reference:         1e8             # BH subgrid mass at which the normalisation temperature set above applies [M_Sun] (only used if use_variable_delta_T is 1).
+  AGN_delta_T_mass_exponent:          0.666667        # Power-law index of AGN dT scaling with BH subgrid mass (only used if use_variable_delta_T is 1).
+  AGN_delta_T_crit_factor:            1.0             # Multiple of critical dT for numerical efficiency (Dalla Vecchia & Schaye 2012) to use as dT floor (only used if use_variable_delta_T and AGN_with_locally_adaptive_delta_T are both 1).
+  AGN_delta_T_background_factor:      0.0             # Multiple of local gas temperature to use as dT floor (only used if use_variable_delta_T and AGN_with_locally_adaptive_delta_T are both 1).
+  AGN_delta_T_min:                    1e7             # Minimum allowed value of AGN dT [K] (only used if use_variable_delta_T is 1).
+  AGN_delta_T_max:                    3e9             # Maximum allowed value of AGN dT [K] (only used if use_variable_delta_T is 1).
+  AGN_delta_T_K:                      3.16228e8       # Change in temperature to apply to the gas particle in an AGN feedback event [K] (used if use_variable_delta_T is 0 or AGN_use_nheat_with_fixed_dT is 1 AND to initialise the BHs).
+  AGN_use_nheat_with_fixed_dT:        0               # Switch to use the constant AGN dT, rather than the adaptive one, for calculating the energy reservoir threshold.
+  AGN_use_adaptive_energy_reservoir_threshold: 0      # Switch to calculate an adaptive AGN energy reservoir threshold.
+  AGN_num_ngb_to_heat:                1.              # Target number of gas neighbours to heat in an AGN feedback event (only used if AGN_use_adaptive_energy_reservoir_threshold is 0).
+  max_reposition_mass:                1e20            # Maximal BH mass considered for BH repositioning in solar masses (large number implies we always reposition).
+  max_reposition_distance_ratio:      3.0             # Maximal distance a BH can be repositioned, in units of the softening length.
+  with_reposition_velocity_threshold: 0               # Should we only reposition to particles that move slowly w.r.t. the black hole?
+  max_reposition_velocity_ratio:      0.5             # Maximal velocity offset of a particle to reposition a BH to, in units of the ambient sound speed of the BH. Only meaningful if with_reposition_velocity_threshold is 1.
+  min_reposition_velocity_threshold: -1.0             # Minimal value of the velocity threshold for repositioning [km/s], set to < 0 for no effect. Only meaningful if with_reposition_velocity_threshold is 1.
+  set_reposition_speed:               0               # Should we reposition black holes with (at most) a prescribed speed towards the potential minimum?
+  threshold_major_merger:             0.333           # Mass ratio threshold to consider a BH merger as 'major'
+  threshold_minor_merger:             0.1             # Mass ratio threshold to consider a BH merger as 'minor'
+  merger_threshold_type:              DynamicalEscapeVelocity # Type of velocity threshold for BH mergers ('CircularVelocity', 'EscapeVelocity' or 'DynamicalEscapeVelocity').
+  merger_max_distance_ratio:          3.0             # Maximal distance over which two BHs can merge, in units of the softening length.
+  minimum_timestep_Myr:               0.1             # Minimum of the accretion-limited time-step length.
+  with_potential_correction:          1               # Subtract the BH's own contribution to the potential when evaluating repositioning targets?
+
+XrayEmissivity:
+  xray_table_path:         ./X_Ray_tables.13072021.hdf5   # Path to the X-ray emissivity tables
+
+# Parameters common to all lightcones
+LightconeCommon:
+
+  subdir:            lightcones   # All lightcone output is written to this directory
+  buffer_chunk_size: 10000        # Particles and map updates are buffered in a linked list of chunks of this size
+
+  z_range_for_DM:     [0.0, 0.05] # Output redshift range for dark matter
+  z_range_for_Gas:    [0.0, 0.05] # Output redshift range for gas
+  z_range_for_Stars:  [0.0, 0.05] # Output redshift range for stars
+  z_range_for_BH:     [0.0, 0.05] # Output redshift range for black holes
+
+  max_particles_buffered: 100000  # Output particles if buffer size reaches this value
+  max_updates_buffered:   100000  # Flush map updates if buffer size reaches this value
+  hdf5_chunk_size:        16384   # Chunk size for HDF5 particle and healpix map datasets
+
+  nside:                512                    # Healpix resolution parameter
+  radius_file:          ./shell_redshifts.txt  # Redshifts of shells for healpix maps
+  max_map_update_send_size_mb: 16.0            # Apply map updates over multiple iterations to limit memory overhead
+  map_names_file:       ./map_types.txt        # List of types of healpix maps to make
+
+  distributed_maps:   1           # Split maps over multiple files (1) or use collective I/O to write one file (0)
+
+  particles_lossy_compression: 0  # Apply lossy compression to lightcone particles
+  particles_gzip_level:        6  # Apply lossless (deflate) compression to lightcone particles
+  maps_gzip_level:             6  # Apply lossless (deflate) compression to healpix maps
+
+# Parameters specific to lightcone 0
+Lightcone0:
+  enabled: 1                                           # Enable this lightcone
+  basename: lightcone0                                 # Base name of this lightcone's output files
+  observer_position: [35.561875, 35.561875, 35.561875] # Location of the observer in this lightcone
+
+# Parameters specific to lightcone 1
+Lightcone1:
+  enabled: 1
+  basename: lightcone1
+  observer_position: [35.561875, 35.561875, 35.561875]
+
+  gas_filtering_enabled:      1      # Enable filtering out of certain gas particles from particle outputs
+  min_z_for_gas_filtering:    0.025  # Filter gas particles above this redshift, output all below
+  min_temp_for_filtered_gas:  1.0e5  # Above min_z_for_gas_filtering only output gas with temperature above this
+  min_nh_for_filtered_gas:    1.0e-6 # Above min_z_for_gas_filtering only output gas with nh/(1+z)^4 above this
diff --git a/examples/main.c b/examples/main.c
index a4909ffc39036b39f9e87fd5d86cda6edaef4681..2b6027dbc24ddd755cda36f4859e5d0be821250e 100644
--- a/examples/main.c
+++ b/examples/main.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[]) {
   struct cooling_function_data cooling_func;
   struct cosmology cosmo;
   struct external_potential potential;
+  struct extra_io_properties extra_io_props;
   struct star_formation starform;
   struct pm_mesh mesh;
   struct gpart *gparts = NULL;
@@ -104,6 +105,7 @@ int main(int argc, char *argv[]) {
   struct entropy_floor_properties entropy_floor;
   struct black_holes_props black_holes_properties;
   struct fof_props fof_properties;
+  struct lightcone_array_props lightcone_array_properties;
   struct part *parts = NULL;
   struct phys_const prog_const;
   struct space s;
@@ -168,6 +170,7 @@ int main(int argc, char *argv[]) {
   int with_hydro = 0;
   int with_stars = 0;
   int with_fof = 0;
+  int with_lightcone = 0;
   int with_star_formation = 0;
   int with_feedback = 0;
   int with_black_holes = 0;
@@ -237,6 +240,9 @@ int main(int argc, char *argv[]) {
           'u', "fof", &with_fof,
           "Run Friends-of-Friends algorithm to perform black hole seeding.",
           NULL, 0, 0),
+
+      OPT_BOOLEAN(0, "lightcone", &with_lightcone,
+                  "Generate lightcone outputs.", NULL, 0, 0),
       OPT_BOOLEAN('x', "velociraptor", &with_structure_finding,
                   "Run with structure finding.", NULL, 0, 0),
       OPT_BOOLEAN(0, "line-of-sight", &with_line_of_sight,
@@ -539,6 +545,14 @@ int main(int argc, char *argv[]) {
     return 1;
   }
 
+  if (with_lightcone) {
+#ifndef WITH_LIGHTCONE
+    error("Running with lightcone output but compiled without it!");
+#endif
+    if (!with_cosmology)
+      error("Cannot make lightcones without --cosmology.");
+  }
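+  /* (A typical invocation with lightcones enabled might look like:
+   *    swift --cosmology --hydro --lightcone parameter_file.yml
+   *  the exact set of flags depends on the rest of the run configuration.) */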
+
   if (!with_stars && with_star_formation) {
     if (myrank == 0) {
       argparse_usage(&argparse);
@@ -1133,6 +1147,10 @@ int main(int argc, char *argv[]) {
     chemistry_init(params, &us, &prog_const, &chemistry);
     if (myrank == 0) chemistry_print(&chemistry);
 
+    /* Initialise the extra i/o */
+    bzero(&extra_io_props, sizeof(struct extra_io_properties));
+    extra_io_init(params, &us, &prog_const, &cosmo, &extra_io_props);
+
     /* Initialise the FOF properties */
     bzero(&fof_properties, sizeof(struct fof_props));
 #ifdef WITH_FOF
@@ -1308,6 +1326,16 @@ int main(int argc, char *argv[]) {
     /* Initialise the line of sight properties. */
     if (with_line_of_sight) los_init(s.dim, &los_properties, params);
 
+    /* Initialise the lightcone properties */
+    bzero(&lightcone_array_properties, sizeof(struct lightcone_array_props));
+#ifdef WITH_LIGHTCONE
+    if (with_lightcone)
+      lightcone_array_init(&lightcone_array_properties, &s, &cosmo, params, &us,
+                           &prog_const, verbose);
+    else
+      lightcone_array_properties.nr_lightcones = 0;
+#endif
+
     if (myrank == 0) {
       clocks_gettime(&toc);
       message("space_init took %.3f %s.", clocks_diff(&tic, &toc),
@@ -1454,7 +1482,8 @@ int main(int argc, char *argv[]) {
                 &black_holes_properties, &sink_properties, &neutrino_properties,
                 &neutrino_response, &feedback_properties, &rt_properties, &mesh,
                 &potential, &cooling_func, &starform, &chemistry,
-                &fof_properties, &los_properties, &ics_metadata);
+                &extra_io_props, &fof_properties, &los_properties,
+                &lightcone_array_properties, &ics_metadata);
     engine_config(/*restart=*/0, /*fof=*/0, &e, params, nr_nodes, myrank,
                   nr_threads, nr_pool_threads, with_aff, talking, restart_file);
 
@@ -1789,6 +1818,16 @@ int main(int argc, char *argv[]) {
         velociraptor_invoke(&e, /*linked_with_snap=*/0);
     }
 #endif
+
+    /* Write out any remaining lightcone data at the end of the run */
+#ifdef WITH_LIGHTCONE
+    lightcone_array_flush(e.lightcone_array_properties, &(e.threadpool),
+                          e.cosmology, e.internal_units, e.snapshot_units,
+                          /*flush_map_updates=*/1, /*flush_particles=*/1,
+                          /*end_file=*/1, /*dump_all_shells=*/1);
+    lightcone_array_write_index(e.lightcone_array_properties, e.internal_units,
+                                e.snapshot_units);
+#endif
   }
 
   /* Remove the stop file if used. Do this anyway, we could have missed the
@@ -1813,7 +1852,9 @@ int main(int argc, char *argv[]) {
   if (with_stars) stars_props_clean(e.stars_properties);
   if (with_cooling || with_temperature) cooling_clean(e.cooling_func);
   if (with_feedback) feedback_clean(e.feedback_props);
+  if (with_lightcone) lightcone_array_clean(e.lightcone_array_properties);
   if (with_rt) rt_clean(e.rt_props, restart);
+  extra_io_clean(e.io_extra_props);
   engine_clean(&e, /*fof=*/0, restart);
   free(params);
   free(output_options);
diff --git a/examples/main_fof.c b/examples/main_fof.c
index 039b464e2b4c6f98895b7274af41def79aca6441..05aa9b377d8d0d627f4c744abe4ff3d728d8fac4 100644
--- a/examples/main_fof.c
+++ b/examples/main_fof.c
@@ -653,7 +653,8 @@ int main(int argc, char *argv[]) {
       /*neutrino_response=*/NULL, /*feedback_properties=*/NULL,
       /*rt_properties=*/NULL, &mesh, /*potential=*/NULL,
       /*cooling_func=*/NULL, /*starform=*/NULL, /*chemistry=*/NULL,
-      &fof_properties, /*los_properties=*/NULL, &ics_metadata);
+      /*extra_io_props=*/NULL, &fof_properties, /*los_properties=*/NULL,
+      /*lightcone_properties=*/NULL, &ics_metadata);
   engine_config(/*restart=*/0, /*fof=*/1, &e, params, nr_nodes, myrank,
                 nr_threads, nr_threads, with_aff, talking, NULL);
 
diff --git a/examples/parameter_example.yml b/examples/parameter_example.yml
index 8f7045d0e0ca46aca63794d2ad2d11159cee9ac0..f5ef01efd41dbd8305bf45d3348f338b4864e934 100644
--- a/examples/parameter_example.yml
+++ b/examples/parameter_example.yml
@@ -667,6 +667,11 @@ Neutrino:
   fixed_bg_density: 1                            # For linear response neutrinos, whether to use a fixed present-day background density
   use_model_none: 0                              # Option to use no neutrino model
 
+# Parameters related to extra i/o (X-ray emissivity)  ----------------------------
+
+XrayEmissivity:
+  xray_table_path:         ./X_Ray_tables.hdf5   # Path to the X-ray emissivity tables
+
 # Parameters related to the sink particles ---------------------------------------
 
 # Default sink particles
@@ -709,3 +714,49 @@ SPHM1RT:
     photon_groups_Hz: [3.288e15, 5.945e15, 13.157e15]   # Photon frequency group bin edges in Hz. Needs to be 1 less than the number of groups (N) requested during the configuration (--with-RT=SPHM1RT_N). Outer edges of zero and infinity are assumed.
 
 
+# Parameters related to lightcones  -----------------------------------------------
+# Parameters in the LightconeCommon section apply to all lightcones but can be overridden in the LightconeX sections.
+# Up to 8 Lightcone sections named Lightcone0 to Lightcone7 may be present.
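+# Lightcone output must be enabled at configure time (--enable-lightcone) and at run time via
+# the --lightcone command-line flag, and requires running with --cosmology.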
+LightconeCommon:
+
+  subdir:            lightcones   # All lightcone output is written to this directory
+  buffer_chunk_size: 10000        # Particles and map updates are buffered in a linked list of chunks of this size
+
+  z_range_for_DM:     [0.0, 0.05] # Output redshift range for dark matter
+  z_range_for_Gas:    [0.0, 0.05] # Output redshift range for gas
+  z_range_for_Stars:  [0.0, 0.05] # Output redshift range for stars
+  z_range_for_BH:     [0.0, 0.05] # Output redshift range for black holes
+
+  max_particles_buffered: 100000  # Output particles if buffer size reaches this value
+  max_updates_buffered:   100000  # Flush map updates if buffer size reaches this value
+  hdf5_chunk_size:        16384   # Chunk size for HDF5 particle and healpix map datasets
+
+  nside:                512                    # Healpix resolution parameter
+  radius_file:          ./shell_redshifts.txt  # Redshifts of shells for healpix maps
+  max_map_update_send_size_mb: 16.0            # Apply map updates over multiple iterations to limit memory overhead
+  map_names_file:       ./map_types.txt        # List of types of healpix maps to make
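+  # (Both files above are plain text; as an illustrative assumption, radius_file lists one
+  #  shell redshift range per line and map_names_file one map type name per line.)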
+
+  distributed_maps:   1           # Split maps over multiple files (1) or use collective I/O to write one file (0)
+
+  particles_lossy_compression: 0  # Apply lossy compression to lightcone particles
+  particles_gzip_level:        6  # Apply lossless (deflate) compression to lightcone particles
+  maps_gzip_level:             6  # Apply lossless (deflate) compression to healpix maps
+
+# Parameters specific to lightcone 0 - any lightcone parameters not found here are taken from LightconeCommon, above,
+# except for 'enabled' and 'basename'.
+Lightcone0:
+  enabled: 1                                           # Enable this lightcone
+  basename: lightcone0                                 # Base name of this lightcone's output files
+  observer_position: [35.561875, 35.561875, 35.561875] # Location of the observer in this lightcone
+
+# Parameters specific to lightcone 1 - any lightcone parameters not found here are taken from LightconeCommon, above,
+# except for 'enabled' and 'basename'.
+Lightcone1:
+  enabled: 1
+  basename: lightcone1
+  observer_position: [35.561875, 35.561875, 35.561875]
+
+  gas_filtering_enabled:      1      # Enable filtering out of certain gas particles from particle outputs
+  min_z_for_gas_filtering:    0.025  # Filter gas particles above this redshift, output all below
+  min_temp_for_filtered_gas:  1.0e5  # Above min_z_for_gas_filtering only output gas with temperature above this
+  min_nh_for_filtered_gas:    1.0e-6 # Above min_z_for_gas_filtering only output gas with nh/(1+z)^4 above this
diff --git a/m4/ax_pthread.m4 b/m4/ax_pthread.m4
index 5fbf9fe0d68616042f87a8365190211cb8ccfbf1..9f35d139149f8d9bda17cddb730cd13bcf775465 100644
--- a/m4/ax_pthread.m4
+++ b/m4/ax_pthread.m4
@@ -14,20 +14,24 @@
 #   flags that are needed. (The user can also force certain compiler
 #   flags/libs to be tested by setting these environment variables.)
 #
-#   Also sets PTHREAD_CC to any special C compiler that is needed for
-#   multi-threaded programs (defaults to the value of CC otherwise). (This
-#   is necessary on AIX to use the special cc_r compiler alias.)
+#   Also sets PTHREAD_CC and PTHREAD_CXX to any special compilers that are
+#   needed for multi-threaded programs (these default to the values of CC
+#   and CXX, respectively). (This is necessary on e.g. AIX to use the
+#   special cc_r/CC_r compiler aliases.)
 #
 #   NOTE: You are assumed to not only compile your program with these flags,
 #   but also to link with them as well. For example, you might link with
 #   $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
+#   $PTHREAD_CXX $CXXFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS
 #
 #   If you are only building threaded programs, you may wish to use these
 #   variables in your default LIBS, CFLAGS, and CC:
 #
 #     LIBS="$PTHREAD_LIBS $LIBS"
 #     CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+#     CXXFLAGS="$CXXFLAGS $PTHREAD_CFLAGS"
 #     CC="$PTHREAD_CC"
+#     CXX="$PTHREAD_CXX"
 #
 #   In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant
 #   has a nonstandard name, this macro defines PTHREAD_CREATE_JOINABLE to
@@ -55,6 +59,7 @@
 #
 #   Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
 #   Copyright (c) 2011 Daniel Richard G. <skunk@iSKUNK.ORG>
+#   Copyright (c) 2019 Marc Stevens <marc.stevens@cwi.nl>
 #
 #   This program is free software: you can redistribute it and/or modify it
 #   under the terms of the GNU General Public License as published by the
@@ -82,7 +87,7 @@
 #   modified version of the Autoconf Macro, you may extend this special
 #   exception to the GPL to apply to your modified version as well.
 
-#serial 24
+#serial 31
 
 AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD])
 AC_DEFUN([AX_PTHREAD], [
@@ -104,6 +109,7 @@ if test "x$PTHREAD_CFLAGS$PTHREAD_LIBS" != "x"; then
         ax_pthread_save_CFLAGS="$CFLAGS"
         ax_pthread_save_LIBS="$LIBS"
         AS_IF([test "x$PTHREAD_CC" != "x"], [CC="$PTHREAD_CC"])
+        AS_IF([test "x$PTHREAD_CXX" != "x"], [CXX="$PTHREAD_CXX"])
         CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
         LIBS="$PTHREAD_LIBS $LIBS"
         AC_MSG_CHECKING([for pthread_join using $CC $PTHREAD_CFLAGS $PTHREAD_LIBS])
@@ -123,10 +129,12 @@ fi
 # (e.g. DEC) have both -lpthread and -lpthreads, where one of the
 # libraries is broken (non-POSIX).
 
-# Create a list of thread flags to try.  Items starting with a "-" are
-# C compiler flags, and other items are library names, except for "none"
-# which indicates that we try without any flags at all, and "pthread-config"
-# which is a program returning the flags for the Pth emulation library.
+# Create a list of thread flags to try. Items with a "," contain both
+# C compiler flags (before ",") and linker flags (after ","). Other items
+# starting with a "-" are C compiler flags, and remaining items are
+# library names, except for "none" which indicates that we try without
+# any flags at all, and "pthread-config" which is a program returning
+# the flags for the Pth emulation library.
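+# For example, "-mt,-lpthread" tries "-mt" as a compiler flag together with
+# "-lpthread" as a linker flag.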
 
 ax_pthread_flags="pthreads none -Kthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
 
@@ -194,14 +202,47 @@ case $host_os in
         # that too in a future libc.)  So we'll check first for the
         # standard Solaris way of linking pthreads (-mt -lpthread).
 
-        ax_pthread_flags="-mt,pthread pthread $ax_pthread_flags"
+        ax_pthread_flags="-mt,-lpthread pthread $ax_pthread_flags"
         ;;
 esac
 
+# Are we compiling with Clang?
+
+AC_CACHE_CHECK([whether $CC is Clang],
+    [ax_cv_PTHREAD_CLANG],
+    [ax_cv_PTHREAD_CLANG=no
+     # Note that Autoconf sets GCC=yes for Clang as well as GCC
+     if test "x$GCC" = "xyes"; then
+        AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
+            [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
+#            if defined(__clang__) && defined(__llvm__)
+             AX_PTHREAD_CC_IS_CLANG
+#            endif
+            ],
+            [ax_cv_PTHREAD_CLANG=yes])
+     fi
+    ])
+ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
+
+
 # GCC generally uses -pthread, or -pthreads on some platforms (e.g. SPARC)
 
+# Note that for GCC and Clang -pthread generally implies -lpthread,
+# except when -nostdlib is passed.
+# This is problematic using libtool to build C++ shared libraries with pthread:
+# [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=25460
+# [2] https://bugzilla.redhat.com/show_bug.cgi?id=661333
+# [3] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=468555
+# To solve this, first try -pthread together with -lpthread for GCC
+
 AS_IF([test "x$GCC" = "xyes"],
-      [ax_pthread_flags="-pthread -pthreads $ax_pthread_flags"])
+      [ax_pthread_flags="-pthread,-lpthread -pthread -pthreads $ax_pthread_flags"])
+
+# Clang takes -pthread (never supported any other flag), but we'll try with -lpthread first
+
+AS_IF([test "x$ax_pthread_clang" = "xyes"],
+      [ax_pthread_flags="-pthread,-lpthread -pthread"])
+
 
 # The presence of a feature test macro requesting re-entrant function
 # definitions is, on some systems, a strong hint that pthreads support is
@@ -224,25 +265,86 @@ AS_IF([test "x$ax_pthread_check_macro" = "x--"],
       [ax_pthread_check_cond=0],
       [ax_pthread_check_cond="!defined($ax_pthread_check_macro)"])
 
-# Are we compiling with Clang?
 
-AC_CACHE_CHECK([whether $CC is Clang],
-    [ax_cv_PTHREAD_CLANG],
-    [ax_cv_PTHREAD_CLANG=no
-     # Note that Autoconf sets GCC=yes for Clang as well as GCC
-     if test "x$GCC" = "xyes"; then
-        AC_EGREP_CPP([AX_PTHREAD_CC_IS_CLANG],
-            [/* Note: Clang 2.7 lacks __clang_[a-z]+__ */
-#            if defined(__clang__) && defined(__llvm__)
-             AX_PTHREAD_CC_IS_CLANG
-#            endif
-            ],
-            [ax_cv_PTHREAD_CLANG=yes])
-     fi
-    ])
-ax_pthread_clang="$ax_cv_PTHREAD_CLANG"
+if test "x$ax_pthread_ok" = "xno"; then
+for ax_pthread_try_flag in $ax_pthread_flags; do
+
+        case $ax_pthread_try_flag in
+                none)
+                AC_MSG_CHECKING([whether pthreads work without any flags])
+                ;;
+
+                *,*)
+                PTHREAD_CFLAGS=`echo $ax_pthread_try_flag | sed "s/^\(.*\),\(.*\)$/\1/"`
+                PTHREAD_LIBS=`echo $ax_pthread_try_flag | sed "s/^\(.*\),\(.*\)$/\2/"`
+                AC_MSG_CHECKING([whether pthreads work with "$PTHREAD_CFLAGS" and "$PTHREAD_LIBS"])
+                ;;
+
+                -*)
+                AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
+                PTHREAD_CFLAGS="$ax_pthread_try_flag"
+                ;;
+
+                pthread-config)
+                AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
+                AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
+                PTHREAD_CFLAGS="`pthread-config --cflags`"
+                PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+                ;;
+
+                *)
+                AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
+                PTHREAD_LIBS="-l$ax_pthread_try_flag"
+                ;;
+        esac
+
+        ax_pthread_save_CFLAGS="$CFLAGS"
+        ax_pthread_save_LIBS="$LIBS"
+        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+        LIBS="$PTHREAD_LIBS $LIBS"
+
+        # Check for various functions.  We must include pthread.h,
+        # since some functions may be macros.  (On the Sequent, we
+        # need a special flag -Kthread to make this header compile.)
+        # We check for pthread_join because it is in -lpthread on IRIX
+        # while pthread_create is in libc.  We check for pthread_attr_init
+        # due to DEC craziness with -lpthreads.  We check for
+        # pthread_cleanup_push because it is one of the few pthread
+        # functions on Solaris that doesn't have a non-functional libc stub.
+        # We try pthread_create on general principles.
+
+        AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
+#                       if $ax_pthread_check_cond
+#                        error "$ax_pthread_check_macro must be defined"
+#                       endif
+                        static void *some_global = NULL;
+                        static void routine(void *a)
+                          {
+                             /* To avoid any unused-parameter or
+                                unused-but-set-parameter warning.  */
+                             some_global = a;
+                          }
+                        static void *start_routine(void *a) { return a; }],
+                       [pthread_t th; pthread_attr_t attr;
+                        pthread_create(&th, 0, start_routine, 0);
+                        pthread_join(th, 0);
+                        pthread_attr_init(&attr);
+                        pthread_cleanup_push(routine, 0);
+                        pthread_cleanup_pop(0) /* ; */])],
+            [ax_pthread_ok=yes],
+            [])
+
+        CFLAGS="$ax_pthread_save_CFLAGS"
+        LIBS="$ax_pthread_save_LIBS"
+
+        AC_MSG_RESULT([$ax_pthread_ok])
+        AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
+
+        PTHREAD_LIBS=""
+        PTHREAD_CFLAGS=""
+done
+fi
 
-ax_pthread_clang_warning=no
 
 # Clang needs special handling, because older versions handle the -pthread
 # option in a rather... idiosyncratic way
@@ -261,11 +363,6 @@ if test "x$ax_pthread_clang" = "xyes"; then
         # -pthread does define _REENTRANT, and while the Darwin headers
         # ignore this macro, third-party headers might not.)
 
-        PTHREAD_CFLAGS="-pthread"
-        PTHREAD_LIBS=
-
-        ax_pthread_ok=yes
-
         # However, older versions of Clang make a point of warning the user
         # that, in an invocation where only linking and no compilation is
         # taking place, the -pthread option has no effect ("argument unused
@@ -294,7 +391,7 @@ if test "x$ax_pthread_clang" = "xyes"; then
              # step
              ax_pthread_save_ac_link="$ac_link"
              ax_pthread_sed='s/conftest\.\$ac_ext/conftest.$ac_objext/g'
-             ax_pthread_link_step=`$as_echo "$ac_link" | sed "$ax_pthread_sed"`
+             ax_pthread_link_step=`AS_ECHO(["$ac_link"]) | sed "$ax_pthread_sed"`
              ax_pthread_2step_ac_link="($ac_compile) && (echo ==== >&5) && ($ax_pthread_link_step)"
              ax_pthread_save_CFLAGS="$CFLAGS"
              for ax_pthread_try in '' -Qunused-arguments -Wno-unused-command-line-argument unknown; do
@@ -320,78 +417,7 @@ if test "x$ax_pthread_clang" = "xyes"; then
 
 fi # $ax_pthread_clang = yes
 
-if test "x$ax_pthread_ok" = "xno"; then
-for ax_pthread_try_flag in $ax_pthread_flags; do
-
-        case $ax_pthread_try_flag in
-                none)
-                AC_MSG_CHECKING([whether pthreads work without any flags])
-                ;;
-
-                -mt,pthread)
-                AC_MSG_CHECKING([whether pthreads work with -mt -lpthread])
-                PTHREAD_CFLAGS="-mt"
-                PTHREAD_LIBS="-lpthread"
-                ;;
-
-                -*)
-                AC_MSG_CHECKING([whether pthreads work with $ax_pthread_try_flag])
-                PTHREAD_CFLAGS="$ax_pthread_try_flag"
-                ;;
 
-                pthread-config)
-                AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no])
-                AS_IF([test "x$ax_pthread_config" = "xno"], [continue])
-                PTHREAD_CFLAGS="`pthread-config --cflags`"
-                PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
-                ;;
-
-                *)
-                AC_MSG_CHECKING([for the pthreads library -l$ax_pthread_try_flag])
-                PTHREAD_LIBS="-l$ax_pthread_try_flag"
-                ;;
-        esac
-
-        ax_pthread_save_CFLAGS="$CFLAGS"
-        ax_pthread_save_LIBS="$LIBS"
-        CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
-        LIBS="$PTHREAD_LIBS $LIBS"
-
-        # Check for various functions.  We must include pthread.h,
-        # since some functions may be macros.  (On the Sequent, we
-        # need a special flag -Kthread to make this header compile.)
-        # We check for pthread_join because it is in -lpthread on IRIX
-        # while pthread_create is in libc.  We check for pthread_attr_init
-        # due to DEC craziness with -lpthreads.  We check for
-        # pthread_cleanup_push because it is one of the few pthread
-        # functions on Solaris that doesn't have a non-functional libc stub.
-        # We try pthread_create on general principles.
-
-        AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>
-#                       if $ax_pthread_check_cond
-#                        error "$ax_pthread_check_macro must be defined"
-#                       endif
-                        static void routine(void *a) { a = 0; }
-                        static void *start_routine(void *a) { return a; }],
-                       [pthread_t th; pthread_attr_t attr;
-                        pthread_create(&th, 0, start_routine, 0);
-                        pthread_join(th, 0);
-                        pthread_attr_init(&attr);
-                        pthread_cleanup_push(routine, 0);
-                        pthread_cleanup_pop(0) /* ; */])],
-            [ax_pthread_ok=yes],
-            [])
-
-        CFLAGS="$ax_pthread_save_CFLAGS"
-        LIBS="$ax_pthread_save_LIBS"
-
-        AC_MSG_RESULT([$ax_pthread_ok])
-        AS_IF([test "x$ax_pthread_ok" = "xyes"], [break])
-
-        PTHREAD_LIBS=""
-        PTHREAD_CFLAGS=""
-done
-fi
 
 # Various other checks:
 if test "x$ax_pthread_ok" = "xyes"; then
@@ -438,7 +464,8 @@ if test "x$ax_pthread_ok" = "xyes"; then
         AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT],
             [ax_cv_PTHREAD_PRIO_INHERIT],
             [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]],
-                                             [[int i = PTHREAD_PRIO_INHERIT;]])],
+                                             [[int i = PTHREAD_PRIO_INHERIT;
+                                               return i;]])],
                             [ax_cv_PTHREAD_PRIO_INHERIT=yes],
                             [ax_cv_PTHREAD_PRIO_INHERIT=no])
             ])
@@ -460,18 +487,28 @@ if test "x$ax_pthread_ok" = "xyes"; then
                     [#handle absolute path differently from PATH based program lookup
                      AS_CASE(["x$CC"],
                          [x/*],
-                         [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])],
-                         [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])])
+                         [
+			   AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])
+			   AS_IF([test "x${CXX}" != "x"], [AS_IF([AS_EXECUTABLE_P([${CXX}_r])],[PTHREAD_CXX="${CXX}_r"])])
+			 ],
+                         [
+			   AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])
+			   AS_IF([test "x${CXX}" != "x"], [AC_CHECK_PROGS([PTHREAD_CXX],[${CXX}_r],[$CXX])])
+			 ]
+                     )
+                    ])
                 ;;
             esac
         fi
 fi
 
 test -n "$PTHREAD_CC" || PTHREAD_CC="$CC"
+test -n "$PTHREAD_CXX" || PTHREAD_CXX="$CXX"
 
 AC_SUBST([PTHREAD_LIBS])
 AC_SUBST([PTHREAD_CFLAGS])
 AC_SUBST([PTHREAD_CC])
+AC_SUBST([PTHREAD_CXX])
 
 # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
 if test "x$ax_pthread_ok" = "xyes"; then
diff --git a/m4/gv_find_library.m4 b/m4/gv_find_library.m4
new file mode 100644
index 0000000000000000000000000000000000000000..948a38a5432aa96d778a6c8cb72da79aecda2091
--- /dev/null
+++ b/m4/gv_find_library.m4
@@ -0,0 +1,119 @@
+#
+# SYNOPSIS
+#
+#   GV_FIND_LIBRARY(NAME, VARNAME, PKGNAME, LIBNAME, FUNC)
+#
+#     NAME    : Name to use in help strings
+#     VARNAME : Defines macro HAVE_VARNAME and shell variable USE_VARNAME
+#               if library found
+#     PKGNAME : package name used by pkg-config
+#     LIBNAME : Name used in the library file name
+#     FUNC    : A function in the library
+#
+# DESCRIPTION
+#
+#   Attempts to set up the specified library using pkg-config
+#
+#   Uses pkg-config to try to find package PKGNAME. If successful defines
+#   preprocessor macro HAVE_$VARNAME and sets USE_$VARNAME = "yes", otherwise
+#   sets USE_$VARNAME = "no". 
+#
+#   Also sets ${VARNAME}_LIBS and ${VARNAME}_CFLAGS on success.
+#
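+# EXAMPLE
+#
+#   A purely illustrative invocation (the library name, pkg-config name, and
+#   probe function here are placeholders, not part of this build):
+#
+#     GV_FIND_LIBRARY([foo], [FOO], [foo], [foo], [foo_init])
+#     if test "$USE_FOO" = "yes" ; then
+#       AC_MSG_NOTICE([foo flags: $FOO_CFLAGS $FOO_LIBS])
+#     fi
+#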
+# LAST MODIFICATION
+#
+#   07/03/09 Version used in Gadgetviewer
+#   02/02/21 Use export when temporarily setting PKG_CONFIG_PATH
+#   08/02/22 Add pkg-config --libs output to $LIBS rather than $LDFLAGS when running AC_CHECK_LIB
+#
+
+AC_DEFUN([GV_FIND_LIBRARY],[
+
+# Allow user to enable/disable library
+AC_ARG_WITH([$1], AC_HELP_STRING([--with-$1@<:@=PATH@:>@],[use the $1 library]),
+		  [USE_$2=$withval ; GV_SPEC="yes" ],[USE_$2="check" ; GV_SPEC="no" ])
+
+# Figure out if we have been given a path
+if test $USE_$2 = "yes" ; then
+  GV_HAVE_$2_PATH="no"
+  GV_$2_PATH=""
+elif test $USE_$2 = "no" ; then
+  GV_HAVE_$2_PATH="no"
+  GV_$2_PATH=""
+elif test $USE_$2 = "check" ; then
+  GV_HAVE_$2_PATH="no"
+  GV_$2_PATH=""
+else
+  GV_HAVE_$2_PATH="yes"
+  GV_$2_PATH=$USE_$2
+fi
+
+GV_FOUND="no"
+
+# Don't do anything if library has been disabled explicitly
+if test $USE_$2 != "no" ; then
+
+  # Add path to PKG_CONFIG_PATH if we have one
+  TMP_PKG_CONFIG_PATH=$PKG_CONFIG_PATH
+  if test $GV_HAVE_$2_PATH = "yes" ; then
+    export PKG_CONFIG_PATH=$GV_$2_PATH/pkgconfig/:$GV_$2_PATH/lib/pkgconfig/:$PKG_CONFIG_PATH
+  fi
+
+  # Try to set it up with pkg-config
+  GV_PKG="yes"
+  PKG_CHECK_MODULES([$2], [$3], [], 
+  			  [echo Unable to find $1 with pkg-config, will try to link to it anyway... ; GV_PKG="no"])
+
+  # Restore original PKG_CONFIG_PATH
+  export PKG_CONFIG_PATH=$TMP_PKG_CONFIG_PATH
+
+  # If that didn't work and flags haven't been supplied by hand but we have a path, try sensible defaults
+  if test ${GV_PKG} = "no" ; then
+    if test ${GV_HAVE_$2_PATH} = "yes" ; then
+      # CFLAGS
+      if test X${$2_CFLAGS} = X ; then
+        $2_CFLAGS=-I${GV_$2_PATH}/include/
+      fi
+      # LIBS
+      if test X${$2_LIBS} = X ; then
+        $2_LIBS="-L${GV_$2_PATH}/lib/"
+      fi
+    fi
+  fi
+
+  # Try to link to the library
+  TMP_LIBS=$LIBS
+  LIBS="${$2_LIBS} ${LIBS}"
+  AC_CHECK_LIB([$4], [$5], [GV_FOUND="yes"], 
+  		     [echo ; echo WARNING: Unable to link to $1 library. See config.log for details ; echo])
+  LIBS=$TMP_LIBS
+
+  if test $GV_FOUND = "no" ; then
+    # If we can't link the library and it was explicitly asked for, abort	
+    if test $GV_SPEC = "yes" ; then
+      AC_MSG_ERROR([Unable to link to requested library: $1])
+    fi
+    # If the test failed, don't set flags
+    $2_LIBS=""
+    $2_CFLAGS=""
+  else
+    # If the test worked make sure -lwhatever is included if we didn't
+    # use pkg-config
+    if test $GV_PKG = "no" ; then
+      $2_LIBS="${$2_LIBS} -l$4"
+    fi
+  fi
+
+  AC_SUBST($2_LIBS)
+  AC_SUBST($2_CFLAGS)
+
+fi
+
+# Set shell variable and define macro with test result
+USE_$2=$GV_FOUND
+if test $GV_FOUND = "yes" ; then
+    AC_DEFINE([HAVE_$2],[],[Defined if we have $2])
+fi
+
+])
+
diff --git a/src/Makefile.am b/src/Makefile.am
index eef7fc07d53fe73866139d9009f526331abd7e7a..af08fe51035051b77785e08cf69bcca9d4db5c4c 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -16,7 +16,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 # Add the non-standard paths to the included library headers
-AM_CFLAGS = $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(GRACKLE_INCS) $(OPENMP_CFLAGS)
+AM_CFLAGS = $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(GRACKLE_INCS) $(OPENMP_CFLAGS) $(CHEALPIX_CFLAGS)
 
 # Assign a "safe" version number
 AM_LDFLAGS = $(HDF5_LDFLAGS) $(FFTW_LIBS)
@@ -25,7 +25,7 @@ AM_LDFLAGS = $(HDF5_LDFLAGS) $(FFTW_LIBS)
 GIT_CMD = @GIT_CMD@
 
 # Additional dependencies for shared libraries.
-EXTRA_LIBS = $(GSL_LIBS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS)
+EXTRA_LIBS = $(GSL_LIBS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(CHEALPIX_LIBS)
 
 # MPI libraries.
 MPI_LIBS = $(PARMETIS_LIBS) $(METIS_LIBS) $(MPI_THREAD_LIBS)
@@ -52,7 +52,7 @@ include_HEADERS += csds.h active.h timeline.h xmf.h gravity_properties.h gravity
 include_HEADERS += gravity_softened_derivatives.h vector_power.h collectgroup.h hydro_space.h sort_part.h 
 include_HEADERS += chemistry.h chemistry_io.h chemistry_struct.h cosmology.h restart.h space_getsid.h utilities.h 
 include_HEADERS += cbrt.h exp10.h velociraptor_interface.h swift_velociraptor_part.h output_list.h 
-include_HEADERS += csds_io.h tracers_io.h tracers.h tracers_struct.h star_formation_io.h
+include_HEADERS += csds_io.h tracers_io.h tracers.h tracers_struct.h star_formation_io.h extra_io.h
 include_HEADERS += fof.h fof_struct.h fof_io.h fof_catalogue_io.h
 include_HEADERS += multipole.h multipole_accept.h multipole_struct.h binomial.h integer_power.h sincos.h 
 include_HEADERS += star_formation_struct.h star_formation.h star_formation_iact.h 
@@ -66,7 +66,17 @@ include_HEADERS += rays.h rays_struct.h
 include_HEADERS += particle_splitting.h particle_splitting_struct.h
 include_HEADERS += chemistry_csds.h star_formation_csds.h
 include_HEADERS += mesh_gravity.h mesh_gravity_mpi.h mesh_gravity_patch.h mesh_gravity_sort.h row_major_id.h
-include_HEADERS += hdf5_object_to_blob.h ic_info.h
+include_HEADERS += hdf5_object_to_blob.h ic_info.h particle_buffer.h exchange_structs.h
+include_HEADERS += lightcone/lightcone.h lightcone/lightcone_particle_io.h lightcone/lightcone_replications.h
+include_HEADERS += lightcone/lightcone_crossing.h lightcone/lightcone_array.h lightcone/lightcone_map.h
+include_HEADERS += lightcone/lightcone_map_types.h lightcone/projected_kernel.h lightcone/lightcone_shell.h
+include_HEADERS += lightcone/healpix_util.h lightcone/pixel_index.h
+
+# source files for EAGLE extra I/O
+EAGLE_EXTRA_IO_SOURCES=
+if HAVEEAGLEEXTRAIO
+EAGLE_EXTRA_IO_SOURCES += extra_io/EAGLE/extra_lightcone_map_types.c
+endif
 
 # source files for QLA (Ploeckinger+20) cooling
 QLA_COOLING_SOURCES =
@@ -147,8 +157,11 @@ AM_SOURCES += hashmap.c pressure_floor.c
 AM_SOURCES += mesh_gravity.c mesh_gravity_mpi.c mesh_gravity_patch.c mesh_gravity_sort.c
 AM_SOURCES += runner_neutrino.c
 AM_SOURCES += neutrino/Default/fermi_dirac.c neutrino/Default/neutrino.c neutrino/Default/neutrino_response.c 
-AM_SOURCES += rt_parameters.c
-AM_SOURCES += hdf5_object_to_blob.c ic_info.c
+AM_SOURCES += rt_parameters.c hdf5_object_to_blob.c ic_info.c exchange_structs.c particle_buffer.c
+AM_SOURCES += lightcone/lightcone.c lightcone/lightcone_particle_io.c lightcone/lightcone_replications.c
+AM_SOURCES += lightcone/healpix_util.c lightcone/lightcone_array.c lightcone/lightcone_map.c
+AM_SOURCES += lightcone/lightcone_map_types.c lightcone/projected_kernel.c lightcone/lightcone_shell.c
+AM_SOURCES += $(EAGLE_EXTRA_IO_SOURCES)
 AM_SOURCES += $(QLA_COOLING_SOURCES) $(QLA_EAGLE_COOLING_SOURCES) 
 AM_SOURCES += $(EAGLE_COOLING_SOURCES) $(EAGLE_FEEDBACK_SOURCES) 
 AM_SOURCES += $(GRACKLE_COOLING_SOURCES) $(GEAR_FEEDBACK_SOURCES) 
@@ -377,6 +390,7 @@ nobase_noinst_HEADERS += tracers/none/tracers.h tracers/none/tracers_struct.h
 nobase_noinst_HEADERS += tracers/none/tracers_io.h 
 nobase_noinst_HEADERS += tracers/EAGLE/tracers.h tracers/EAGLE/tracers_struct.h 
 nobase_noinst_HEADERS += tracers/EAGLE/tracers_io.h 
+nobase_noinst_HEADERS += extra_io/EAGLE/extra_io.h extra_io/EAGLE/extra.h
 nobase_noinst_HEADERS += feedback/none/feedback.h feedback/none/feedback_struct.h feedback/none/feedback_iact.h 
 nobase_noinst_HEADERS += feedback/none/feedback_properties.h 
 nobase_noinst_HEADERS += feedback/EAGLE_kinetic/feedback.h feedback/EAGLE_kinetic/feedback_struct.h 
diff --git a/src/cell.h b/src/cell.h
index 513e89df739fe4af060dc7ace188323d120fb86e..3a4bbb42a17c6e9f1fd2c5aa0adfd91d3b9dd921 100644
--- a/src/cell.h
+++ b/src/cell.h
@@ -50,6 +50,7 @@
 /* Avoid cyclic inclusions */
 struct engine;
 struct scheduler;
+struct replication_list;
 
 /* Max tag size set to 2^29 to take into account some MPI implementations
  * that use 2^31 as the upper bound on MPI tags and the fact that
@@ -539,11 +540,15 @@ int cell_unskip_sinks_tasks(struct cell *c, struct scheduler *s);
 int cell_unskip_rt_tasks(struct cell *c, struct scheduler *s);
 int cell_unskip_black_holes_tasks(struct cell *c, struct scheduler *s);
 int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s);
-void cell_drift_part(struct cell *c, const struct engine *e, int force);
-void cell_drift_gpart(struct cell *c, const struct engine *e, int force);
-void cell_drift_spart(struct cell *c, const struct engine *e, int force);
+void cell_drift_part(struct cell *c, const struct engine *e, int force,
+                     struct replication_list *replication_list_in);
+void cell_drift_gpart(struct cell *c, const struct engine *e, int force,
+                      struct replication_list *replication_list);
+void cell_drift_spart(struct cell *c, const struct engine *e, int force,
+                      struct replication_list *replication_list);
 void cell_drift_sink(struct cell *c, const struct engine *e, int force);
-void cell_drift_bpart(struct cell *c, const struct engine *e, int force);
+void cell_drift_bpart(struct cell *c, const struct engine *e, int force,
+                      struct replication_list *replication_list);
 void cell_drift_multipole(struct cell *c, const struct engine *e);
 void cell_drift_all_multipoles(struct cell *c, const struct engine *e);
 void cell_check_timesteps(const struct cell *c, const integertime_t ti_current,
diff --git a/src/cell_drift.c b/src/cell_drift.c
index 3cf65c661ae8486c22f8d76506a9ebb68dc3fd13..0e5fe0f723938fd43bc14605ec7bc3662a1250dd 100644
--- a/src/cell_drift.c
+++ b/src/cell_drift.c
@@ -30,6 +30,8 @@
 #include "drift.h"
 #include "feedback.h"
 #include "gravity.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_array.h"
 #include "multipole.h"
 #include "neutrino.h"
 #include "pressure_floor.h"
@@ -37,6 +39,38 @@
 #include "star_formation.h"
 #include "tracers.h"
 
+#ifdef WITH_LIGHTCONE
+/**
+ * @brief Compute refined lightcone replication list for a cell
+ *
+ * Returns a pointer to the new list, which must be freed later.
+ *
+ * @param e The #engine
+ * @param c The #cell
+ * @param replication_list_in The input replication_list struct
+ */
+static struct replication_list *refine_replications(
+    const struct engine *e, const struct cell *c,
+    struct replication_list *replication_list_in) {
+  struct replication_list *replication_list;
+  if (e->lightcone_array_properties->nr_lightcones > 0) {
+    if (replication_list_in) {
+      /* We're not at the top of the hierarchy, so use the replication lists
+       * passed in */
+      replication_list = replication_list_in;
+    } else {
+      /* Current call is top of the recursive hierarchy, so compute refined
+       * replication lists */
+      replication_list =
+          lightcone_array_refine_replications(e->lightcone_array_properties, c);
+    }
+  } else {
+    replication_list = NULL;
+  }
+  return replication_list;
+}
+#endif
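+
+/* Note on usage (as implemented below): the outermost cell_drift_*() call
+ * passes replication_list_in == NULL, so refine_replications() allocates a
+ * refined list which that same outermost call frees at the end via
+ * lightcone_array_free_replications(); recursive calls on progeny receive
+ * the already-refined list and must not free it. */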
+
 /**
  * @brief Recursively set the hydro's ti_old_part to the current time.
  *
@@ -108,7 +142,8 @@ void cell_set_ti_old_bpart(struct cell *c, const integertime_t ti) {
  * @param e The #engine (to get ti_current).
  * @param force Drift the particles irrespective of the #cell flags.
  */
-void cell_drift_part(struct cell *c, const struct engine *e, int force) {
+void cell_drift_part(struct cell *c, const struct engine *e, int force,
+                     struct replication_list *replication_list_in) {
   const int periodic = e->s->periodic;
   const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
   const int with_cosmology = (e->policy & engine_policy_cosmology);
@@ -146,7 +181,15 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
     return;
   }
 
-  /* Ok, we have some particles somewhere in the hierarchy to drift */
+  /* Ok, we have some particles somewhere in the hierarchy to drift.
+     If making lightcones, get the refined replication list for this cell.
+
+     IMPORTANT: after this point we must not return without freeing the
+     replication lists if we allocated them.
+  */
+  struct replication_list *replication_list = NULL;
+#ifdef WITH_LIGHTCONE
+  replication_list = refine_replications(e, c, replication_list_in);
+#endif
 
   /* Are we not in a leaf ? */
   if (c->split && (force || cell_get_flag(c, cell_flag_do_hydro_sub_drift))) {
@@ -157,7 +200,7 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
         struct cell *cp = c->progeny[k];
 
         /* Collect */
-        cell_drift_part(cp, e, force);
+        cell_drift_part(cp, e, force, replication_list);
 
         /* Update */
         dx_max = max(dx_max, cp->hydro.dx_max_part);
@@ -212,8 +255,7 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
 
       /* Drift... */
       drift_part(p, xp, dt_drift, dt_kick_hydro, dt_kick_grav, dt_therm,
-                 ti_old_part, ti_current, e->cosmology, e->hydro_properties,
-                 e->entropy_floor);
+                 ti_old_part, ti_current, e, replication_list, c->loc);
 
       /* Update the tracers properties */
       tracers_after_drift(p, xp, e->internal_units, e->physical_constants,
@@ -328,6 +370,14 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
     c->hydro.ti_old_part = ti_current;
   }
 
+#ifdef WITH_LIGHTCONE
+  /* If we're at the top of the recursive hierarchy, clean up the refined
+   * replication lists */
+  if (e->lightcone_array_properties->nr_lightcones > 0 && !replication_list_in)
+    lightcone_array_free_replications(e->lightcone_array_properties,
+                                      replication_list);
+#endif
+
   /* Clear the drift flags. */
   cell_clear_flag(c, cell_flag_do_hydro_drift | cell_flag_do_hydro_sub_drift);
 }
@@ -339,7 +389,8 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
  * @param e The #engine (to get ti_current).
  * @param force Drift the particles irrespective of the #cell flags.
+ * @param replication_list_in Lightcone replication list from the parent
+ *        cell, or NULL if this is the top of the recursive hierarchy.
  */
-void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
+void cell_drift_gpart(struct cell *c, const struct engine *e, int force,
+                      struct replication_list *replication_list_in) {
   const int periodic = e->s->periodic;
   const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
   const int with_cosmology = (e->policy & engine_policy_cosmology);
@@ -373,7 +424,16 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
     return;
   }
 
-  /* Ok, we have some particles somewhere in the hierarchy to drift */
+  /* Ok, we have some particles somewhere in the hierarchy to drift.
+     If making lightcones, get the refined replication list for this cell.
+
+     IMPORTANT: after this point we must not return without freeing the
+     replication lists if we allocated them.
+  */
+  struct replication_list *replication_list = NULL;
+#ifdef WITH_LIGHTCONE
+  replication_list = refine_replications(e, c, replication_list_in);
+#endif
 
   /* Are we not in a leaf ? */
   if (c->split && (force || cell_get_flag(c, cell_flag_do_grav_sub_drift))) {
@@ -384,7 +444,7 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
         struct cell *cp = c->progeny[k];
 
         /* Recurse */
-        cell_drift_gpart(cp, e, force);
+        cell_drift_gpart(cp, e, force, replication_list);
       }
     }
 
@@ -417,7 +477,8 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
       }
 
       /* Drift... */
-      drift_gpart(gp, dt_drift_k, ti_old_gpart, ti_current, grav_props, e);
+      drift_gpart(gp, dt_drift_k, ti_old_gpart, ti_current, grav_props, e,
+                  replication_list, c->loc);
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Make sure the particle does not drift by more than a box length. */
@@ -478,6 +539,14 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
     c->grav.ti_old_part = ti_current;
   }
 
+#ifdef WITH_LIGHTCONE
+  /* If we're at the top of the recursive hierarchy, clean up the refined
+   * replication lists */
+  if (e->lightcone_array_properties->nr_lightcones > 0 && !replication_list_in)
+    lightcone_array_free_replications(e->lightcone_array_properties,
+                                      replication_list);
+#endif
+
   /* Clear the drift flags. */
   cell_clear_flag(c, cell_flag_do_grav_drift | cell_flag_do_grav_sub_drift);
 }
@@ -489,7 +558,8 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
  * @param e The #engine (to get ti_current).
  * @param force Drift the particles irrespective of the #cell flags.
+ * @param replication_list_in Lightcone replication list from the parent
+ *        cell, or NULL if this is the top of the recursive hierarchy.
  */
-void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
+void cell_drift_spart(struct cell *c, const struct engine *e, int force,
+                      struct replication_list *replication_list_in) {
   const int periodic = e->s->periodic;
   const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
   const int with_cosmology = (e->policy & engine_policy_cosmology);
@@ -526,7 +596,15 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
     return;
   }
 
-  /* Ok, we have some particles somewhere in the hierarchy to drift */
+  /* Ok, we have some particles somewhere in the hierarchy to drift
+
+     IMPORTANT: after this point we must not return without freeing the
+     replication lists if we allocated them.
+  */
+  struct replication_list *replication_list = NULL;
+#ifdef WITH_LIGHTCONE
+  replication_list = refine_replications(e, c, replication_list_in);
+#endif
 
   /* Are we not in a leaf ? */
   if (c->split && (force || cell_get_flag(c, cell_flag_do_stars_sub_drift))) {
@@ -537,7 +615,7 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
         struct cell *cp = c->progeny[k];
 
         /* Recurse */
-        cell_drift_spart(cp, e, force);
+        cell_drift_spart(cp, e, force, replication_list);
 
         /* Update */
         dx_max = max(dx_max, cp->stars.dx_max_part);
@@ -576,7 +654,8 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
       if (spart_is_inhibited(sp, e)) continue;
 
       /* Drift... */
-      drift_spart(sp, dt_drift, ti_old_spart, ti_current);
+      drift_spart(sp, dt_drift, ti_old_spart, ti_current, e, replication_list,
+                  c->loc);
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Make sure the particle does not drift by more than a box length. */
@@ -664,6 +743,14 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
     c->stars.ti_old_part = ti_current;
   }
 
+#ifdef WITH_LIGHTCONE
+  /* If we're at the top of the recursive hierarchy, clean up the refined
+   * replication lists */
+  if (e->lightcone_array_properties->nr_lightcones > 0 && !replication_list_in)
+    lightcone_array_free_replications(e->lightcone_array_properties,
+                                      replication_list);
+#endif
+
   /* Clear the drift flags. */
   cell_clear_flag(c, cell_flag_do_stars_drift | cell_flag_do_stars_sub_drift);
 }
@@ -675,7 +762,8 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
  * @param e The #engine (to get ti_current).
  * @param force Drift the particles irrespective of the #cell flags.
+ * @param replication_list_in Lightcone replication list from the parent
+ *        cell, or NULL if this is the top of the recursive hierarchy.
  */
-void cell_drift_bpart(struct cell *c, const struct engine *e, int force) {
+void cell_drift_bpart(struct cell *c, const struct engine *e, int force,
+                      struct replication_list *replication_list_in) {
 
   const int periodic = e->s->periodic;
   const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
@@ -713,7 +801,15 @@ void cell_drift_bpart(struct cell *c, const struct engine *e, int force) {
     return;
   }
 
-  /* Ok, we have some particles somewhere in the hierarchy to drift */
+  /* Ok, we have some particles somewhere in the hierarchy to drift
+
+     IMPORTANT: after this point we must not return without freeing the
+     replication lists if we allocated them.
+  */
+  struct replication_list *replication_list = NULL;
+#ifdef WITH_LIGHTCONE
+  replication_list = refine_replications(e, c, replication_list_in);
+#endif
 
   /* Are we not in a leaf ? */
   if (c->split && (force || cell_get_flag(c, cell_flag_do_bh_sub_drift))) {
@@ -724,7 +820,7 @@ void cell_drift_bpart(struct cell *c, const struct engine *e, int force) {
         struct cell *cp = c->progeny[k];
 
         /* Recurse */
-        cell_drift_bpart(cp, e, force);
+        cell_drift_bpart(cp, e, force, replication_list);
 
         /* Update */
         dx_max = max(dx_max, cp->black_holes.dx_max_part);
@@ -753,7 +849,7 @@ void cell_drift_bpart(struct cell *c, const struct engine *e, int force) {
       dt_drift = (ti_current - ti_old_bpart) * e->time_base;
     }
 
-    /* Loop over all the star particles in the cell */
+    /* Loop over all the black hole particles in the cell */
     const size_t nr_bparts = c->black_holes.count;
     for (size_t k = 0; k < nr_bparts; k++) {
 
@@ -764,7 +860,8 @@ void cell_drift_bpart(struct cell *c, const struct engine *e, int force) {
       if (bpart_is_inhibited(bp, e)) continue;
 
       /* Drift... */
-      drift_bpart(bp, dt_drift, ti_old_bpart, ti_current);
+      drift_bpart(bp, dt_drift, ti_old_bpart, ti_current, e, replication_list,
+                  c->loc);
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Make sure the particle does not drift by more than a box length. */
@@ -843,6 +940,14 @@ void cell_drift_bpart(struct cell *c, const struct engine *e, int force) {
     c->black_holes.ti_old_part = ti_current;
   }
 
+#ifdef WITH_LIGHTCONE
+  /* If we're at the top of the recursive hierarchy, clean up the refined
+   * replication lists */
+  if (e->lightcone_array_properties->nr_lightcones > 0 && !replication_list_in)
+    lightcone_array_free_replications(e->lightcone_array_properties,
+                                      replication_list);
+#endif
+
   /* Clear the drift flags. */
   cell_clear_flag(c, cell_flag_do_bh_drift | cell_flag_do_bh_sub_drift);
 }
@@ -929,7 +1034,7 @@ void cell_drift_sink(struct cell *c, const struct engine *e, int force) {
       dt_drift = (ti_current - ti_old_sink) * e->time_base;
     }
 
-    /* Loop over all the star particles in the cell */
+    /* Loop over all the sink particles in the cell */
     const size_t nr_sinks = c->sinks.count;
     for (size_t k = 0; k < nr_sinks; k++) {
 
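
The four drift functions above share one memory-management contract, spelled out in the IMPORTANT comments: the call that receives a NULL replication_list_in is the top of the recursion, and only it allocates and frees the refined list; every recursive call just passes the pointer through. A minimal standalone sketch of that ownership pattern (hypothetical `node`/`repl_list` types, not SWIFT's):

```c
#include <stdlib.h>

struct repl_list { int n; };
struct node { struct node *child[2]; };

/* Hypothetical stand-ins for lightcone_array_refine_replications() and
 * lightcone_array_free_replications(). */
static struct repl_list *make_list(void) {
  return calloc(1, sizeof(struct repl_list));
}
static void free_list(struct repl_list *l) { free(l); }

static void drift(struct node *n, struct repl_list *list_in) {
  /* Allocate only at the top of the recursion (list_in == NULL). */
  struct repl_list *list = list_in ? list_in : make_list();

  /* Children reuse the list allocated above them. */
  for (int k = 0; k < 2; k++)
    if (n->child[k]) drift(n->child[k], list);

  /* ... drift the particles using list ... */

  /* Free only where we allocated: the top-level call. */
  if (!list_in) free_list(list);
}
```
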
diff --git a/src/collectgroup.c b/src/collectgroup.c
index ad080b30693f51ed211341a9d2120b788145eb34..d7530bb451094b956d672b42ae878e79430f48d9 100644
--- a/src/collectgroup.c
+++ b/src/collectgroup.c
@@ -55,6 +55,7 @@ struct mpicollectgroup1 {
   float tasks_per_cell_max;
   struct star_formation_history sfh;
   float runtime;
+  int flush_lightcone_maps;
   double deadtime;
 #ifdef WITH_CSDS
   float csds_file_size_gb;
@@ -132,6 +133,7 @@ void collectgroup1_apply(const struct collectgroup1 *grp1, struct engine *e) {
   star_formation_logger_add_to_accumulator(&e->sfh, &grp1->sfh);
 
   e->runtime = grp1->runtime;
+  e->flush_lightcone_maps = grp1->flush_lightcone_maps;
   e->global_deadtime = grp1->deadtime;
 }
 
@@ -193,6 +195,7 @@ void collectgroup1_apply(const struct collectgroup1 *grp1, struct engine *e) {
  * @param tasks_per_cell the used number of tasks per cell.
  * @param sfh The star formation history logger
  * @param runtime The runtime of rank in hours.
+ * @param flush_lightcone_maps Flag indicating whether the lightcone map
+ *        buffers should be flushed this step
  * @param deadtime The deadtime of rank.
  * @param csds_file_size_gb The current size of the CSDS.
  */
@@ -207,8 +210,8 @@ void collectgroup1_init(
     integertime_t ti_sinks_beg_max, integertime_t ti_black_holes_end_min,
     integertime_t ti_black_holes_beg_max, int forcerebuild,
     long long total_nr_cells, long long total_nr_tasks, float tasks_per_cell,
-    const struct star_formation_history sfh, float runtime, double deadtime,
-    float csds_file_size_gb) {
+    const struct star_formation_history sfh, float runtime,
+    int flush_lightcone_maps, double deadtime, float csds_file_size_gb) {
 
   grp1->updated = updated;
   grp1->g_updated = g_updated;
@@ -236,6 +239,7 @@ void collectgroup1_init(
   grp1->tasks_per_cell_max = tasks_per_cell;
   grp1->sfh = sfh;
   grp1->runtime = runtime;
+  grp1->flush_lightcone_maps = flush_lightcone_maps;
   grp1->deadtime = deadtime;
 #ifdef WITH_CSDS
   grp1->csds_file_size_gb = csds_file_size_gb;
@@ -282,6 +286,7 @@ void collectgroup1_reduce(struct collectgroup1 *grp1) {
   mpigrp11.tasks_per_cell_max = grp1->tasks_per_cell_max;
   mpigrp11.sfh = grp1->sfh;
   mpigrp11.runtime = grp1->runtime;
+  mpigrp11.flush_lightcone_maps = grp1->flush_lightcone_maps;
   mpigrp11.deadtime = grp1->deadtime;
 #ifdef WITH_CSDS
   mpigrp11.csds_file_size_gb = grp1->csds_file_size_gb;
@@ -319,6 +324,8 @@ void collectgroup1_reduce(struct collectgroup1 *grp1) {
   grp1->tasks_per_cell_max = mpigrp12.tasks_per_cell_max;
   grp1->sfh = mpigrp12.sfh;
   grp1->runtime = mpigrp12.runtime;
+  grp1->flush_lightcone_maps = mpigrp12.flush_lightcone_maps;
   grp1->deadtime = mpigrp12.deadtime;
 #ifdef WITH_CSDS
   grp1->csds_file_size_gb = mpigrp12.csds_file_size_gb;
@@ -394,6 +401,10 @@ static void doreduce1(struct mpicollectgroup1 *mpigrp11,
   /* Use the maximum runtime as the global runtime. */
   mpigrp11->runtime = max(mpigrp11->runtime, mpigrp12->runtime);
 
+  /* Lightcone maps are all updated if any need to be updated */
+  if (mpigrp11->flush_lightcone_maps || mpigrp12->flush_lightcone_maps)
+    mpigrp11->flush_lightcone_maps = 1;
+
   /* Sum the deadtime. */
   mpigrp11->deadtime += mpigrp12->deadtime;
 
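
The doreduce1() change folds the flush flag into SWIFT's custom struct reduction, so a single MPI operation carries all of the collectgroup fields. Taken on its own, the flag combination is just a logical OR across ranks; a sketch of the equivalent stand-alone collective (assumes an initialised MPI environment):

```c
#include <mpi.h>

/* Combine a per-rank flush request into a global decision: if any rank
 * sets local_flush, every rank sees global_flush == 1. */
int global_flush_decision(int local_flush) {
  int global_flush = 0;
  MPI_Allreduce(&local_flush, &global_flush, 1, MPI_INT, MPI_LOR,
                MPI_COMM_WORLD);
  return global_flush;
}
```
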
diff --git a/src/collectgroup.h b/src/collectgroup.h
index 74a51a0663c52c9abad62035b35525c4a1c08bca..8c70085664c265ff38341598c4d9c4a60ca816e6 100644
--- a/src/collectgroup.h
+++ b/src/collectgroup.h
@@ -64,6 +64,9 @@ struct collectgroup1 {
   /* Global runtime of application in hours. */
   float runtime;
 
+  /* Flag to determine if lightcone maps should be updated this step */
+  int flush_lightcone_maps;
+
   /* Accumulated dead time during the step. */
   double deadtime;
 
@@ -86,8 +89,8 @@ void collectgroup1_init(
     integertime_t ti_sinks_beg_max, integertime_t ti_black_holes_end_min,
     integertime_t ti_black_holes_beg_max, int forcerebuild,
     long long total_nr_cells, long long total_nr_tasks, float tasks_per_cell,
-    const struct star_formation_history sfh, float runtime, double deadtime,
-    float csds_file_size_gb);
+    const struct star_formation_history sfh, float runtime,
+    int flush_lightcone_maps, double deadtime, float csds_file_size_gb);
 void collectgroup1_reduce(struct collectgroup1 *grp1);
 #ifdef WITH_MPI
 void mpicollect_free_MPI_type(void);
diff --git a/src/common_io.c b/src/common_io.c
index 9fc9fccd4f3985fda7c9f0258b30c89dfd4e5edc..d8a09f4f5d21d6a1f21d8008a6e43816444b51e4 100644
--- a/src/common_io.c
+++ b/src/common_io.c
@@ -38,6 +38,7 @@
 #include "black_holes_io.h"
 #include "chemistry_io.h"
 #include "cooling_io.h"
+#include "extra_io.h"
 #include "feedback.h"
 #include "fof_io.h"
 #include "gravity_io.h"
@@ -539,6 +540,7 @@ void io_write_meta_data(hid_t h_file, const struct engine* e,
       H5Gcreate(h_grp, "NamedColumns", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
   if (h_grp_columns < 0) error("Error while creating named columns group");
   entropy_floor_write_flavour(h_grp);
+  extra_io_write_flavour(h_grp, h_grp_columns);
   cooling_write_flavour(h_grp, h_grp_columns, e->cooling_func);
   chemistry_write_flavour(h_grp, h_grp_columns, e);
   tracers_write_flavour(h_grp);
@@ -1691,6 +1693,8 @@ void io_select_hydro_fields(const struct part* const parts,
   if (with_rt) {
     *num_fields += rt_write_particles(parts, list + *num_fields);
   }
+  *num_fields += extra_io_write_particles(parts, xparts, list + *num_fields,
+                                          with_cosmology);
 }
 
 /**
@@ -1793,6 +1797,8 @@ void io_select_star_fields(const struct spart* const sparts,
   if (with_rt) {
     *num_fields += rt_write_stars(sparts, list + *num_fields);
   }
+  *num_fields +=
+      extra_io_write_sparticles(sparts, list + *num_fields, with_cosmology);
 }
 
 /**
@@ -1821,4 +1827,6 @@ void io_select_bh_fields(const struct bpart* const bparts,
   if (with_stf) {
     *num_fields += velociraptor_write_bparts(bparts, list + *num_fields);
   }
+  *num_fields +=
+      extra_io_write_bparticles(bparts, list + *num_fields, with_cosmology);
 }
diff --git a/src/cosmology.c b/src/cosmology.c
index e544610321d1914b15b9e559fccad04fe658911c..0e4b9f9684da8c3ead175e80e3ca755af4538cbf 100644
--- a/src/cosmology.c
+++ b/src/cosmology.c
@@ -281,6 +281,31 @@ double gravity_kick_integrand(double a, void *param) {
   return (1. / H) * a_inv * a_inv;
 }
 
+/**
+ * @brief Computes \f$ c dt / a \f$ for the current cosmology.
+ *
+ * @param a The scale-factor of interest.
+ * @param param The current #cosmology.
+ */
+double comoving_distance_integrand(double a, void *param) {
+
+  const struct cosmology *c = (const struct cosmology *)param;
+  const double Omega_nu = cosmology_get_neutrino_density(c, a);
+  const double Omega_r = c->Omega_r + Omega_nu;
+  const double Omega_m = c->Omega_cdm + c->Omega_b;
+  const double Omega_k = c->Omega_k;
+  const double Omega_l = c->Omega_lambda;
+  const double w_0 = c->w_0;
+  const double w_a = c->w_a;
+  const double H0 = c->H0;
+  const double const_speed_light_c = c->const_speed_light_c;
+  const double a_inv = 1. / a;
+  const double E_z = E(Omega_r, Omega_m, Omega_k, Omega_l, w_0, w_a, a);
+  const double H = H0 * E_z;
+
+  return (const_speed_light_c / H) * a_inv * a_inv;
+}
+
 /**
  * @brief Computes \f$ dt / a^{3(\gamma - 1) + 1} \f$ for the current cosmology.
  *
@@ -573,6 +598,14 @@ void cosmology_init_tables(struct cosmology *c) {
                      SWIFT_STRUCT_ALIGNMENT,
                      cosmology_table_length * sizeof(double)) != 0)
     error("Failed to allocate cosmology interpolation table");
+  if (swift_memalign("cosmo.table", (void **)&c->comoving_distance_interp_table,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     cosmology_table_length * sizeof(double)) != 0)
+    error("Failed to allocate cosmology interpolation table");
+  if (swift_memalign(
+          "cosmo.table", (void **)&c->comoving_distance_inverse_interp_table,
+          SWIFT_STRUCT_ALIGNMENT, cosmology_table_length * sizeof(double)) != 0)
+    error("Failed to allocate cosmology interpolation table");
 
   /* Prepare a table of scale factors for the integral bounds */
   const double delta_a =
@@ -653,6 +686,28 @@ void cosmology_init_tables(struct cosmology *c) {
                       GSL_INTEG_GAUSS61, space, &result, &abserr);
   c->universe_age_at_present_day = result;
 
+  /* Integrate the comoving distance \int_{a_begin}^{a_table[i]} c dt/a */
+  F.function = &comoving_distance_integrand;
+  for (int i = 0; i < cosmology_table_length; i++) {
+    gsl_integration_qag(&F, a_begin, a_table[i], 0, 1.0e-10, GSL_workspace_size,
+                        GSL_INTEG_GAUSS61, space, &result, &abserr);
+
+    /* Store result */
+    c->comoving_distance_interp_table[i] = result;
+  }
+
+  /* Integrate the comoving distance \int_{a_begin}^{1.0} c dt/a */
+  F.function = &comoving_distance_integrand;
+  gsl_integration_qag(&F, a_begin, 1.0, 0, 1.0e-10, GSL_workspace_size,
+                      GSL_INTEG_GAUSS61, space, &result, &abserr);
+  c->comoving_distance_interp_table_offset = result;
+
+  /* Integrate the comoving distance \int_{a_begin}^{a_end} c dt/a */
+  F.function = &comoving_distance_integrand;
+  gsl_integration_qag(&F, a_begin, a_end, 0, 1.0e-10, GSL_workspace_size,
+                      GSL_INTEG_GAUSS61, space, &result, &abserr);
+  c->comoving_distance_start_to_end = result;
+
   /* Update the times */
   c->time_begin = cosmology_get_time_since_big_bang(c, c->a_begin);
   c->time_end = cosmology_get_time_since_big_bang(c, c->a_end);
@@ -692,6 +747,41 @@ void cosmology_init_tables(struct cosmology *c) {
     c->scale_factor_interp_table[i_time] = exp(log_a) - c->a_begin;
   }
 
+  /*
+   * Tabulate the inverse of comoving distance(a): the scale factor as a
+   * function of the comoving distance travelled since a_begin.
+   */
+  const double r_begin = cosmology_get_comoving_distance(c, a_begin);
+  const double r_end = cosmology_get_comoving_distance(c, a_end);
+  const double delta_r = (r_begin - r_end) / cosmology_table_length;
+
+  i_a = 0;
+  for (int i_r = 0; i_r < cosmology_table_length; i_r++) {
+
+    /* Current comoving distance from a_begin */
+    double r_interp = delta_r * (i_r + 1);
+
+    /* Find next r in comoving_distance_interp_table */
+    while (i_a < cosmology_table_length &&
+           c->comoving_distance_interp_table[i_a] <= r_interp) {
+      i_a++;
+    }
+
+    /* Find linear interpolation scaling */
+    double scale = 0;
+    if (i_a != cosmology_table_length) {
+      scale = r_interp - c->comoving_distance_interp_table[i_a - 1];
+      scale /= c->comoving_distance_interp_table[i_a] -
+               c->comoving_distance_interp_table[i_a - 1];
+    }
+
+    scale += i_a;
+
+    /* Compute interpolated scale factor */
+    double log_a = c->log_a_begin + scale * (c->log_a_end - c->log_a_begin) /
+                                        cosmology_table_length;
+    c->comoving_distance_inverse_interp_table[i_r] = exp(log_a) - c->a_begin;
+  }
+
   /* Free the workspace and temp array */
   gsl_integration_workspace_free(space);
   swift_free("cosmo.table", a_table);
@@ -798,6 +888,9 @@ void cosmology_init(struct swift_params *params, const struct unit_system *us,
   const double rho_c3_on_4sigma = c->critical_density_0 * cc * cc * cc /
                                   (4. * phys_const->const_stefan_boltzmann);
 
+  /* Store speed of light in internal units */
+  c->const_speed_light_c = phys_const->const_speed_light_c;
+
   /* Handle neutrinos only if present */
   if (c->N_ur == 0. && c->N_nu == 0) {
     /* Infer T_CMB_0 from Omega_r */
@@ -1157,6 +1250,49 @@ double cosmology_get_delta_time(const struct cosmology *c,
   return t2 - t1;
 }
 
+/**
+ * @brief Compute the comoving distance to the specified scale factor
+ *
+ * @param c The current #cosmology.
+ * @param a The scale factor
+ */
+double cosmology_get_comoving_distance(const struct cosmology *c,
+                                       const double a) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (a < c->a_begin) error("a must be >= a_begin");
+  if (a > c->a_end) error("a must be <= a_end");
+#endif
+
+  const double log_a = log(a);
+
+  /* Comoving distance from a_begin to a */
+  const double dist = interp_table(c->comoving_distance_interp_table, log_a,
+                                   c->log_a_begin, c->log_a_end);
+
+  /* Distance from a to the present day: (a_begin -> 1) minus (a_begin -> a) */
+  return c->comoving_distance_interp_table_offset - dist;
+}
+
+/**
+ * @brief Compute scale factor from a comoving distance (in internal units).
+ *
+ * @param c The current #cosmology.
+ * @param r The comoving distance
+ * @return The scale factor.
+ */
+double cosmology_scale_factor_at_comoving_distance(const struct cosmology *c,
+                                                   double r) {
+
+  /* Get comoving distance from a_begin to a corresponding to input r */
+  const double r_interp = c->comoving_distance_interp_table_offset - r;
+
+  const double a =
+      interp_table(c->comoving_distance_inverse_interp_table, r_interp, 0.0,
+                   c->comoving_distance_start_to_end);
+  return a + c->a_begin;
+}
+
 /**
  * @brief Compute neutrino density parameter Omega_nu at the given scale-factor
  * This is the effective present day value, i.e. must be multiplied by (1+z)^4
@@ -1322,6 +1458,8 @@ void cosmology_clean(struct cosmology *c) {
   swift_free("cosmo.table", c->hydro_kick_corr_interp_table);
   swift_free("cosmo.table", c->time_interp_table);
   swift_free("cosmo.table", c->scale_factor_interp_table);
+  swift_free("cosmo.table", c->comoving_distance_interp_table);
+  swift_free("cosmo.table", c->comoving_distance_inverse_interp_table);
   if (c->N_nu > 0) {
     swift_free("cosmo.table", c->neutrino_density_early_table);
     swift_free("cosmo.table", c->neutrino_density_late_table);
diff --git a/src/cosmology.h b/src/cosmology.h
index 3db7a6da141f953bf565fd20c849a7c89e78870a..5336c3e24e1cbacd2494286ce4b5b6a69399eb56 100644
--- a/src/cosmology.h
+++ b/src/cosmology.h
@@ -213,6 +213,9 @@ struct cosmology {
   /*! Log of final expansion factor */
   double log_a_end;
 
+  /*! Speed of light (internal units) */
+  double const_speed_light_c;
+
   /*! Drift factor interpolation table */
   double *drift_fac_interp_table;
 
@@ -231,6 +234,18 @@ struct cosmology {
   /*! Scale factor interpolation table */
   double *scale_factor_interp_table;
 
+  /*! Comoving distance interpolation table */
+  double *comoving_distance_interp_table;
+
+  /*! Comoving distance from a_begin to the present day (a=1) */
+  double comoving_distance_interp_table_offset;
+
+  /*! Comoving distance from a_begin to a_end */
+  double comoving_distance_start_to_end;
+
+  /*! Comoving distance inverse interpolation table */
+  double *comoving_distance_inverse_interp_table;
+
   /*! Massive neutrino density interpolation table at early times */
   double *neutrino_density_early_table;
 
@@ -279,6 +294,12 @@ double cosmology_get_timebase(struct cosmology *c,
 
 double cosmology_get_scale_factor(const struct cosmology *cosmo, double t);
 
+double cosmology_get_comoving_distance(const struct cosmology *c,
+                                       const double a);
+
+double cosmology_scale_factor_at_comoving_distance(const struct cosmology *c,
+                                                   double r);
+
 double cosmology_get_time_since_big_bang(const struct cosmology *c, double a);
 void cosmology_init(struct swift_params *params, const struct unit_system *us,
                     const struct phys_const *phys_const, struct cosmology *c);
diff --git a/src/drift.h b/src/drift.h
index e490b06bed7f9ea77bbdab82d5ab81655bfa46ee..9aba6cc765b437a354509a28ff71d180b47f0971 100644
--- a/src/drift.h
+++ b/src/drift.h
@@ -31,6 +31,8 @@
 #include "entropy_floor.h"
 #include "hydro.h"
 #include "hydro_properties.h"
+#include "lightcone/lightcone_crossing.h"
+#include "lightcone/lightcone_replications.h"
 #include "part.h"
 #include "sink.h"
 #include "stars.h"
@@ -48,7 +50,8 @@
 __attribute__((always_inline)) INLINE static void drift_gpart(
     struct gpart *restrict gp, double dt_drift, integertime_t ti_old,
     integertime_t ti_current, const struct gravity_props *grav_props,
-    const struct engine *e) {
+    const struct engine *e, struct replication_list *replication_list,
+    const double cell_loc[3]) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (gp->time_bin == time_bin_not_created) {
@@ -88,12 +91,36 @@ __attribute__((always_inline)) INLINE static void drift_gpart(
   }
 #endif
 
+#ifdef WITH_LIGHTCONE
+  /* Store initial position and velocity for lightcone check after the drift */
+  const double x[3] = {gp->x[0], gp->x[1], gp->x[2]};
+  const float v_full[3] = {gp->v_full[0], gp->v_full[1], gp->v_full[2]};
+#endif
+
   /* Drift... */
   gp->x[0] += gp->v_full[0] * dt_drift;
   gp->x[1] += gp->v_full[1] * dt_drift;
   gp->x[2] += gp->v_full[2] * dt_drift;
 
   gravity_predict_extra(gp, grav_props);
+
+#ifdef WITH_LIGHTCONE
+  /* Check for lightcone crossing */
+  switch (gp->type) {
+    case swift_type_dark_matter:
+    case swift_type_dark_matter_background:
+    case swift_type_neutrino:
+      /* This particle has no *part counterpart, so check for lightcone crossing
+       * here */
+      lightcone_check_particle_crosses(e, replication_list, x, v_full, gp,
+                                       dt_drift, ti_old, ti_current, cell_loc);
+      break;
+    default:
+      /* Particle has a counterpart or is of a type not supported in lightcones
+       */
+      break;
+  }
+#endif
 }
 
 /**
@@ -114,9 +141,12 @@ __attribute__((always_inline)) INLINE static void drift_gpart(
 __attribute__((always_inline)) INLINE static void drift_part(
     struct part *restrict p, struct xpart *restrict xp, double dt_drift,
     double dt_kick_hydro, double dt_kick_grav, double dt_therm,
-    integertime_t ti_old, integertime_t ti_current,
-    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
-    const struct entropy_floor_properties *floor) {
+    integertime_t ti_old, integertime_t ti_current, const struct engine *e,
+    struct replication_list *replication_list, const double cell_loc[3]) {
+
+  const struct cosmology *cosmo = e->cosmology;
+  const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct entropy_floor_properties *floor = e->entropy_floor;
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (p->ti_drift != ti_old)
@@ -143,6 +173,12 @@ __attribute__((always_inline)) INLINE static void drift_part(
   }
 #endif
 
+#ifdef WITH_LIGHTCONE
+  /* Store initial position and velocity for lightcone check after the drift */
+  const double x[3] = {p->x[0], p->x[1], p->x[2]};
+  const float v_full[3] = {xp->v_full[0], xp->v_full[1], xp->v_full[2]};
+#endif
+
   /* Drift... */
   p->x[0] += xp->v_full[0] * dt_drift;
   p->x[1] += xp->v_full[1] * dt_drift;
@@ -179,6 +215,13 @@ __attribute__((always_inline)) INLINE static void drift_part(
     p->v[2] = 0.f;
   }
 #endif
+
+#ifdef WITH_LIGHTCONE
+  /* Check if the particle crossed the lightcone */
+  if (p->gpart)
+    lightcone_check_particle_crosses(e, replication_list, x, v_full, p->gpart,
+                                     dt_drift, ti_old, ti_current, cell_loc);
+#endif
 }
 
 /**
@@ -191,7 +234,8 @@ __attribute__((always_inline)) INLINE static void drift_part(
  */
 __attribute__((always_inline)) INLINE static void drift_spart(
     struct spart *restrict sp, double dt_drift, integertime_t ti_old,
-    integertime_t ti_current) {
+    integertime_t ti_current, const struct engine *e,
+    struct replication_list *replication_list, const double cell_loc[3]) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (sp->ti_drift != ti_old)
@@ -219,6 +263,12 @@ __attribute__((always_inline)) INLINE static void drift_spart(
   }
 #endif
 
+#ifdef WITH_LIGHTCONE
+  /* Store initial position and velocity for lightcone check after the drift */
+  const double x[3] = {sp->x[0], sp->x[1], sp->x[2]};
+  const float v_full[3] = {sp->v[0], sp->v[1], sp->v[2]};
+#endif
+
   /* Drift... */
   sp->x[0] += sp->v[0] * dt_drift;
   sp->x[1] += sp->v[1] * dt_drift;
@@ -233,6 +283,13 @@ __attribute__((always_inline)) INLINE static void drift_spart(
     sp->x_diff[k] -= dx;
     sp->x_diff_sort[k] -= dx;
   }
+
+#ifdef WITH_LIGHTCONE
+  /* Check for lightcone crossing */
+  if (sp->gpart)
+    lightcone_check_particle_crosses(e, replication_list, x, v_full, sp->gpart,
+                                     dt_drift, ti_old, ti_current, cell_loc);
+#endif
 }
 
 /**
@@ -245,7 +302,8 @@ __attribute__((always_inline)) INLINE static void drift_spart(
  */
 __attribute__((always_inline)) INLINE static void drift_bpart(
     struct bpart *restrict bp, double dt_drift, integertime_t ti_old,
-    integertime_t ti_current) {
+    integertime_t ti_current, const struct engine *e,
+    struct replication_list *replication_list, const double cell_loc[3]) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (bp->ti_drift != ti_old)
@@ -273,6 +331,12 @@ __attribute__((always_inline)) INLINE static void drift_bpart(
   }
 #endif
 
+#ifdef WITH_LIGHTCONE
+  /* Store initial position and velocity for lightcone check after the drift */
+  const double x[3] = {bp->x[0], bp->x[1], bp->x[2]};
+  const float v_full[3] = {bp->v[0], bp->v[1], bp->v[2]};
+#endif
+
   /* Drift... */
   bp->x[0] += bp->v[0] * dt_drift;
   bp->x[1] += bp->v[1] * dt_drift;
@@ -286,6 +350,13 @@ __attribute__((always_inline)) INLINE static void drift_bpart(
     const float dx = bp->v[k] * dt_drift;
     bp->x_diff[k] -= dx;
   }
+
+#ifdef WITH_LIGHTCONE
+  /* Check for lightcone crossing */
+  if (bp->gpart)
+    lightcone_check_particle_crosses(e, replication_list, x, v_full, bp->gpart,
+                                     dt_drift, ti_old, ti_current, cell_loc);
+#endif
 }
 
 /**
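
All four drift kernels snapshot the pre-drift position and velocity because lightcone_check_particle_crosses() needs both endpoints of the step: the particle has crossed when the observer's past lightcone radius, which shrinks as time advances, overtakes the particle's comoving distance somewhere in [ti_old, ti_current]. A one-dimensional sketch of that test (illustrative only, not SWIFT's actual implementation):

```c
/* r_old/r_new are the particle's comoving distances from the observer
 * before and after the drift; R_old/R_new are the lightcone radii at the
 * same two times (R decreases as time advances towards the observer).
 * The particle crossed during the step if it was inside the lightcone
 * before and outside after; the real code then interpolates along the
 * trajectory to find the crossing point. */
static int crossed_lightcone(double r_old, double r_new, double R_old,
                             double R_new) {
  return (r_old <= R_old) && (r_new > R_new);
}
```
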
diff --git a/src/engine.c b/src/engine.c
index 6b9e7b094fa21a302b0ebaa725dc93a4a982f5c4..b3c8a4df82f166e6893c47b2b80f0ec1292b566f 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -67,11 +67,14 @@
 #include "debug.h"
 #include "equation_of_state.h"
 #include "error.h"
+#include "extra_io.h"
 #include "feedback.h"
 #include "fof.h"
 #include "gravity.h"
 #include "gravity_cache.h"
 #include "hydro.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_array.h"
 #include "line_of_sight.h"
 #include "map.h"
 #include "memuse.h"
@@ -2198,6 +2201,13 @@ void engine_step(struct engine *e) {
     e->time_step = (e->ti_current - e->ti_old) * e->time_base;
   }
 
+#ifdef WITH_LIGHTCONE
+  /* Determine which periodic replications could contribute to the lightcone
+     during this time step */
+  lightcone_array_prepare_for_step(e->lightcone_array_properties, e->cosmology,
+                                   e->ti_earliest_undrifted, e->ti_current);
+#endif
+
   /*****************************************************/
   /* OK, we now know what the next end of time-step is */
   /*****************************************************/
@@ -2453,6 +2463,14 @@ void engine_step(struct engine *e) {
   e->sink_updates_since_rebuild += e->collect_group1.sink_updated;
   e->b_updates_since_rebuild += e->collect_group1.b_updated;
 
+  /* Check if we updated all of the particles on this step */
+  if ((e->collect_group1.updated == e->total_nr_parts) &&
+      (e->collect_group1.g_updated == e->total_nr_gparts) &&
+      (e->collect_group1.s_updated == e->total_nr_sparts) &&
+      (e->collect_group1.sink_updated == e->total_nr_sinks) &&
+      (e->collect_group1.b_updated == e->total_nr_bparts))
+    e->ti_earliest_undrifted = e->ti_current;
+
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that all cells have correct time-step information */
   space_check_timesteps(e->s);
@@ -2467,9 +2485,18 @@ void engine_step(struct engine *e) {
             e->collect_group1.csds_file_size_gb);
 #endif
 
-  /********************************************************/
-  /* OK, we are done with the regular stuff. Time for i/o */
-  /********************************************************/
+    /********************************************************/
+    /* OK, we are done with the regular stuff. Time for i/o */
+    /********************************************************/
+
+#ifdef WITH_LIGHTCONE
+  /* Flush lightcone buffers if necessary */
+  const int flush = e->flush_lightcone_maps;
+  lightcone_array_flush(e->lightcone_array_properties, &(e->threadpool),
+                        e->cosmology, e->internal_units, e->snapshot_units,
+                        /*flush_map_updates=*/flush, /*flush_particles=*/0,
+                        /*end_file=*/0, /*dump_all_shells=*/0);
+#endif
 
   /* Create a restart file if needed. */
   engine_dump_restarts(e, 0, e->restart_onexit && engine_is_done(e));
@@ -2800,8 +2827,10 @@ void engine_numa_policies(int rank, int verbose) {
  * @param cooling_func The properties of the cooling function.
  * @param starform The #star_formation model of this run.
  * @param chemistry The chemistry information.
+ * @param io_extra_props The properties needed for the extra i/o fields.
  * @param fof_properties The #fof_props of this run.
  * @param los_properties the #los_props of this run.
+ * @param lightcone_array_properties the #lightcone_array_props of this run.
  * @param ics_metadata metadata read from the simulation ICs
  */
 void engine_init(
@@ -2822,7 +2851,9 @@ void engine_init(
     struct cooling_function_data *cooling_func,
     const struct star_formation *starform,
     const struct chemistry_global_data *chemistry,
+    struct extra_io_properties *io_extra_props,
     struct fof_props *fof_properties, struct los_props *los_properties,
+    struct lightcone_array_props *lightcone_array_properties,
     struct ic_info *ics_metadata) {
 
   struct clocks_time tic, toc;
@@ -2847,6 +2878,7 @@ void engine_init(
   e->reparttype = reparttype;
   e->ti_old = 0;
   e->ti_current = 0;
+  e->ti_earliest_undrifted = 0;
   e->time_step = 0.;
   e->time_base = 0.;
   e->time_base_inv = 0.;
@@ -2940,11 +2972,13 @@ void engine_init(
   e->feedback_props = feedback;
   e->rt_props = rt;
   e->chemistry = chemistry;
+  e->io_extra_props = io_extra_props;
   e->fof_properties = fof_properties;
   e->parameter_file = params;
   e->output_options = output_options;
   e->stf_this_timestep = 0;
   e->los_properties = los_properties;
+  e->lightcone_array_properties = lightcone_array_properties;
   e->ics_metadata = ics_metadata;
 #ifdef WITH_MPI
   e->usertime_last_step = 0.0;
@@ -3376,10 +3410,12 @@ void engine_clean(struct engine *e, const int fof, const int restart) {
     free((void *)e->cooling_func);
     free((void *)e->star_formation);
     free((void *)e->feedback_props);
+    free((void *)e->io_extra_props);
 #ifdef WITH_FOF
     free((void *)e->fof_properties);
 #endif
     free((void *)e->los_properties);
+    free((void *)e->lightcone_array_properties);
     free((void *)e->ics_metadata);
 #ifdef WITH_MPI
     free((void *)e->reparttype);
@@ -3437,10 +3473,12 @@ void engine_struct_dump(struct engine *e, FILE *stream) {
   neutrino_struct_dump(e->neutrino_properties, stream);
   neutrino_response_struct_dump(e->neutrino_response, stream);
   chemistry_struct_dump(e->chemistry, stream);
+  extra_io_struct_dump(e->io_extra_props, stream);
 #ifdef WITH_FOF
   fof_struct_dump(e->fof_properties, stream);
 #endif
   los_struct_dump(e->los_properties, stream);
+  lightcone_array_struct_dump(e->lightcone_array_properties, stream);
   ic_info_struct_dump(e->ics_metadata, stream);
   parser_struct_dump(e->parameter_file, stream);
   output_options_struct_dump(e->output_options, stream);
@@ -3582,6 +3620,11 @@ void engine_struct_restore(struct engine *e, FILE *stream) {
   chemistry_struct_restore(chemistry, stream);
   e->chemistry = chemistry;
 
+  struct extra_io_properties *extra_io_props =
+      (struct extra_io_properties *)malloc(sizeof(struct extra_io_properties));
+  extra_io_struct_restore(extra_io_props, stream);
+  e->io_extra_props = extra_io_props;
+
 #ifdef WITH_FOF
   struct fof_props *fof_props =
       (struct fof_props *)malloc(sizeof(struct fof_props));
@@ -3594,6 +3637,12 @@ void engine_struct_restore(struct engine *e, FILE *stream) {
   los_struct_restore(los_properties, stream);
   e->los_properties = los_properties;
 
+  struct lightcone_array_props *lightcone_array_properties =
+      (struct lightcone_array_props *)malloc(
+          sizeof(struct lightcone_array_props));
+  lightcone_array_struct_restore(lightcone_array_properties, stream);
+  e->lightcone_array_properties = lightcone_array_properties;
+
   struct ic_info *ics_metadata =
       (struct ic_info *)malloc(sizeof(struct ic_info));
   ic_info_struct_restore(ics_metadata, stream);
diff --git a/src/engine.h b/src/engine.h
index 68d91177edb3aea8ad186a469919d5c30e1f31d9..f60ea3e0f7ca477ed32e504534d7250e4672f314 100644
--- a/src/engine.h
+++ b/src/engine.h
@@ -37,6 +37,8 @@
 #include "clocks.h"
 #include "collectgroup.h"
 #include "ic_info.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_array.h"
 #include "mesh_gravity.h"
 #include "output_options.h"
 #include "parser.h"
@@ -49,6 +51,7 @@
 #include "velociraptor_interface.h"
 
 struct black_holes_properties;
+struct extra_io_properties;
 struct external_potential;
 
 /**
@@ -182,6 +185,9 @@ struct engine {
   double time;
   integertime_t ti_current;
 
+  /* The earliest time any particle may still need to be drifted from */
+  integertime_t ti_earliest_undrifted;
+
   /* The highest active bin at this time */
   timebin_t max_active_bin;
 
@@ -499,6 +505,9 @@ struct engine {
   /* Properties of the chemistry model */
   const struct chemistry_global_data *chemistry;
 
+  /* Properties used to compute the extra i/o fields */
+  struct extra_io_properties *io_extra_props;
+
   /*! The FOF properties data. */
   struct fof_props *fof_properties;
 
@@ -564,6 +573,9 @@ struct engine {
   /* Line of sight properties. */
   struct los_props *los_properties;
 
+  /* Lightcone array properties. */
+  struct lightcone_array_props *lightcone_array_properties;
+
   /* Line of sight outputs information. */
   struct output_list *output_list_los;
   double a_first_los;
@@ -572,6 +584,9 @@ struct engine {
   integertime_t ti_next_los;
   int los_output_count;
 
+  /* Flag set when the lightcone map buffers should be flushed this step */
+  int flush_lightcone_maps;
+
 #ifdef SWIFT_GRAVITY_FORCE_CHECKS
   /* Run brute force checks only on steps when all gparts active? */
   int force_checks_only_all_active;
@@ -626,7 +641,9 @@ void engine_init(
     struct cooling_function_data *cooling_func,
     const struct star_formation *starform,
     const struct chemistry_global_data *chemistry,
+    struct extra_io_properties *io_extra_props,
     struct fof_props *fof_properties, struct los_props *los_properties,
+    struct lightcone_array_props *lightcone_array_properties,
     struct ic_info *ics_metadata);
 void engine_config(int restart, int fof, struct engine *e,
                    struct swift_params *params, int nr_nodes, int nodeID,
diff --git a/src/engine_collect_end_of_step.c b/src/engine_collect_end_of_step.c
index 9f96556463b0d1895d4cd643435a591a6a7dd673..4eacabbe3d9736ca7db42b4923cad269fb550cb7 100644
--- a/src/engine_collect_end_of_step.c
+++ b/src/engine_collect_end_of_step.c
@@ -27,6 +27,7 @@
 
 /* Local headers. */
 #include "active.h"
+#include "lightcone/lightcone_array.h"
 #include "star_formation_logger.h"
 #include "timeline.h"
 
@@ -45,6 +46,7 @@ struct end_of_step_data {
   struct engine *e;
   struct star_formation_history sfh;
   float runtime;
+  int flush_lightcone_maps;
   double deadtime;
   float csds_file_size_gb;
 };
@@ -219,6 +221,11 @@ void engine_collect_end_of_step(struct engine *e, int apply) {
   /* Need to use a consistent check of the hours since we started. */
   data.runtime = clocks_get_hours_since_start();
 
+  /* Get flag to determine if the lightcone map buffers should be flushed
+   * this step */
+  data.flush_lightcone_maps =
+      lightcone_array_trigger_map_update(e->lightcone_array_properties);
+
   data.deadtime = e->local_deadtime;
 
   /* Initialize the total SFH of the simulation to zero */
@@ -247,7 +254,8 @@ void engine_collect_end_of_step(struct engine *e, int apply) {
       data.ti_sinks_end_min, data.ti_sinks_beg_max, data.ti_black_holes_end_min,
       data.ti_black_holes_beg_max, e->forcerebuild, e->s->tot_cells,
       e->sched.nr_tasks, (float)e->sched.nr_tasks / (float)e->s->tot_cells,
-      data.sfh, data.runtime, data.deadtime, data.csds_file_size_gb);
+      data.sfh, data.runtime, data.flush_lightcone_maps, data.deadtime,
+      data.csds_file_size_gb);
 
 /* Aggregate collective data from the different nodes for this step. */
 #ifdef WITH_MPI
diff --git a/src/engine_drift.c b/src/engine_drift.c
index ccabb07575afc638e8162d96516c380619d95422..61e64b0913ef1b16c35ecf8a7bd07624366f86f0 100644
--- a/src/engine_drift.c
+++ b/src/engine_drift.c
@@ -27,6 +27,7 @@
 
 /* This object's header. */
 #include "engine.h"
+#include "lightcone/lightcone_array.h"
 
 /**
  * @brief Mapper function to drift *all* the #part to the current time.
@@ -71,7 +72,7 @@ void engine_do_drift_all_part_mapper(void *map_data, int num_elements,
     if (c->nodeID == e->nodeID) {
 
       /* Drift all the particles */
-      cell_drift_part(c, e, /* force the drift=*/1);
+      cell_drift_part(c, e, /* force the drift=*/1, NULL);
     }
   }
 }
@@ -119,7 +120,7 @@ void engine_do_drift_all_gpart_mapper(void *map_data, int num_elements,
     if (c->nodeID == e->nodeID) {
 
       /* Drift all the particles */
-      cell_drift_gpart(c, e, /* force the drift=*/1);
+      cell_drift_gpart(c, e, /* force the drift=*/1, /*replication_list=*/NULL);
     }
   }
 }
@@ -167,7 +168,7 @@ void engine_do_drift_all_spart_mapper(void *map_data, int num_elements,
     if (c->nodeID == e->nodeID) {
 
       /* Drift all the particles */
-      cell_drift_spart(c, e, /* force the drift=*/1);
+      cell_drift_spart(c, e, /* force the drift=*/1, NULL);
     }
   }
 }
@@ -215,7 +216,7 @@ void engine_do_drift_all_bpart_mapper(void *map_data, int num_elements,
     if (c->nodeID == e->nodeID) {
 
       /* Drift all the particles */
-      cell_drift_bpart(c, e, /* force the drift=*/1);
+      cell_drift_bpart(c, e, /* force the drift=*/1, NULL);
     }
   }
 }
@@ -334,6 +335,13 @@ void engine_drift_all(struct engine *e, const int drift_mpoles) {
   }
 #endif
 
+#ifdef WITH_LIGHTCONE
+  /* Determine which periodic replications could contribute to the lightcone
+     during this time step */
+  lightcone_array_prepare_for_step(e->lightcone_array_properties, e->cosmology,
+                                   e->ti_earliest_undrifted, e->ti_current);
+#endif
+
   if (!e->restarting) {
 
     /* Normal case: We have a list of local cells with tasks to play with */
@@ -421,9 +429,21 @@ void engine_drift_all(struct engine *e, const int drift_mpoles) {
                     e->verbose);
 #endif
 
+  /* All particles have now been drifted to ti_current */
+  e->ti_earliest_undrifted = e->ti_current;
+
   if (e->verbose)
     message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
             clocks_getunit());
+
+#ifdef WITH_LIGHTCONE
+  /* Drifting all of the particles can cause many particles to cross
+     the lightcone, so flush the buffers now to reduce peak memory use. */
+  lightcone_array_flush(e->lightcone_array_properties, &e->threadpool,
+                        e->cosmology, e->internal_units, e->snapshot_units,
+                        /*flush_map_updates=*/1, /*flush_particles=*/1,
+                        /*end_file=*/0, /*dump_all_shells=*/0);
+#endif
 }
 
 /**
diff --git a/src/engine_io.c b/src/engine_io.c
index 21aa99669b8d615c8524048e11ff24424399b929..334b68123943e3b7728445aa68751c1044e0a7d2 100644
--- a/src/engine_io.c
+++ b/src/engine_io.c
@@ -34,6 +34,8 @@
 #include "csds_io.h"
 #include "distributed_io.h"
 #include "kick.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_array.h"
 #include "line_of_sight.h"
 #include "parallel_io.h"
 #include "serial_io.h"
@@ -79,6 +81,17 @@ void engine_dump_restarts(struct engine *e, int drifted_all, int force) {
       /* Drift all particles first (may have just been done). */
       if (!drifted_all) engine_drift_all(e, /*drift_mpole=*/1);
 
+#ifdef WITH_LIGHTCONE
+      /* Flush lightcone buffers before dumping restarts */
+      lightcone_array_flush(e->lightcone_array_properties, &(e->threadpool),
+                            e->cosmology, e->internal_units, e->snapshot_units,
+                            /*flush_map_updates=*/1, /*flush_particles=*/1,
+                            /*end_file=*/1, /*dump_all_shells=*/0);
+#ifdef WITH_MPI
+      MPI_Barrier(MPI_COMM_WORLD);
+#endif
+#endif
+
         /* Free the foreign particles to get more breathing space. */
 #ifdef WITH_MPI
       if (e->free_foreign_when_dumping_restart)
diff --git a/src/exchange_structs.c b/src/exchange_structs.c
new file mode 100644
index 0000000000000000000000000000000000000000..46e630da0797e3ce56152a58e4639bb2ffe13547
--- /dev/null
+++ b/src/exchange_structs.c
@@ -0,0 +1,126 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Standard headers */
+#include <limits.h>
+#include <stdlib.h>
+
+/* Local headers */
+#include "error.h"
+
+/**
+ * @brief Given arrays of structs of size element_size, send nr_send[i]
+ * elements to each node i and receive nr_recv[i] elements from each
+ * node i. The receive buffer recvbuf must be allocated by the caller
+ * and be large enough to hold all incoming elements.
+ *
+ * @param nr_send Number of elements to send to each node
+ * @param sendbuf The elements to send
+ * @param nr_recv Number of elements to receive from each node
+ * @param recvbuf The output buffer (allocated by the caller)
+ * @param element_size Size of one element in bytes
+ */
+void exchange_structs(size_t *nr_send, void *sendbuf, size_t *nr_recv,
+                      void *recvbuf, size_t element_size) {
+
+#ifdef WITH_MPI
+
+  /* Determine rank, number of ranks */
+  int nr_nodes, nodeID;
+  MPI_Comm_size(MPI_COMM_WORLD, &nr_nodes);
+  MPI_Comm_rank(MPI_COMM_WORLD, &nodeID);
+
+  /* Compute send offsets */
+  size_t *send_offset = malloc(nr_nodes * sizeof(size_t));
+  send_offset[0] = 0;
+  for (int i = 1; i < nr_nodes; i += 1) {
+    send_offset[i] = send_offset[i - 1] + nr_send[i - 1];
+  }
+
+  /* Compute receive offsets */
+  size_t *recv_offset = malloc(nr_nodes * sizeof(size_t));
+  recv_offset[0] = 0;
+  for (int i = 1; i < nr_nodes; i += 1) {
+    recv_offset[i] = recv_offset[i - 1] + nr_recv[i - 1];
+  }
+
+  /* Allocate request objects (one send and receive per node) */
+  MPI_Request *request = malloc(2 * sizeof(MPI_Request) * nr_nodes);
+
+  /* Make type to communicate the struct */
+  MPI_Datatype value_mpi_type;
+  if (MPI_Type_contiguous(element_size, MPI_BYTE, &value_mpi_type) !=
+          MPI_SUCCESS ||
+      MPI_Type_commit(&value_mpi_type) != MPI_SUCCESS) {
+    error("Failed to create MPI type for struct to exchange.");
+  }
+
+  /*
+   * Post the send operations. This is an alltoallv really but
+   * we want to avoid the limits imposed by int counts and offsets
+   * in MPI_Alltoallv.
+   */
+  for (int i = 0; i < nr_nodes; i += 1) {
+    if (nr_send[i] > 0) {
+
+      /* TODO: handle very large messages */
+      if (nr_send[i] > INT_MAX)
+        error("exchange_structs() fails if nr_send > INT_MAX!");
+
+      char *buf = (char *)sendbuf;
+      MPI_Isend(&(buf[send_offset[i] * element_size]), (int)nr_send[i],
+                value_mpi_type, i, 0, MPI_COMM_WORLD, &(request[i]));
+    } else {
+      request[i] = MPI_REQUEST_NULL;
+    }
+  }
+
+  /* Post the receives */
+  for (int i = 0; i < nr_nodes; i += 1) {
+    if (nr_recv[i] > 0) {
+
+      /* TODO: handle very large messages */
+      if (nr_recv[i] > INT_MAX)
+        error("exchange_structs() fails if nr_recv > INT_MAX!");
+
+      char *buf = (char *)recvbuf;
+      MPI_Irecv(&(buf[recv_offset[i] * element_size]), (int)nr_recv[i],
+                value_mpi_type, i, 0, MPI_COMM_WORLD, &(request[i + nr_nodes]));
+    } else {
+      request[i + nr_nodes] = MPI_REQUEST_NULL;
+    }
+  }
+
+  /* Wait for everything to complete */
+  MPI_Waitall(2 * nr_nodes, request, MPI_STATUSES_IGNORE);
+
+  /* Done with the MPI type */
+  MPI_Type_free(&value_mpi_type);
+
+  /* Tidy up */
+  free(recv_offset);
+  free(send_offset);
+  free(request);
+#else
+  error("should only be called in MPI mode");
+#endif
+}
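
exchange_structs() leaves the count exchange and the receive-buffer allocation to the caller. A sketch of the expected call sequence (the helper itself is the only SWIFT symbol used; the `item` type and surrounding function are illustrative):

```c
#include <mpi.h>
#include <stdlib.h>

#include "exchange_structs.h"

struct item { long long id; double pos[3]; };

/* Exchange items between all ranks: nr_send[i] items go to rank i.
 * Returns a newly allocated buffer of received items and stores its
 * length in *nr_recv_tot_out. */
struct item *exchange_items(size_t *nr_send, struct item *sendbuf,
                            size_t *nr_recv_tot_out) {
  int nr_nodes;
  MPI_Comm_size(MPI_COMM_WORLD, &nr_nodes);

  /* First tell every rank how many items it will receive from us. */
  size_t *nr_recv = malloc(nr_nodes * sizeof(size_t));
  MPI_Alltoall(nr_send, (int)sizeof(size_t), MPI_BYTE, nr_recv,
               (int)sizeof(size_t), MPI_BYTE, MPI_COMM_WORLD);

  /* Allocate the receive buffer: exchange_structs() does not do this. */
  size_t nr_recv_tot = 0;
  for (int i = 0; i < nr_nodes; i++) nr_recv_tot += nr_recv[i];
  struct item *recvbuf = malloc(nr_recv_tot * sizeof(struct item));

  /* Do the actual data exchange. */
  exchange_structs(nr_send, sendbuf, nr_recv, recvbuf, sizeof(struct item));

  free(nr_recv);
  *nr_recv_tot_out = nr_recv_tot;
  return recvbuf;
}
```
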
diff --git a/src/exchange_structs.h b/src/exchange_structs.h
new file mode 100644
index 0000000000000000000000000000000000000000..1266978987d1849cd4e1cce5c2a4680ff3926f73
--- /dev/null
+++ b/src/exchange_structs.h
@@ -0,0 +1,26 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_EXCHANGE_STRUCTS_H
+#define SWIFT_EXCHANGE_STRUCTS_H
+
+void exchange_structs(size_t *nr_send, void *sendbuf, size_t *nr_recv,
+                      void *recvbuf, size_t element_size);
+
+#endif
diff --git a/src/extra_io.h b/src/extra_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c4f4e51a82948186c7e823c42a9220cb3061f6c
--- /dev/null
+++ b/src/extra_io.h
@@ -0,0 +1,88 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EXTRA_IO_H
+#define SWIFT_EXTRA_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the i/o routines the user asked for */
+#if defined(EXTRA_IO_EAGLE)
+#include "extra_io/EAGLE/extra_io.h"
+#elif defined(EXTRA_IO_NONE)
+
+struct extra_io_properties {};
+
+INLINE static int extra_io_write_particles(const struct part* parts,
+                                           const struct xpart* xparts,
+                                           struct io_props* list,
+                                           const int with_cosmology) {
+  return 0;
+}
+
+INLINE static int extra_io_write_sparticles(const struct spart* sparts,
+                                            struct io_props* list,
+                                            const int with_cosmology) {
+  return 0;
+}
+
+INLINE static int extra_io_write_bparticles(const struct bpart* bparts,
+                                            struct io_props* list,
+                                            const int with_cosmology) {
+  return 0;
+}
+
+#ifdef HAVE_HDF5
+INLINE static void extra_io_write_flavour(hid_t h_grp, hid_t h_grp_columns) {}
+#endif
+
+INLINE static void extra_io_init(struct swift_params* parameter_file,
+                                 const struct unit_system* us,
+                                 const struct phys_const* phys_const,
+                                 const struct cosmology* cosmo,
+                                 struct extra_io_properties* props) {}
+
+INLINE static void extra_io_clean(struct extra_io_properties* props) {}
+
+INLINE static void extra_io_struct_dump(const struct extra_io_properties* props,
+                                        FILE* stream) {}
+
+INLINE static void extra_io_struct_restore(struct extra_io_properties* props,
+                                           FILE* stream) {}
+
+/* In this case there are no extra lightcone map types */
+static const struct lightcone_map_type extra_lightcone_map_types[] = {
+    {
+        .name = "",
+        .update_map = NULL,
+        .ptype_contributes = NULL,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_NO_UNITS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+};
+
+#else
+#error "Invalid choice of extra-i/o."
+#endif
+
+#endif /* SWIFT_EXTRA_IO_H */
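
The `*num_fields += extra_io_write_particles(...)` call sites added in common_io.c rely on exactly this stub design: every writer returns how many entries it appended to the field list, so the EXTRA_IO_NONE variants cost nothing and the call sites never need an #ifdef. A minimal sketch of the convention (hypothetical `field` type, not SWIFT's io_props):

```c
struct field { const char *name; };

/* Each writer appends its fields to list and returns how many it added;
 * a disabled module simply returns 0. */
static int core_write(struct field *list) {
  list[0].name = "Coordinates";
  list[1].name = "Masses";
  return 2;
}
static int extra_write(struct field *list) {
  (void)list; /* EXTRA_IO_NONE: nothing to add */
  return 0;
}

static int select_fields(struct field *list) {
  int num_fields = 0;
  num_fields += core_write(list + num_fields);
  num_fields += extra_write(list + num_fields); /* safe even when empty */
  return num_fields;
}
```
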
diff --git a/src/extra_io/EAGLE/extra.h b/src/extra_io/EAGLE/extra.h
new file mode 100644
index 0000000000000000000000000000000000000000..dff7adc3f5451ced55427c60b6a33e19eaaea569
--- /dev/null
+++ b/src/extra_io/EAGLE/extra.h
@@ -0,0 +1,935 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EXTRA_EAGLE_H
+#define SWIFT_EXTRA_EAGLE_H
+
+#include "chemistry.h"
+#include "cooling.h"
+#include "engine.h"
+
+#define xray_table_date_string 20210713
+
+#define xray_emission_N_temperature 46
+#define xray_emission_N_density 71
+#define xray_emission_N_helium 10
+#define xray_emission_N_element 10
+#define xray_emission_N_redshift 46
+
+/**
+ * @brief X-ray bands available in the interpolation tables
+ */
+enum xray_band_types {
+  xray_band_types_erosita_low_intrinsic_photons,   /*< eROSITA 0.2 - 2.3 keV */
+  xray_band_types_erosita_high_intrinsic_photons,  /*< eROSITA 2.3 - 8.0 keV */
+  xray_band_types_ROSAT_intrinsic_photons,         /*< ROSAT 0.5 - 2.0 keV */
+  xray_band_types_erosita_low_intrinsic_energies,  /*< eROSITA 0.2 - 2.3 keV */
+  xray_band_types_erosita_high_intrinsic_energies, /*< eROSITA 2.3 - 8.0 keV */
+  xray_band_types_ROSAT_intrinsic_energies,        /*< ROSAT 0.5 - 2.0 keV */
+  xray_band_types_count
+};
+
+/**
+ * @brief The general properties required for the extra i/o fields.
+ */
+struct extra_io_properties {
+
+  struct xray_properties {
+
+    /* Element masses for the chemistry elements (cgs) */
+    float *element_mass;
+
+    /* Temperature bins from xray table (cgs) */
+    float *Temperatures;
+
+    /* Density bins from xray table (physical cgs) */
+    float *Densities;
+
+    /* Helium fraction bins from xray table */
+    float *He_bins;
+
+    /* Redshift bins from xray table */
+    float *Redshifts;
+
+    /* Solar metallicities from xray table */
+    float *Solar_metallicity;
+
+    /* Log of solar metallicities from xray table */
+    float *Log10_solar_metallicity;
+
+    /* Integrated photon emissivity in the erosita-low band (0.2-2.3 keV)
+     * (physical) */
+    float *emissivity_erosita_low_intrinsic_photons;
+
+    /* Integrated photon emissivity in the erosita-high band (2.3-8.0 keV)
+     * (physical) */
+    float *emissivity_erosita_high_intrinsic_photons;
+
+    /* Integrated photon emissivity in the ROSAT band (0.5-2.0 keV) (physical)
+     */
+    float *emissivity_ROSAT_intrinsic_photons;
+
+    /* Integrated emissivity in the erosita-low band (0.2-2.3 keV)
+     * (physical) */
+    float *emissivity_erosita_low_intrinsic_energies;
+
+    /* Integrated emissivity in the erosita-high band (2.3-8.0 keV)
+     * (physical) */
+    float *emissivity_erosita_high_intrinsic_energies;
+
+    /* Integrated emissivity in the ROSAT band (0.5-2.0 keV) (physical)
+     */
+    float *emissivity_ROSAT_intrinsic_energies;
+
+    /* Path to the xray table */
+    char xray_table_path[500];
+
+    /* Photon emissivity unit conversion factor */
+    double xray_photon_emissivity_unit_conversion;
+
+    /* Energy emissivity unit conversion factor */
+    double xray_energy_emissivity_unit_conversion;
+
+  } xray_data;
+};
+
+/**
+ * @brief Reads in xray table header. Consists of tables
+ * of values for temperature, hydrogen number density, helium fraction,
+ * solar metallicity, redshifts, and element masses.
+ *
+ * @param xrays Xray data structure
+ * @param fname Xray table path
+ */
+INLINE static void read_xray_header(struct xray_properties *xrays,
+                                    const char *fname) {
+
+  hid_t tempfile_id = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT);
+  if (tempfile_id < 0) error("unable to open file %s\n", fname);
+
+  /* Check whether the correct table version is being used */
+  int datestring;
+
+  hid_t dataset = H5Dopen(tempfile_id, "Date_String", H5P_DEFAULT);
+  herr_t status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+                          H5P_DEFAULT, &datestring);
+  if (status < 0) printf("error reading the date string");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+  if (datestring != xray_table_date_string)
+    error(
+        "The table and code version do not match, please use table version %i",
+        xray_table_date_string);
+
+  /* Read temperature bins */
+  if (posix_memalign((void **)&xrays->Temperatures, SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_temperature * sizeof(float)) != 0)
+    error("Failed to allocate temperatures array\n");
+
+  dataset = H5Dopen(tempfile_id, "Bins/Temperature_bins", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->Temperatures);
+  if (status < 0) printf("error reading temperatures");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* Read density bins */
+  if (posix_memalign((void **)&xrays->Densities, SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_density * sizeof(float)) != 0)
+    error("Failed to allocate densities array\n");
+
+  dataset = H5Dopen(tempfile_id, "Bins/Density_bins", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->Densities);
+  if (status < 0) printf("error reading densities");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* Read Helium bins */
+  if (posix_memalign((void **)&xrays->He_bins, SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_helium * sizeof(float)) != 0)
+    error("Failed to allocate He_bins array\n");
+
+  dataset = H5Dopen(tempfile_id, "Bins/He_bins", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->He_bins);
+  if (status < 0) printf("error reading Helium massfractions");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* Read the (log10 of the) solar metallicities */
+  if (posix_memalign((void **)&xrays->Log10_solar_metallicity,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     chemistry_element_count * sizeof(float)) != 0)
+    error("Failed to allocate Log10_solar_metallicity array\n");
+
+  dataset = H5Dopen(tempfile_id, "Bins/Solar_metallicities", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->Log10_solar_metallicity);
+  if (status < 0) printf("error reading solar metalicities");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* Get Solar metallicities from log solar metallicities */
+  if (posix_memalign((void **)&xrays->Solar_metallicity, SWIFT_STRUCT_ALIGNMENT,
+                     chemistry_element_count * sizeof(float)) != 0)
+    error("Failed to allocate Solar_metallicity array\n");
+
+  for (int i = 0; i < chemistry_element_count; ++i)
+    xrays->Solar_metallicity[i] = exp10f(xrays->Log10_solar_metallicity[i]);
+
+  /* Read redshift bins */
+  if (posix_memalign((void **)&xrays->Redshifts, SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_redshift * sizeof(float)) != 0)
+    error("Failed to allocate Redshifts array\n");
+
+  dataset = H5Dopen(tempfile_id, "Bins/Redshift_bins", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->Redshifts);
+  if (status < 0) printf("error reading redshift bins");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* Read the element masses */
+  if (posix_memalign((void **)&xrays->element_mass, SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_element * sizeof(float)) != 0)
+    error("Failed to allocate element_mass array\n");
+
+  dataset = H5Dopen(tempfile_id, "Bins/Element_masses", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->element_mass);
+  if (status < 0) printf("error reading element masses");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+}
+
+/**
+ * @brief Reads in xray table. Consists of tables
+ * of values for xray emissivities in different bands.
+ * We read the erosita-low, erosita-high and ROSAT bands
+ * in their intrinsic forms (i.e. not convolved with an instrument response)
+ *
+ * @param xrays Xray data structure
+ * @param fname Xray table path
+ */
+INLINE static void read_xray_table(struct xray_properties *xrays,
+                                   const char *fname) {
+
+  /* Open File */
+  hid_t file_id = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT);
+  if (file_id < 0) error("unable to open file %s\n", fname);
+
+  /* erosita-low intrinsic photons */
+  if (swift_memalign("xrays_table_erosita_low_photons",
+                     (void **)&xrays->emissivity_erosita_low_intrinsic_photons,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_redshift * xray_emission_N_helium *
+                         xray_emission_N_element * xray_emission_N_temperature *
+                         xray_emission_N_density * sizeof(float)) != 0)
+    error(
+        "Failed to allocate xray emissivity_erosita_low_intrinsic_photons "
+        "array\n");
+
+  /* Read full table */
+  hid_t dataset =
+      H5Dopen(file_id, "erosita-low/photons_intrinsic", H5P_DEFAULT);
+  herr_t status =
+      H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+              xrays->emissivity_erosita_low_intrinsic_photons);
+  if (status < 0) printf("error reading X-Ray table\n");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* erosita-high intrinsic photons */
+  if (swift_memalign("xrays_table_erosita_high_photons",
+                     (void **)&xrays->emissivity_erosita_high_intrinsic_photons,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_redshift * xray_emission_N_helium *
+                         xray_emission_N_element * xray_emission_N_temperature *
+                         xray_emission_N_density * sizeof(float)) != 0)
+    error(
+        "Failed to allocate xray emissivity_erosita_high_intrinsic_photons "
+        "array\n");
+
+  /* Read full table */
+  dataset = H5Dopen(file_id, "erosita-high/photons_intrinsic", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->emissivity_erosita_high_intrinsic_photons);
+  if (status < 0) printf("error reading X-Ray table\n");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* ROSAT intrinsic photons */
+  if (swift_memalign("xray_table_ROSAT_photons",
+                     (void **)&xrays->emissivity_ROSAT_intrinsic_photons,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_redshift * xray_emission_N_helium *
+                         xray_emission_N_element * xray_emission_N_temperature *
+                         xray_emission_N_density * sizeof(float)) != 0)
+    error("Failed to allocate xray emissivity_ROSAT_intrinsic_photons array\n");
+
+  /* Read full table */
+  dataset = H5Dopen(file_id, "ROSAT/photons_intrinsic", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->emissivity_ROSAT_intrinsic_photons);
+  if (status < 0) printf("error reading X-Ray table\n");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* erosita-low intrinsic energies */
+  if (swift_memalign("xrays_table_erosita_low_energies",
+                     (void **)&xrays->emissivity_erosita_low_intrinsic_energies,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_redshift * xray_emission_N_helium *
+                         xray_emission_N_element * xray_emission_N_temperature *
+                         xray_emission_N_density * sizeof(float)) != 0)
+    error(
+        "Failed to allocate xray emissivity_erosita_low_intrinsic_energies "
+        "array\n");
+
+  /* Read full table */
+  dataset = H5Dopen(file_id, "erosita-low/energies_intrinsic", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->emissivity_erosita_low_intrinsic_energies);
+  if (status < 0) printf("error reading X-Ray table\n");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* erosita-high intrinsic energies */
+  if (swift_memalign(
+          "xrays_table_erosita_high_energies",
+          (void **)&xrays->emissivity_erosita_high_intrinsic_energies,
+          SWIFT_STRUCT_ALIGNMENT,
+          xray_emission_N_redshift * xray_emission_N_helium *
+              xray_emission_N_element * xray_emission_N_temperature *
+              xray_emission_N_density * sizeof(float)) != 0)
+    error(
+        "Failed to allocate xray emissivity_erosita_high_intrinsic_energies "
+        "array\n");
+
+  /* Read full table */
+  dataset = H5Dopen(file_id, "erosita-high/energies_intrinsic", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->emissivity_erosita_high_intrinsic_energies);
+  if (status < 0) printf("error reading X-Ray table\n");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* ROSAT intrinsic energies */
+  if (swift_memalign("xray_table_ROSAT_energies",
+                     (void **)&xrays->emissivity_ROSAT_intrinsic_energies,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     xray_emission_N_redshift * xray_emission_N_helium *
+                         xray_emission_N_element * xray_emission_N_temperature *
+                         xray_emission_N_density * sizeof(float)) != 0)
+    error(
+        "Failed to allocate xray emissivity_ROSAT_intrinsic_energies array\n");
+
+  /* Read full table */
+  dataset = H5Dopen(file_id, "ROSAT/energies_intrinsic", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   xrays->emissivity_ROSAT_intrinsic_energies);
+  if (status < 0) printf("error reading X-Ray table\n");
+  status = H5Dclose(dataset);
+  if (status < 0) printf("error closing dataset");
+
+  /* Close file */
+  status = H5Fclose(file_id);
+  if (status < 0) printf("error closing file");
+}
+
+/**
+ * @brief Find the 1d index for a table dimension with regularly spaced bins
+ *
+ * @param table 1d array with binned values
+ * @param size number of entries in the table
+ * @param x value for which we aim to find the index
+ * @param i (return) index
+ * @param dx (return) offset from index bin
+ */
+INLINE static void get_index_1d(const float *restrict table, const int size,
+                                const float x, int *i, float *restrict dx) {
+
+  const float epsilon = 1e-4f;
+
+  const float delta = (size - 1) / (table[size - 1] - table[0]);
+
+  if (x < table[0] + epsilon) {
+    /* We are below the first element */
+    *i = 0;
+    *dx = 0.f;
+  } else if (x < table[size - 1] - epsilon) {
+    /* Normal case */
+    *i = (x - table[0]) * delta;
+    *dx = (x - table[*i]) * delta;
+  } else {
+    /* We are after the last element */
+    *i = size - 2;
+    *dx = 1.f;
+  }
+}
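+
+/* A minimal worked example (not part of the original code): for the
+ * regularly spaced table {0.f, 1.f, 2.f, 3.f} with size = 4, calling
+ * get_index_1d(table, 4, 1.5f, &i, &dx) yields i = 1 and dx = 0.5f,
+ * i.e. x sits halfway between table[1] and table[2]. Values outside the
+ * table range are clamped to the first or last bin with dx = 0 or 1. */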
+
+/**
+ * @brief Find the 1d index for a table dimension with irregularly spaced bins
+ *
+ * @param table 1d array with monotonically increasing binned values
+ * @param size number of entries in the table
+ * @param x value for which we aim to find the index
+ * @param i (return) index
+ * @param dx (return) offset from index bin
+ */
+INLINE static void get_index_1d_irregular(const float *restrict table,
+                                          const int size, const float x, int *i,
+                                          float *restrict dx) {
+  const float epsilon = 1e-6f;
+
+  if (x < table[0] + epsilon) {
+
+    *i = 0;
+    *dx = 0.f;
+
+  } else if (x < table[size - 1] - epsilon) {
+
+    int min_idx = -1;
+
+    /* Do this the hard way: Search the table
+     * for the largest index i in table[] such
+     * that table[i] < x */
+    for (int idx = 0; idx < size; idx++) {
+
+      if (x - table[idx] <= 0.f) {
+
+        /* Found the first entry that is larger than x, go back by 1. */
+        min_idx = idx - 1;
+        break;
+      }
+    }
+
+    *i = min_idx;
+    *dx = (x - table[min_idx]) / (table[min_idx + 1] - table[min_idx]);
+
+  } else {
+
+    *i = size - 2;
+    *dx = 1.f;
+  }
+}
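+
+/* Hedged example of the irregular variant: for the table {0.f, 1.f, 10.f}
+ * and x = 5.5f, the linear search finds the first entry larger than x
+ * (table[2] = 10), so i = 1 and dx = (5.5 - 1) / (10 - 1) = 0.5f. The
+ * O(size) scan is acceptable here because the tables are small. */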
+
+/**
+ * @brief Returns the 1d index of the element with 5d indices x,y,z,w,v
+ * from a flattened 5d array in row-major order
+ *
+ * @param x, y, z, w, v Indices of the element of interest
+ * @param Nx, Ny, Nz, Nw, Nv Sizes of the array dimensions
+ */
+__attribute__((always_inline)) INLINE int row_major_index_5d(
+    const int x, const int y, const int z, const int w, const int v,
+    const int Nx, const int Ny, const int Nz, const int Nw, const int Nv) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  assert(x < Nx);
+  assert(y < Ny);
+  assert(z < Nz);
+  assert(w < Nw);
+  assert(v < Nv);
+#endif
+
+  return x * Ny * Nz * Nw * Nv + y * Nz * Nw * Nv + z * Nw * Nv + w * Nv + v;
+}
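+
+/* Illustrative check (not from the original patch): with dimensions
+ * (Nx, Ny, Nz, Nw, Nv) = (2, 3, 4, 5, 6), element (1, 2, 3, 4, 5) maps to
+ * 1*3*4*5*6 + 2*4*5*6 + 3*5*6 + 4*6 + 5 = 360 + 240 + 90 + 24 + 5 = 719,
+ * the last element of the 720-entry flattened array. */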
+
+/**
+ * @brief 4d interpolation of the Xray table
+ *
+ * @param emissivity xray table
+ * @param element_number Table index of the element of interest
+ * @param n_H_index Index along the Hydrogen number density dimension
+ * @param He_index Index along the Helium abundance dimension
+ * @param T_index Index along temperature dimension
+ * @param z_index Index along redshift dimension
+ * @param d_nH Offset between Hydrogen density and table[n_H_index]
+ * @param d_He Offset between Helium abundance and table[He_index]
+ * @param d_T Offset between temperature and table[T_index]
+ * @param d_z Offset between redshift and table[z_index]
+ *
+ * @return The log10 of the emissivity
+ */
+INLINE static float interpolate_xray(const float *emissivity,
+                                     const int element_number,
+                                     const int nH_index, const int He_index,
+                                     const int T_index, const int z_index,
+                                     const float d_nH, const float d_He,
+                                     const float d_T, const float d_z) {
+  const float t_nH = 1.f - d_nH;
+  const float t_He = 1.f - d_He;
+  const float t_T = 1.f - d_T;
+  const float t_z = 1.f - d_z;
+
+  float result = 0.f;
+
+  result += t_nH * t_He * t_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 0, element_number, T_index + 0,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * t_He * d_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 0, element_number, T_index + 1,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * d_He * t_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 1, element_number, T_index + 0,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * t_He * t_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 0, element_number, T_index + 0,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * d_He * d_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 1, element_number, T_index + 1,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * t_He * d_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 0, element_number, T_index + 1,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * d_He * t_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 1, element_number, T_index + 0,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * d_He * d_T * t_z *
+            emissivity[row_major_index_5d(
+                z_index + 0, He_index + 1, element_number, T_index + 1,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * t_He * t_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 0, element_number, T_index + 0,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * t_He * d_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 0, element_number, T_index + 1,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * d_He * t_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 1, element_number, T_index + 0,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * t_He * t_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 0, element_number, T_index + 0,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += t_nH * d_He * d_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 1, element_number, T_index + 1,
+                nH_index + 0, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * t_He * d_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 0, element_number, T_index + 1,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * d_He * t_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 1, element_number, T_index + 0,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  result += d_nH * d_He * d_T * d_z *
+            emissivity[row_major_index_5d(
+                z_index + 1, He_index + 1, element_number, T_index + 1,
+                nH_index + 1, xray_emission_N_redshift, xray_emission_N_helium,
+                xray_emission_N_element, xray_emission_N_temperature,
+                xray_emission_N_density)];
+
+  return result;
+}
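+
+/* Note (added for clarity): the 16 terms above are the corners of a
+ * quadri-linear interpolation over the (z, He, T, nH) dimensions. Each
+ * weight is a product, per axis, of either d_* or t_* = 1 - d_*, so the 16
+ * weights sum to exactly 1 and the scheme reduces to ordinary linear
+ * interpolation along any single axis. */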
+
+/**
+ * @brief Find the indices of the Xray table to interpolate between and
+ * compute the emissivity
+ *
+ * @param xrays Xray data structure
+ * @param log10_He_fraction Log10 of the Helium fraction
+ * @param log_nH_cgs Log10 of the physical Hydrogen number density in CGS
+ * @param log_T Log10 of the temperature
+ * @param redshift redshift
+ * @param solar_ratio abundance ratio relative to solar
+ * @param band xray band to use
+ *
+ * @return The X-ray emissivity in the corresponding band in CGS units.
+ */
+INLINE static float do_xray_interpolation(
+    const struct xray_properties *xrays, const float log10_He_fraction,
+    const float log_nH_cgs, const float log_T, const float redshift,
+    const float solar_ratio[colibre_cooling_N_elementtypes],
+    const enum xray_band_types band) {
+
+  /* Get indices in the interpolation table along the He, nH, T
+   * and z dimensions */
+  int He_index, log_nH_cgs_index, log_T_index, z_index;
+  float d_He, d_log_nH_cgs, d_log_T, d_z;
+  get_index_1d_irregular(xrays->He_bins, xray_emission_N_helium,
+                         log10_He_fraction, &He_index, &d_He);
+
+  get_index_1d(xrays->Densities, xray_emission_N_density, log_nH_cgs,
+               &log_nH_cgs_index, &d_log_nH_cgs);
+
+  get_index_1d(xrays->Temperatures, xray_emission_N_temperature, log_T,
+               &log_T_index, &d_log_T);
+
+  get_index_1d(xrays->Redshifts, xray_emission_N_redshift, redshift, &z_index,
+               &d_z);
+
+  /* Select the table corresponding to this band */
+  float *table;
+  switch (band) {
+    case xray_band_types_erosita_low_intrinsic_photons:
+      table = xrays->emissivity_erosita_low_intrinsic_photons;
+      break;
+    case xray_band_types_erosita_high_intrinsic_photons:
+      table = xrays->emissivity_erosita_high_intrinsic_photons;
+      break;
+    case xray_band_types_ROSAT_intrinsic_photons:
+      table = xrays->emissivity_ROSAT_intrinsic_photons;
+      break;
+    case xray_band_types_erosita_low_intrinsic_energies:
+      table = xrays->emissivity_erosita_low_intrinsic_energies;
+      break;
+    case xray_band_types_erosita_high_intrinsic_energies:
+      table = xrays->emissivity_erosita_high_intrinsic_energies;
+      break;
+    case xray_band_types_ROSAT_intrinsic_energies:
+      table = xrays->emissivity_ROSAT_intrinsic_energies;
+      break;
+    default:
+      error("Band doesn't exist");
+  }
+
+  /* The total flux is computed in three steps:
+   * - First, we compute the contribution excluding all metals.
+   * - Next, we loop over all the metals we trace and compute the flux
+   *   in the case where one metal is added.
+   * - Finally, for each metal, we add a portion of the difference
+   *   between the (one metal) and (no metals) cases based on the
+   *   abundance of each metal.
+   *
+   * The interpolation table structure is as follows:
+   * First we have the individual element contributions in the same order
+   * as the COLIBRE cooling model. As we only include metals, the first
+   * entry is Carbon, second Nitrogen, etc.
+   * The contribution of no metals is the last entry.
+   *
+   * The table size is hence:
+   * colibre_cooling_N_elementtypes - 3 (H + He + OA) + 1 (no metals)
+   * which is equal to xray_emission_N_element
+   */
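+
+  /* Schematically (note added for clarity), the code below computes
+   *   x_ray = x_ray(no metals) + sum over metals of x_ray(elem) * ratio(elem)
+   * where ratio(elem) is the abundance of elem relative to solar. */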
+
+  /* Perform the interpolation of the no metal case
+   * Note: That is stored as the last entry of the table */
+  const float log10_x_ray_no_metals_cgs = interpolate_xray(
+      table, xray_emission_N_element - 1, log_nH_cgs_index, He_index,
+      log_T_index, z_index, d_log_nH_cgs, d_He, d_log_T, d_z);
+
+  const float x_ray_no_metals_cgs = exp10f(log10_x_ray_no_metals_cgs);
+  float x_ray_cgs = x_ray_no_metals_cgs;
+
+  /* Loop over the *individual metals* used in the COLIBRE cooling */
+  for (int elem = element_C; elem <= element_Fe; elem++) {
+
+    /* Note: we subtract 2 since the interpolation tables do not include H
+     * and He and start straight with the metals */
+    const float log10_x_ray_elem_cgs = interpolate_xray(
+        table, elem - 2, log_nH_cgs_index, He_index, log_T_index, z_index,
+        d_log_nH_cgs, d_He, d_log_T, d_z);
+
+    const float x_ray_elem_cgs = exp10f(log10_x_ray_elem_cgs);
+
+    /* Add the difference multiplied by the abundance-to-solar-abundance
+     * ratio */
+    x_ray_cgs += x_ray_elem_cgs * solar_ratio[elem];
+  }
+
+  /* Convert from cm^3 to cm^-3 (i.e. multiply by nH^2) */
+  x_ray_cgs *= exp10f(2.f * log_nH_cgs);
+
+  return x_ray_cgs;
+}
+
+/**
+ * @brief Compute the luminosity of a particle in a given X-ray band.
+ *
+ * @param p The #part.
+ * @param xp The corresponding #xpart.
+ * @param e The #engine.
+ * @param band xray band to use
+ *
+ * @return The luminosity in internal units.
+ */
+INLINE static double extra_io_get_xray_fluxes(const struct part *p,
+                                              const struct xpart *xp,
+                                              const struct engine *e,
+                                              const enum xray_band_types band) {
+
+  /* Get gas particle temperature */
+  const float T = cooling_get_temperature(
+      e->physical_constants, e->hydro_properties, e->internal_units,
+      e->cosmology, e->cooling_func, p, xp);
+  const float log10_T = log10f(T);
+
+  /* Get gas particle Hydrogen number density in cgs */
+  const float rho_phys = hydro_get_physical_density(p, e->cosmology);
+  const float XH =
+      chemistry_get_metal_mass_fraction_for_cooling(p)[chemistry_element_H];
+  const float nH = rho_phys * XH / e->physical_constants->const_proton_mass *
+                   e->cooling_func->number_density_to_cgs;
+  const float log10_nH_cgs = log10f(nH);
+
+  /* If the particle is not in the table range we return a flux of 0 */
+  if ((log10_T < 5.0f || log10_T > 9.5f) ||
+      (log10_nH_cgs < -8.0f || log10_nH_cgs > 6.0f))
+    return 0.;
+
+  /* Get gas particle element mass fractions */
+  const float *const mass_fractions =
+      chemistry_get_metal_mass_fraction_for_cooling(p);
+
+  /* Convert to abundances. For now, ignore Ca and S that are not tracked */
+  float abundances[chemistry_element_count];
+  for (int el = 0; el < chemistry_element_count; el++) {
+    abundances[el] = (mass_fractions[el] / mass_fractions[0]) *
+                     (e->io_extra_props->xray_data.element_mass[0] /
+                      e->io_extra_props->xray_data.element_mass[el]);
+  }
+
+  /* We now need to convert the array we received from the chemistry
+   * module (likely EAGLE) into the COLIBRE-cooling format.
+   * This means adding un-tracked elements and changing their order */
+
+  /* Finally, convert to abundances relative to solar */
+  float abundance_ratio[colibre_cooling_N_elementtypes];
+  for (int el = 0; el < colibre_cooling_N_elementtypes; el++) {
+
+    /* Treat all regular elements */
+    if (el <= element_Si) {
+
+      abundance_ratio[el] =
+          abundances[el] / e->io_extra_props->xray_data.Solar_metallicity[el];
+
+      /* Special case for the two elements not traced in the chemistry */
+    } else if (el == element_S || el == element_Ca) {
+
+      /* S and Ca are fixed to have the same abundance ratio as Si */
+      abundance_ratio[el] = abundance_ratio[element_Si];
+
+      /* Final special case: Iron. */
+    } else if (el == element_Fe) {
+
+      /* We need to fish it out of the chemistry where it was at a different
+       * location in the array */
+      abundance_ratio[el] =
+          abundances[chemistry_element_Fe] /
+          e->io_extra_props->xray_data.Solar_metallicity[chemistry_element_Fe];
+    } else {
+
+      /* Any other element is not used in the Xray interpolation */
+      abundance_ratio[el] = 0.f;
+    }
+  }
+
+  /* Extract the (log of) Helium abundance */
+  const float log10_He_fraction = log10f(abundances[chemistry_element_He]);
+
+  /* Compute the X-ray emission in the given band */
+  const double xray_em_cgs = do_xray_interpolation(
+      &e->io_extra_props->xray_data, log10_He_fraction, log10_nH_cgs, log10_T,
+      e->cosmology->z, abundance_ratio, band);
+
+  /* Convert back to internal units */
+  double xray_em;
+  switch (band) {
+    case xray_band_types_erosita_low_intrinsic_photons:
+    case xray_band_types_erosita_high_intrinsic_photons:
+    case xray_band_types_ROSAT_intrinsic_photons:
+      xray_em =
+          xray_em_cgs /
+          e->io_extra_props->xray_data.xray_photon_emissivity_unit_conversion;
+      break;
+    case xray_band_types_erosita_low_intrinsic_energies:
+    case xray_band_types_erosita_high_intrinsic_energies:
+    case xray_band_types_ROSAT_intrinsic_energies:
+      xray_em =
+          xray_em_cgs /
+          e->io_extra_props->xray_data.xray_energy_emissivity_unit_conversion;
+      break;
+    default:
+      error("Band doesn't exist");
+  }
+
+  /* Now compute the luminosity from the emissivity
+   *  To do so, we multiply by the particle volume
+   *  luminosity = emissivity * (mass / density)
+   */
+  const double xray_lum = xray_em * (hydro_get_mass(p) / rho_phys);
+
+  return xray_lum;
+}
+
+/**
+ * @brief Initialises properties stored for the extra i/o fields
+ *
+ * @param parameter_file The parsed parameter file
+ * @param us Internal system of units data structure
+ * @param phys_const #phys_const data structure
+ * @param cosmo The cosmology model
+ * @param props #extra_io_properties struct to initialize
+ */
+INLINE static void extra_io_init(struct swift_params *parameter_file,
+                                 const struct unit_system *us,
+                                 const struct phys_const *phys_const,
+                                 const struct cosmology *cosmo,
+                                 struct extra_io_properties *props) {
+
+  parser_get_param_string(parameter_file, "XrayEmissivity:xray_table_path",
+                          props->xray_data.xray_table_path);
+
+  read_xray_header(&props->xray_data, props->xray_data.xray_table_path);
+  read_xray_table(&props->xray_data, props->xray_data.xray_table_path);
+
+  /* Compute unit conversions only once and use them throughout */
+  props->xray_data.xray_photon_emissivity_unit_conversion =
+      units_cgs_conversion_factor(us, UNIT_CONV_NUMBER_DENSITY_PER_TIME);
+  props->xray_data.xray_energy_emissivity_unit_conversion =
+      units_cgs_conversion_factor(us, UNIT_CONV_POWER_DENSITY);
+}
+
+/**
+ * @brief Free the memory allocated for the extra i/o fields.
+ *
+ * @param props #extra_io_properties struct to clean
+ */
+INLINE static void extra_io_clean(struct extra_io_properties *props) {
+
+  free(props->xray_data.Temperatures);
+  free(props->xray_data.Densities);
+  free(props->xray_data.He_bins);
+  free(props->xray_data.Solar_metallicity);
+  free(props->xray_data.Log10_solar_metallicity);
+  free(props->xray_data.Redshifts);
+  free(props->xray_data.element_mass);
+
+  swift_free("xrays_table_erosita_low_photons",
+             props->xray_data.emissivity_erosita_low_intrinsic_photons);
+  swift_free("xrays_table_erosita_high_photons",
+             props->xray_data.emissivity_erosita_high_intrinsic_photons);
+  swift_free("xray_table_ROSAT_photons",
+             props->xray_data.emissivity_ROSAT_intrinsic_photons);
+  swift_free("xrays_table_erosita_low_energies",
+             props->xray_data.emissivity_erosita_low_intrinsic_energies);
+  swift_free("xrays_table_erosita_high_energies",
+             props->xray_data.emissivity_erosita_high_intrinsic_energies);
+  swift_free("xray_table_ROSAT_energies",
+             props->xray_data.emissivity_ROSAT_intrinsic_energies);
+}
+
+/**
+ * @brief Write an extra i/o struct to the given FILE as a stream of bytes.
+ *
+ * @param props the struct
+ * @param stream the file stream
+ */
+INLINE static void extra_io_struct_dump(const struct extra_io_properties *props,
+                                        FILE *stream) {
+
+  struct extra_io_properties props_copy = *props;
+
+  props_copy.xray_data.Temperatures = NULL;
+  props_copy.xray_data.Densities = NULL;
+  props_copy.xray_data.He_bins = NULL;
+  props_copy.xray_data.Solar_metallicity = NULL;
+  props_copy.xray_data.Log10_solar_metallicity = NULL;
+  props_copy.xray_data.Redshifts = NULL;
+  props_copy.xray_data.element_mass = NULL;
+
+  props_copy.xray_data.emissivity_erosita_low_intrinsic_photons = NULL;
+  props_copy.xray_data.emissivity_erosita_high_intrinsic_photons = NULL;
+  props_copy.xray_data.emissivity_ROSAT_intrinsic_photons = NULL;
+  props_copy.xray_data.emissivity_erosita_low_intrinsic_energies = NULL;
+  props_copy.xray_data.emissivity_erosita_high_intrinsic_energies = NULL;
+  props_copy.xray_data.emissivity_ROSAT_intrinsic_energies = NULL;
+
+  restart_write_blocks((void *)&props_copy, sizeof(struct extra_io_properties),
+                       1, stream, "extra_io", "extra i/o properties");
+}
+
+/**
+ * @brief Restore an extra_io_properties struct from the given FILE as a
+ * stream of bytes.
+ *
+ * Read the structure from the stream and restore the extra i/o tables by
+ * re-reading them.
+ *
+ * @param props the struct
+ * @param stream the file stream
+ */
+INLINE static void extra_io_struct_restore(struct extra_io_properties *props,
+                                           FILE *stream) {
+
+  restart_read_blocks((void *)props, sizeof(struct extra_io_properties), 1,
+                      stream, NULL, "extra i/o properties");
+
+  read_xray_header(&props->xray_data, props->xray_data.xray_table_path);
+  read_xray_table(&props->xray_data, props->xray_data.xray_table_path);
+}
+
+#endif /* SWIFT_EXTRA_EAGLE_H */
diff --git a/src/extra_io/EAGLE/extra_io.h b/src/extra_io/EAGLE/extra_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..d39494601414dd521d0c10b8833274750f5628ce
--- /dev/null
+++ b/src/extra_io/EAGLE/extra_io.h
@@ -0,0 +1,322 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EXTRA_IO_EAGLE_H
+#define SWIFT_EXTRA_IO_EAGLE_H
+
+#include "extra.h"
+#include "io_properties.h"
+
+INLINE static void convert_part_Xray_photons(const struct engine *e,
+                                             const struct part *p,
+                                             const struct xpart *xp,
+                                             double *ret) {
+
+  ret[0] = extra_io_get_xray_fluxes(
+      p, xp, e, xray_band_types_erosita_low_intrinsic_photons);
+  ret[1] = extra_io_get_xray_fluxes(
+      p, xp, e, xray_band_types_erosita_high_intrinsic_photons);
+  ret[2] = extra_io_get_xray_fluxes(p, xp, e,
+                                    xray_band_types_ROSAT_intrinsic_photons);
+}
+
+INLINE static void convert_part_Xray_energies(const struct engine *e,
+                                              const struct part *p,
+                                              const struct xpart *xp,
+                                              double *ret) {
+
+  ret[0] = extra_io_get_xray_fluxes(
+      p, xp, e, xray_band_types_erosita_low_intrinsic_energies);
+  ret[1] = extra_io_get_xray_fluxes(
+      p, xp, e, xray_band_types_erosita_high_intrinsic_energies);
+  ret[2] = extra_io_get_xray_fluxes(p, xp, e,
+                                    xray_band_types_ROSAT_intrinsic_energies);
+}
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extra particle array.
+ * @param list The list of i/o properties to write.
+ * @param with_cosmology Are we running with cosmology?
+ *
+ * @return Returns the number of fields to write.
+ */
+INLINE static int extra_io_write_particles(const struct part *parts,
+                                           const struct xpart *xparts,
+                                           struct io_props *list,
+                                           const int with_cosmology) {
+
+  list[0] = io_make_output_field_convert_part(
+      "XrayPhotonLuminosities", DOUBLE, 3, UNIT_CONV_PHOTONS_PER_TIME, 0.f,
+      parts, xparts, convert_part_Xray_photons,
+      "Intrinsic X-ray photon luminosities in various bands");
+
+  list[1] = io_make_output_field_convert_part(
+      "XrayLuminosities", DOUBLE, 3, UNIT_CONV_POWER, 0.f, parts, xparts,
+      convert_part_Xray_energies,
+      "Intrinsic X-ray luminosities in various bands");
+
+  return 2;
+}
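+
+/* Maintainer note (added): the value returned above must match the number of
+ * io_props entries filled in list[], as it tells the i/o layer how many
+ * extra fields to write. */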
+
+/**
+ * @brief Specifies which star particle fields to write to a dataset
+ *
+ * @param sparts The star particle array.
+ * @param list The list of i/o properties to write.
+ * @param with_cosmology Are we running with cosmology?
+ *
+ * @return Returns the number of fields to write.
+ */
+INLINE static int extra_io_write_sparticles(const struct spart *sparts,
+                                            struct io_props *list,
+                                            const int with_cosmology) {
+
+  return 0;
+}
+
+/**
+ * @brief Specifies which black hole particle fields to write to a dataset
+ *
+ * @param bparts The BH particle array.
+ * @param list The list of i/o properties to write.
+ * @param with_cosmology Are we running with cosmology?
+ *
+ * @return Returns the number of fields to write.
+ */
+INLINE static int extra_io_write_bparticles(const struct bpart *bparts,
+                                            struct io_props *list,
+                                            const int with_cosmology) {
+  return 0;
+}
+
+#ifdef HAVE_HDF5
+
+/**
+ * @brief Writes the current model of extra-io to the file
+ * @param h_grp The HDF5 group in which to write
+ * @param h_grp_columns The HDF5 group containing named columns
+ */
+INLINE static void extra_io_write_flavour(hid_t h_grp, hid_t h_grp_columns) {
+
+  /* Write the extra-io model */
+  io_write_attribute_s(h_grp, "Extra-io", "EAGLE");
+
+  /* Create an array of xray band names */
+  static const char xrayband_names[xray_band_types_count / 2][32] = {
+      "erosita_low", "erosita_high", "ROSAT"};
+
+  /* Add to the named columns. We do it twice. Once for
+   * the energies and once for the photons.
+   * The columns use the same names for both arrays. */
+  hsize_t dims[1] = {xray_band_types_count / 2};
+  hid_t type = H5Tcopy(H5T_C_S1);
+  H5Tset_size(type, 32);
+  hid_t space = H5Screate_simple(1, dims, NULL);
+  hid_t dset = H5Dcreate(h_grp_columns, "XrayLuminosities", type, space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, xrayband_names[0]);
+  H5Dclose(dset);
+  dset = H5Dcreate(h_grp_columns, "XrayPhotonLuminosities", type, space,
+                   H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, xrayband_names[0]);
+  H5Dclose(dset);
+
+  H5Tclose(type);
+  H5Sclose(space);
+}
+#endif
+
+/*
+  Extra lightcone map types
+*/
+/*
+   Healpix map of intrinsic erosita-low photons band
+*/
+double lightcone_map_xray_erosita_low_intrinsic_photons_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/*
+   Healpix map of intrinsic erosita-low energy band
+*/
+double lightcone_map_xray_erosita_low_intrinsic_energy_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/*
+   Healpix map of intrinsic erosita-high photons band
+*/
+double lightcone_map_xray_erosita_high_intrinsic_photons_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/*
+   Healpix map of intrinsic erosita-high energy band
+*/
+double lightcone_map_xray_erosita_high_intrinsic_energy_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/*
+   Healpix map of intrinsic ROSAT photons band
+*/
+double lightcone_map_xray_rosat_intrinsic_photons_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/*
+   Healpix map of intrinsic ROSAT energy band
+*/
+double lightcone_map_xray_rosat_intrinsic_energy_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of Compton y
+*/
+int lightcone_map_compton_y_type_contributes(int ptype);
+
+double lightcone_map_compton_y_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of Doppler B
+*/
+int lightcone_map_doppler_b_type_contributes(int ptype);
+
+double lightcone_map_doppler_b_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of dispersion measure
+*/
+int lightcone_map_dispersion_meassure_type_contributes(int ptype);
+
+double lightcone_map_dispersion_meassure_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+static const struct lightcone_map_type extra_lightcone_map_types[] = {
+    {
+        .name = "XrayErositaLowIntrinsicPhotons",
+        .update_map =
+            lightcone_map_xray_erosita_low_intrinsic_photons_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_PHOTON_FLUX_PER_UNIT_SURFACE,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0e-62, /* to keep in range of a float */
+    },
+    {
+        .name = "XrayErositaLowIntrinsicEnergies",
+        .update_map = lightcone_map_xray_erosita_low_intrinsic_energy_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_ENERGY_FLUX_PER_UNIT_SURFACE,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "XrayErositaHighIntrinsicPhotons",
+        .update_map =
+            lightcone_map_xray_erosita_high_intrinsic_photons_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_PHOTON_FLUX_PER_UNIT_SURFACE,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0e-62, /* to keep in range of a float */
+    },
+    {
+        .name = "XrayErositaHighIntrinsicEnergies",
+        .update_map =
+            lightcone_map_xray_erosita_high_intrinsic_energy_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_ENERGY_FLUX_PER_UNIT_SURFACE,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "XrayROSATIntrinsicPhotons",
+        .update_map = lightcone_map_xray_rosat_intrinsic_photons_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_PHOTON_FLUX_PER_UNIT_SURFACE,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0e-62, /* to keep in range of a float */
+    },
+    {
+        .name = "XrayROSATIntrinsicEnergies",
+        .update_map = lightcone_map_xray_rosat_intrinsic_energy_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_ENERGY_FLUX_PER_UNIT_SURFACE,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "ComptonY",
+        .update_map = lightcone_map_compton_y_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_NO_UNITS,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "DopplerB",
+        .update_map = lightcone_map_doppler_b_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_NO_UNITS,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "DM",
+        .update_map = lightcone_map_dispersion_meassure_get_value,
+        .ptype_contributes = lightcone_map_gas_only,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_INV_AREA,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 3.40367719e-68, /* convert 1/Mpc^2 to pc/cm^3 so
+                                                  value fits in a float */
+    },
+    {
+        /* NULL functions indicate end of array */
+        .name = "",
+        .update_map = NULL,
+        .ptype_contributes = NULL,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_NO_UNITS,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+};
+
+#endif /* SWIFT_EXTRA_IO_EAGLE_H */
diff --git a/src/extra_io/EAGLE/extra_lightcone_map_types.c b/src/extra_io/EAGLE/extra_lightcone_map_types.c
new file mode 100644
index 0000000000000000000000000000000000000000..64ca6755e36c9e3adc613dfef5a1cd959c7b746d
--- /dev/null
+++ b/src/extra_io/EAGLE/extra_lightcone_map_types.c
@@ -0,0 +1,586 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "black_holes.h"
+#include "cooling.h"
+#include "cosmology.h"
+#include "engine.h"
+#include "error.h"
+#include "gravity.h"
+#include "hydro.h"
+#include "lightcone/lightcone_map.h"
+#include "part.h"
+#include "stars.h"
+
+/* This object's header */
+#include "lightcone/lightcone_map_types.h"
+
+/* Required for the xrays */
+#include "extra_io.h"
+#include "io_properties.h"
+
+/**
+ * @brief Determine time since AGN injection at scale factor a_cross
+ *
+ * Returns -1 if no there has been no AGN injection. Result is in
+ * internal time units.
+ *
+ * @param xp the #xpart for which we're evaluating the time
+ * @param c the #cosmology struct
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ */
+INLINE static double get_time_since_AGN_injection(const struct xpart *xp,
+                                                  const struct cosmology *c,
+                                                  double a_cross) {
+
+  /* Check for the case where there has been no AGN injection yet */
+  const double last_AGN_injection_scale_factor =
+      xp->tracers_data.last_AGN_injection_scale_factor;
+  if (last_AGN_injection_scale_factor < 0.0) return -1.0;
+
+  /* Check for heating after lightcone crossing - possible if the particle
+   * was heated on the current time step */
+  if (last_AGN_injection_scale_factor > a_cross) return 0.0;
+
+  /* Find time since the last injection in internal units */
+  const double last_AGN_injection_time =
+      cosmology_get_time_since_big_bang(c, last_AGN_injection_scale_factor);
+  const double time_at_crossing = cosmology_get_time_since_big_bang(c, a_cross);
+  const double delta_time = time_at_crossing - last_AGN_injection_time;
+
+  return delta_time;
+}
+
+INLINE static int exclude_particle(
+    const struct lightcone_props *lightcone_props, const struct engine *e,
+    const struct part *p, const struct xpart *xp, double a_cross) {
+
+  /* Get AGN heating temperature */
+  const double AGN_delta_T = e->black_holes_properties->AGN_delta_T_desired;
+
+  /* Check if we need to exclude this particle due to recent AGN heating */
+  if (lightcone_props->xray_maps_recent_AGN_injection_exclusion_time > 0) {
+    const double t = get_time_since_AGN_injection(xp, e->cosmology, a_cross);
+    if (t >= 0 &&
+        t < lightcone_props->xray_maps_recent_AGN_injection_exclusion_time) {
+
+      /* Check if it is within the exclusion temperature range */
+      const double temp_min =
+          AGN_delta_T * lightcone_props->xray_maps_recent_AGN_min_temp_factor;
+      const double temp_max =
+          AGN_delta_T * lightcone_props->xray_maps_recent_AGN_max_temp_factor;
+      const double part_temp = cooling_get_temperature(
+          e->physical_constants, e->hydro_properties, e->internal_units,
+          e->cosmology, e->cooling_func, p, xp);
+      if (part_temp > temp_min && part_temp < temp_max) return 1;
+    }
+  }
+
+  /* Not excluding this particle */
+  return 0;
+}
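+
+/* Worked example (illustrative values only): with an exclusion time of
+ * 15 Myr and temperature factors of 0.5 and 2.0, a gas particle that was
+ * AGN-heated 10 Myr before crossing the lightcone is dropped from the
+ * X-ray maps if its temperature lies between 0.5 and 2.0 times
+ * AGN_delta_T; all other particles contribute as normal. */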
+
+/**
+ * @brief Make a healpix map of projected erosita-low intrinsic photon flux in
+ * each pixel
+ *
+ * @param e the #engine structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_xray_erosita_low_intrinsic_photons_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      const double z_cross = (1 / a_cross) - 1;
+      const double cdist_cross =
+          sqrt(pow(x_cross[0], 2) + pow(x_cross[1], 2) + pow(x_cross[2], 2));
+
+      const double luminosity = extra_io_get_xray_fluxes(
+          p, xp, e, xray_band_types_erosita_low_intrinsic_photons);
+
+      const double flux =
+          luminosity / (4 * M_PI * pow(cdist_cross, 2) *
+                        (1 + z_cross));  // photon luminosity distance
+
+      return flux;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
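+
+/* Note on the distance factors (added for clarity): for photon counts the
+ * flux picks up one factor of (1 + z) from the time dilation of the photon
+ * arrival rate, giving L / (4 pi D_c^2 (1 + z)); for energies a second
+ * (1 + z) accounts for the redshifting of each photon's energy, recovering
+ * the usual luminosity distance D_L = D_c (1 + z) used in the energy-band
+ * functions below. */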
+
+/* erosita-low energy flux */
+
+/**
+ * @brief Make a healpix map of projected erosita-low intrinsic energy flux in
+ * each pixel
+ *
+ * @param e the #engine structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_xray_erosita_low_intrinsic_energy_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      const double z_cross = (1 / a_cross) - 1;
+      const double cdist_cross =
+          sqrt(pow(x_cross[0], 2) + pow(x_cross[1], 2) + pow(x_cross[2], 2));
+
+      const double luminosity = extra_io_get_xray_fluxes(
+          p, xp, e, xray_band_types_erosita_low_intrinsic_energies);
+
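+      /* Convert to an energy flux: divide by 4*pi*D_L^2 with luminosity
+         distance D_L = D_C*(1+z), i.e. one factor of (1+z) from time
+         dilation of the arrival rate and one from the redshift of each
+         photon's energy. */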
+      const double flux =
+          luminosity / (4 * M_PI * pow(cdist_cross, 2) *
+                        pow((1 + z_cross), 2));  // energy luminosity distance
+
+      return flux;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/* erosita-high photon flux */
+
+/**
+ * @brief Make a healpix map of projected erosita-high intrinsic photon flux in
+ * each pixel
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_xray_erosita_high_intrinsic_photons_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      const double z_cross = (1 / a_cross) - 1;
+      const double cdist_cross =
+          sqrt(pow(x_cross[0], 2) + pow(x_cross[1], 2) + pow(x_cross[2], 2));
+
+      const double luminosity = extra_io_get_xray_fluxes(
+          p, xp, e, xray_band_types_erosita_high_intrinsic_photons);
+
+      const double flux =
+          luminosity / (4 * M_PI * pow(cdist_cross, 2) *
+                        (1 + z_cross));  // photon luminosity distance
+
+      return flux;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/* erosita-high energy flux */
+
+/**
+ * @brief Make a healpix map of projected erosita-high intrinsic energy flux in
+ * each pixel
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_xray_erosita_high_intrinsic_energy_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      const double z_cross = (1 / a_cross) - 1;
+      const double cdist_cross =
+          sqrt(pow(x_cross[0], 2) + pow(x_cross[1], 2) + pow(x_cross[2], 2));
+
+      const double luminosity = extra_io_get_xray_fluxes(
+          p, xp, e, xray_band_types_erosita_high_intrinsic_energies);
+
+      const double flux =
+          luminosity / (4 * M_PI * pow(cdist_cross, 2) *
+                        pow((1 + z_cross), 2));  // energy luminosity distance
+
+      return flux;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/* ROSAT photon flux */
+
+/**
+ * @brief Make a healpix map of projected ROSAT intrinsic photon flux in each
+ * pixel
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_xray_rosat_intrinsic_photons_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      const double z_cross = (1 / a_cross) - 1;
+      const double cdist_cross =
+          sqrt(pow(x_cross[0], 2) + pow(x_cross[1], 2) + pow(x_cross[2], 2));
+
+      const double luminosity = extra_io_get_xray_fluxes(
+          p, xp, e, xray_band_types_ROSAT_intrinsic_photons);
+
+      const double flux =
+          luminosity / (4 * M_PI * pow(cdist_cross, 2) *
+                        (1 + z_cross));  // photon luminosity distance
+
+      return flux;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/* ROSAT energy flux */
+
+/**
+ * @brief Make a healpix map of projected ROSAT intrinsic energy flux in each
+ * pixel
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_xray_rosat_intrinsic_energy_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      const double z_cross = (1 / a_cross) - 1;
+      const double cdist_cross =
+          sqrt(pow(x_cross[0], 2) + pow(x_cross[1], 2) + pow(x_cross[2], 2));
+
+      const double luminosity = extra_io_get_xray_fluxes(
+          p, xp, e, xray_band_types_ROSAT_intrinsic_energies);
+
+      const double flux =
+          luminosity / (4 * M_PI * pow(cdist_cross, 2) *
+                        pow((1 + z_cross), 2));  // energy luminosity distance
+
+      return flux;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Make a healpix map of the Compton y parameter
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_compton_y_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  /* Handle on the physics modules */
+  const struct cosmology *cosmo = e->cosmology;
+  const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct unit_system *us = e->internal_units;
+  const struct phys_const *phys_const = e->physical_constants;
+  const struct cooling_function_data *cool_func = e->cooling_func;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      double y_compton = cooling_get_ycompton(phys_const, hydro_props, us,
+                                              cosmo, cool_func, p, xp);
+      double x_squared = x_cross[0] * x_cross[0] * a_cross * a_cross;
+      double y_squared = x_cross[1] * x_cross[1] * a_cross * a_cross;
+      double z_squared = x_cross[2] * x_cross[2] * a_cross * a_cross;
+      double angular_diameter_distance_2 = x_squared + y_squared + z_squared;
+
+      /* This angular diameter distance is only correct for flat cosmologies */
+#ifdef SWIFT_DEBUG_CHECKS
+      if (fabs(e->cosmology->Omega_k) > 0.001)
+        error("only implemented for flat cosmology");
+#endif
+
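+      /* Spread the particle's integrated Compton y over the physical area
+         that one pixel subtends at the particle's angular diameter
+         distance, i.e. the pixel solid angle times D_A^2. */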
+      double pixel_size_2 = lightcone_props->pixel_area_steradians;
+      double y_for_map =
+          y_compton / (pixel_size_2 * angular_diameter_distance_2);
+
+      return y_for_map;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+      break;
+  }
+}
+
+/**
+ * @brief Make a healpix map of the Doppler b parameter
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_doppler_b_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  /* Handle on the physics modules */
+  const struct cosmology *cosmo = e->cosmology;
+  const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct unit_system *us = e->internal_units;
+  const struct phys_const *phys_const = e->physical_constants;
+  const struct cooling_function_data *cool_func = e->cooling_func;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      double n_e = cooling_get_electron_density(phys_const, hydro_props, us,
+                                                cosmo, cool_func, p, xp);
+
+      double rho = hydro_get_physical_density(p, cosmo);
+
+      double m = hydro_get_mass(p);
+
+      const double c = phys_const->const_speed_light_c;
+
+      const double sigma_thompson = phys_const->const_thomson_cross_section;
+
+      double x_squared = x_cross[0] * x_cross[0] * a_cross * a_cross;
+      double y_squared = x_cross[1] * x_cross[1] * a_cross * a_cross;
+      double z_squared = x_cross[2] * x_cross[2] * a_cross * a_cross;
+      double angular_diameter_distance_2 = x_squared + y_squared + z_squared;
+      double angular_diameter_distance = sqrt(angular_diameter_distance_2);
+
+      /* This angular diameter distance is only correct for flat cosmologies */
+#ifdef SWIFT_DEBUG_CHECKS
+      if (fabs(e->cosmology->Omega_k) > 0.001)
+        error("only implemented for flat cosmology");
+#endif
+
+      double radial_velocity =
+          (p->v[0] * x_cross[0] * a_cross + p->v[1] * x_cross[1] * a_cross +
+           p->v[2] * x_cross[2] * a_cross) /
+          angular_diameter_distance;
+
+      double pixel_size_2 = lightcone_props->pixel_area_steradians;
+
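+      /* The particle contributes n_e*(m/rho) electrons, each weighted by
+         sigma_T*v_r/c, spread over the physical area that one pixel
+         subtends at the particle's angular diameter distance. */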
+      double b_for_map = n_e * m * sigma_thompson * radial_velocity /
+                         (pixel_size_2 * angular_diameter_distance_2 * rho * c);
+
+      return b_for_map;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+      break;
+  }
+}
+
+/**
+ * @brief Make a healpix map of the dispersion measure
+ *
+ * @param e the #engine structure
+ * @param lightcone_props the #lightcone_props structure
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_dispersion_meassure_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  /* Handle on the physics modules */
+  const struct cosmology *cosmo = e->cosmology;
+  const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct unit_system *us = e->internal_units;
+  const struct phys_const *phys_const = e->physical_constants;
+  const struct cooling_function_data *cool_func = e->cooling_func;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+
+      /* Exclude recently AGN heated particles */
+      if (exclude_particle(lightcone_props, e, p, xp, a_cross)) return 0.0;
+
+      double n_e = cooling_get_electron_density(phys_const, hydro_props, us,
+                                                cosmo, cool_func, p, xp);
+
+      double rho = hydro_get_physical_density(p, cosmo);
+
+      double m = hydro_get_mass(p);
+
+      double x_squared = x_cross[0] * x_cross[0] * a_cross * a_cross;
+      double y_squared = x_cross[1] * x_cross[1] * a_cross * a_cross;
+      double z_squared = x_cross[2] * x_cross[2] * a_cross * a_cross;
+      double angular_diameter_distance_2 = x_squared + y_squared + z_squared;
+
+      /* This angular diameter distance is only correct for flat cosmologies */
+#ifdef SWIFT_DEBUG_CHECKS
+      if (fabs(e->cosmology->Omega_k) > 0.001)
+        error("only implemented for flat cosmology");
+#endif
+
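+      /* The dispersion measure is an electron column density: spread the
+         particle's n_e*(m/rho) electrons over the physical area that one
+         pixel subtends at its angular diameter distance. */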
+      double pixel_size_2 = lightcone_props->pixel_area_steradians;
+      double dm_for_map =
+          n_e * m / (pixel_size_2 * angular_diameter_distance_2 * rho);
+
+      return dm_for_map;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+      break;
+  }
+}
diff --git a/src/kernel_hydro.h b/src/kernel_hydro.h
index fcb6e91906421a2436f645f06fb03c9116fa4a32..17a45b0bae4fd8d3ff1f9570754662f21a97eed6 100644
--- a/src/kernel_hydro.h
+++ b/src/kernel_hydro.h
@@ -317,6 +317,44 @@ __attribute__((always_inline)) INLINE static void kernel_eval(
   *W = w * kernel_constant * kernel_gamma_inv_dim;
 }
 
+/**
+ * @brief Computes the kernel function in double precision.
+ *
+ * Required for computing the projected kernel because rounding
+ * error causes problems for the GSL integration function if
+ * we evaluate in single precision.
+ *
+ * The kernel function needs to be multiplied by \f$h^{-d}\f$,
+ * where \f$d\f$ is the dimensionality of the problem.
+ *
+ * Returns 0 if \f$u > \gamma = H/h\f$
+ *
+ * @param u The ratio of the distance to the smoothing length \f$u = x/h\f$.
+ * @param W (return) The value of the kernel function \f$W(x,h)\f$.
+ */
+__attribute__((always_inline)) INLINE static void kernel_eval_double(
+    double u, double *restrict W) {
+
+  /* Go to the range [0,1[ from [0,H[ */
+  const double x = u * kernel_gamma_inv;
+
+  /* Pick the correct branch of the kernel */
+  const int temp = (int)(x * kernel_ivals_f);
+  const int ind = temp > kernel_ivals ? kernel_ivals : temp;
+  const float *const coeffs = &kernel_coeffs[ind * (kernel_degree + 1)];
+
+  /* First two terms of the polynomial ... */
+  double w = ((double)coeffs[0]) * x + ((double)coeffs[1]);
+
+  /* ... and the rest of them */
+  for (int k = 2; k <= kernel_degree; k++) w = x * w + ((double)coeffs[k]);
+
+  w = max(w, 0.);
+
+  /* Return everything */
+  *W = w * ((double)kernel_constant) * ((double)kernel_gamma_inv_dim);
+}
+
 /**
  * @brief Computes the kernel function derivative.
  *
diff --git a/src/lightcone/healpix_util.c b/src/lightcone/healpix_util.c
new file mode 100644
index 0000000000000000000000000000000000000000..d234c8cad55f6a960bcdf59e7e0f21c0fed316bb
--- /dev/null
+++ b/src/lightcone/healpix_util.c
@@ -0,0 +1,363 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ *
+ * The functions in this file are based on code from the HEALPix
+ * 3.80 library (see http://healpix.sourceforge.net):
+ *
+ *  Copyright (C) 1997-2013 Krzysztof M. Gorski, Eric Hivon,
+ *                          Benjamin D. Wandelt, Anthony J. Banday, 
+ *                          Matthias Bartelmann, Hans K. Eriksen, 
+ *                          Frode K. Hansen, Martin Reinecke
+ *
+ * Translated and modified for SWIFT by John Helly:
+ *
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+
+#include "lightcone/healpix_util.h"
+
+/**
+ * @brief Integer modulus function, valid for b > 0 only
+ *
+ * Note that result is not the same as a % b for negative
+ * values of a. If b > 0 then result is in range 0 to b-1
+ * inclusive.
+ *
+ * @param a the first integer
+ * @param b the second integer
+ *
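+ * For example, mod(-3, 8) returns 5 whereas -3 % 8 evaluates to -3 in C.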
+ */
+static int mod(int a, int b)
+{
+  int r = a % b;
+  return r < 0 ? r + b : r;
+}
+
+/**
+ * @brief Given a normalized z coordinate, return the ring
+ *        number in range 1 to 4*nside-1.
+ *
+ * @param nside HEALPix resolution parameter
+ * @param z the z coordinate to check
+ *
+ */
+static int ring_num(int nside, double z) {
+
+  /* Equatorial regime */
+  int iring = round(nside*(2.0-1.5*z));
+
+  /* North cap */
+  if (z > 2./3.) {
+    iring = round(nside*sqrt(3.0*(1.0-z)));
+    if(iring==0)iring=1;
+  }
+
+  /* South cap */
+  if (z < -2./3.) {
+    iring = round(nside*sqrt(3.0*(1.0+z)));
+    if(iring==0)iring=1;
+    iring = 4*nside - iring;
+  }
+
+  return iring;
+}
+
+/**
+ * @brief Return information about the specified HEALPix ring
+ *
+ * @param nside HEALPix resolution parameter
+ * @param ring the ring index in range 1 to 4*Nside-1
+ * @param npr returns the number of pixels in the ring
+ * @param kshift returns the shift of this ring
+ * @param npnorth returns the total number of pixels in this ring
+ *        and all rings to the north of it
+ *
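+ * For example, with nside=1 there are 3 rings of 4 pixels each, and
+ * ring 2 has npnorth = 8.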
+ */
+static void pixels_per_ring(int nside, int ring, int *npr, int *kshift, long long *npnorth) {
+
+  /* number of pixels in current ring */
+  *npr = nside;
+  if(ring < *npr) *npr = ring;
+  if(4*nside-ring < *npr) *npr = 4*nside-ring;
+  *npr *= 4;
+  
+  /* Shift */
+  *kshift = (ring+1) % 2; /* 1 for even, 0 for odd */
+  if (nside == 1) *kshift = 1 - *kshift; /* except for Nside=1 */
+  if (*npr < 4*nside) *kshift = 1; /* 1 on polar cap */
+
+  /* Number of pixels in current ring and above */
+  if (ring <= nside) {
+    /* in North cap */
+    *npnorth = ring*(ring+1ll)*2ll;
+  } else if (ring <= 3*nside) {
+    /* in Equatorial region */
+    long long ncap = nside*(nside+1ll)*2ll;
+    long long ir = ring-nside;
+    *npnorth = ncap + 4ll*nside*ir;
+  } else {
+    /* in South cap */
+    long long npix = (12ll*nside)*nside;
+    long long ir = 4ll*nside-ring - 1; /* count ring from south */
+    *npnorth = npix - ir*(ir+1ll)*2ll;
+  }
+}
+
+/**
+ * @brief Compute the z coordinate of a HEALPix ring
+ *
+ * @param nside HEALPix resolution parameter
+ * @param ir the ring index in range 1 to 4*Nside-1
+ *
+ */
+static double ring2z(int nside, int ir) {
+  
+  double z;
+  double fn = (double) nside;
+  if(ir < nside) {
+    /* north polar cap */
+    double tmp = (double) ir;
+    z = 1.0 - (tmp * tmp) / (3.0 * fn * fn);
+  } else if(ir < 3*nside) {
+    /* tropical band */
+    z = ((double) (2*nside-ir)) * 2.0 / (3.0 * fn);
+  } else {
+    /* south polar cap */
+    double tmp = (double) (4*nside - ir);
+    z = -1.0 + (tmp * tmp) / (3.0 * fn * fn);
+  }
+  return z;
+}
+
+/**
+ * @brief Find pixels with centres within specified radius of
+ *        the given vector
+ *
+ * Based on query_disc() from src/f90/mod/pixel_routines.F90.
+ * Assumes RING indexing and does not support inclusive mode
+ * (i.e. only returns pixels with centres within radius)
+ *
+ * If nr_ranges and range are both not NULL, returns a newly
+ * allocated array of struct pixel_range with the ranges of
+ * pixels which overlap the disc.
+ *
+ * @param nside HEALPix resolution parameter
+ * @param vec vector specifying the disc centre
+ * @param radius the radius to search
+ * @param pix_min returns minimum pixel index in the disc
+ * @param pix_max returns maximum pixel index in the disc
+ * @param nr_ranges returns the size of the range array
+ * @param range returns a new array of struct pixel_range
+ *
+ */
+void healpix_query_disc_range(int nside, double vec[3], double radius,
+                              pixel_index_t *pix_min, pixel_index_t *pix_max,
+                              int *nr_ranges, struct pixel_range **range) {
+
+  /* Get normalized disc centre vector */
+  double norm = sqrt(vec[0]*vec[0]+vec[1]*vec[1]+vec[2]*vec[2]);
+  double x0 = vec[0] / norm;
+  double y0 = vec[1] / norm;
+  double z0 = vec[2] / norm;
+  
+  /* coordinate z of highest and lowest points in the disc */
+  double rlat0 = asin(z0); /* latitude in RAD of the center */
+  double rlat1  = rlat0 + radius;
+  double rlat2  = rlat0 - radius;
+  double zmin, zmax;
+  if (rlat1 >= 0.5*M_PI) {
+    zmax = 1.0;
+  } else {
+    zmax = sin(rlat1);
+  }
+  if (rlat2 <= -0.5*M_PI) {
+    zmin = -1.0;
+  } else {
+    zmin = sin(rlat2);
+  }
+
+  /* Find which rings overlap the disc */
+  int irmin = ring_num(nside, zmax);
+  if(irmin-1 > 1) {
+    irmin = irmin - 1;
+  } else {
+    irmin = 1; 
+  }
+  int irmax = ring_num(nside, zmin);
+  if(irmax+1 < 4*nside-1) {
+    irmax = irmax + 1;
+  } else {
+    irmax = 4*nside-1;
+  }
+
+  /* Get phi at disc centre */
+  double phi0 = 0.0;
+  if((x0 != 0) || (y0 != 0)) phi0 = atan2(y0, x0);
+
+  /* Allocate output array:
+     need to allow for worst case where all rings cross the periodic
+     boundary and therefore contribute two disjoint ranges of pixels */
+  int nout_max = 2*(irmax - irmin + 1);
+  if(nout_max < 1)nout_max = 1;
+  if(nr_ranges && range) {
+    *range = malloc(nout_max*sizeof(struct pixel_range));
+  }
+
+  /* Will return min and max pixel indexes in the disk */
+  long long npix = (12ll*nside)*nside;
+  long long pix_min_ll = npix;
+  long long pix_max_ll = -1;
+
+  /* Now have min/max ring index (in range 1 to 4*nside-1) */
+  /* Loop over rings which overlap the disc */
+  if(nr_ranges && range)*nr_ranges = 0;
+  for(int iring=irmin; iring<=irmax; iring+=1) {
+    
+    /* Find z coordinate of this ring */
+    double z = ring2z(nside, iring);
+    
+    /* Find range in phi which overlaps the disc in this ring:
+       taken from  discphirange_at_z() in pix_tools.F90 */
+    double cosang = cos(radius);
+    double a = x0*x0 + y0*y0;
+    double dphi = -1000.0; /* Indicates outside disc */
+    double b = cosang - z*z0;
+    if(a == 0.0) { /* Poles */
+      if(b <= 0.0)dphi = M_PI;
+    } else {
+      double c = fmax(1.0-z*z, 1.0e-12);
+      double cosdphi = b / sqrt(a*c);
+      if(cosdphi < -1.0) dphi = M_PI; /* all the pixels at this elevation are in the disc */
+      if(fabs(cosdphi) <= 1.0) dphi = acos(cosdphi); /* in [0,Pi] */
+    }
+    
+    /* Look up number of pixels in this ring */
+    int npr, kshift;
+    long long npnorth;
+    pixels_per_ring(nside, iring, &npr, &kshift, &npnorth);
+
+    /* For each ring, store the range of pixels which overlaps the disc.
+       If the disc overlaps the periodic boundary at phi=2pi we need to split the
+       range into two pieces. */
+    int my_low = -1;
+    int my_hi = -1;
+    if(dphi > M_PI) {
+      /* Full ring */
+      my_low = 0;
+      my_hi  = npr-1;
+    } else if(dphi >= 0.0) {
+      /* Partial ring */
+      double shift = kshift * 0.5;
+      int iphi_low = ceil(npr * (phi0 - dphi) / (2*M_PI) - shift);
+      int iphi_hi  = floor(npr * (phi0 + dphi) / (2*M_PI) - shift);
+      if(iphi_hi >= iphi_low) {
+        my_low = mod(iphi_low, npr);
+        my_hi  = mod(iphi_hi, npr);
+      }
+    }
+    if(my_low >= 0) {
+      long long first;
+      long long last;
+      if (my_hi >= my_low) {
+        /* Not crossing periodic boundary, so we can return a single range */
+        first = (npnorth - npr) + my_low;
+        last  = (npnorth - npr) + my_hi;
+        if(first < pix_min_ll)pix_min_ll = first;
+        if(last  > pix_max_ll)pix_max_ll = last;
+        if(nr_ranges && range) {
+          (*range)[*nr_ranges].first = first;
+          (*range)[*nr_ranges].last  = last;
+          *nr_ranges += 1;
+        }
+      } else {
+        /* Range overlaps periodic boundary, so will be split in two */
+        /* Start of ring to my_hi */
+        first = (npnorth - npr) + 0;
+        last  = (npnorth - npr) + my_hi;
+        if(first < pix_min_ll)pix_min_ll = first;
+        if(nr_ranges && range) {
+          (*range)[*nr_ranges].first = first;
+          (*range)[*nr_ranges].last  = last;
+          *nr_ranges += 1;
+        }
+        /* my_low to end of ring */
+        first = (npnorth - npr) + my_low;
+        last  = (npnorth - npr) + (npr-1);
+        if(last > pix_max_ll)pix_max_ll = last;
+        if(nr_ranges && range) {
+          (*range)[*nr_ranges].first = first;
+          (*range)[*nr_ranges].last  = last;
+          *nr_ranges += 1;
+        }
+      }      
+    }
+    /* Next ring */
+  }
+
+  /* Return min and max pixel indexes */
+  *pix_min = (pixel_index_t) pix_min_ll;
+  *pix_max = (pixel_index_t) pix_max_ll;
+}
+
+/**
+ * @brief Make a 3D vector given z and phi coordinates
+ *
+ * @param v returns the new vector
+ * @param z normalized coordinate in the z axis
+ * @param phi angular coordinate
+ *
+ */
+static void set_z_phi(double *v, double z, double phi) {
+
+  double sintheta = sqrt((1.0-z)*(1.0+z));
+  v[0] = sintheta*cos(phi);
+  v[1] = sintheta*sin(phi);
+  v[2] = z;
+}
+
+/**
+ * @brief Return the maximum radius of any pixel for a given nside.
+ *
+ * Based on Healpix_base::max_pixrad() from the C++ library.
+ *
+ * @param nside HEALPix resolution parameter
+ *
+ */
+double healpix_max_pixrad(int nside) {
+  
+  double va[3];
+  set_z_phi(va, 2./3., M_PI/(4*nside));
+
+  double t1 = 1.-1./nside;
+  t1*=t1;
+  double vb[3];
+  set_z_phi(vb, 1.-t1/3., 0.);
+
+  double dotprod = va[0]*vb[0]+va[1]*vb[1]+va[2]*vb[2];
+  double crossprod[3];
+  crossprod[0] = va[1]*vb[2] - va[2]*vb[1];
+  crossprod[1] = va[2]*vb[0] - va[0]*vb[2];
+  crossprod[2] = va[0]*vb[1] - va[1]*vb[0];
+  double length = sqrt(crossprod[0]*crossprod[0]+
+                       crossprod[1]*crossprod[1]+
+                       crossprod[2]*crossprod[2]);
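+
+  /* atan2(|va x vb|, va.vb) is the angle between the two vectors and,
+     unlike acos(va.vb), remains accurate when the angle is small. */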
+  return atan2(length, dotprod);
+}
diff --git a/src/lightcone/healpix_util.h b/src/lightcone/healpix_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..dfa64c23ee3f1e542eafe86153bf830decc4dfec
--- /dev/null
+++ b/src/lightcone/healpix_util.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ *
+ * The functions in this file are based on code from the HEALPix
+ * 3.80 library (see http://healpix.sourceforge.net):
+ *
+ *  Copyright (C) 1997-2013 Krzysztof M. Gorski, Eric Hivon,
+ *                          Benjamin D. Wandelt, Anthony J. Banday, 
+ *                          Matthias Bartelmann, Hans K. Eriksen, 
+ *                          Frode K. Hansen, Martin Reinecke
+ *
+ * Translated and modified for SWIFT by John Helly:
+ *
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#include "lightcone/pixel_index.h"
+
+struct pixel_range {
+  pixel_index_t first;
+  pixel_index_t last;
+};
+
+/*
+  Functions we need which are missing from the HEALPix C API
+*/
+
+double healpix_max_pixrad(int nside);
+
+void healpix_query_disc_range(int nside, double vec[3], double radius,
+                              pixel_index_t *pix_min, pixel_index_t *pix_max,
+                              int *nr_ranges, struct pixel_range **range);
diff --git a/src/lightcone/lightcone.c b/src/lightcone/lightcone.c
new file mode 100644
index 0000000000000000000000000000000000000000..45dc52cac4a5b94f6da1d13c8925948f14f3afb1
--- /dev/null
+++ b/src/lightcone/lightcone.c
@@ -0,0 +1,1783 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <hdf5.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <limits.h>
+
+/* HEALPix C API */
+#ifdef HAVE_CHEALPIX
+#include <chealpix.h>
+#endif
+
+/* This object's header. */
+#include "lightcone/lightcone.h"
+
+/* Local headers */
+#include "common_io.h"
+#include "cosmology.h"
+#include "engine.h"
+#include "error.h"
+#include "extra_io.h"
+#include "gravity_io.h"
+#include "hydro.h"
+#include "lightcone/lightcone_particle_io.h"
+#include "lightcone/lightcone_replications.h"
+#include "lock.h"
+#include "neutrino_io.h"
+#include "parser.h"
+#include "part_type.h"
+#include "particle_buffer.h"
+#include "periodic.h"
+#include "restart.h"
+#include "space.h"
+#include "timeline.h"
+#include "tools.h"
+#include "units.h"
+
+/* Whether to dump the replication list */
+//#define DUMP_REPLICATIONS
+#ifdef DUMP_REPLICATIONS
+static int output_nr = 0;
+#endif
+
+/* MPI rank for diagnostic messages */
+extern int engine_rank;
+
+#ifdef HAVE_CHEALPIX
+/**
+ * @brief Read in map types and compression info from a text file
+ *
+ * The first column in the file is the map type name and the second
+ * column is the compression method. Columns are separated by
+ * whitespace.
+ *
+ * @param map_types_file file with the map type names and compression methods
+ * @param nr_map_types returns the number of map types read
+ * @param map_types returns an array of nr_map_types map type structs
+ */
+static void read_map_types_file(const char *map_types_file, int *nr_map_types,
+                                struct lightcone_map_type **map_types) {
+
+  int map_type_nr = 0;
+  if (engine_rank == 0) {
+
+    FILE *fd = fopen(map_types_file, "r");
+    if (!fd)
+      error("Failed to open lightcone map types file %s", map_types_file);
+
+    /* Count the number of lines in the file */
+    size_t len = 0;
+    char *line = NULL;
+    int nr_lines = 0;
+    while (getline(&line, &len, fd) != -1) nr_lines += 1;
+    rewind(fd);
+
+    /* Allocate output arrays */
+    *map_types = calloc(nr_lines, sizeof(struct lightcone_map_type));
+
+    /* Read lines */
+    for (int i = 0; i < nr_lines; i += 1) {
+
+      /* Get name and compression type from this line */
+      char compression[PARSER_MAX_LINE_SIZE];
+      if (fscanf(fd, "%s %s", (*map_types)[map_type_nr].name, compression) != 2)
+        error("Failed to read line from map types file");
+
+      /* Look up compression scheme */
+      (*map_types)[map_type_nr].compression =
+          compression_scheme_from_name(compression);
+
+      /* Only keep maps which have not been disabled */
+      if ((*map_types)[map_type_nr].compression != compression_do_not_write)
+        map_type_nr += 1;
+    }
+    fclose(fd);
+    free(line);
+  }
+
+#ifdef WITH_MPI
+  MPI_Bcast(&map_type_nr, 1, MPI_INT, 0, MPI_COMM_WORLD);
+  if (engine_rank != 0)
+    *map_types = calloc(map_type_nr, sizeof(struct lightcone_map_type));
+  MPI_Bcast(*map_types, sizeof(struct lightcone_map_type) * map_type_nr,
+            MPI_BYTE, 0, MPI_COMM_WORLD);
+#endif
+
+  /* Return number of enabled map types */
+  *nr_map_types = map_type_nr;
+}
+#endif
+
+/**
+ * @brief Identify which healpix map types we're making
+ *
+ * @param props the #lightcone_props structure
+ *
+ * For each requested map type find the update functions by matching names.
+ * Map types are defined in lightcone_map_types.h and there may be extra
+ * types defined by various physics modules. The array map_type_array
+ * below determines where we look for extra map types.
+ *
+ * This function assumes that props->map_type is already allocated and
+ * props->map_type[:].name has been set to the list of map names read from
+ * the map types file named in the .yml parameters. It sets the update_map,
+ * ptype_contributes and units fields in the props->map_type array.
+ *
+ */
+static void lightcone_identify_map_types(struct lightcone_props *props) {
+
+  /* Loop over requested map types */
+  for (int map_nr = 0; map_nr < props->nr_maps; map_nr += 1) {
+
+    /* Use null function pointer to indicate not found yet */
+    props->map_type[map_nr].update_map = NULL;
+
+    /* Places to search for lightcone map types:
+       extra map types are provided by various physics modules. */
+    const int num_places = 3;
+    const struct lightcone_map_type *map_type_array[] = {
+        lightcone_map_types, extra_lightcone_map_types,
+        neutrino_lightcone_map_types};
+
+    /* Loop over places to search for map types */
+    for (int i = 0; i < num_places; i += 1) {
+
+      int type_nr = 0;
+      const struct lightcone_map_type *map_types_to_search = map_type_array[i];
+      while (map_types_to_search[type_nr].update_map) {
+        if (strcmp(map_types_to_search[type_nr].name,
+                   props->map_type[map_nr].name) == 0) {
+          props->map_type[map_nr] = map_types_to_search[type_nr];
+          if (engine_rank == 0)
+            message("lightcone %d: lightcone map %d is of type %s",
+                    props->index, map_nr, map_types_to_search[type_nr].name);
+        }
+        type_nr += 1;
+      }
+
+    } /* Next place to search */
+
+    if (!props->map_type[map_nr].update_map)
+      error("Unable to locate lightcone map type %s",
+            props->map_type[map_nr].name);
+  }
+}
+
+/**
+ * @brief Allocate particle I/O buffers for a lightcone
+ *
+ * @param props the #lightcone_props structure
+ */
+static void lightcone_allocate_buffers(struct lightcone_props *props) {
+
+  /* Initialize particle output buffers */
+  const size_t elements_per_block = (size_t)props->buffer_chunk_size;
+
+  if (props->use_type[swift_type_gas]) {
+    particle_buffer_init(&props->buffer[swift_type_gas],
+                         sizeof(struct lightcone_gas_data), elements_per_block,
+                         "lightcone_gas");
+  }
+
+  if (props->use_type[swift_type_dark_matter]) {
+    particle_buffer_init(&props->buffer[swift_type_dark_matter],
+                         sizeof(struct lightcone_dark_matter_data),
+                         elements_per_block, "lightcone_dm");
+  }
+
+  if (props->use_type[swift_type_dark_matter_background]) {
+    particle_buffer_init(&props->buffer[swift_type_dark_matter_background],
+                         sizeof(struct lightcone_dark_matter_data),
+                         elements_per_block, "lightcone_dm_bg");
+  }
+
+  if (props->use_type[swift_type_stars]) {
+    particle_buffer_init(&props->buffer[swift_type_stars],
+                         sizeof(struct lightcone_stars_data),
+                         elements_per_block, "lightcone_stars");
+  }
+
+  if (props->use_type[swift_type_black_hole]) {
+    particle_buffer_init(&props->buffer[swift_type_black_hole],
+                         sizeof(struct lightcone_black_hole_data),
+                         elements_per_block, "lightcone_bh");
+  }
+
+  if (props->use_type[swift_type_neutrino]) {
+    particle_buffer_init(&props->buffer[swift_type_neutrino],
+                         sizeof(struct lightcone_neutrino_data),
+                         elements_per_block, "lightcone_neutrino");
+  }
+}
+
+/**
+ * @brief Dump lightcone_props struct to the output stream.
+ *
+ * @param props the #lightcone_props structure
+ * @param stream The stream to write to.
+ */
+void lightcone_struct_dump(const struct lightcone_props *props, FILE *stream) {
+
+  /* Don't dump the replication list - will regenerate it as needed */
+  struct lightcone_props tmp = *props;
+  tmp.replication_list.nrep = 0;
+  tmp.replication_list.replication = NULL;
+  tmp.have_replication_list = 0;
+
+  /* Don't write out particle buffers - must flush before dumping restart. */
+  memset(tmp.buffer, 0, sizeof(struct particle_buffer) * swift_type_count);
+
+  /* Don't write array pointers */
+  tmp.shell = NULL;
+  tmp.map_type = NULL;
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1)
+    tmp.part_type[ptype].map_index = NULL;
+
+  /* Dump the lightcone struct */
+  restart_write_blocks((void *)&tmp, sizeof(struct lightcone_props), 1, stream,
+                       "lightcone_props", "lightcone_props");
+
+  /* Dump the array of map types */
+  restart_write_blocks((void *)props->map_type,
+                       sizeof(struct lightcone_map_type), props->nr_maps,
+                       stream, "lightcone_props", "lightcone_props");
+
+  /* Dump the array of shells */
+  lightcone_shell_array_dump(props->shell, props->nr_shells, stream);
+
+  /* For each particle type we have an array of lightcone map indexes to update.
+   * Dump these. */
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+    const struct lightcone_particle_type *this_type =
+        &(props->part_type[ptype]);
+    restart_write_blocks((void *)this_type->map_index, sizeof(int),
+                         this_type->nr_maps, stream, "lightcone_props",
+                         "lightcone_props");
+  }
+}
+
+/**
+ * @brief Initialise the particle output fields for each particle type.
+ *
+ * @param props the #lightcone_props structure
+ */
+void lightcone_define_output_fields(struct lightcone_props *props) {
+
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1)
+    lightcone_io_field_list_init(&props->particle_fields[ptype]);
+
+  /* Add the default set of fields for all models, from lightcone_particle_io.c
+   */
+  lightcone_io_append_gas_output_fields(
+      &props->particle_fields[swift_type_gas]);
+  lightcone_io_append_dark_matter_output_fields(
+      &props->particle_fields[swift_type_dark_matter]);
+  lightcone_io_append_dark_matter_background_output_fields(
+      &props->particle_fields[swift_type_dark_matter_background]);
+  lightcone_io_append_stars_output_fields(
+      &props->particle_fields[swift_type_stars]);
+  lightcone_io_append_black_hole_output_fields(
+      &props->particle_fields[swift_type_black_hole]);
+  lightcone_io_append_neutrino_output_fields(
+      &props->particle_fields[swift_type_neutrino]);
+}
+
+/**
+ * @brief Restore lightcone_props struct from the input stream.
+ *
+ * @param props the #lightcone_props structure
+ * @param stream The stream to read from.
+ */
+void lightcone_struct_restore(struct lightcone_props *props, FILE *stream) {
+
+  /* Restore lightcone struct */
+  restart_read_blocks((void *)props, sizeof(struct lightcone_props), 1, stream,
+                      NULL, "lightcone_props");
+
+  /* Read in the map types */
+  props->map_type = malloc(sizeof(struct lightcone_map_type) * props->nr_maps);
+  restart_read_blocks((void *)props->map_type,
+                      sizeof(struct lightcone_map_type), props->nr_maps, stream,
+                      NULL, "lightcone_props");
+
+  /* Read in the shells */
+  props->shell = lightcone_shell_array_restore(
+      stream, props->nr_shells, props->part_type, props->buffer_chunk_size);
+
+  /* For each particle type we have an array of lightcone map indexes to update.
+   * Restore these. */
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+    struct lightcone_particle_type *this_type = &(props->part_type[ptype]);
+    this_type->map_index = malloc(sizeof(int) * this_type->nr_maps);
+    restart_read_blocks((void *)this_type->map_index, sizeof(int),
+                        this_type->nr_maps, stream, NULL, "lightcone_props");
+  }
+
+  /* Restore pointers to functions for updating healpix maps */
+  lightcone_identify_map_types(props);
+
+  /* Update function pointers for each map */
+  for (int shell_nr = 0; shell_nr < props->nr_shells; shell_nr += 1) {
+    for (int map_nr = 0; map_nr < props->nr_maps; map_nr += 1) {
+      props->shell[shell_nr].map[map_nr].type = props->map_type[map_nr];
+    }
+  }
+
+  /* Re-allocate particle data buffers */
+  lightcone_allocate_buffers(props);
+
+  /* Define output quantities */
+  lightcone_define_output_fields(props);
+
+  /* Tabulate the projected kernel */
+  projected_kernel_init(&props->kernel_table);
+}
+
+#ifdef HAVE_CHEALPIX
+/**
+ * @brief Locate a lightcone parameter in the .yml file
+ *
+ * First check the section specific to this lightcone then
+ * fall back to LightconeCommon if not found.
+ *
+ * @param params the swift parameters struct
+ * @param index index of the lightcone
+ * @param name name of the parameter to find
+ * @param outbuf returns the parameter value
+ *
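+ * For example, with index 1 and name "nside" this first checks
+ * "Lightcone1:nside" and falls back to "LightconeCommon:nside".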
+ */
+static char *find_parameter(struct swift_params *params, const int index,
+                            const char *name, char *outbuf) {
+
+  char full_name[PARSER_MAX_LINE_SIZE];
+
+  /* Check section specific to this lightcone */
+  check_snprintf(full_name, PARSER_MAX_LINE_SIZE, "Lightcone%d:%s", index,
+                 name);
+  if (parser_does_param_exist(params, full_name)) {
+    strcpy(outbuf, full_name);
+    return outbuf;
+  }
+
+  /* Will look in LightconeCommon section if parameter was not found */
+  check_snprintf(full_name, PARSER_MAX_LINE_SIZE, "LightconeCommon:%s", name);
+  strcpy(outbuf, full_name);
+  return outbuf;
+}
+#endif
+
+/**
+ * @brief Initialise the properties of the lightcone code.
+ *
+ * @param props the #lightcone_props structure to fill.
+ * @param index index of the lightcone to initialize
+ * @param s the #space structure.
+ * @param cosmo the #cosmology structure.
+ * @param params the parameter file parser.
+ * @param internal_units swift internal unit system
+ * @param physical_constants swift physical constant values
+ * @param verbose the verbosity flag
+ */
+void lightcone_init(struct lightcone_props *props, const int index,
+                    const struct space *s, const struct cosmology *cosmo,
+                    struct swift_params *params,
+                    const struct unit_system *internal_units,
+                    const struct phys_const *physical_constants,
+                    const int verbose) {
+
+#ifdef HAVE_CHEALPIX
+
+  /* Macro to generate parameter names given section name */
+  char buf[PARSER_MAX_LINE_SIZE];
+#define YML_NAME(x) find_parameter(params, index, x, buf)
+
+  /* Store index of this lightcone in the .yml file */
+  props->index = index;
+
+  /* Verbose lightcone output - use passed in value of --verbose flag */
+  props->verbose = verbose;
+
+  /* Define output quantities */
+  lightcone_define_output_fields(props);
+
+  /* For each particle type, get redshift range for lightcone particle output */
+  for (int i = 0; i < swift_type_count; i += 1) {
+    const int len = PARSER_MAX_LINE_SIZE;
+    char param_name[len];
+    double zrange[2] = {0.0, -1.0}; /* default max < min means do not output */
+    check_snprintf(param_name, len, "z_range_for_%s", part_type_names[i]);
+    parser_get_opt_param_double_array(params, YML_NAME(param_name), 2, zrange);
+    props->z_min_for_type[i] = zrange[0];
+    props->z_max_for_type[i] = zrange[1];
+    /* Will only output types with z_max > z_min */
+    props->use_type[i] = props->z_max_for_type[i] > props->z_min_for_type[i];
+    if (engine_rank == 0 && verbose) {
+      if (props->use_type[i]) {
+        message("lightcone %d: %s particles will be output from z=%f to z=%f",
+                props->index, part_type_names[i], zrange[0], zrange[1]);
+      } else {
+        message("lightcone %d: %s particle output is disabled", props->index,
+                part_type_names[i]);
+      }
+    }
+  }
+
+  /* For each type, find range in comoving distance squared in which we output
+   * particles */
+  for (int i = 0; i < swift_type_count; i += 1) {
+    if (props->use_type[i]) {
+      const double a_min = 1.0 / (1.0 + props->z_max_for_type[i]);
+      props->r2_max_for_type[i] =
+          pow(cosmology_get_comoving_distance(cosmo, a_min), 2.0);
+      const double a_max = 1.0 / (1.0 + props->z_min_for_type[i]);
+      props->r2_min_for_type[i] =
+          pow(cosmology_get_comoving_distance(cosmo, a_max), 2.0);
+    } else {
+      props->r2_min_for_type[i] = 0.0;
+      props->r2_max_for_type[i] = 0.0;
+    }
+  }
+
+  /*
+    Allow selective output of gas particles at high redshift.
+    Will output gas particles if redshift < min_z_for_gas_filtering OR
+    (temperature > min_temp_for_filtered_gas AND nh >
+    min_nh_for_filtered_gas*(1+z)^4)
+  */
+  props->gas_filtering_enabled =
+      parser_get_opt_param_int(params, YML_NAME("gas_filtering_enabled"), 0);
+  if (props->gas_filtering_enabled) {
+    props->min_z_for_gas_filtering =
+        parser_get_param_double(params, YML_NAME("min_z_for_gas_filtering"));
+    props->min_temp_for_filtered_gas =
+        parser_get_param_double(params, YML_NAME("min_temp_for_filtered_gas"));
+    props->min_nh_for_filtered_gas =
+        parser_get_param_double(params, YML_NAME("min_nh_for_filtered_gas"));
+    props->max_a_for_gas_filtering =
+        1.0 / (1.0 + props->min_z_for_gas_filtering);
+    /* Convert temperature and density thresholds to internal units, assuming
+     * they're input in CGS */
+    props->min_temp_for_filtered_gas /=
+        units_cgs_conversion_factor(internal_units, UNIT_CONV_TEMPERATURE);
+    props->min_nh_for_filtered_gas /=
+        units_cgs_conversion_factor(internal_units, UNIT_CONV_NUMBER_DENSITY);
+  }
+
+  /* Exclude particles from xray and sz maps if they have been recently AGN
+   * heated */
+  props->xray_maps_recent_AGN_injection_exclusion_time =
+      parser_get_opt_param_double(
+          params, YML_NAME("xray_maps_recent_AGN_injection_exclusion_time_myr"),
+          -1.0);
+  /* Assume supplied value is in megayears and physical constants are in
+   * internal units */
+  props->xray_maps_recent_AGN_injection_exclusion_time *=
+      1.0e6 * physical_constants->const_year;
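+  /* The default of -1 remains negative after this conversion, so the
+     exclusion only takes effect if the parameter is set. */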
+
+  /*
+    Temperature limits for recently AGN heated gas to be excluded from xray and
+    sz maps.
+
+    Gas is excluded if it has log(temperature) which is greater than
+    log(AGN_Delta_T)+xray_maps_recent_AGN_logdT_min and less than
+    log(AGN_Delta_T)+xray_maps_recent_AGN_logdT_max.
+
+    Only takes effect if xray_maps_recent_AGN_injection_exclusion_time_myr is
+    set.
+  */
+  if (props->xray_maps_recent_AGN_injection_exclusion_time > 0.0) {
+    double delta_logt_min = parser_get_param_double(
+        params, YML_NAME("xray_maps_recent_AGN_injection_delta_logT_min"));
+    if (delta_logt_min > 0.0)
+      error("xray_maps_recent_AGN_injection_delta_logT_min should be negative");
+    props->xray_maps_recent_AGN_min_temp_factor = pow(10.0, delta_logt_min);
+    double delta_logt_max = parser_get_param_double(
+        params, YML_NAME("xray_maps_recent_AGN_injection_delta_logT_max"));
+    if (delta_logt_max < 0.0)
+      error("xray_maps_recent_AGN_injection_delta_logT_max should be positive");
+    props->xray_maps_recent_AGN_max_temp_factor = pow(10.0, delta_logt_max);
+    if (delta_logt_max < delta_logt_min)
+      error(
+          "xray_maps_recent_AGN_injection_delta_logT_max should be greater "
+          "than _min!");
+  }
+
+  /* Directory in which to write this lightcone */
+  parser_get_opt_param_string(params, YML_NAME("subdir"), props->subdir, ".");
+
+  /* Base name for output files */
+  parser_get_param_string(params, YML_NAME("basename"), props->basename);
+
+  /* Coordinates of the observer in the simulation box */
+  parser_get_param_double_array(params, YML_NAME("observer_position"), 3,
+                                props->observer_position);
+
+  /* Write particles to disk if this many or more are in the buffer */
+  props->max_particles_buffered = parser_get_opt_param_int(
+      params, YML_NAME("max_particles_buffered"), 100000);
+
+  /* Chunk size for particles buffered in memory */
+  props->buffer_chunk_size =
+      parser_get_opt_param_int(params, YML_NAME("buffer_chunk_size"), 20000);
+
+  /* Chunk size for particles in the HDF5 output files */
+  props->hdf5_chunk_size =
+      parser_get_opt_param_int(params, YML_NAME("hdf5_chunk_size"), 16384);
+
+  /* Maximum amount of data (in megabytes) to send from any one rank when
+   * updating healpix maps */
+  props->max_map_update_send_size_mb = parser_get_opt_param_double(
+      params, YML_NAME("max_map_update_send_size_mb"), 512.0);
+
+  /* Compression options */
+  props->particles_lossy_compression = parser_get_opt_param_int(
+      params, YML_NAME("particles_lossy_compression"), 0);
+  props->particles_gzip_level =
+      parser_get_opt_param_int(params, YML_NAME("particles_gzip_level"), 0);
+  props->maps_gzip_level =
+      parser_get_opt_param_int(params, YML_NAME("maps_gzip_level"), 0);
+
+  /* Get the size of the simulation box */
+  props->boxsize = s->dim[0];
+  if (s->dim[1] != s->dim[0] || s->dim[2] != s->dim[0])
+    error("Lightcones require a cubic simulation box.");
+
+  /* Get top level cell size */
+  props->cell_width = s->width[0];
+  if (s->width[1] != s->width[0] || s->width[2] != s->width[0])
+    error("Lightcones require cubic top level cells.");
+
+  /* Initially have no replication list */
+  props->have_replication_list = 0;
+  props->ti_old = 0;
+  props->ti_current = 0;
+
+  /* Initialize various counters */
+  for (int i = 0; i < swift_type_count; i += 1) {
+    props->num_particles_written_this_rank[i] = 0;
+    props->num_particles_written_to_file[i] = 0;
+  }
+  props->current_file = -1;
+  props->file_needs_finalizing = 0;
+
+  /* Always start a new file initially */
+  props->start_new_file = 1;
+
+  /*
+     Healpix map parameters for this lightcone
+  */
+
+  /* Healpix nside parameter */
+  props->nside = parser_get_param_int(params, YML_NAME("nside"));
+
+  /* Update lightcone pixel data if more than this number of updates are
+   * buffered */
+  props->max_updates_buffered = parser_get_opt_param_int(
+      params, YML_NAME("max_updates_buffered"), 1000000);
+
+  /*! Whether to write distributed maps in MPI mode */
+  props->distributed_maps =
+      parser_get_opt_param_int(params, YML_NAME("distributed_maps"), 1);
+
+  /* Name of the file with radii of spherical shells */
+  parser_get_param_string(params, YML_NAME("radius_file"), props->radius_file);
+
+  /* Get names of the healpix maps to make for this lightcone */
+  char map_types_file[FILENAME_BUFFER_SIZE];
+  parser_get_param_string(params, YML_NAME("map_names_file"), map_types_file);
+  read_map_types_file(map_types_file, &props->nr_maps, &props->map_type);
+
+  /* For each requested map type find the update function by matching names */
+  lightcone_identify_map_types(props);
+
+  /* For each particle type, determine which healpix maps will be updated */
+  const int nr_maps = props->nr_maps;
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+
+    struct lightcone_particle_type *this_type = &(props->part_type[ptype]);
+
+    /* Count maps updated by this particle type */
+    this_type->nr_maps = 0;
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      if (props->map_type[map_nr].ptype_contributes(ptype))
+        this_type->nr_maps += 1;
+    }
+
+    /* Store indexes of maps to update for this particle type */
+    this_type->map_index = malloc(sizeof(int) * this_type->nr_maps);
+    this_type->nr_maps = 0;
+    this_type->nr_smoothed_maps = 0;
+    this_type->nr_unsmoothed_maps = 0;
+
+    /* First the smoothed maps */
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      const struct lightcone_map_type *map_type = &(props->map_type[map_nr]);
+      if (map_type->ptype_contributes(ptype) &&
+          map_type->smoothing == map_smoothed) {
+        this_type->map_index[this_type->nr_maps] = map_nr;
+        this_type->nr_maps += 1;
+        this_type->nr_smoothed_maps += 1;
+      }
+    }
+
+    /* Then the un-smoothed maps */
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      const struct lightcone_map_type *map_type = &(props->map_type[map_nr]);
+      if (map_type->ptype_contributes(ptype) &&
+          map_type->smoothing == map_unsmoothed) {
+        this_type->map_index[this_type->nr_maps] = map_nr;
+        this_type->nr_maps += 1;
+        this_type->nr_unsmoothed_maps += 1;
+      }
+    }
+
+    /* Determine how much data we need to store per particle of this type.
+       We need theta and phi angular coordinates, angular size of the particle,
+       and the values to be added to the healpix maps */
+    this_type->buffer_element_size =
+        (3 + this_type->nr_maps) * sizeof(union lightcone_map_buffer_entry);
+  }
+
+  /* Check the number of healpix pixels doesn't overflow pixel_index_t */
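+  /* (e.g. nside = 16384 gives 12 * 16384^2 ~ 3.2e9 pixels, which would
+     overflow a 32-bit signed pixel index) */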
+  const unsigned long long nside_ull = props->nside;
+  const unsigned long long npix_ull = 12ull * nside_ull * nside_ull;
+  if (npix_ull > MAX_PIXEL_INDEX)
+    error(
+        "Number of HEALPix pixels is too large for pixel_index_t (see "
+        "lightcone/pixel_index.h)");
+
+  /* Set up the array of lightcone shells for this lightcone */
+  const pixel_index_t total_nr_pix = nside2npix64(props->nside);
+  props->shell = lightcone_shell_array_init(
+      cosmo, props->radius_file, props->nr_maps, props->map_type, props->nside,
+      total_nr_pix, props->part_type, props->buffer_chunk_size,
+      &props->nr_shells);
+
+  /* Compute area of a healpix pixel */
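+  /* (HEALPix pixels are equal-area by construction, so this is exact) */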
+  props->pixel_area_steradians = 4 * M_PI / total_nr_pix;
+
+  /* Report shell radii */
+  const int nr_shells = props->nr_shells;
+  if (engine_rank == 0) {
+    for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+      message("lightcone %d: shell %d has inner radius %e and outer radius %e",
+              index, shell_nr, props->shell[shell_nr].rmin,
+              props->shell[shell_nr].rmax);
+    }
+    message("lightcone %d: there are %d lightcone shells and %d maps per shell",
+            index, nr_shells, nr_maps);
+  }
+
+  /* For each particle type, find the full redshift range to search for
+   * lightcone crossings */
+  int have_particle_output = 0;
+  for (int i = 0; i < swift_type_count; i += 1) {
+
+    /* Initially set range to search to range used for particle output, if any
+     */
+    if (props->use_type[i]) {
+      props->a_min_search_for_type[i] = 1.0 / (1.0 + props->z_max_for_type[i]);
+      props->a_max_search_for_type[i] = 1.0 / (1.0 + props->z_min_for_type[i]);
+      have_particle_output = 1;
+    } else {
+      props->a_min_search_for_type[i] = DBL_MAX;
+      props->a_max_search_for_type[i] = 0.0;
+    }
+
+    /* Then expand the range to include any healpix maps this type contributes
+     * to */
+    for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+      const double shell_a_min = props->shell[shell_nr].amin;
+      const double shell_a_max = props->shell[shell_nr].amax;
+      for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+        const struct lightcone_map_type *map_type = &(props->map_type[map_nr]);
+        if (map_type->ptype_contributes(i)) {
+          if (shell_a_min < props->a_min_search_for_type[i])
+            props->a_min_search_for_type[i] = shell_a_min;
+          if (shell_a_max > props->a_max_search_for_type[i])
+            props->a_max_search_for_type[i] = shell_a_max;
+        }
+      }
+    }
+    /* Next particle type */
+  }
+
+  /* Determine the full redshift range to search for all particle types */
+  double a_min = DBL_MAX;
+  double a_max = 0.0;
+  for (int i = 0; i < swift_type_count; i += 1) {
+    if (props->a_max_search_for_type[i] > props->a_min_search_for_type[i]) {
+      if (props->a_min_search_for_type[i] < a_min)
+        a_min = props->a_min_search_for_type[i];
+      if (props->a_max_search_for_type[i] > a_max)
+        a_max = props->a_max_search_for_type[i];
+    }
+  }
+
+  /* Check we have a valid range in expansion factor for the lightcone */
+  if (a_min > a_max)
+    error(
+        "Code was run with --lightcone but no particle outputs or healpix maps "
+        "are enabled");
+  props->a_min = a_min;
+  props->a_max = a_max;
+  if (engine_rank == 0) {
+    for (int i = 0; i < swift_type_count; i += 1) {
+      if (props->a_max_search_for_type[i] > props->a_min_search_for_type[i]) {
+        message("lightcone %d: range in expansion factor for %s: %e to %e",
+                index, part_type_names[i], props->a_min_search_for_type[i],
+                props->a_max_search_for_type[i]);
+      } else {
+        message("lightcone %d: no lightcone output for %s", index,
+                part_type_names[i]);
+      }
+    }
+    message("lightcone %d: range in expansion factor overall: %e to %e", index,
+            a_min, a_max);
+  }
+
+  /* Store the corresponding comoving distance squared */
+  props->r2_max = pow(cosmology_get_comoving_distance(cosmo, a_min), 2.0);
+  props->r2_min = pow(cosmology_get_comoving_distance(cosmo, a_max), 2.0);
+
+  /* Allocate lightcone output buffers */
+  lightcone_allocate_buffers(props);
+
+  /* Tabulate the projected kernel */
+  projected_kernel_init(&props->kernel_table);
+
+  /* Ensure that the output directories exist */
+  if (engine_rank == 0) {
+    const int len = FILENAME_BUFFER_SIZE;
+    char dirname[len];
+    safe_checkdir(props->subdir, 1);
+    /* Directory for particle outputs */
+    if (have_particle_output) {
+      check_snprintf(dirname, len, "%s/%s_particles", props->subdir,
+                     props->basename);
+      safe_checkdir(dirname, 1);
+    }
+    /* Directory for shell outputs */
+    if ((props->nr_shells > 0) && (props->nr_maps > 0)) {
+      check_snprintf(dirname, len, "%s/%s_shells", props->subdir,
+                     props->basename);
+      safe_checkdir(dirname, 1);
+    }
+  }
+#ifdef WITH_MPI
+  MPI_Barrier(MPI_COMM_WORLD);
+#endif
+#else
+  error("Need HEALPix C API to make lightcones");
+#endif
+}
+
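+/*
+  Illustrative parameter-file section matching the values read in
+  lightcone_init() above, assuming YML_NAME() expands a key <k> to
+  "Lightcone<index>:<k>" (the key names are the ones parsed above; the
+  values are made up):
+
+    Lightcone0:
+      enabled: 1                    # read in lightcone_array_init()
+      nside: 512
+      radius_file: shell_radii.txt
+      map_names_file: map_types.txt
+      max_updates_buffered: 1000000 # optional, default 1000000
+      distributed_maps: 1           # optional, default 1
+*/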
+
+/**
+ * @brief Return the name of a lightcone particle output file
+ *
+ * @param buf returns the filename
+ * @param len length of the buffer buf
+ * @param subdir subdirectory in which to write output
+ * @param basename base name of this lightcone
+ * @param current_file lightcone particle file index, which is
+ *        incremented after each restart dump
+ * @param comm_rank rank of this process in the MPI communicator
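+ *
+ * For example (illustrative values only), subdir="lightcones",
+ * basename="lc0", current_file=2 and comm_rank=5 give
+ * "lightcones/lc0_particles/lc0_0002.5.hdf5".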
+ */
+static void particle_file_name(char *buf, int len, char *subdir, char *basename,
+                               int current_file, int comm_rank) {
+
+  check_snprintf(buf, len, "%s/%s_particles/%s_%04d.%d.hdf5", subdir, basename,
+                 basename, current_file, comm_rank);
+}
+
+/**
+ * @brief Flush any buffers which exceed the specified size.
+ *
+ * Also used to flush buffers before dumping restart files, in
+ * which case we should have flush_all=1 and end_file=1 so that
+ * buffers are flushed regardless of size and we will start a
+ * new set of lightcone files after the restart dump.
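+ * Ending the file here means a run resumed from the restart dump writes
+ * to a fresh file rather than appending to one that may already contain
+ * these particles.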
+ *
+ * @param props the #lightcone_props structure.
+ * @param a the current expansion factor
+ * @param internal_units swift internal unit system
+ * @param snapshot_units swift snapshot unit system
+ * @param flush_all flag to force flush of all buffers
+ * @param end_file if true, subsequent calls write to a new file
+ *
+ */
+void lightcone_flush_particle_buffers(struct lightcone_props *props, double a,
+                                      const struct unit_system *internal_units,
+                                      const struct unit_system *snapshot_units,
+                                      int flush_all, int end_file) {
+
+  ticks tic = getticks();
+
+  /* Should never be called with end_file=1 and flush_all=0 */
+  if (end_file && (!flush_all))
+    error("Finalizing file without flushing buffers!");
+
+  /* Will flush any buffers with more particles than this */
+  size_t max_to_buffer = (size_t)props->max_particles_buffered;
+  if (flush_all) max_to_buffer = 0;
+
+  /* Count how many types have data to write out */
+  int types_to_flush = 0;
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+    if (props->use_type[ptype]) {
+      const size_t num_to_write =
+          particle_buffer_num_elements(&props->buffer[ptype]);
+      if (num_to_write >= max_to_buffer && num_to_write > 0)
+        types_to_flush += 1;
+    }
+  }
+
+  /* Check if there's anything to do */
+  if ((types_to_flush > 0) || (end_file && props->file_needs_finalizing)) {
+
+    /* We have data to flush, so open or create the output file */
+    hid_t file_id;
+    char fname[FILENAME_BUFFER_SIZE];
+    if (props->start_new_file) {
+
+      /* Get the name of the next file */
+      props->current_file += 1;
+      particle_file_name(fname, FILENAME_BUFFER_SIZE, props->subdir,
+                         props->basename, props->current_file, engine_rank);
+
+      /* Create the file */
+      file_id = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+      if (file_id < 0) error("Unable to create new lightcone file: %s", fname);
+
+      /* This new file has not been finalized yet */
+      props->file_needs_finalizing = 1;
+
+      /* We have now written no particles to the current file */
+      for (int ptype = 0; ptype < swift_type_count; ptype += 1)
+        props->num_particles_written_to_file[ptype] = 0;
+
+      /* Write the system of Units used in the snapshot */
+      io_write_unit_system(file_id, snapshot_units, "Units");
+
+      /* Write the system of Units used internally */
+      io_write_unit_system(file_id, internal_units, "InternalCodeUnits");
+
+      /* Write the observer position and redshift limits */
+      hid_t group_id = H5Gcreate(file_id, "Lightcone", H5P_DEFAULT, H5P_DEFAULT,
+                                 H5P_DEFAULT);
+      io_write_attribute(group_id, "observer_position", DOUBLE,
+                         props->observer_position, 3);
+
+      for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+        char name[PARSER_MAX_LINE_SIZE];
+        check_snprintf(name, PARSER_MAX_LINE_SIZE, "minimum_redshift_%s",
+                       part_type_names[ptype]);
+        io_write_attribute_d(group_id, name, props->z_min_for_type[ptype]);
+        check_snprintf(name, PARSER_MAX_LINE_SIZE, "maximum_redshift_%s",
+                       part_type_names[ptype]);
+        io_write_attribute_d(group_id, name, props->z_max_for_type[ptype]);
+      }
+
+      /* Record number of MPI ranks so we know how many files there are */
+      int comm_rank = 0;
+      int comm_size = 1;
+#ifdef WITH_MPI
+      MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+      MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
+#endif
+      io_write_attribute_i(group_id, "mpi_rank", comm_rank);
+      io_write_attribute_i(group_id, "nr_mpi_ranks", comm_size);
+      io_write_attribute_i(group_id, "file_index", props->current_file);
+
+      H5Gclose(group_id);
+
+      /* We no longer need to create a new file */
+      props->start_new_file = 0;
+
+    } else {
+
+      /* Re-open an existing file */
+      particle_file_name(fname, FILENAME_BUFFER_SIZE, props->subdir,
+                         props->basename, props->current_file, engine_rank);
+      file_id = H5Fopen(fname, H5F_ACC_RDWR, H5P_DEFAULT);
+      if (file_id < 0)
+        error("Unable to open current lightcone file: %s", fname);
+    }
+
+    /* Loop over particle types */
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      if (props->use_type[ptype]) {
+        const size_t num_to_write =
+            particle_buffer_num_elements(&props->buffer[ptype]);
+        if (num_to_write >= max_to_buffer && num_to_write > 0) {
+          lightcone_write_particles(props, internal_units, snapshot_units,
+                                    ptype, file_id);
+          particle_buffer_empty(&props->buffer[ptype]);
+          props->num_particles_written_to_file[ptype] += num_to_write;
+          props->num_particles_written_this_rank[ptype] += num_to_write;
+        }
+      }
+    }
+
+    /* Check if this is the last write to this file */
+    if (end_file) {
+      hid_t group_id = H5Gopen(file_id, "Lightcone", H5P_DEFAULT);
+      /* Flag the file as complete */
+      io_write_attribute_i(group_id, "file_complete", 1);
+      /* Write the expected number of particles in all files written by this
+         rank up to and including this one. */
+      for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+        char name[PARSER_MAX_LINE_SIZE];
+        check_snprintf(name, PARSER_MAX_LINE_SIZE, "cumulative_count_%s",
+                       part_type_names[ptype]);
+        io_write_attribute_ll(group_id, name,
+                              props->num_particles_written_this_rank[ptype]);
+      }
+      /* Write the expansion factor at which we closed this file */
+      io_write_attribute_d(group_id, "expansion_factor", a);
+      H5Gclose(group_id);
+      props->file_needs_finalizing = 0;
+    }
+
+    /* We're done updating the output file */
+    H5Fclose(file_id);
+  }
+
+  /* If we need to start a new file next time, record this */
+  if (end_file) props->start_new_file = 1;
+
+  if (props->verbose && engine_rank == 0 && types_to_flush > 0)
+    message("lightcone %d: Flushing particle buffers took %.3f %s.",
+            props->index, clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+}
+
+/**
+ * @brief Flush lightcone map update buffers for all shells
+ *
+ * @param props the #lightcone_props structure.
+ * @param tp the swift #threadpool struct to use
+ *
+ */
+void lightcone_flush_map_updates(struct lightcone_props *props,
+                                 struct threadpool *tp) {
+
+  ticks tic = getticks();
+
+  /* Apply updates to all current shells */
+  for (int shell_nr = 0; shell_nr < props->nr_shells; shell_nr += 1) {
+    if (props->shell[shell_nr].state == shell_current) {
+      lightcone_shell_flush_map_updates(
+          &props->shell[shell_nr], tp, props->part_type,
+          props->max_map_update_send_size_mb, &props->kernel_table,
+          props->verbose);
+    }
+  }
+
+  /* Report runtime */
+  if (props->verbose && engine_rank == 0)
+    message("lightcone %d: Applying lightcone map updates took %.3f %s.",
+            props->index, clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+}
+
+/**
+ * @brief Write and deallocate any completed lightcone shells
+ *
+ * @param props the #lightcone_props structure.
+ * @param tp the swift #threadpool struct to use
+ * @param c the #cosmology structure
+ * @param internal_units swift internal unit system
+ * @param snapshot_units swift snapshot unit system
+ * @param dump_all flag to indicate that all shells should be dumped
+ * @param need_flush whether there might be buffered updates to apply
+ *
+ */
+void lightcone_dump_completed_shells(struct lightcone_props *props,
+                                     struct threadpool *tp,
+                                     const struct cosmology *c,
+                                     const struct unit_system *internal_units,
+                                     const struct unit_system *snapshot_units,
+                                     const int dump_all, const int need_flush) {
+#ifdef HAVE_HDF5
+
+  ticks tic = getticks();
+
+  int comm_size = 1;
+#ifdef WITH_MPI
+  MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+#endif
+
+  /* Get number of shells and maps per shell */
+  const int nr_shells = props->nr_shells;
+  const int nr_maps = props->nr_maps;
+
+  /* Get conversion factor for shell radii */
+  const double length_conversion_factor =
+      units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_LENGTH);
+
+  /* Compute expansion factor corresponding to time props->ti_old,
+     which is the earliest time any particle might have been drifted
+     from on this step. Here we assume that no particle remains to
+     be drifted from any time earlier than this so that any shell
+     whose redshift range is entirely before ti_old can now be
+     written out and deallocated. */
+  const double a_complete = c->a_begin * exp(props->ti_old * c->time_base);
+
+  int num_shells_written = 0;
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+
+    /* Will write out this shell if it has been updated but not written
+       out yet and either we advanced past its redshift range or we're
+       dumping all remaining shells at the end of the simulation */
+    if (props->shell[shell_nr].state == shell_current) {
+      if (props->shell[shell_nr].amax < a_complete || dump_all) {
+
+        if (props->verbose && engine_rank == 0)
+          message("lightcone %d: writing out completed shell %d at a=%f",
+                  props->index, shell_nr, c->a);
+
+        num_shells_written += 1;
+
+        /* Apply any buffered updates for this shell, if we didn't already */
+        if (need_flush) {
+          lightcone_shell_flush_map_updates(
+              &props->shell[shell_nr], tp, props->part_type,
+              props->max_map_update_send_size_mb, &props->kernel_table,
+              props->verbose);
+        }
+
+        /* Set the baseline value for the maps */
+        for (int map_nr = 0; map_nr < nr_maps; map_nr += 1)
+          lightcone_map_set_baseline(c, props,
+                                     &(props->shell[shell_nr].map[map_nr]));
+
+        /* Ensure output directory exists */
+        char fname[FILENAME_BUFFER_SIZE];
+        check_snprintf(fname, FILENAME_BUFFER_SIZE, "%s/%s_shells/shell_%d",
+                       props->subdir, props->basename, shell_nr);
+        if (engine_rank == 0) safe_checkdir(fname, 1);
+#ifdef WITH_MPI
+        MPI_Barrier(MPI_COMM_WORLD);
+#endif
+
+        /* Get the name of the file to write:
+           In collective mode all ranks get the same file name.
+           In distributed mode we include engine_rank in the file name. */
+        int file_num = props->distributed_maps ? engine_rank : 0;
+        check_snprintf(fname, FILENAME_BUFFER_SIZE,
+                       "%s/%s_shells/shell_%d/%s.shell_%d.%d.hdf5",
+                       props->subdir, props->basename, shell_nr,
+                       props->basename, shell_nr, file_num);
+
+        /* Create the output file for this shell */
+        hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+        /* Set MPI collective mode, if necessary */
+        int collective = 0;
+#ifdef WITH_MPI
+#ifdef HAVE_PARALLEL_HDF5
+        if (!props->distributed_maps) {
+          if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0)
+            error("Unable to set HDF5 MPI-IO file access mode");
+          collective = 1;
+        }
+#else
+        if (!props->distributed_maps)
+          error(
+              "Writing lightcone maps in MPI collective mode requires parallel "
+              "HDF5");
+#endif
+#endif
+        /* Number of files to write */
+        int nr_files_per_shell = collective ? 1 : comm_size;
+
+        /* Create the output file(s) */
+        hid_t file_id = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+        if (file_id < 0) error("Unable to create file %s", fname);
+
+        /* Write header with metadata */
+        hid_t header =
+            H5Gcreate(file_id, "Shell", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+        io_write_attribute_i(header, "nr_files_per_shell", nr_files_per_shell);
+        io_write_attribute_d(
+            header, "comoving_inner_radius",
+            props->shell[shell_nr].rmin * length_conversion_factor);
+        io_write_attribute_d(
+            header, "comoving_outer_radius",
+            props->shell[shell_nr].rmax * length_conversion_factor);
+        H5Gclose(header);
+
+        /* Write the system of Units used in the snapshot */
+        io_write_unit_system(file_id, snapshot_units, "Units");
+
+        /* Write the system of Units used internally */
+        io_write_unit_system(file_id, internal_units, "InternalCodeUnits");
+
+        /* Write the lightcone maps for this shell */
+        for (int map_nr = 0; map_nr < nr_maps; map_nr += 1)
+          lightcone_map_write(&(props->shell[shell_nr].map[map_nr]), file_id,
+                              props->map_type[map_nr].name, internal_units,
+                              snapshot_units, collective,
+                              props->maps_gzip_level, props->hdf5_chunk_size,
+                              props->map_type[map_nr].compression);
+
+        /* Close the file */
+        H5Pclose(fapl_id);
+        H5Fclose(file_id);
+
+        /* Free the pixel data associated with this shell */
+        for (int map_nr = 0; map_nr < nr_maps; map_nr += 1)
+          lightcone_map_free_pixels(&(props->shell[shell_nr].map[map_nr]));
+
+        /* Update status of this shell */
+        props->shell[shell_nr].state = shell_complete;
+      }
+    }
+  }
+
+  if (props->verbose && engine_rank == 0 && num_shells_written > 0)
+    message("lightcone %d: Writing completed lightcone shells took %.3f %s.",
+            props->index, clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+
+#else
+  error("Need HDF5 to write out lightcone maps");
+#endif
+}
+
+/**
+ * @brief Deallocate lightcone data.
+ *
+ * @param props the #lightcone_props structure.
+ *
+ */
+void lightcone_clean(struct lightcone_props *props) {
+
+  /* Deallocate particle buffers */
+  for (int i = 0; i < swift_type_count; i += 1) {
+    if (props->use_type[i]) particle_buffer_free(&props->buffer[i]);
+  }
+
+  /* Free replication list, if we have one */
+  if (props->have_replication_list)
+    replication_list_clean(&props->replication_list);
+
+  /* Clean lightcone maps and free the structs */
+  const int nr_shells = props->nr_shells;
+  const int nr_maps = props->nr_maps;
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      lightcone_map_clean(&(props->shell[shell_nr].map[map_nr]));
+    }
+    free(props->shell[shell_nr].map);
+  }
+
+  /* Free buffers associated with each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      particle_buffer_free(&(props->shell[shell_nr].buffer[ptype]));
+    }
+  }
+
+  /* Free array of shells */
+  free(props->shell);
+
+  /* Free array of lightcone map types */
+  free(props->map_type);
+
+  /* Free data associated with particle types */
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+    struct lightcone_particle_type *this_type = &(props->part_type[ptype]);
+    free(this_type->map_index);
+  }
+
+  /* Free lists of output quantities */
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1)
+    lightcone_io_field_list_clean(&props->particle_fields[ptype]);
+
+  /* Free the projected kernel */
+  projected_kernel_clean(&props->kernel_table);
+}
+
+/**
+ * @brief Determine periodic copies of the simulation box which could
+ * contribute to the lightcone.
+ *
+ *                     \
+ *           \          \
+ *            |         |
+ * Obs      A |    B    | C
+ *            |         |
+ *           /          /
+ *          R1         /
+ *                    R0
+ *
+ * Consider a single particle being drifted. Here R0 is the comoving
+ * distance to the time the particle is drifted FROM. R1 is the comoving
+ * distance to the time the particle is drifted TO on this step.
+ *
+ * Particles which are beyond the lightcone surface at the start of
+ * their drift (C) cannot cross the lightcone on this step if v < c.
+ * Particles between the lightcone surfaces at the start and end of
+ * their drift (B) may cross the lightcone (and certainly will if they
+ * have zero velocity).
+ *
+ * Particles just within the lightcone surface at the start of their
+ * drift (A) may be able to cross the lightcone due to their velocity so
+ * we need to allow a boundary layer on the inside edge of the shell.
+ * If we assume v < c, then we can use a layer of thickness R0-R1.
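+ * (While the lightcone radius shrinks from R0 to R1, a particle with
+ * v < c covers a comoving distance of less than R0-R1, so anything more
+ * than R0-R1 inside the surface at R1 cannot reach it in time.)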
+ *
+ * Here we compute the earliest and latest times particles may be drifted
+ * between, find the corresponding comoving distances R0 and R1, reduce
+ * the inner distance by R0-R1, and find all periodic copies of the
+ * simulation box which overlap this spherical shell.
+ *
+ * Later we use this list to know which periodic copies to check when
+ * particles are drifted.
+ *
+ * This routine also determines which lightcone healpix maps might be
+ * updated on this time step and allocates the pixel data if necessary.
+ *
+ * @param props The #lightcone_props structure
+ * @param cosmo The #cosmology structure
+ * @param ti_earliest_undrifted earliest integer time any particle might
+ *        be drifted from on this step
+ * @param ti_current End of the timestep
+ *
+ */
+void lightcone_prepare_for_step(struct lightcone_props *props,
+                                const struct cosmology *cosmo,
+                                const integertime_t ti_earliest_undrifted,
+                                const integertime_t ti_current) {
+  ticks tic = getticks();
+
+  /* Deallocate the old list, if there is one */
+  if (props->have_replication_list)
+    replication_list_clean(&props->replication_list);
+
+  /* Get the size of the simulation box */
+  const double boxsize = props->boxsize;
+
+  /* Get a lower limit on earliest time particle may be drifted from */
+  const integertime_t ti_lim = ti_earliest_undrifted;
+
+  /* Get expansion factor at earliest and latest times particles might be
+   * drifted between */
+  double a_current = cosmo->a_begin * exp(ti_current * cosmo->time_base);
+  double a_old = cosmo->a_begin * exp(ti_lim * cosmo->time_base);
+  if (a_old < cosmo->a_begin) a_old = cosmo->a_begin;
+
+  /* Convert redshift range to a distance range */
+  double lightcone_rmin = cosmology_get_comoving_distance(cosmo, a_current);
+  double lightcone_rmax = cosmology_get_comoving_distance(cosmo, a_old);
+  if (lightcone_rmin > lightcone_rmax) error("Lightcone has rmin > rmax");
+
+  /* Allow inner boundary layer, assuming all particles have v < c.
+     This is to account for particles moving during the time step. */
+  double boundary = lightcone_rmax - lightcone_rmin;
+  lightcone_rmin -= boundary;
+  if (lightcone_rmin < 0) lightcone_rmin = 0;
+
+  if (a_current < props->a_min || a_old > props->a_max) {
+    /* Timestep does not overlap the lightcone redshift range */
+    replication_list_init_empty(&props->replication_list);
+  } else {
+    /* Timestep may contribute particles to the lightcone */
+    replication_list_init(&props->replication_list, boxsize, props->cell_width,
+                          props->observer_position, lightcone_rmin,
+                          lightcone_rmax);
+  }
+
+  /* Record that we made the list */
+  props->have_replication_list = 1;
+
+  /* Store times we used to make the list, for consistency check later */
+  props->ti_old = ti_lim;
+  props->ti_current = ti_current;
+
+  /* Report the size of the list */
+#ifdef DUMP_REPLICATIONS
+  if (engine_rank == 0) {
+    message("lightcone %d: no. of replications to check: %d", props->index,
+            props->replication_list.nrep);
+    message("lightcone %d: shell to search inner radius=%e, outer radius=%e",
+            props->index, lightcone_rmin, lightcone_rmax);
+  }
+#endif
+
+  /* Write out the list, if required */
+#ifdef DUMP_REPLICATIONS
+  if (engine_rank == 0) {
+    char fname[500];
+    sprintf(fname, "replication_list.%d.txt", output_nr);
+    FILE *fd_rep = fopen(fname, "w");
+    fprintf(fd_rep, "# Observer x, y, z\n");
+    fprintf(fd_rep, "%e, %e, %e\n", props->observer_position[0],
+            props->observer_position[1], props->observer_position[2]);
+    fprintf(fd_rep, "# Box size, inner radius, outer radius\n");
+    fprintf(fd_rep, "%e, %e, %e\n", boxsize, lightcone_rmin - boundary,
+            lightcone_rmax);
+    fprintf(fd_rep, "# x, y, z, rmin2, rmax2\n");
+    replication_list_write(&props->replication_list, fd_rep);
+    fclose(fd_rep);
+    output_nr += 1;
+  }
+#endif
+
+  /* Number of shells and maps per shell */
+  const int nr_maps = props->nr_maps;
+  const int nr_shells = props->nr_shells;
+
+  /* Range of shells that might be updated this step */
+  int shell_nr_min = nr_shells;
+  int shell_nr_max = -1;
+
+  /* Loop over healpix map shells */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+
+    const double shell_amin = props->shell[shell_nr].amin;
+    const double shell_amax = props->shell[shell_nr].amax;
+    const double step_amin = a_old;
+    const double step_amax = a_current;
+
+    /* Check if this shell might be updated */
+    if (step_amin <= shell_amax && step_amax >= shell_amin) {
+
+      switch (props->shell[shell_nr].state) {
+        case shell_uninitialized:
+          /* This shell has not been allocated yet, so allocate it */
+          if (props->verbose && engine_rank == 0)
+            message("lightcone %d: allocating pixels for shell %d at a=%f",
+                    props->index, shell_nr, cosmo->a);
+          for (int map_nr = 0; map_nr < nr_maps; map_nr += 1)
+            lightcone_map_allocate_pixels(&(props->shell[shell_nr].map[map_nr]),
+                                          /* zero_pixels = */ 1);
+          props->shell[shell_nr].state = shell_current;
+          break;
+        case shell_complete:
+          /* Shell has already been written out and freed - should never happen
+           */
+          error(
+              "Lightcone shell has been written out while particles could "
+              "still contribute");
+          break;
+        case shell_current:
+          /* Already initialized, nothing to do */
+          break;
+      }
+
+      /* Record range of shells that might be updated this step */
+      if (shell_nr < shell_nr_min) shell_nr_min = shell_nr;
+      if (shell_nr > shell_nr_max) shell_nr_max = shell_nr;
+    }
+  }
+  props->shell_nr_min = shell_nr_min;
+  props->shell_nr_max = shell_nr_max;
+
+  /* Determine which particle types might contribute to lightcone outputs at
+   * this step */
+  for (int i = 0; i < swift_type_count; i += 1) {
+    props->check_type_for_crossing[i] = 0;
+    if (props->a_max_search_for_type[i] >= props->a_min_search_for_type[i]) {
+      if (a_current >= props->a_min_search_for_type[i] &&
+          a_old <= props->a_max_search_for_type[i])
+        props->check_type_for_crossing[i] = 1;
+    }
+  }
+
+  if (props->verbose && engine_rank == 0)
+    message("lightcone %d: Lightcone timestep preparations took %.3f %s.",
+            props->index, clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+}
+
+/**
+ * @brief Determine whether lightcone map buffers should be flushed this step.
+ *
+ * @param props The #lightcone_props structure
+ *
+ */
+int lightcone_trigger_map_update(struct lightcone_props *props) {
+
+  size_t total_updates = 0;
+  const int nr_shells = props->nr_shells;
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    if (props->shell[shell_nr].state == shell_current) {
+      for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+        total_updates += particle_buffer_num_elements(
+            &(props->shell[shell_nr].buffer[ptype]));
+      }
+    }
+  }
+  return total_updates >= ((size_t)props->max_updates_buffered);
+}
+
+/**
+ * @brief Add a particle to the output buffer
+ *
+ * @param props The #lightcone_props structure
+ * @param e The #engine structure
+ * @param gp The #gpart to buffer
+ * @param a_cross Expansion factor of lightcone crossing
+ * @param x_cross Position of the gpart at lightcone crossing
+ */
+void lightcone_buffer_particle(struct lightcone_props *props,
+                               const struct engine *e, const struct gpart *gp,
+                               const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+  const struct spart *sparts = s->sparts;
+  const struct bpart *bparts = s->bparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+      struct lightcone_gas_data data;
+      if (lightcone_store_gas(e, props, gp, p, xp, a_cross, x_cross, &data))
+        particle_buffer_append(props->buffer + swift_type_gas, &data);
+
+    } break;
+
+    case swift_type_stars: {
+
+      const struct spart *sp = &sparts[-gp->id_or_neg_offset];
+      struct lightcone_stars_data data;
+      if (lightcone_store_stars(e, props, gp, sp, a_cross, x_cross, &data))
+        particle_buffer_append(props->buffer + swift_type_stars, &data);
+
+    } break;
+
+    case swift_type_black_hole: {
+
+      const struct bpart *bp = &bparts[-gp->id_or_neg_offset];
+      struct lightcone_black_hole_data data;
+      if (lightcone_store_black_hole(e, props, gp, bp, a_cross, x_cross, &data))
+        particle_buffer_append(props->buffer + swift_type_black_hole, &data);
+
+    } break;
+
+    case swift_type_dark_matter: {
+
+      struct lightcone_dark_matter_data data;
+      if (lightcone_store_dark_matter(e, props, gp, a_cross, x_cross, &data))
+        particle_buffer_append(props->buffer + swift_type_dark_matter, &data);
+
+    } break;
+
+    case swift_type_dark_matter_background: {
+
+      /* Assumed to have the same properties as DM particles */
+      struct lightcone_dark_matter_data data;
+      if (lightcone_store_dark_matter(e, props, gp, a_cross, x_cross, &data))
+        particle_buffer_append(
+            props->buffer + swift_type_dark_matter_background, &data);
+
+    } break;
+
+    case swift_type_neutrino: {
+
+      struct lightcone_neutrino_data data;
+      if (lightcone_store_neutrino(e, props, gp, a_cross, x_cross, &data))
+        particle_buffer_append(props->buffer + swift_type_neutrino, &data);
+
+    } break;
+
+    default:
+      error("Particle type not supported in lightcones");
+  }
+}
+
+#ifdef HAVE_CHEALPIX
+/**
+ * @brief Compute the angular smoothing length of a particle
+ *
+ * @param pos particle position vector relative to observer
+ * @param hsml physical smoothing length of the particle
+ *
+ */
+static double angular_smoothing_scale(const double *pos, const double hsml) {
+
+  /* Compute distance to particle */
+  double dist = 0;
+  for (int i = 0; i < 3; i += 1) dist += pos[i] * pos[i];
+  dist = sqrt(dist);
+
+  /* Avoid the trig call for small angles: atan(x) ~ x - x^3/3, so for
+     dist > 10*hsml (x <= 0.1) returning x directly is accurate to about
+     x^2/3, i.e. ~0.3% */
+  if (dist > 10.0 * hsml)
+    return hsml / dist;
+  else
+    return atan(hsml / dist);
+}
+#endif
+
+/**
+ * @brief Buffer a particle's contribution to the healpix map(s)
+ *
+ * @param props The #lightcone_props structure
+ * @param e The #engine structure
+ * @param gp The #gpart to buffer
+ * @param a_cross Expansion factor of lightcone crossing
+ * @param x_cross Position of the gpart at lightcone crossing
+ *
+ */
+void lightcone_buffer_map_update(struct lightcone_props *props,
+                                 const struct engine *e, const struct gpart *gp,
+                                 const double a_cross,
+                                 const double x_cross[3]) {
+#ifdef HAVE_CHEALPIX
+
+  /* Find information on healpix maps this particle type contributes to */
+  const struct lightcone_particle_type *part_type_info =
+      &(props->part_type[gp->type]);
+
+  /* If this particle type contributes to no healpix maps, do nothing */
+  if (part_type_info->nr_maps == 0) return;
+
+  /* Get angular coordinates of the particle */
+  double theta, phi;
+  vec2ang(x_cross, &theta, &phi);
+
+  /* Get angular size of the particle */
+  double radius;
+  if (gp->type == swift_type_gas) {
+    const struct part *parts = e->s->parts;
+    const struct part *p = &parts[-gp->id_or_neg_offset];
+    radius = angular_smoothing_scale(x_cross, p->h);
+  } else {
+    radius = 0.0;
+  }
+
+  /* Loop over shells to update */
+  for (int shell_nr = props->shell_nr_min; shell_nr <= props->shell_nr_max;
+       shell_nr += 1) {
+    if (a_cross > props->shell[shell_nr].amin &&
+        a_cross <= props->shell[shell_nr].amax) {
+
+      /* Make sure this shell is available for updating */
+      if (props->shell[shell_nr].state == shell_uninitialized)
+        error("Attempt to update shell which has not been allocated");
+      if (props->shell[shell_nr].state == shell_complete)
+        error("Attempt to update shell which has been written out");
+
+      /* Allocate storage for updates and set particle coordinates and radius */
+      union lightcone_map_buffer_entry *data =
+          malloc(part_type_info->buffer_element_size);
+      data[0].i = angle_to_int(theta);
+      data[1].i = angle_to_int(phi);
+      data[2].f = radius;
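+      /* Entries 3 .. 2+nr_maps hold the (scaled) values to add to each map,
+         matching the (3 + nr_maps) entries per particle allocated in
+         lightcone_init() */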
+
+      /* Loop over healpix maps which this particle type contributes to and find
+       * values to add */
+      for (int i = 0; i < part_type_info->nr_maps; i += 1) {
+        int map_nr = part_type_info->map_index[i];
+        /* The value to add to the map may need to be scaled to fit in a float
+         */
+        const double fac = props->map_type[map_nr].buffer_scale_factor;
+        /* Fetch the value to add to the map */
+        const double val =
+            props->map_type[map_nr].update_map(e, props, gp, a_cross, x_cross);
+        /* Store the scaled value */
+        data[3 + i].f = fac * val;
+#ifdef LIGHTCONE_MAP_CHECK_TOTAL
+        /* Accumulate total quantity added to each map for consistency check */
+        atomic_add_d(&props->shell[shell_nr].map[map_nr].total, val);
+#endif
+      }
+
+      /* Buffer the updates */
+      particle_buffer_append(&(props->shell[shell_nr].buffer[gp->type]), data);
+
+      /* Free update info */
+      free(data);
+    }
+  } /* Next shell */
+#else
+  error("Need HEALPix C API to make lightcones");
+#endif
+}
+
+/**
+ * @brief Compute memory used by lightcones on this rank
+ *
+ * @param props The #lightcone_props structure
+ * @param particle_buffer_bytes returns bytes used to buffer particles
+ * @param map_buffer_bytes returns bytes used to buffer map updates
+ * @param pixel_data_bytes returns bytes used to store map pixels
+ *
+ */
+void lightcone_memory_use(struct lightcone_props *props,
+                          size_t *particle_buffer_bytes,
+                          size_t *map_buffer_bytes, size_t *pixel_data_bytes) {
+
+  *particle_buffer_bytes = 0;
+  *map_buffer_bytes = 0;
+  *pixel_data_bytes = 0;
+
+  /* Accumulate memory used by particle buffers - one buffer per particle type
+   */
+  for (int i = 0; i < swift_type_count; i += 1) {
+    if (props->use_type[i])
+      *particle_buffer_bytes += particle_buffer_memory_use(props->buffer + i);
+  }
+
+  /* Accumulate memory used by map update buffers and pixel data */
+  const int nr_maps = props->nr_maps;
+  const int nr_shells = props->nr_shells;
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+
+    /* Healpix map updates - one buffer per particle type per shell */
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      *map_buffer_bytes +=
+          particle_buffer_memory_use(&(props->shell[shell_nr].buffer[ptype]));
+    }
+
+    /* Pixel data - one buffer per map per shell */
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      struct lightcone_map *map = &(props->shell[shell_nr].map[map_nr]);
+      if (map->data) *pixel_data_bytes += map->local_nr_pix * sizeof(double);
+    }
+  }
+}
+
+/**
+ * @brief Write out number of files per rank for this lightcone
+ *
+ * @param props The #lightcone_props structure
+ * @param internal_units swift internal unit system
+ * @param snapshot_units swift snapshot unit system
+ *
+ */
+void lightcone_write_index(struct lightcone_props *props,
+                           const struct unit_system *internal_units,
+                           const struct unit_system *snapshot_units) {
+  int comm_size = 1;
+#ifdef WITH_MPI
+  MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+#endif
+
+  /* Collect current file index on each rank */
+  int *current_file_on_rank = malloc(sizeof(int) * comm_size);
+#ifdef WITH_MPI
+  MPI_Gather(&props->current_file, 1, MPI_INT, current_file_on_rank, 1, MPI_INT,
+             0, MPI_COMM_WORLD);
+#else
+  current_file_on_rank[0] = props->current_file;
+#endif
+
+  if (engine_rank == 0) {
+
+    /* Get conversion factor for shell radii */
+    const double length_conversion_factor = units_conversion_factor(
+        internal_units, snapshot_units, UNIT_CONV_LENGTH);
+
+    /* Get the name of the index file */
+    char fname[FILENAME_BUFFER_SIZE];
+    check_snprintf(fname, FILENAME_BUFFER_SIZE, "%s/%s_index.hdf5",
+                   props->subdir, props->basename);
+
+    /* Create the file */
+    hid_t file_id = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+    /* Write number of MPI ranks and number of files */
+    hid_t group_id =
+        H5Gcreate(file_id, "Lightcone", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    io_write_attribute_i(group_id, "nr_mpi_ranks", comm_size);
+    io_write_attribute(group_id, "final_particle_file_on_rank", INT,
+                       current_file_on_rank, comm_size);
+
+    /* Write number of files the lightcone maps are distributed over */
+    int nr_files_per_shell = props->distributed_maps ? comm_size : 1;
+    io_write_attribute_i(group_id, "nr_files_per_shell", nr_files_per_shell);
+
+    /* Write observer position and redshift limits */
+    io_write_attribute(group_id, "observer_position", DOUBLE,
+                       props->observer_position, 3);
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      char name[PARSER_MAX_LINE_SIZE];
+      check_snprintf(name, PARSER_MAX_LINE_SIZE, "minimum_redshift_%s",
+                     part_type_names[ptype]);
+      io_write_attribute_d(group_id, name, props->z_min_for_type[ptype]);
+      check_snprintf(name, PARSER_MAX_LINE_SIZE, "maximum_redshift_%s",
+                     part_type_names[ptype]);
+      io_write_attribute_d(group_id, name, props->z_max_for_type[ptype]);
+    }
+
+    /* Write the number of shells and their radii */
+    const int nr_shells = props->nr_shells;
+    io_write_attribute_i(group_id, "nr_shells", nr_shells);
+    double *shell_inner_radii = malloc(sizeof(double) * nr_shells);
+    double *shell_outer_radii = malloc(sizeof(double) * nr_shells);
+    for (int i = 0; i < nr_shells; i += 1) {
+      shell_inner_radii[i] = props->shell[i].rmin * length_conversion_factor;
+      shell_outer_radii[i] = props->shell[i].rmax * length_conversion_factor;
+    }
+    io_write_attribute(group_id, "shell_inner_radii", DOUBLE, shell_inner_radii,
+                       nr_shells);
+    io_write_attribute(group_id, "shell_outer_radii", DOUBLE, shell_outer_radii,
+                       nr_shells);
+    free(shell_outer_radii);
+    free(shell_inner_radii);
+
+    H5Gclose(group_id);
+    H5Fclose(file_id);
+  }
+
+  free(current_file_on_rank);
+}
+
+/**
+ * @brief Add the baseline value to a lightcone map
+ *
+ * @param c the #cosmology struct
+ * @param props the properties of this lightcone
+ * @param map the #lightcone_map structure
+ */
+void lightcone_map_set_baseline(const struct cosmology *c,
+                                struct lightcone_props *props,
+                                struct lightcone_map *map) {
+
+  /* Nothing to do if there is no baseline function */
+  if (map->type.baseline_func == NULL) return;
+
+  /* Fetch the baseline value */
+  double baseline_value = map->type.baseline_func(c, props, map);
+
+  /* Add it to the map if necessary */
+  if (baseline_value != 0.0) {
+    for (pixel_index_t i = 0; i < map->local_nr_pix; i += 1) {
+#ifdef LIGHTCONE_MAP_CHECK_TOTAL
+      map->total += baseline_value;
+#endif
+      map->data[i] += baseline_value;
+    }
+  }
+}
diff --git a/src/lightcone/lightcone.h b/src/lightcone/lightcone.h
new file mode 100644
index 0000000000000000000000000000000000000000..55239afa5c0552fda3cf9ee230cf2034a853ce9c
--- /dev/null
+++ b/src/lightcone/lightcone.h
@@ -0,0 +1,273 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_LIGHTCONE_H
+#define SWIFT_LIGHTCONE_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers */
+#include "lightcone/lightcone_map_types.h"
+#include "lightcone/lightcone_particle_io.h"
+#include "lightcone/lightcone_replications.h"
+#include "lightcone/lightcone_shell.h"
+#include "parser.h"
+#include "part_type.h"
+#include "particle_buffer.h"
+#include "lightcone/pixel_index.h"
+#include "lightcone/projected_kernel.h"
+#include "threadpool.h"
+#include "timeline.h"
+#include "units.h"
+
+/* Avoid cyclic inclusions */
+struct cosmology;
+struct engine;
+struct space;
+
+/**
+ * @brief Lightcone data
+ */
+struct lightcone_props {
+
+  /*! Index of this lightcone */
+  int index;
+
+  /*! Whether to write extra log messages */
+  int verbose;
+
+  /*! Which particle types we're doing */
+  int use_type[swift_type_count];
+
+  /*! Minimum redshift for particle output for each type */
+  double z_min_for_type[swift_type_count];
+
+  /*! Maximum redshift for particle output for each type */
+  double z_max_for_type[swift_type_count];
+
+  /*! Minimum a to search for lightcone crossing for each type  */
+  double a_min_search_for_type[swift_type_count];
+
+  /*! Maximum a to search for lightcone crossing for each type  */
+  double a_max_search_for_type[swift_type_count];
+
+  /*! Whether we need to do lightcone crossing checks for each type at this step
+   */
+  int check_type_for_crossing[swift_type_count];
+
+  /*! Enable selective output of high redshift gas */
+  int gas_filtering_enabled;
+
+  /*! Will output all gas below this redshift */
+  double min_z_for_gas_filtering;
+
+  /*! Will output all gas after this scale factor */
+  double max_a_for_gas_filtering;
+
+  /*! At z>min_z_for_gas_filtering require gas T>min_temp_for_filtered_gas */
+  double min_temp_for_filtered_gas;
+
+  /*! At z>min_z_for_gas_filtering require gas
+   * nh>min_nh_for_filtered_gas*(1+z)^4 */
+  double min_nh_for_filtered_gas;
+
+  /*! Exclude recently heated gas from xray and sz maps */
+  double xray_maps_recent_AGN_injection_exclusion_time;
+
+  /*! Don't exclude gas with temperature less than this factor times AGN_delta_T
+   */
+  double xray_maps_recent_AGN_min_temp_factor;
+
+  /*! Don't exclude gas with temperature more than this factor times AGN_delta_T
+   */
+  double xray_maps_recent_AGN_max_temp_factor;
+
+  /*! Output base name */
+  char basename[PARSER_MAX_LINE_SIZE];
+
+  /*! Output directory */
+  char subdir[PARSER_MAX_LINE_SIZE];
+
+  /*! Position of the observer in the simulation box */
+  double observer_position[3];
+
+  /*! Range in distance squared in which we output particles of each type */
+  double r2_min_for_type[swift_type_count], r2_max_for_type[swift_type_count];
+
+  /*! Range in expansion factor covered by particle outputs and healpix maps */
+  double a_min, a_max;
+
+  /*! Corresponding range in distance squared for a_max and a_min */
+  double r2_min, r2_max;
+
+  /*! Size of chunks in particle buffer */
+  int buffer_chunk_size;
+
+  /*! Size of chunks in HDF5 output files */
+  int hdf5_chunk_size;
+
+  /*! Maximum amount of data (in megabytes) to send from any one rank when
+   * updating healpix maps */
+  double max_map_update_send_size_mb;
+
+  /*! Whether to apply lossy compression */
+  int particles_lossy_compression;
+
+  /*! Lossless compression level for particles (0 to disable) */
+  int particles_gzip_level;
+
+  /*! Lossless compression level for healpix maps (0 to disable) */
+  int maps_gzip_level;
+
+  /*! Simulation box size (volume must be a cube) */
+  double boxsize;
+
+  /*! Top level cell width */
+  double cell_width;
+
+  /*! Whether list of replications exists */
+  int have_replication_list;
+
+  /*! List of periodic replications to check on this timestep */
+  struct replication_list replication_list;
+
+  /*! Number of particles written to the current file by this MPI rank */
+  long long num_particles_written_to_file[swift_type_count];
+
+  /*! Number of particles of each type which have been output on this rank */
+  long long num_particles_written_this_rank[swift_type_count];
+
+  /*! Index of the current output file for this MPI rank */
+  int current_file;
+
+  /*! Range of times used to generate the replication list */
+  integertime_t ti_old, ti_current;
+
+  /*! Expansion factors corresponding to z_min, z_max */
+  double a_at_z_min, a_at_z_max;
+
+  /*! Buffers to store particles on the lightcone */
+  struct particle_buffer buffer[swift_type_count];
+
+  /*! Will write particles to disk if buffer exceeds this size */
+  int max_particles_buffered;
+
+  /*! Whether we should make a new file on the next flush */
+  int start_new_file;
+
+  /*! Whether we have started a particle file and not finalized it yet */
+  int file_needs_finalizing;
+
+  /*! Number of pending map updates to trigger communication */
+  int max_updates_buffered;
+
+  /*! Whether to write distributed maps in MPI mode */
+  int distributed_maps;
+
+  /*! Name of the file with radii of spherical shells */
+  char radius_file[PARSER_MAX_LINE_SIZE];
+
+  /*! Healpix nside parameter */
+  int nside;
+
+  /*! Healpix pixel area */
+  double pixel_area_steradians;
+
+  /*! Number of shells */
+  int nr_shells;
+
+  /*! Array of lightcone shells */
+  struct lightcone_shell *shell;
+
+  /*! Number of healpix maps we're making for each shell */
+  int nr_maps;
+
+  /*! Types of healpix map we're making for each shell */
+  struct lightcone_map_type *map_type;
+
+  /*! Range of shells that might be updated this step */
+  int shell_nr_min, shell_nr_max;
+
+  /*! Information about each particle type contributing to the maps */
+  struct lightcone_particle_type part_type[swift_type_count];
+
+  /*! Output fields */
+  struct lightcone_io_field_list particle_fields[swift_type_count];
+
+  /*! Tabulation of projected SPH smoothing kernel */
+  struct projected_kernel_table kernel_table;
+};
+
+void lightcone_init(struct lightcone_props *props, const int index,
+                    const struct space *s, const struct cosmology *cosmo,
+                    struct swift_params *params,
+                    const struct unit_system *internal_units,
+                    const struct phys_const *physical_constants,
+                    const int verbose);
+
+void lightcone_clean(struct lightcone_props *props);
+
+void lightcone_struct_dump(const struct lightcone_props *props, FILE *stream);
+
+void lightcone_struct_restore(struct lightcone_props *props, FILE *stream);
+
+void lightcone_prepare_for_step(struct lightcone_props *props,
+                                const struct cosmology *cosmo,
+                                const integertime_t ti_earliest_undrifted,
+                                const integertime_t ti_current);
+
+void lightcone_buffer_particle(struct lightcone_props *props,
+                               const struct engine *e, const struct gpart *gp,
+                               const double a_cross, const double x_cross[3]);
+
+void lightcone_flush_particle_buffers(struct lightcone_props *props, double a,
+                                      const struct unit_system *internal_units,
+                                      const struct unit_system *snapshot_units,
+                                      int flush_all, int end_file);
+
+void lightcone_buffer_map_update(struct lightcone_props *props,
+                                 const struct engine *e, const struct gpart *gp,
+                                 const double a_cross, const double x_cross[3]);
+
+void lightcone_flush_map_updates(struct lightcone_props *props,
+                                 struct threadpool *tp);
+
+void lightcone_dump_completed_shells(struct lightcone_props *props,
+                                     struct threadpool *tp,
+                                     const struct cosmology *c,
+                                     const struct unit_system *internal_units,
+                                     const struct unit_system *snapshot_units,
+                                     const int dump_all, const int need_flush);
+
+int lightcone_trigger_map_update(struct lightcone_props *props);
+
+void lightcone_memory_use(struct lightcone_props *props,
+                          size_t *particle_buffer_bytes,
+                          size_t *map_buffer_bytes, size_t *pixel_data_bytes);
+
+void lightcone_write_index(struct lightcone_props *props,
+                           const struct unit_system *internal_units,
+                           const struct unit_system *snapshot_units);
+
+void lightcone_map_set_baseline(const struct cosmology *c,
+                                struct lightcone_props *props,
+                                struct lightcone_map *map);
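+
+/*
+  Rough sketch of the expected call sequence (illustrative only; the real
+  driver lives in the engine):
+
+    lightcone_init(&props, index, s, cosmo, params, units, consts, verbose);
+    each step: lightcone_prepare_for_step(), buffer crossings via
+               lightcone_buffer_particle() and lightcone_buffer_map_update(),
+               flush with lightcone_flush_map_updates() and
+               lightcone_flush_particle_buffers(), and write finished shells
+               with lightcone_dump_completed_shells();
+    at the end: lightcone_write_index() and lightcone_clean().
+*/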
+
+#endif /* SWIFT_LIGHTCONE_H */
diff --git a/src/lightcone/lightcone_array.c b/src/lightcone/lightcone_array.c
new file mode 100644
index 0000000000000000000000000000000000000000..bee20d21392e92e9b13244ceeb569b5a0809408c
--- /dev/null
+++ b/src/lightcone/lightcone_array.c
@@ -0,0 +1,342 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <stdio.h>
+#include <string.h>
+
+/* This object's header. */
+#include "lightcone/lightcone_array.h"
+
+/* Local headers */
+#include "common_io.h"
+#include "cosmology.h"
+#include "engine.h"
+#include "error.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_particle_io.h"
+#include "lightcone/lightcone_replications.h"
+#include "parser.h"
+#include "particle_buffer.h"
+#include "periodic.h"
+#include "restart.h"
+#include "space.h"
+#include "timeline.h"
+#include "tools.h"
+
+/**
+ * @brief Initialise the properties of the lightcone code.
+ *
+ */
+void lightcone_array_init(struct lightcone_array_props *props,
+                          const struct space *s, const struct cosmology *cosmo,
+                          struct swift_params *params,
+                          const struct unit_system *internal_units,
+                          const struct phys_const *physical_constants,
+                          const int verbose) {
+
+  /* Determine number of lightcones */
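+  /* Note the key below uses nr_lightcones, which only advances when an
+     enabled lightcone is found, so lightcones must be numbered consecutively
+     from Lightcone0 -- any numbered after a gap are never found. */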
+  props->nr_lightcones = 0;
+  for (int lightcone_nr = 0; lightcone_nr <= MAX_LIGHTCONES;
+       lightcone_nr += 1) {
+    char name[PARSER_MAX_LINE_SIZE];
+    check_snprintf(name, PARSER_MAX_LINE_SIZE, "Lightcone%d:enabled",
+                   props->nr_lightcones);
+    if (parser_get_opt_param_int(params, name, 0)) {
+      props->nr_lightcones += 1;
+    }
+  }
+
+  if (engine_rank == 0)
+    message("found %d lightcones to generate", props->nr_lightcones);
+
+  /* Allocate array of lightcones */
+  props->lightcone =
+      malloc(sizeof(struct lightcone_props) * props->nr_lightcones);
+  if (!props->lightcone) error("Failed to allocate lightcone array");
+
+  /* Initialise lightcones */
+  props->nr_lightcones = 0;
+  for (int lightcone_nr = 0; lightcone_nr <= MAX_LIGHTCONES;
+       lightcone_nr += 1) {
+    char name[PARSER_MAX_LINE_SIZE];
+    check_snprintf(name, PARSER_MAX_LINE_SIZE, "Lightcone%d:enabled",
+                   props->nr_lightcones);
+    if (parser_get_opt_param_int(params, name, 0)) {
+      check_snprintf(name, PARSER_MAX_LINE_SIZE, "Lightcone%d",
+                     props->nr_lightcones);
+      lightcone_init(props->lightcone + lightcone_nr, lightcone_nr, s, cosmo,
+                     params, internal_units, physical_constants, verbose);
+      props->nr_lightcones += 1;
+    }
+  }
+
+  /* Check lightcones have unique output file names */
+  for (int i = 0; i < props->nr_lightcones; i += 1) {
+    for (int j = 0; j < props->nr_lightcones; j += 1) {
+      if (i != j) {
+        const struct lightcone_props *lc1 = props->lightcone + i;
+        const struct lightcone_props *lc2 = props->lightcone + j;
+        if (strcmp(lc1->basename, lc2->basename) == 0)
+          error("Lightcones must have unique basenames!");
+      }
+    }
+  }
+
+  props->verbose = verbose;
+}
+
+void lightcone_array_clean(struct lightcone_array_props *props) {
+
+  for (int i = 0; i < props->nr_lightcones; i += 1)
+    lightcone_clean(props->lightcone + i);
+  free(props->lightcone);
+}
+
+void lightcone_array_struct_dump(const struct lightcone_array_props *props,
+                                 FILE *stream) {
+
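+  /* Write a copy of the struct with the lightcone pointer nulled out: the
+     address is meaningless on restart, and the structs it points to are
+     dumped separately below. */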
+  struct lightcone_array_props tmp = *props;
+  tmp.lightcone = NULL;
+  restart_write_blocks((void *)&tmp, sizeof(struct lightcone_array_props), 1,
+                       stream, "lightcone_array_props",
+                       "lightcone_array_props");
+
+  for (int i = 0; i < props->nr_lightcones; i += 1)
+    lightcone_struct_dump(props->lightcone + i, stream);
+}
+
+void lightcone_array_struct_restore(struct lightcone_array_props *props,
+                                    FILE *stream) {
+
+  restart_read_blocks((void *)props, sizeof(struct lightcone_array_props), 1,
+                      stream, NULL, "lightcone_array_props");
+
+  props->lightcone =
+      malloc(sizeof(struct lightcone_props) * props->nr_lightcones);
+  if (!props->lightcone) error("Failed to allocate lightcone array");
+
+  for (int i = 0; i < props->nr_lightcones; i += 1)
+    lightcone_struct_restore(props->lightcone + i, stream);
+}
+
+void lightcone_array_prepare_for_step(struct lightcone_array_props *props,
+                                      const struct cosmology *cosmo,
+                                      const integertime_t ti_earliest_undrifted,
+                                      const integertime_t ti_current) {
+
+  for (int i = 0; i < props->nr_lightcones; i += 1)
+    lightcone_prepare_for_step(props->lightcone + i, cosmo,
+                               ti_earliest_undrifted, ti_current);
+
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+    props->check_type_for_crossing[ptype] = 0;
+    for (int i = 0; i < props->nr_lightcones; i += 1) {
+      if (props->lightcone[i].check_type_for_crossing[ptype]) {
+        props->check_type_for_crossing[ptype] = 1;
+      }
+    }
+    if (props->check_type_for_crossing[ptype] && props->verbose &&
+        engine_rank == 0) {
+      message("need to check type %s for crossing at this step",
+              part_type_names[ptype]);
+    }
+  }
+}
+
+int lightcone_array_trigger_map_update(struct lightcone_array_props *props) {
+
+  for (int i = 0; i < props->nr_lightcones; i += 1) {
+    if (lightcone_trigger_map_update(props->lightcone + i)) return 1;
+  }
+  return 0;
+}
+
+/**
+ * @brief Flush buffers for all lightcones in the array
+ *
+ * Buffers are flushed if they get large or a flush is forced
+ * by setting one of the input flags.
+ *
+ * @param props the #lightcone_array_props struct
+ * @param tp the #threadpool used to apply map updates
+ * @param cosmo the #cosmology struct
+ * @param internal_units the system of units used internally
+ * @param snapshot_units the system of units used in the outputs
+ * @param flush_map_updates force full update of the healpix maps
+ * @param flush_particles force output of all buffered particles
+ * @param end_file start a new file next time particles are written out
+ * @param dump_all_shells immediately output all remaining healpix maps
+ */
+void lightcone_array_flush(struct lightcone_array_props *props,
+                           struct threadpool *tp, const struct cosmology *cosmo,
+                           const struct unit_system *internal_units,
+                           const struct unit_system *snapshot_units,
+                           int flush_map_updates, int flush_particles,
+                           int end_file, int dump_all_shells) {
+
+  if (props->verbose) lightcone_array_report_memory_use(props);
+
+  /* Loop over lightcones */
+  const int nr_lightcones = props->nr_lightcones;
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+
+    /* Get a pointer to this lightcone */
+    struct lightcone_props *lc_props = props->lightcone + lightcone_nr;
+
+    /* Apply lightcone map updates if requested */
+    if (flush_map_updates) lightcone_flush_map_updates(lc_props, tp);
+
+    /* Flush particle buffers if they're large or flag is set */
+    lightcone_flush_particle_buffers(lc_props, cosmo->a, internal_units,
+                                     snapshot_units, flush_particles, end_file);
+
+    /* Write out any completed healpix maps */
+    lightcone_dump_completed_shells(lc_props, tp, cosmo, internal_units,
+                                    snapshot_units, dump_all_shells,
+                                    /*need_flush=*/!flush_map_updates);
+  }
+}
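+
+/* As a usage sketch (the engine call site is assumed here, not shown in this
+   file): at the end of a run everything would be forced out by calling
+
+     lightcone_array_flush(e->lightcone_array_properties, &e->threadpool,
+                           e->cosmology, e->internal_units, e->snapshot_units,
+                           1, 1, 1, 1);
+
+   with all four flags set, whereas on an ordinary step the flags would be 0
+   and only buffers that have grown large are written out. */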
+
+/**
+ * @brief Make a refined replication list for each lightcone
+ *
+ * @param props the #lightcone_array_props struct
+ * @param cell the #cell for which we're making replication lists
+ *
+ * @return an array with one struct #replication_list per lightcone, which
+ * must be freed with lightcone_array_free_replications().
+ */
+struct replication_list *lightcone_array_refine_replications(
+    struct lightcone_array_props *props, const struct cell *cell) {
+
+  /* Get number of lightcones */
+  const int nr_lightcones = props->nr_lightcones;
+
+  /* Allocate a replication list for each lightcone */
+  struct replication_list *lists =
+      malloc(sizeof(struct replication_list) * nr_lightcones);
+  if (!lists) error("Failed to allocate replication list array");
+
+  /* Loop over lightcones */
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+
+    /* Make refined replication list for this lightcone */
+    struct lightcone_props *lightcone = props->lightcone + lightcone_nr;
+    replication_list_subset_for_cell(&lightcone->replication_list, cell,
+                                     lightcone->observer_position,
+                                     lists + lightcone_nr);
+  }
+
+  return lists;
+}
+
+/**
+ * @brief Free lists returned by lightcone_array_refine_replications()
+ *
+ * @param props the #lightcone_array_props struct
+ * @param lists the array of struct #replication_list to free
+ */
+void lightcone_array_free_replications(struct lightcone_array_props *props,
+                                       struct replication_list *lists) {
+
+  /* Get number of lightcones */
+  const int nr_lightcones = props->nr_lightcones;
+
+  /* Loop over lightcones and clean replication lists */
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+    replication_list_clean(lists + lightcone_nr);
+  }
+
+  /* Free replication list array */
+  free(lists);
+}
+
+/**
+ * @brief Write the index file for each lightcone
+ *
+ * @param props the #lightcone_array_props struct
+ * @param internal_units the system of units used internally
+ * @param snapshot_units the system of units used in the outputs
+ */
+void lightcone_array_write_index(struct lightcone_array_props *props,
+                                 const struct unit_system *internal_units,
+                                 const struct unit_system *snapshot_units) {
+
+  /* Get number of lightcones */
+  const int nr_lightcones = props->nr_lightcones;
+
+  /* Loop over lightcones and write the index files */
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+    lightcone_write_index(props->lightcone + lightcone_nr, internal_units,
+                          snapshot_units);
+  }
+}
+
+void lightcone_array_report_memory_use(struct lightcone_array_props *props) {
+
+  long long memuse_local[4] = {0LL, 0LL, 0LL, 0LL};
+
+  /* Get number of lightcones */
+  const int nr_lightcones = props->nr_lightcones;
+
+  /* Loop over lightcones and accumulate memory use */
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+
+    /* Accumulate memory use of this lightcone */
+    size_t particle_buffer_bytes;
+    size_t map_buffer_bytes;
+    size_t pixel_data_bytes;
+    lightcone_memory_use(&props->lightcone[lightcone_nr],
+                         &particle_buffer_bytes, &map_buffer_bytes,
+                         &pixel_data_bytes);
+    memuse_local[0] += particle_buffer_bytes;
+    memuse_local[1] += map_buffer_bytes;
+    memuse_local[2] += pixel_data_bytes;
+  }
+  memuse_local[3] = memuse_local[0] + memuse_local[1] + memuse_local[2];
+
+  /* Find min and max memory over MPI ranks */
+  long long memuse_min[4];
+  long long memuse_max[4];
+#ifdef WITH_MPI
+  MPI_Reduce(memuse_local, memuse_min, 4, MPI_LONG_LONG, MPI_MIN, 0,
+             MPI_COMM_WORLD);
+  MPI_Reduce(memuse_local, memuse_max, 4, MPI_LONG_LONG, MPI_MAX, 0,
+             MPI_COMM_WORLD);
+#else
+  for (int i = 0; i < 4; i += 1) {
+    memuse_min[i] = memuse_local[i];
+    memuse_max[i] = memuse_local[i];
+  }
+#endif
+
+  /* Report memory use, if non-zero */
+  if (engine_rank == 0 && memuse_max[3] > 0) {
+    const long long MB = 1024 * 1024;
+    message("particle buffer Mbytes: min=%lldMB, max=%lldMB",
+            memuse_min[0] / MB, memuse_max[0] / MB);
+    message("map update buffer Mbytes: min=%lldMB, max=%lldMB",
+            memuse_min[1] / MB, memuse_max[1] / MB);
+    message("map pixel data Mbytes: min=%lldMB, max=%lldMB", memuse_min[2] / MB,
+            memuse_max[2] / MB);
+    message("total lightcone data Mbytes: min=%lldMB, max=%lldMB",
+            memuse_min[3] / MB, memuse_max[3] / MB);
+  }
+}
diff --git a/src/lightcone/lightcone_array.h b/src/lightcone/lightcone_array.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4eb92e9e2d612606876576c7c41402cfe2b87a3
--- /dev/null
+++ b/src/lightcone/lightcone_array.h
@@ -0,0 +1,102 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_LIGHTCONE_ARRAY_H
+#define SWIFT_LIGHTCONE_ARRAY_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers */
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_replications.h"
+#include "parser.h"
+#include "part_type.h"
+#include "particle_buffer.h"
+#include "threadpool.h"
+#include "timeline.h"
+
+/* Avoid cyclic inclusions */
+struct cosmology;
+struct engine;
+struct space;
+
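+/*! Highest Lightcone<n> section index searched for in the parameter file */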
+#define MAX_LIGHTCONES 8
+
+/**
+ * @brief Lightcone data for multiple lightcones
+ */
+struct lightcone_array_props {
+
+  /*! Number of lightcones */
+  int nr_lightcones;
+
+  /*! Lightcone properties */
+  struct lightcone_props *lightcone;
+
+  /*! Whether we need to do lightcone crossing checks for each particle type
+   *  at this step */
+  int check_type_for_crossing[swift_type_count];
+
+  /*! Whether to generate memory usage reports */
+  int verbose;
+};
+
+void lightcone_array_init(struct lightcone_array_props *props,
+                          const struct space *s, const struct cosmology *cosmo,
+                          struct swift_params *params,
+                          const struct unit_system *internal_units,
+                          const struct phys_const *physical_constants,
+                          const int verbose);
+
+void lightcone_array_clean(struct lightcone_array_props *props);
+
+void lightcone_array_struct_dump(const struct lightcone_array_props *props,
+                                 FILE *stream);
+
+void lightcone_array_struct_restore(struct lightcone_array_props *props,
+                                    FILE *stream);
+
+void lightcone_array_prepare_for_step(struct lightcone_array_props *props,
+                                      const struct cosmology *cosmo,
+                                      const integertime_t ti_earliest_undrifted,
+                                      const integertime_t ti_current);
+
+int lightcone_array_trigger_map_update(struct lightcone_array_props *props);
+
+void lightcone_array_flush(struct lightcone_array_props *props,
+                           struct threadpool *tp, const struct cosmology *cosmo,
+                           const struct unit_system *internal_units,
+                           const struct unit_system *snapshot_units,
+                           int flush_map_updates, int flush_particles,
+                           int end_file, int dump_all_shells);
+
+struct replication_list *lightcone_array_refine_replications(
+    struct lightcone_array_props *props, const struct cell *cell);
+
+void lightcone_array_free_replications(struct lightcone_array_props *props,
+                                       struct replication_list *lists);
+
+void lightcone_array_write_index(struct lightcone_array_props *props,
+                                 const struct unit_system *internal_units,
+                                 const struct unit_system *snapshot_units);
+
+void lightcone_array_report_memory_use(struct lightcone_array_props *props);
+
+#endif /* SWIFT_LIGHTCONE_ARRAY_H */
diff --git a/src/lightcone/lightcone_crossing.h b/src/lightcone/lightcone_crossing.h
new file mode 100644
index 0000000000000000000000000000000000000000..000eb3e09f57e4e1d90c667b879f5169f44eed64
--- /dev/null
+++ b/src/lightcone/lightcone_crossing.h
@@ -0,0 +1,252 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers. */
+#include "cosmology.h"
+#include "gravity.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_particle_io.h"
+#include "lightcone/lightcone_replications.h"
+#include "part.h"
+#include "stars.h"
+#include "timeline.h"
+
+#ifndef SWIFT_LIGHTCONE_CROSSING_H
+#define SWIFT_LIGHTCONE_CROSSING_H
+
+/**
+ * @brief Check if a particle crosses the lightcone during a drift.
+ *
+ * Here we don't assume anything about the particle type except
+ * that it has a corresponding #gpart. The particle type is checked
+ * if we decide to output the particle.
+ *
+ * Note that x and v_full are values at the start of the time step but
+ * the particle has been drifted to the end of the time step when this
+ * function is called.
+ *
+ * @param e the #engine struct
+ * @param replication_list_array one replication list for each lightcone
+ * @param x the position of the particle BEFORE it is drifted
+ * @param v_full the velocity of the particle
+ * @param gp pointer to the #gpart to check
+ * @param dt_drift the time step size used to update the position
+ * @param ti_old beginning of the time step on the integer time line
+ * @param ti_current end of the time step on the integer time line
+ * @param cell_loc coordinates of the #cell containing the #gpart
+ *
+ */
+__attribute__((always_inline)) INLINE static void
+lightcone_check_particle_crosses(
+    const struct engine *e, struct replication_list *replication_list_array,
+    const double *x, const float *v_full, const struct gpart *gp,
+    const double dt_drift, const integertime_t ti_old,
+    const integertime_t ti_current, const double cell_loc[3]) {
+
+  /* Does this particle type contribute to any lightcone outputs at this
+   * redshift? */
+  if (e->lightcone_array_properties->check_type_for_crossing[gp->type] == 0)
+    return;
+
+  /* Check if we have any replications to search */
+  /* TODO: pre-calculate this for each cell to save time */
+  int nrep_tot = 0;
+  const int nr_lightcones = e->lightcone_array_properties->nr_lightcones;
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+    nrep_tot += replication_list_array[lightcone_nr].nrep;
+  }
+  if (nrep_tot == 0) return;
+
+  /* Unpack some variables we need */
+  const struct cosmology *c = e->cosmology;
+
+  /* Determine expansion factor at start and end of the drift */
+  const double a_start = c->a_begin * exp(ti_old * c->time_base);
+  const double a_end = c->a_begin * exp(ti_current * c->time_base);
+
+  /* Find comoving distance to these expansion factors */
+  const double comoving_dist_start =
+      cosmology_get_comoving_distance(c, a_start);
+  const double comoving_dist_2_start =
+      comoving_dist_start * comoving_dist_start;
+  const double comoving_dist_end = cosmology_get_comoving_distance(c, a_end);
+  const double comoving_dist_2_end = comoving_dist_end * comoving_dist_end;
+
+  /* Thickness of the 'shell' between the lightcone surfaces at the start and
+     end of the drift, expressed in squared comoving distance. We use this as
+     a limit on how far a particle can drift (i.e. we assume v < c). */
+  const double boundary = comoving_dist_2_start - comoving_dist_2_end;
+
+  /* Wrap particle starting coordinates to the copy nearest its parent cell */
+  const double boxsize = e->s->dim[0];
+  const double x_wrapped[3] = {
+      box_wrap(x[0], cell_loc[0] - 0.5 * boxsize, cell_loc[0] + 0.5 * boxsize),
+      box_wrap(x[1], cell_loc[1] - 0.5 * boxsize, cell_loc[1] + 0.5 * boxsize),
+      box_wrap(x[2], cell_loc[2] - 0.5 * boxsize, cell_loc[2] + 0.5 * boxsize)};
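+  /* (box_wrap(x, lo, hi) maps x into the half-open interval [lo, hi),
+     i.e. onto the periodic copy nearest the parent cell.) */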
+
+  /* Loop over lightcones to make */
+  for (int lightcone_nr = 0; lightcone_nr < nr_lightcones; lightcone_nr += 1) {
+
+    /* Find the current lightcone and its replication list */
+    struct lightcone_props *props =
+        e->lightcone_array_properties->lightcone + lightcone_nr;
+    struct replication_list *replication_list =
+        replication_list_array + lightcone_nr;
+
+    /* Consistency check - are our limits on the drift endpoints good? */
+    if (ti_old < props->ti_old || ti_current > props->ti_current)
+      error(
+          "Particle drift is outside the range used to make replication list!");
+
+    /* Are there any replications to check at this timestep? */
+    const int nreps = replication_list->nrep;
+    if (nreps == 0) continue;
+    const struct replication *rep = replication_list->replication;
+
+    /* Find observer position for this lightcone */
+    const double *observer_position = props->observer_position;
+
+    /* Does this drift overlap the lightcone redshift range? If not, nothing to
+     * do. */
+    if ((a_start > props->a_max) || (a_end < props->a_min)) continue;
+
+    /* Get wrapped position relative to observer */
+    const double x_wrapped_rel[3] = {x_wrapped[0] - observer_position[0],
+                                     x_wrapped[1] - observer_position[1],
+                                     x_wrapped[2] - observer_position[2]};
+
+    /* Loop over periodic copies of the volume:
+
+       Here we're looking for cases where a periodic copy of the particle
+       is closer to the observer than the lightcone surface at the start
+       of the drift, and further away than the lightcone surface at the
+       end of the drift. I.e. the surface of the lightcone has swept over
+       the particle as it contracts towards the observer.
+    */
+    for (int i = 0; i < nreps; i += 1) {
+
+      /* If all particles in this periodic replica are beyond the lightcone
+         surface at the earlier time, then they already crossed the lightcone.
+         Since the replications are in ascending order of rmin we don't need to
+         check any more. */
+      if (rep[i].rmin2 > comoving_dist_2_start) break;
+
+      /* If all particles in this periodic replica start their drifts inside the
+         lightcone surface, and are sufficiently far inside that their velocity
+         can't cause them to cross the lightcone, then we don't need to consider
+         this replication */
+      if (rep[i].rmax2 + boundary < comoving_dist_2_end) continue;
+
+      /* Get the coordinates of this periodic copy of the gpart relative to the
+       * observer */
+      const double x_start[3] = {
+          x_wrapped_rel[0] + rep[i].coord[0],
+          x_wrapped_rel[1] + rep[i].coord[1],
+          x_wrapped_rel[2] + rep[i].coord[2],
+      };
+
+      /* Get distance squared from the observer at start of drift */
+      const double r2_start = x_start[0] * x_start[0] +
+                              x_start[1] * x_start[1] + x_start[2] * x_start[2];
+
+      /* If particle is initially beyond the lightcone surface, it can't cross
+       */
+      if (r2_start > comoving_dist_2_start) continue;
+
+      /* Get position of this periodic copy at the end of the drift */
+      const double x_end[3] = {
+          x_start[0] + dt_drift * v_full[0],
+          x_start[1] + dt_drift * v_full[1],
+          x_start[2] + dt_drift * v_full[2],
+      };
+
+      /* Get distance squared from the observer at end of drift */
+      const double r2_end =
+          x_end[0] * x_end[0] + x_end[1] * x_end[1] + x_end[2] * x_end[2];
+
+      /* If particle is still within the lightcone surface at the end of the
+         drift, it didn't cross*/
+      if (r2_end < comoving_dist_2_end) continue;
+
+      /* This periodic copy of the gpart crossed the lightcone during this
+         drift. Now need to estimate when it crossed within the timestep.
+
+         If r is the distance from the observer to this periodic copy of the
+         particle, and it crosses after a fraction f of the drift:
+
+         r_cross = r_start + (r_end - r_start) * f
+
+         and if R is the comoving distance to the lightcone surface
+
+         R_cross = R_start + (R_end - R_start) * f
+
+         The particle crosses the lightcone when r_cross = R_cross, so
+
+         r_start + (r_end - r_start) * f = R_start + (R_end - R_start) * f
+
+         Solving for f:
+
+         f = (r_start - R_start) / (R_end - R_start - r_end + r_start)
+
+      */
+      const double f = (sqrt(r2_start) - comoving_dist_start) /
+                       (comoving_dist_end - comoving_dist_start - sqrt(r2_end) +
+                        sqrt(r2_start));
+
+      /* f should always be in the range 0-1 */
+      const double eps = 1.0e-5;
+      if ((f < 0.0 - eps) || (f > 1.0 + eps))
+        error("Particle interpolated outside time step!");
+
+      /* Compute position at crossing */
+      const double x_cross[3] = {
+          x_start[0] + dt_drift * f * v_full[0],
+          x_start[1] + dt_drift * f * v_full[1],
+          x_start[2] + dt_drift * f * v_full[2],
+      };
+
+      /* Get distance squared at crossing */
+      const double r2_cross =
+          (x_cross[0] * x_cross[0] + x_cross[1] * x_cross[1] +
+           x_cross[2] * x_cross[2]);
+
+      /* Compute expansion factor at crossing */
+      const double a_cross =
+          cosmology_scale_factor_at_comoving_distance(c, sqrt(r2_cross));
+
+      /* Add this particle to the particle output buffer if it's in the redshift
+       * range */
+      if (r2_cross >= props->r2_min_for_type[gp->type] &&
+          r2_cross <= props->r2_max_for_type[gp->type] &&
+          props->use_type[gp->type])
+        lightcone_buffer_particle(props, e, gp, a_cross, x_cross);
+
+      /* Buffer this particle's contribution to the healpix maps */
+      if (props->shell_nr_max >= props->shell_nr_min)
+        lightcone_buffer_map_update(props, e, gp, a_cross, x_cross);
+
+    } /* Next periodic replication*/
+  }   /* Next lightcone */
+}
+
+#endif /* SWIFT_LIGHTCONE_CROSSING_H */
diff --git a/src/lightcone/lightcone_map.c b/src/lightcone/lightcone_map.c
new file mode 100644
index 0000000000000000000000000000000000000000..394baf04829cd895e67c563f17c3014437f8ae3e
--- /dev/null
+++ b/src/lightcone/lightcone_map.c
@@ -0,0 +1,319 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* This object's header. */
+#include "lightcone/lightcone_map.h"
+
+/* Local headers */
+#include "align.h"
+#include "common_io.h"
+#include "error.h"
+#include "memuse.h"
+#include "restart.h"
+
+/* HDF5 */
+#ifdef HAVE_HDF5
+#include <hdf5.h>
+#endif
+
+void lightcone_map_init(struct lightcone_map *map, const int nside,
+                        const pixel_index_t total_nr_pix,
+                        const pixel_index_t pix_per_rank,
+                        const pixel_index_t local_nr_pix,
+                        const pixel_index_t local_pix_offset,
+                        const double r_min, const double r_max,
+                        struct lightcone_map_type type) {
+
+  /* Store the number of pixels in the map etc. */
+  map->nside = nside;
+  map->total_nr_pix = total_nr_pix;
+  map->pix_per_rank = pix_per_rank;
+  map->local_nr_pix = local_nr_pix;
+  map->local_pix_offset = local_pix_offset;
+
+  /* Pixel data is initially not allocated */
+  map->data = NULL;
+
+  /* Store resolution parameter, shell size, units etc */
+  map->r_min = r_min;
+  map->r_max = r_max;
+  map->type = type;
+
+  /* Store factor used to retrieve values from the update buffer */
+  map->buffer_scale_factor_inv = 1.0 / (type.buffer_scale_factor);
+
+#ifdef LIGHTCONE_MAP_CHECK_TOTAL
+  /* Initialize total for consistency check */
+  map->total = 0.0;
+#endif
+}
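+
+/* A minimal sketch of the block decomposition described by the arguments
+   above (illustrative only; the actual values are computed by the caller):
+
+     pix_per_rank     = total_nr_pix / nr_ranks;
+     local_pix_offset = pix_per_rank * rank;
+     local_nr_pix     = (rank < nr_ranks - 1)
+                            ? pix_per_rank
+                            : total_nr_pix - pix_per_rank * (nr_ranks - 1);
+
+   consistent with the "last node has any extra" note in lightcone_map.h. */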
+
+/**
+ * @brief Deallocate the lightcone_map pixel data
+ *
+ * @param map the #lightcone_map structure
+ */
+void lightcone_map_clean(struct lightcone_map *map) {
+
+  if (map->data) lightcone_map_free_pixels(map);
+}
+
+/**
+ * @brief Allocate (and maybe initialize) the lightcone_map pixel data
+ *
+ * @param map the #lightcone_map structure
+ * @param zero_pixels if true, set allocated pixels to zero
+ */
+void lightcone_map_allocate_pixels(struct lightcone_map *map,
+                                   const int zero_pixels) {
+
+  if (swift_memalign("lightcone_map_pixels", (void **)&map->data,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     sizeof(double) * map->local_nr_pix) != 0)
+    error("Failed to allocate lightcone map pixel data");
+
+  if (zero_pixels) {
+    for (pixel_index_t i = 0; i < map->local_nr_pix; i += 1) map->data[i] = 0.0;
+  }
+}
+
+void lightcone_map_free_pixels(struct lightcone_map *map) {
+
+  swift_free("lightcone_map_pixels", (void *)map->data);
+  map->data = NULL;
+}
+
+/**
+ * @brief Dump lightcone_map struct to the output stream.
+ *
+ * @param map the #lightcone_map structure
+ * @param stream The stream to write to.
+ */
+void lightcone_map_struct_dump(const struct lightcone_map *map, FILE *stream) {
+
+  /* Write the struct */
+  restart_write_blocks((void *)map, sizeof(struct lightcone_map), 1, stream,
+                       "lightcone_map", "lightcone_map");
+
+  /* Write the pixel data if it is allocated */
+  if (map->data)
+    restart_write_blocks((void *)map->data, sizeof(double), map->local_nr_pix,
+                         stream, "lightcone_map_data", "lightcone_map_data");
+}
+
+/**
+ * @brief Restore lightcone_map struct from the input stream.
+ *
+ * @param map the #lightcone_map structure
+ * @param stream The stream to read from.
+ */
+void lightcone_map_struct_restore(struct lightcone_map *map, FILE *stream) {
+
+  /* Read the struct */
+  restart_read_blocks((void *)map, sizeof(struct lightcone_map), 1, stream,
+                      NULL, "lightcone_map");
+
+  /* Read the pixel data if it was allocated.
+     map->data from the restart file is no longer a valid pointer, but we can
+     check whether it is non-NULL to see if the pixel data block was written
+     out. */
+  if (map->data) {
+    lightcone_map_allocate_pixels(map, /* zero_pixels = */ 0);
+    restart_read_blocks((void *)map->data, sizeof(double), map->local_nr_pix,
+                        stream, NULL, "lightcone_map");
+  }
+}
+
+#ifdef HAVE_HDF5
+/**
+ * @brief Write a lightcone map to an HDF5 file
+ *
+ * @param map the #lightcone_map structure
+ * @param loc_id a HDF5 file or group identifier to write to
+ * @param name the name of the dataset to create
+ * @param internal_units the system of units used internally
+ * @param snapshot_units the system of units used in the outputs
+ * @param collective whether to write with collective MPI I/O
+ * @param gzip_level deflate level to apply in non-collective mode (0 = off)
+ * @param chunk_size the HDF5 dataset chunk size in non-collective mode
+ * @param compression the lossy compression scheme to apply, if any
+ */
+void lightcone_map_write(struct lightcone_map *map, const hid_t loc_id,
+                         const char *name,
+                         const struct unit_system *internal_units,
+                         const struct unit_system *snapshot_units,
+                         const int collective, const int gzip_level,
+                         const int chunk_size,
+                         enum lossy_compression_schemes compression) {
+
+#ifdef WITH_MPI
+  int comm_rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
+#endif
+
+  /* Find unit conversion factor for this quantity */
+  const double map_conversion_factor =
+      units_conversion_factor(internal_units, snapshot_units, map->type.units);
+
+  /* Convert units of pixel data if necessary */
+  if (map_conversion_factor != 1.0) {
+    for (pixel_index_t i = 0; i < map->local_nr_pix; i += 1)
+      map->data[i] *= map_conversion_factor;
+  }
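+  /* Note the conversion is done in place: after this call the pixel data
+     are in snapshot units and are assumed not to be reused for further
+     accumulation in internal units. */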
+
+  /* Get conversion factor for shell radii */
+  const double length_conversion_factor =
+      units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_LENGTH);
+
+  /* Create dataspace in memory corresponding to local pixels */
+  const hsize_t mem_dims[1] = {(hsize_t)map->local_nr_pix};
+  hid_t mem_space_id = H5Screate_simple(1, mem_dims, NULL);
+  if (mem_space_id < 0) error("Unable to create memory dataspace");
+
+  /* Create dataspace corresponding to the part of the map in this file. */
+  hsize_t file_dims[1];
+  if (collective) {
+    /* For collective writes the file contains the full map */
+    file_dims[0] = (hsize_t)map->total_nr_pix;
+  } else {
+    /* For distributed writes the file contains the local pixels only */
+    file_dims[0] = (hsize_t)map->local_nr_pix;
+  }
+  hid_t file_space_id = H5Screate_simple(1, file_dims, NULL);
+  if (file_space_id < 0) error("Unable to create file dataspace");
+
+    /* Select the part of the dataset in the file to write to */
+#ifdef WITH_MPI
+#ifdef HAVE_PARALLEL_HDF5
+  if (collective) {
+    const pixel_index_t pixel_offset = map->local_pix_offset;
+    const hsize_t start[1] = {(hsize_t)pixel_offset};
+    const hsize_t count[1] = {(hsize_t)map->local_nr_pix};
+    if (H5Sselect_hyperslab(file_space_id, H5S_SELECT_SET, start, NULL, count,
+                            NULL) < 0)
+      error("Unable to select part of file dataspace to write to");
+  }
+#else
+  if (collective)
+    error("Writing lightcone maps with collective I/O requires parallel HDF5");
+#endif
+#endif
+
+  /* Property list for creating the dataset */
+  hid_t prop_id = H5Pcreate(H5P_DATASET_CREATE);
+
+  /* Data type to write in the file */
+  hid_t dtype_id = H5Tcopy(H5T_NATIVE_DOUBLE);
+
+  /* Use chunked writes and possibly filters in non-collective mode */
+  if (!collective) {
+
+    /* Set the chunk size */
+    const hsize_t dim[1] = {(hsize_t)chunk_size};
+    if (H5Pset_chunk(prop_id, 1, dim) < 0)
+      error("Unable to set HDF5 chunk size for healpix map");
+
+    /* Set lossy compression, if requested. This might change the
+       output data type and add to the property list. */
+    char filter_name[32];
+    set_hdf5_lossy_compression(&prop_id, &dtype_id, compression, name,
+                               filter_name);
+
+    /* Set lossless compression */
+    if (gzip_level > 0) {
+      H5Pset_shuffle(prop_id);
+      if (H5Pset_deflate(prop_id, gzip_level) < 0)
+        error("Unable to set HDF5 deflate filter for healpix map");
+    }
+  }
+
+  /* Create the dataset */
+  hid_t dset_id = H5Dcreate(loc_id, name, dtype_id, file_space_id, H5P_DEFAULT,
+                            prop_id, H5P_DEFAULT);
+  H5Pclose(prop_id);
+  H5Tclose(dtype_id);
+  if (dset_id < 0) error("Unable to create dataset %s", name);
+
+  /* Write attributes */
+  io_write_attribute_i(dset_id, "nside", map->nside);
+  io_write_attribute_ll(dset_id, "number_of_pixels",
+                        (long long)map->total_nr_pix);
+  io_write_attribute_s(dset_id, "pixel_ordering_scheme", "ring");
+  io_write_attribute_d(dset_id, "comoving_inner_radius",
+                       map->r_min * length_conversion_factor);
+  io_write_attribute_d(dset_id, "comoving_outer_radius",
+                       map->r_max * length_conversion_factor);
+
+  /* Write unit conversion factors for this data set */
+  char buffer[FIELD_BUFFER_SIZE] = {0};
+  units_cgs_conversion_string(buffer, snapshot_units, map->type.units, 0.f);
+  float baseUnitsExp[5];
+  units_get_base_unit_exponents_array(baseUnitsExp, map->type.units);
+  io_write_attribute_f(dset_id, "U_M exponent", baseUnitsExp[UNIT_MASS]);
+  io_write_attribute_f(dset_id, "U_L exponent", baseUnitsExp[UNIT_LENGTH]);
+  io_write_attribute_f(dset_id, "U_t exponent", baseUnitsExp[UNIT_TIME]);
+  io_write_attribute_f(dset_id, "U_I exponent", baseUnitsExp[UNIT_CURRENT]);
+  io_write_attribute_f(dset_id, "U_T exponent", baseUnitsExp[UNIT_TEMPERATURE]);
+  io_write_attribute_f(dset_id, "h-scale exponent", 0.f);
+  io_write_attribute_f(dset_id, "a-scale exponent", 0.f);
+  io_write_attribute_s(dset_id, "Expression for physical CGS units", buffer);
+
+  /* Write the actual number this conversion factor corresponds to */
+  const double cgs_factor =
+      units_cgs_conversion_factor(snapshot_units, map->type.units);
+  io_write_attribute_d(
+      dset_id,
+      "Conversion factor to CGS (not including cosmological corrections)",
+      cgs_factor);
+
+#ifdef LIGHTCONE_MAP_CHECK_TOTAL
+  /* Consistency check: will write out expected sum over pixels */
+  double total = map->total;
+#ifdef WITH_MPI
+  MPI_Allreduce(&map->total, &total, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+#endif
+  total *= map_conversion_factor;
+  io_write_attribute_d(dset_id, "expected_sum", total);
+#endif
+
+  /* Set up property list for the write */
+  hid_t h_plist_id = H5Pcreate(H5P_DATASET_XFER);
+#if defined(WITH_MPI)
+#ifdef HAVE_PARALLEL_HDF5
+  if (collective) {
+    if (H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE) < 0)
+      error("Unable to set collective transfer mode");
+  }
+#else
+  if (collective)
+    error("Writing lightcone maps with MPI requires parallel HDF5");
+#endif
+#endif
+
+  /* Write the data */
+  if (H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, mem_space_id, file_space_id,
+               h_plist_id, map->data) < 0)
+    error("Unable to write dataset %s", name);
+
+  /* Tidy up */
+  H5Dclose(dset_id);
+  H5Sclose(mem_space_id);
+  H5Sclose(file_space_id);
+  H5Pclose(h_plist_id);
+}
+#endif /* HAVE_HDF5 */
diff --git a/src/lightcone/lightcone_map.h b/src/lightcone/lightcone_map.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3c03d6affda4cf91f8c313600fc145fcb06bfbc
--- /dev/null
+++ b/src/lightcone/lightcone_map.h
@@ -0,0 +1,115 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_LIGHTCONE_MAP_H
+#define SWIFT_LIGHTCONE_MAP_H
+
+/* Define this to write the expected sum of the map to the
+   output file. This is to check that the SPH smoothing and
+   communication code is conserving the quantity added to
+   the map. */
+#define LIGHTCONE_MAP_CHECK_TOTAL
+
+/* Standard headers */
+#include <limits.h>
+#include <math.h>
+
+/* Config parameters. */
+#include "../config.h"
+
+/* HDF5 */
+#ifdef HAVE_HDF5
+#include <hdf5.h>
+#endif
+
+/* Local headers */
+#include "io_compression.h"
+#include "lightcone/lightcone_map_types.h"
+#include "lightcone/pixel_index.h"
+#include "units.h"
+
+/**
+ * @brief Struct to store a single lightcone healpix map
+ */
+struct lightcone_map {
+
+  /*! Healpix nside parameter */
+  int nside;
+
+  /*! Total pixels in the map */
+  pixel_index_t total_nr_pix;
+
+  /*! Number of pixels stored on this node */
+  pixel_index_t local_nr_pix;
+
+  /*! Offset of the first pixel stored on this rank */
+  pixel_index_t local_pix_offset;
+
+  /*! Number of pixels per rank (last node has any extra) */
+  pixel_index_t pix_per_rank;
+
+  /*! Local healpix map data */
+  double *data;
+
+  /*! Inner radius */
+  double r_min;
+
+  /*! Outer radius */
+  double r_max;
+
+  /*! Type of this map */
+  struct lightcone_map_type type;
+
+  /*! Factor for retrieving values from the update buffer */
+  double buffer_scale_factor_inv;
+
+#ifdef LIGHTCONE_MAP_CHECK_TOTAL
+  /*! Total quantity accumulated to this map, for consistency check */
+  double total;
+#endif
+};
+
+void lightcone_map_init(struct lightcone_map *map, const int nside,
+                        const pixel_index_t total_nr_pix,
+                        const pixel_index_t pix_per_rank,
+                        const pixel_index_t local_nr_pix,
+                        const pixel_index_t local_pix_offset,
+                        const double r_min, const double r_max,
+                        struct lightcone_map_type type);
+
+void lightcone_map_clean(struct lightcone_map *map);
+
+void lightcone_map_struct_dump(const struct lightcone_map *map, FILE *stream);
+
+void lightcone_map_struct_restore(struct lightcone_map *map, FILE *stream);
+
+void lightcone_map_allocate_pixels(struct lightcone_map *map,
+                                   const int zero_pixels);
+
+void lightcone_map_free_pixels(struct lightcone_map *map);
+
+#ifdef HAVE_HDF5
+void lightcone_map_write(struct lightcone_map *map, const hid_t loc_id,
+                         const char *name,
+                         const struct unit_system *internal_units,
+                         const struct unit_system *snapshot_units,
+                         const int collective, const int gzip_level,
+                         const int chunk_size,
+                         enum lossy_compression_schemes compression);
+#endif
+
+#endif /* #ifndef SWIFT_LIGHTCONE_MAP_H */
diff --git a/src/lightcone/lightcone_map_types.c b/src/lightcone/lightcone_map_types.c
new file mode 100644
index 0000000000000000000000000000000000000000..00098a57d8ba098d45523eed64da31e4c4cc493e
--- /dev/null
+++ b/src/lightcone/lightcone_map_types.c
@@ -0,0 +1,355 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "black_holes.h"
+#include "engine.h"
+#include "gravity.h"
+#include "hydro.h"
+#include "lightcone/lightcone_map.h"
+#include "neutrino.h"
+#include "part.h"
+#include "star_formation.h"
+#include "stars.h"
+
+/* This object's header */
+#include "lightcone/lightcone_map_types.h"
+
+/* Required for the xrays */
+#include "extra_io.h"
+#include "io_properties.h"
+
+/**
+ * @brief Determine if a particle type contributes to gas-only map types
+ *
+ * Used for maps which only include gas, e.g. the EAGLE X-ray outputs.
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_gas_only(int ptype) {
+
+  switch (ptype) {
+    case swift_type_gas:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_total_mass_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_gas:
+    case swift_type_stars:
+    case swift_type_black_hole:
+    case swift_type_dark_matter:
+    case swift_type_dark_matter_background:
+    case swift_type_neutrino:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Return the value a #gpart adds to the projected total mass map
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_total_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  /* const struct xpart *xparts = s->xparts; */ /* Currently not used */
+  const struct spart *sparts = s->sparts;
+  const struct bpart *bparts = s->bparts;
+
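+  /* For particle types that have their own particle arrays,
+     gp->id_or_neg_offset stores minus the index of the matching #part,
+     #spart or #bpart, hence the negated indexing below. */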
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      return hydro_get_mass(p);
+    } break;
+    case swift_type_stars: {
+      const struct spart *sp = &sparts[-gp->id_or_neg_offset];
+      return sp->mass;
+    } break;
+    case swift_type_black_hole: {
+      const struct bpart *bp = &bparts[-gp->id_or_neg_offset];
+      return bp->mass;
+    } break;
+    case swift_type_dark_matter:
+    case swift_type_dark_matter_background:
+    case swift_type_neutrino: {
+      return gp->mass;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Return the background neutrino density to add to the total mass map
+ *
+ * The neutrino particles trace density perturbations relative
+ * to a constant background, so we need to add in the background
+ * to get the total mass.
+ *
+ * @param c the #cosmology structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param map the #lightcone_map the baseline is added to
+ */
+double lightcone_map_total_mass_baseline_value(
+    const struct cosmology *c, const struct lightcone_props *lightcone_props,
+    const struct lightcone_map *map) {
+  return lightcone_map_neutrino_baseline_value(c, lightcone_props, map);
+}
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_gas_mass_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_gas:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Return the value a #gpart adds to the projected gas mass map
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_gas_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      return hydro_get_mass(p);
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_dark_matter_mass_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_dark_matter:
+    case swift_type_dark_matter_background:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Return the value a #gpart adds to the dark matter mass map
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_dark_matter_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+  switch (gp->type) {
+    case swift_type_dark_matter:
+    case swift_type_dark_matter_background: {
+      return gp->mass;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_stellar_mass_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_stars:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Return the value a #gpart adds to the stellar mass map
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_stellar_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct spart *sparts = s->sparts;
+
+  switch (gp->type) {
+    case swift_type_stars: {
+      const struct spart *sp = &sparts[-gp->id_or_neg_offset];
+      return sp->mass;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_black_hole_mass_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_black_hole:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Return the value a #gpart adds to the black hole mass map
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_black_hole_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct bpart *bparts = s->bparts;
+
+  switch (gp->type) {
+    case swift_type_black_hole: {
+      const struct bpart *bp = &bparts[-gp->id_or_neg_offset];
+      return bp->mass;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_sfr_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_gas:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Return the value a #gpart adds to the star formation rate map
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_sfr_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  /* Handle on the other particle types */
+  const struct space *s = e->s;
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+
+  switch (gp->type) {
+    case swift_type_gas: {
+      const struct part *p = &parts[-gp->id_or_neg_offset];
+      const struct xpart *xp = &xparts[-gp->id_or_neg_offset];
+      return star_formation_get_SFR(p, xp);
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
diff --git a/src/lightcone/lightcone_map_types.h b/src/lightcone/lightcone_map_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..e648625adca358223c60a014583a6ea943197d46
--- /dev/null
+++ b/src/lightcone/lightcone_map_types.h
@@ -0,0 +1,213 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_LIGHTCONE_MAP_TYPES_H
+#define SWIFT_LIGHTCONE_MAP_TYPES_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers */
+#include "io_compression.h"
+#include "parser.h"
+#include "part_type.h"
+#include "units.h"
+
+/* Avoid cyclic inclusions */
+struct cosmology;
+struct engine;
+struct lightcone_map;
+struct lightcone_props;
+struct gpart;
+
+/* Type to store pointer to function for updating a healpix map */
+typedef double (*map_update_function_t)(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/* Type to store pointer to function for providing baseline map value */
+typedef double (*map_baseline_function_t)(
+    const struct cosmology *c, const struct lightcone_props *lightcone_props,
+    const struct lightcone_map *map);
+
+/* Type to store pointer to function to check which types contribute to a map */
+typedef int (*map_contrib_function_t)(int ptype);
+
+enum lightcone_map_smoothing { map_unsmoothed, map_smoothed };
+
+/**
+ * @brief Struct to store information on one type of lightcone map
+ */
+struct lightcone_map_type {
+  char name[PARSER_MAX_LINE_SIZE];
+  map_update_function_t update_map;
+  map_contrib_function_t ptype_contributes;
+  map_baseline_function_t baseline_func;
+  enum unit_conversion_factor units;
+  enum lightcone_map_smoothing smoothing;
+  enum lossy_compression_schemes compression;
+  double buffer_scale_factor;
+};
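+
+/* As a sketch of how a new map type would be added (the names below are
+   hypothetical): implement an update function and a contributes function
+   matching the typedefs above, then append an initialiser to the
+   lightcone_map_types table at the end of this header, e.g.
+
+     {
+         .name = "MyQuantity",
+         .update_map = lightcone_map_my_quantity_get_value,
+         .ptype_contributes = lightcone_map_gas_only,
+         .baseline_func = NULL,
+         .units = UNIT_CONV_NO_UNITS,
+         .smoothing = map_unsmoothed,
+         .compression = compression_write_lossless,
+         .buffer_scale_factor = 1.0,
+     },
+
+   keeping the terminating entry with NULL function pointers last. */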
+
+/*
+  Function used for defining maps which only include gas (e.g. EAGLE x-ray
+  outputs)
+*/
+int lightcone_map_gas_only(int ptype);
+
+/*
+   Healpix map of total mass
+*/
+int lightcone_map_total_mass_type_contributes(int ptype);
+
+double lightcone_map_total_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+double lightcone_map_total_mass_baseline_value(
+    const struct cosmology *c, const struct lightcone_props *lightcone_props,
+    const struct lightcone_map *map);
+
+/*
+   Healpix map of gas mass
+*/
+int lightcone_map_gas_mass_type_contributes(int ptype);
+
+double lightcone_map_gas_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of dark matter mass
+*/
+int lightcone_map_dark_matter_mass_type_contributes(int ptype);
+
+double lightcone_map_dark_matter_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of stellar mass
+*/
+int lightcone_map_stellar_mass_type_contributes(int ptype);
+
+double lightcone_map_stellar_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of black hole mass
+*/
+int lightcone_map_black_hole_mass_type_contributes(int ptype);
+
+double lightcone_map_black_hole_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+/*
+   Healpix map of star formation rate
+*/
+int lightcone_map_sfr_type_contributes(int ptype);
+
+double lightcone_map_sfr_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]);
+
+/* This associates map names with the appropriate update function and unit
+   info */
+static const struct lightcone_map_type lightcone_map_types[] = {
+    {
+        .name = "TotalMass",
+        .update_map = lightcone_map_total_mass_get_value,
+        .ptype_contributes = lightcone_map_total_mass_type_contributes,
+        .baseline_func = lightcone_map_total_mass_baseline_value,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "SmoothedGasMass",
+        .update_map = lightcone_map_gas_mass_get_value,
+        .ptype_contributes = lightcone_map_gas_mass_type_contributes,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_smoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "UnsmoothedGasMass",
+        .update_map = lightcone_map_gas_mass_get_value,
+        .ptype_contributes = lightcone_map_gas_mass_type_contributes,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "DarkMatterMass",
+        .update_map = lightcone_map_dark_matter_mass_get_value,
+        .ptype_contributes = lightcone_map_dark_matter_mass_type_contributes,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "StellarMass",
+        .update_map = lightcone_map_stellar_mass_get_value,
+        .ptype_contributes = lightcone_map_stellar_mass_type_contributes,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "BlackHoleMass",
+        .update_map = lightcone_map_black_hole_mass_get_value,
+        .ptype_contributes = lightcone_map_black_hole_mass_type_contributes,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        .name = "StarFormationRate",
+        .update_map = lightcone_map_sfr_get_value,
+        .ptype_contributes = lightcone_map_sfr_type_contributes,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_SFR,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        /* NULL functions indicate end of array */
+        .name = "",
+        .update_map = NULL,
+        .ptype_contributes = NULL,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_NO_UNITS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+};
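+
+/*
+  Illustrative sketch (not part of the patch): the NULL update_map pointer
+  in the final entry acts as a terminator, so the table can be scanned
+  without a separate length constant. A hypothetical lookup might read:
+
+    for (int i = 0; lightcone_map_types[i].update_map != NULL; i += 1) {
+      if (strcmp(lightcone_map_types[i].name, requested_name) == 0)
+        return &lightcone_map_types[i];
+    }
+
+  where requested_name would be a map name read from the parameter file.
+*/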
+
+#endif
diff --git a/src/lightcone/lightcone_particle_io.c b/src/lightcone/lightcone_particle_io.c
new file mode 100644
index 0000000000000000000000000000000000000000..cddbc1f2977770c14cec19d05ea3b3fd18f81b22
--- /dev/null
+++ b/src/lightcone/lightcone_particle_io.c
@@ -0,0 +1,878 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <hdf5.h>
+
+/* This object's header. */
+#include "lightcone/lightcone_particle_io.h"
+
+/* Local headers */
+#include "black_holes.h"
+#include "chemistry.h"
+#include "chemistry_struct.h"
+#include "cooling.h"
+#include "engine.h"
+#include "error.h"
+#include "gravity.h"
+#include "lightcone/lightcone.h"
+#include "particle_buffer.h"
+#include "stars.h"
+
+void lightcone_io_field_list_init(struct lightcone_io_field_list *list) {
+
+  list->first = NULL;
+  list->last = NULL;
+  list->num_fields = 0;
+}
+
+void lightcone_io_field_list_clean(struct lightcone_io_field_list *list) {
+
+  struct lightcone_io_field *current;
+  struct lightcone_io_field *next;
+
+  current = list->first;
+  while (current) {
+    next = current->next;
+    free(current);
+    current = next;
+  }
+
+  list->first = NULL;
+  list->last = NULL;
+  list->num_fields = 0;
+}
+
+void lightcone_io_field_list_append(struct lightcone_io_field_list *list,
+                                    char *name, enum IO_DATA_TYPE type,
+                                    int dimension, size_t offset,
+                                    enum unit_conversion_factor units,
+                                    float scale_factor_exponent,
+                                    char *compression) {
+
+  /* Make the new lightcone_io_field struct */
+  struct lightcone_io_field *r = malloc(sizeof(struct lightcone_io_field));
+  if (!r) error("Failed to allocate lightcone_io_field");
+  bzero(r, sizeof(struct lightcone_io_field));
+  strcpy(r->name, name);
+  r->type = type;
+  r->dimension = dimension;
+  r->offset = offset;
+  r->units = units;
+  r->scale_factor_exponent = scale_factor_exponent;
+  r->compression = compression_scheme_from_name(compression);
+  r->next = NULL;
+
+  /* Append to the linked list */
+  if (list->last) {
+    list->last->next = r;
+  } else {
+    list->first = r;
+  }
+  list->last = r;
+  list->num_fields += 1;
+}
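+
+/*
+  Example (illustrative only): registering a hypothetical scalar field
+  stored at member `foo` of struct lightcone_gas_data would look like
+
+    lightcone_io_field_list_append(list, "Foo", FLOAT, 1,
+                                   offsetof(struct lightcone_gas_data, foo),
+                                   UNIT_CONV_NO_UNITS, 0.0, "on");
+
+  The compression string must be a name recognised by
+  compression_scheme_from_name(), as with the "on", "Nbit40" etc. strings
+  used below.
+*/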
+
+/**
+ * @brief Make a linked list of output fields for gas particles
+ */
+void lightcone_io_append_gas_output_fields(
+    struct lightcone_io_field_list *list) {
+
+#define OFFSET(x) offsetof(struct lightcone_gas_data, x)
+  lightcone_io_field_list_append(list, "ParticleIDs", LONGLONG, 1, OFFSET(id),
+                                 UNIT_CONV_NO_UNITS, 0.0, "Nbit40");
+  lightcone_io_field_list_append(list, "Coordinates", DOUBLE, 3, OFFSET(x),
+                                 UNIT_CONV_LENGTH, 1.0, "DScale5");
+  lightcone_io_field_list_append(list, "Velocities", FLOAT, 3, OFFSET(vel),
+                                 UNIT_CONV_SPEED, 0.0, "DScale1");
+  lightcone_io_field_list_append(list, "Masses", FLOAT, 1, OFFSET(mass),
+                                 UNIT_CONV_MASS, 0.0, "on");
+  lightcone_io_field_list_append(list, "ExpansionFactors", FLOAT, 1, OFFSET(a),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(list, "SmoothingLengths", FLOAT, 1, OFFSET(h),
+                                 UNIT_CONV_LENGTH, 0.0, "on");
+  lightcone_io_field_list_append(list, "Densities", FLOAT, 1, OFFSET(rho),
+                                 UNIT_CONV_DENSITY, -3.0, "FMantissa9");
+  lightcone_io_field_list_append(list, "Temperatures", FLOAT, 1,
+                                 OFFSET(temperature), UNIT_CONV_TEMPERATURE,
+                                 0.0, "FMantissa9");
+#ifdef CHEMISTRY_EAGLE
+  lightcone_io_field_list_append(list, "SmoothedElementMassFractions", FLOAT,
+                                 chemistry_element_count,
+                                 OFFSET(smoothed_metal_mass_fraction),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+  lightcone_io_field_list_append(list, "SmoothedMetalMassFractions", FLOAT, 1,
+                                 OFFSET(smoothed_metal_mass_fraction_total),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+  lightcone_io_field_list_append(list, "MetalMassFractions", FLOAT, 1,
+                                 OFFSET(metal_mass_fraction_total),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+#endif
+#ifdef COOLING_COLIBRE
+  lightcone_io_field_list_append(list, "ElectronNumberDensities", DOUBLE, 1,
+                                 OFFSET(electron_density),
+                                 UNIT_CONV_NUMBER_DENSITY, 0.0, "DMantissa9");
+  lightcone_io_field_list_append(list, "ComptonYParameters", DOUBLE, 1,
+                                 OFFSET(ycompton), UNIT_CONV_AREA, 0.0,
+                                 "DMantissa9");
+#endif
+#ifdef WITH_FOF
+  lightcone_io_field_list_append(list, "FOFGroupIDs", LONGLONG, 1,
+                                 OFFSET(group_id), UNIT_CONV_NO_UNITS, 0.0,
+                                 "on");
+#endif
+#if defined(TRACERS_EAGLE) || defined(TRACERS_FLAMINGO)
+  lightcone_io_field_list_append(list, "LastAGNFeedbackScaleFactors", FLOAT, 1,
+                                 OFFSET(last_AGN_injection_scale_factor),
+                                 UNIT_CONV_NO_UNITS, 0.0, "BFloat16");
+#endif
+#ifdef STAR_FORMATION_EAGLE
+  lightcone_io_field_list_append(list, "StarFormationRates", FLOAT, 1,
+                                 OFFSET(sfr), UNIT_CONV_SFR, 0.0, "on");
+#endif
+#undef OFFSET
+}
+
+/**
+ * @brief Make a linked list of output fields for DM particles
+ */
+void lightcone_io_append_dark_matter_output_fields(
+    struct lightcone_io_field_list *list) {
+
+#define OFFSET(x) offsetof(struct lightcone_dark_matter_data, x)
+  lightcone_io_field_list_append(list, "ParticleIDs", LONGLONG, 1, OFFSET(id),
+                                 UNIT_CONV_NO_UNITS, 0.0, "Nbit40");
+  lightcone_io_field_list_append(list, "Coordinates", DOUBLE, 3, OFFSET(x),
+                                 UNIT_CONV_LENGTH, 1.0, "DScale5");
+  lightcone_io_field_list_append(list, "Velocities", FLOAT, 3, OFFSET(vel),
+                                 UNIT_CONV_SPEED, 0.0, "DScale1");
+  lightcone_io_field_list_append(list, "Masses", FLOAT, 1, OFFSET(mass),
+                                 UNIT_CONV_MASS, 0.0, "on");
+  lightcone_io_field_list_append(list, "ExpansionFactors", FLOAT, 1, OFFSET(a),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+#undef OFFSET
+}
+
+/**
+ * @brief Make a linked list of output fields for DM background particles
+ */
+void lightcone_io_append_dark_matter_background_output_fields(
+    struct lightcone_io_field_list *list) {
+
+#define OFFSET(x)                             \
+  offsetof(struct lightcone_dark_matter_data, \
+           x) /* Uses same struct as dark matter */
+  lightcone_io_field_list_append(list, "ParticleIDs", LONGLONG, 1, OFFSET(id),
+                                 UNIT_CONV_NO_UNITS, 0.0, "Nbit40");
+  lightcone_io_field_list_append(list, "Coordinates", DOUBLE, 3, OFFSET(x),
+                                 UNIT_CONV_LENGTH, 1.0, "DScale5");
+  lightcone_io_field_list_append(list, "Velocities", FLOAT, 3, OFFSET(vel),
+                                 UNIT_CONV_SPEED, 0.0, "DScale1");
+  lightcone_io_field_list_append(list, "Masses", FLOAT, 1, OFFSET(mass),
+                                 UNIT_CONV_MASS, 0.0, "on");
+  lightcone_io_field_list_append(list, "ExpansionFactors", FLOAT, 1, OFFSET(a),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+#undef OFFSET
+}
+
+/**
+ * @brief Make a linked list of output fields for star particles
+ */
+void lightcone_io_append_stars_output_fields(
+    struct lightcone_io_field_list *list) {
+
+#define OFFSET(x) offsetof(struct lightcone_stars_data, x)
+  lightcone_io_field_list_append(list, "ParticleIDs", LONGLONG, 1, OFFSET(id),
+                                 UNIT_CONV_NO_UNITS, 0.0, "Nbit40");
+  lightcone_io_field_list_append(list, "Coordinates", DOUBLE, 3, OFFSET(x),
+                                 UNIT_CONV_LENGTH, 1.0, "DScale5");
+  lightcone_io_field_list_append(list, "Velocities", FLOAT, 3, OFFSET(vel),
+                                 UNIT_CONV_SPEED, 0.0, "DScale1");
+  lightcone_io_field_list_append(list, "Masses", FLOAT, 1, OFFSET(mass),
+                                 UNIT_CONV_MASS, 0.0, "on");
+  lightcone_io_field_list_append(list, "ExpansionFactors", FLOAT, 1, OFFSET(a),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+#ifdef WITH_FOF
+  lightcone_io_field_list_append(list, "FOFGroupIDs", LONGLONG, 1,
+                                 OFFSET(group_id), UNIT_CONV_NO_UNITS, 0.0,
+                                 "on");
+#endif
+#ifdef STARS_EAGLE
+  lightcone_io_field_list_append(list, "InitialMasses", FLOAT, 1,
+                                 OFFSET(mass_init), UNIT_CONV_MASS, 0.0,
+                                 "FMantissa9");
+  lightcone_io_field_list_append(list, "BirthScaleFactors", FLOAT, 1,
+                                 OFFSET(birth_scale_factor), UNIT_CONV_NO_UNITS,
+                                 0.0, "FMantissa9");
+  lightcone_io_field_list_append(list, "BirthDensities", FLOAT, 1,
+                                 OFFSET(birth_density), UNIT_CONV_DENSITY, 0.0,
+                                 "BFloat16");
+  lightcone_io_field_list_append(list, "Luminosities", FLOAT,
+                                 luminosity_bands_count, OFFSET(luminosities),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+#endif
+#ifdef CHEMISTRY_EAGLE
+  lightcone_io_field_list_append(list, "SmoothedElementMassFractions", FLOAT,
+                                 chemistry_element_count,
+                                 OFFSET(smoothed_metal_mass_fraction),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+  lightcone_io_field_list_append(list, "SmoothedMetalMassFractions", FLOAT, 1,
+                                 OFFSET(smoothed_metal_mass_fraction_total),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+  lightcone_io_field_list_append(list, "MetalMassFractions", FLOAT, 1,
+                                 OFFSET(metal_mass_fraction_total),
+                                 UNIT_CONV_NO_UNITS, 0.0, "FMantissa9");
+#endif
+#if defined(TRACERS_EAGLE) || defined(TRACERS_FLAMINGO)
+  lightcone_io_field_list_append(list, "LastAGNFeedbackScaleFactors", FLOAT, 1,
+                                 OFFSET(last_AGN_injection_scale_factor),
+                                 UNIT_CONV_NO_UNITS, 0.0, "BFloat16");
+#endif
+#undef OFFSET
+}
+
+/**
+ * @brief Make a linked list of output fields for black hole particles
+ */
+void lightcone_io_append_black_hole_output_fields(
+    struct lightcone_io_field_list *list) {
+
+#define OFFSET(x) offsetof(struct lightcone_black_hole_data, x)
+  lightcone_io_field_list_append(list, "ParticleIDs", LONGLONG, 1, OFFSET(id),
+                                 UNIT_CONV_NO_UNITS, 0.0, "Nbit40");
+  lightcone_io_field_list_append(list, "Coordinates", DOUBLE, 3, OFFSET(x),
+                                 UNIT_CONV_LENGTH, 1.0, "DScale5");
+  lightcone_io_field_list_append(list, "Velocities", FLOAT, 3, OFFSET(vel),
+                                 UNIT_CONV_SPEED, 0.0, "DScale1");
+  lightcone_io_field_list_append(list, "DynamicalMasses", FLOAT, 1,
+                                 OFFSET(mass), UNIT_CONV_MASS, 0.0, "on");
+  lightcone_io_field_list_append(list, "ExpansionFactors", FLOAT, 1, OFFSET(a),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+#ifdef BLACK_HOLES_EAGLE
+  lightcone_io_field_list_append(list, "SubgridMasses", FLOAT, 1,
+                                 OFFSET(subgrid_mass), UNIT_CONV_MASS, 0.0,
+                                 "on");
+  lightcone_io_field_list_append(list, "FormationScaleFactors", FLOAT, 1,
+                                 OFFSET(formation_scale_factor),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(list, "AccretionRates", FLOAT, 1,
+                                 OFFSET(accretion_rate),
+                                 UNIT_CONV_MASS_PER_UNIT_TIME, 0.0, "on");
+  lightcone_io_field_list_append(list, "TotalAccretedMasses", FLOAT, 1,
+                                 OFFSET(total_accreted_mass), UNIT_CONV_MASS,
+                                 0.0, "on");
+  lightcone_io_field_list_append(list, "LastMinorMergerScaleFactors", FLOAT, 1,
+                                 OFFSET(last_minor_merger_scale_factor),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(list, "LastMajorMergerScaleFactors", FLOAT, 1,
+                                 OFFSET(last_major_merger_scale_factor),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(list, "NumberOfMergers", INT, 1,
+                                 OFFSET(number_of_mergers), UNIT_CONV_NO_UNITS,
+                                 0.0, "on");
+  lightcone_io_field_list_append(list, "LastAGNFeedbackScaleFactors", FLOAT, 1,
+                                 OFFSET(last_AGN_event_scale_factor),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(list, "NumberOfAGNEvents", INT, 1,
+                                 OFFSET(AGN_number_of_AGN_events),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(list, "NumberOfHeatingEvents", INT, 1,
+                                 OFFSET(AGN_number_of_energy_injections),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+  lightcone_io_field_list_append(
+      list, "LastHighEddingtonFractionScaleFactors", FLOAT, 1,
+      OFFSET(last_high_Eddington_fraction_scale_factor), UNIT_CONV_NO_UNITS,
+      0.0, "on");
+  lightcone_io_field_list_append(list, "CumulativeNumberOfSeeds", INT, 1,
+                                 OFFSET(cumulative_number_seeds),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+#ifdef WITH_FOF
+  lightcone_io_field_list_append(list, "FOFGroupIDs", LONGLONG, 1,
+                                 OFFSET(group_id), UNIT_CONV_NO_UNITS, 0.0,
+                                 "on");
+#endif
+#endif
+#undef OFFSET
+}
+
+/**
+ * @brief Make a linked list of output fields for neutrino particles
+ */
+void lightcone_io_append_neutrino_output_fields(
+    struct lightcone_io_field_list *list) {
+
+#define OFFSET(x) offsetof(struct lightcone_neutrino_data, x)
+  lightcone_io_field_list_append(list, "ParticleIDs", LONGLONG, 1, OFFSET(id),
+                                 UNIT_CONV_NO_UNITS, 0.0, "Nbit40");
+  lightcone_io_field_list_append(list, "Coordinates", DOUBLE, 3, OFFSET(x),
+                                 UNIT_CONV_LENGTH, 1.0, "DScale5");
+  lightcone_io_field_list_append(list, "Velocities", FLOAT, 3, OFFSET(vel),
+                                 UNIT_CONV_SPEED, 0.0, "DScale1");
+  lightcone_io_field_list_append(list, "Masses", FLOAT, 1, OFFSET(mass),
+                                 UNIT_CONV_MASS, 0.0, "on");
+  lightcone_io_field_list_append(list, "ExpansionFactors", FLOAT, 1, OFFSET(a),
+                                 UNIT_CONV_NO_UNITS, 0.0, "on");
+#undef OFFSET
+}
+
+/*
+  Functions to store particle properties in the lightcone_*_data structs.
+
+  Each function decides whether the particle should be included in the
+  lightcone output. If so, it copies the required quantities into the
+  struct and returns 1; otherwise it returns 0.
+ */
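+
+/*
+  Sketch of the expected calling pattern (hypothetical caller; the actual
+  buffering is done by the lightcone crossing code elsewhere in this
+  patch). particle_buffer_append() is assumed here for illustration:
+
+    struct lightcone_gas_data data;
+    if (lightcone_store_gas(e, props, gp, p, xp, a_cross, x_cross, &data))
+      particle_buffer_append(&props->buffer[swift_type_gas], &data);
+*/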
+
+/**
+ * @brief Store gas properties to write to the lightcone
+ *
+ * If the particle should be included in the lightcone output this function
+ * copies its information to the lightcone_gas_data struct and returns 1.
+ * If the particle should not be output the function returns 0.
+ *
+ * @param e the #engine structure
+ * @param props the #lightcone_props structure
+ * @param gp the #gpart which crossed the lightcone
+ * @param p the #part associated with this #gpart
+ * @param xp the #xpart associated with this #gpart
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ * @param data the #lightcone_gas_data struct to update
+ */
+int lightcone_store_gas(const struct engine *e, struct lightcone_props *props,
+                        const struct gpart *gp, const struct part *p,
+                        const struct xpart *xp, const double a_cross,
+                        const double x_cross[3],
+                        struct lightcone_gas_data *data) {
+
+  /* Check if we're filtering gas particles */
+  if (props->gas_filtering_enabled) {
+    if (a_cross < props->max_a_for_gas_filtering) {
+
+      /* Check hydrogen number density of this particle */
+#ifdef CHEMISTRY_EAGLE
+      const double density = p->rho;
+      const double proton_mass = e->physical_constants->const_proton_mass;
+      const double hydrogen_fraction =
+          p->chemistry_data.metal_mass_fraction[chemistry_element_H];
+      const double nh = density * hydrogen_fraction / proton_mass;
+      if (nh < props->min_nh_for_filtered_gas * pow(a_cross, -4.0)) return 0;
+#else
+      error(
+          "Lightcone gas particle filtering is only implemented for EAGLE "
+          "chemistry");
+#endif
+      /* Check temperature of this particle */
+      const double T = cooling_get_temperature(
+          e->physical_constants, e->hydro_properties, e->internal_units,
+          e->cosmology, e->cooling_func, p, xp);
+      if (T < props->min_temp_for_filtered_gas) return 0;
+    }
+  }
+
+  data->id = p->id;
+  data->x[0] = x_cross[0];
+  data->x[1] = x_cross[1];
+  data->x[2] = x_cross[2];
+  data->vel[0] =
+      xp->v_full[0] / a_cross;  // TODO: extrapolate velocities to a_cross?
+  data->vel[1] = xp->v_full[1] / a_cross;
+  data->vel[2] = xp->v_full[2] / a_cross;
+  data->mass = p->mass;
+  data->a = a_cross;
+  data->h = p->h;
+  data->rho = p->rho;
+  data->temperature = cooling_get_temperature(
+      e->physical_constants, e->hydro_properties, e->internal_units,
+      e->cosmology, e->cooling_func, p, xp);
+#ifdef WITH_FOF
+  data->group_id = (long long)gp->fof_data.group_id;
+#endif
+
+#ifdef CHEMISTRY_EAGLE
+  for (int i = 0; i < chemistry_element_count; i += 1)
+    data->smoothed_metal_mass_fraction[i] =
+        p->chemistry_data.smoothed_metal_mass_fraction[i];
+  data->metal_mass_fraction_total = p->chemistry_data.metal_mass_fraction_total;
+  data->smoothed_metal_mass_fraction_total =
+      p->chemistry_data.smoothed_metal_mass_fraction_total;
+#endif
+
+#ifdef COOLING_COLIBRE
+  data->electron_density = cooling_get_electron_density(
+      e->physical_constants, e->hydro_properties, e->internal_units,
+      e->cosmology, e->cooling_func, p, xp);
+  data->ycompton = cooling_get_ycompton(e->physical_constants,
+                                        e->hydro_properties, e->internal_units,
+                                        e->cosmology, e->cooling_func, p, xp);
+#endif
+
+#if defined(TRACERS_EAGLE) || defined(TRACERS_FLAMINGO)
+  data->last_AGN_injection_scale_factor =
+      xp->tracers_data.last_AGN_injection_scale_factor;
+#endif
+
+#ifdef STAR_FORMATION_EAGLE
+  data->sfr = xp->sf_data.SFR;
+#endif
+
+  return 1;
+}
+
+/**
+ * @brief Store dark matter properties to write to the lightcone
+ *
+ * If the particle should be included in the lightcone output this function
+ * copies its information to the lightcone_dark_matter_data struct and returns
+ * 1. If the particle should not be output the function returns 0.
+ *
+ * @param e the #engine structure
+ * @param props the #lightcone_props structure
+ * @param gp the #gpart which crossed the lightcone
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ * @param data the #lightcone_dark_matter_data struct to update
+ */
+int lightcone_store_dark_matter(const struct engine *e,
+                                struct lightcone_props *props,
+                                const struct gpart *gp, const double a_cross,
+                                const double x_cross[3],
+                                struct lightcone_dark_matter_data *data) {
+  data->id = gp->id_or_neg_offset;
+  data->x[0] = x_cross[0];
+  data->x[1] = x_cross[1];
+  data->x[2] = x_cross[2];
+  data->vel[0] =
+      gp->v_full[0] / a_cross;  // TODO: extrapolate velocities to a_cross?
+  data->vel[1] = gp->v_full[1] / a_cross;
+  data->vel[2] = gp->v_full[2] / a_cross;
+  data->mass = gp->mass;
+  data->a = a_cross;
+
+  return 1;
+}
+
+/**
+ * @brief Store star properties to write to the lightcone
+ *
+ * If the particle should be included in the lightcone output this function
+ * copies its information to the lightcone_stars_data struct and returns
+ * 1. If the particle should not be output the function returns 0.
+ *
+ * @param e the #engine structure
+ * @param props the #lightcone_props structure
+ * @param gp the #gpart which crossed the lightcone
+ * @param sp the #spart associated with the #gpart
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ * @param data the #lightcone_stars_data struct to update
+ */
+int lightcone_store_stars(const struct engine *e, struct lightcone_props *props,
+                          const struct gpart *gp, const struct spart *sp,
+                          const double a_cross, const double x_cross[3],
+                          struct lightcone_stars_data *data) {
+  data->id = sp->id;
+  data->x[0] = x_cross[0];
+  data->x[1] = x_cross[1];
+  data->x[2] = x_cross[2];
+  data->vel[0] =
+      sp->v[0] / a_cross;  // TODO: extrapolate velocities to a_cross?
+  data->vel[1] = sp->v[1] / a_cross;
+  data->vel[2] = sp->v[2] / a_cross;
+  data->mass = sp->mass;
+  data->a = a_cross;
+
+#ifdef WITH_FOF
+  data->group_id = (long long)gp->fof_data.group_id;
+#endif
+
+#ifdef STARS_EAGLE
+  data->mass_init = sp->mass_init;
+  data->birth_scale_factor = sp->birth_scale_factor;
+  data->birth_density = sp->birth_density;
+  stars_get_luminosities(sp, e->policy & engine_policy_cosmology, e->cosmology,
+                         e->time, e->physical_constants, e->stars_properties,
+                         data->luminosities);
+#endif
+
+#ifdef CHEMISTRY_EAGLE
+  for (int i = 0; i < chemistry_element_count; i += 1)
+    data->smoothed_metal_mass_fraction[i] =
+        sp->chemistry_data.smoothed_metal_mass_fraction[i];
+  data->metal_mass_fraction_total =
+      sp->chemistry_data.metal_mass_fraction_total;
+  data->smoothed_metal_mass_fraction_total =
+      sp->chemistry_data.smoothed_metal_mass_fraction_total;
+#endif
+
+#if defined(TRACERS_EAGLE) || defined(TRACERS_FLAMINGO)
+  data->last_AGN_injection_scale_factor =
+      sp->tracers_data.last_AGN_injection_scale_factor;
+#endif
+
+  return 1;
+}
+
+/**
+ * @brief Store black hole properties to write to the lightcone
+ *
+ * If the particle should be included in the lightcone output this function
+ * copies its information to the lightcone_black_hole_data struct and returns
+ * 1. If the particle should not be output the function returns 0.
+ *
+ * @param e the #engine structure
+ * @param props the #lightcone_props structure
+ * @param gp the #gpart which crossed the lightcone
+ * @param bp the #bpart associated with the #gpart
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ * @param data the #lightcone_black_hole_data struct to update
+ */
+int lightcone_store_black_hole(const struct engine *e,
+                               struct lightcone_props *props,
+                               const struct gpart *gp, const struct bpart *bp,
+                               const double a_cross, const double x_cross[3],
+                               struct lightcone_black_hole_data *data) {
+  data->id = bp->id;
+  data->x[0] = x_cross[0];
+  data->x[1] = x_cross[1];
+  data->x[2] = x_cross[2];
+  data->vel[0] =
+      bp->v[0] / a_cross;  // TODO: extrapolate velocities to a_cross?
+  data->vel[1] = bp->v[1] / a_cross;
+  data->vel[2] = bp->v[2] / a_cross;
+  data->mass = bp->mass;
+  data->a = a_cross;
+#ifdef BLACK_HOLES_EAGLE
+  data->subgrid_mass = bp->subgrid_mass;
+  data->formation_scale_factor = bp->formation_scale_factor;
+  data->accretion_rate = bp->accretion_rate;
+  data->total_accreted_mass = bp->total_accreted_mass;
+  data->last_minor_merger_scale_factor = bp->last_minor_merger_scale_factor;
+  data->last_major_merger_scale_factor = bp->last_major_merger_scale_factor;
+  data->number_of_mergers = bp->number_of_mergers;
+  data->last_AGN_event_scale_factor = bp->last_AGN_event_scale_factor;
+  data->AGN_number_of_AGN_events = bp->AGN_number_of_AGN_events;
+  data->AGN_number_of_energy_injections = bp->AGN_number_of_energy_injections;
+  data->last_high_Eddington_fraction_scale_factor =
+      bp->last_high_Eddington_fraction_scale_factor;
+  data->cumulative_number_seeds = bp->cumulative_number_seeds;
+#ifdef WITH_FOF
+  data->group_id = (long long)gp->fof_data.group_id;
+#endif
+#endif
+  return 1;
+}
+
+/**
+ * @brief Store neutrino properties to write to the lightcone
+ *
+ * If the particle should be included in the lightcone output this function
+ * copies its information to the lightcone_neutrino_data struct and returns
+ * 1. If the particle should not be output the function returns 0.
+ *
+ * @param e the #engine structure
+ * @param props the #lightcone_props structure
+ * @param gp the #gpart which crossed the lightcone
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ * @param data the #lightcone_neutrino_data struct to update
+ */
+int lightcone_store_neutrino(const struct engine *e,
+                             struct lightcone_props *props,
+                             const struct gpart *gp, const double a_cross,
+                             const double x_cross[3],
+                             struct lightcone_neutrino_data *data) {
+  data->id = gp->id_or_neg_offset;
+  data->x[0] = x_cross[0];
+  data->x[1] = x_cross[1];
+  data->x[2] = x_cross[2];
+  data->vel[0] =
+      gp->v_full[0] / a_cross;  // TODO: extrapolate velocities to a_cross?
+  data->vel[1] = gp->v_full[1] / a_cross;
+  data->vel[2] = gp->v_full[2] / a_cross;
+  data->mass = gp->mass;
+  data->a = a_cross;
+
+  return 1;
+}
+
+/**
+ * @brief Write data to an HDF5 dataset, appending along the first axis if it
+ * already exists
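+ *
+ * @param snapshot_units Unit system used for the output
+ * @param units Units of the quantity to write
+ * @param scale_factor_exponent Exponent of the scale factor used to convert
+ *        to physical units
+ * @param loc_id HDF5 file or group in which to write
+ * @param name Name of the dataset
+ * @param mem_type_id HDF5 type of the data in memory
+ * @param chunk_size HDF5 chunk size along the first axis
+ * @param lossy_compression Whether lossy compression is enabled
+ * @param compression_scheme Lossy compression scheme to use, if enabled
+ * @param gzip_level Gzip compression level (zero for no gzip compression)
+ * @param rank Number of dimensions of the dataset
+ * @param dims Dimensions of the data to append
+ * @param num_written Number of elements already written along the first axis
+ * @param data The data to write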
+ */
+void append_dataset(const struct unit_system *snapshot_units,
+                    enum unit_conversion_factor units,
+                    float scale_factor_exponent, hid_t loc_id, const char *name,
+                    hid_t mem_type_id, hsize_t chunk_size,
+                    int lossy_compression,
+                    enum lossy_compression_schemes compression_scheme,
+                    int gzip_level, const int rank, const hsize_t *dims,
+                    const hsize_t num_written, const void *data) {
+
+  const int max_rank = 2;
+  if (rank > max_rank)
+    error("HDF5 dataset has too may dimensions. Increase max_rank.");
+  if (rank < 1) error("HDF5 dataset must be at least one dimensional");
+
+  /* If we have zero elements to append, there's nothing to do */
+  if (dims[0] == 0) return;
+
+  /* Determine size of the dataset after we append our data */
+  hsize_t full_dims[max_rank];
+  for (int i = 0; i < rank; i += 1) full_dims[i] = dims[i];
+  full_dims[0] += num_written;
+
+  /* Determine maximum size in each dimension */
+  hsize_t max_dims[max_rank];
+  for (int i = 1; i < rank; i += 1) max_dims[i] = full_dims[i];
+  max_dims[0] = H5S_UNLIMITED;
+
+  /* Determine chunk size in each dimension */
+  hsize_t chunk_dims[max_rank];
+  for (int i = 1; i < rank; i += 1) chunk_dims[i] = full_dims[i];
+  chunk_dims[0] = (hsize_t)chunk_size;
+
+  /* Find offset to region to write in each dimension */
+  hsize_t offset[max_rank];
+  for (int i = 1; i < rank; i += 1) offset[i] = 0;
+  offset[0] = num_written;
+
+  hid_t dataset_id;
+  hid_t file_space_id;
+  if (num_written == 0) {
+
+    /* We need to create a new dataset */
+    file_space_id = H5Screate_simple(rank, full_dims, max_dims);
+    hid_t prop_id = H5Pcreate(H5P_DATASET_CREATE);
+
+    /* Type of the dataset to create - this is initially the same as the type
+       in memory but may be modified by lossy compression. */
+    hid_t file_type_id = H5Tcopy(mem_type_id);
+
+    /* Set chunk size and lossy compression scheme, if any  */
+    H5Pset_chunk(prop_id, rank, chunk_dims);
+    char filter_name[32];
+    if (lossy_compression && (compression_scheme != compression_write_lossless))
+      set_hdf5_lossy_compression(&prop_id, &file_type_id, compression_scheme,
+                                 name, filter_name);
+
+    /* Set lossless compression, if any */
+    if (gzip_level > 0) {
+      H5Pset_shuffle(prop_id);
+      H5Pset_deflate(prop_id, gzip_level);
+    }
+
+    /* Create the dataset */
+    dataset_id = H5Dcreate(loc_id, name, file_type_id, file_space_id,
+                           H5P_DEFAULT, prop_id, H5P_DEFAULT);
+    if (dataset_id < 0) error("Failed to create new dataset: %s", name);
+    H5Pclose(prop_id);
+    H5Tclose(file_type_id);
+
+    /* Write unit conversion factors for this data set */
+    char buffer[FIELD_BUFFER_SIZE] = {0};
+    units_cgs_conversion_string(buffer, snapshot_units, units,
+                                scale_factor_exponent);
+    float baseUnitsExp[5];
+    units_get_base_unit_exponents_array(baseUnitsExp, units);
+    io_write_attribute_f(dataset_id, "U_M exponent", baseUnitsExp[UNIT_MASS]);
+    io_write_attribute_f(dataset_id, "U_L exponent", baseUnitsExp[UNIT_LENGTH]);
+    io_write_attribute_f(dataset_id, "U_t exponent", baseUnitsExp[UNIT_TIME]);
+    io_write_attribute_f(dataset_id, "U_I exponent",
+                         baseUnitsExp[UNIT_CURRENT]);
+    io_write_attribute_f(dataset_id, "U_T exponent",
+                         baseUnitsExp[UNIT_TEMPERATURE]);
+    io_write_attribute_f(dataset_id, "h-scale exponent", 0.f);
+    io_write_attribute_f(dataset_id, "a-scale exponent", scale_factor_exponent);
+    io_write_attribute_s(dataset_id, "Expression for physical CGS units",
+                         buffer);
+
+    /* Write the actual number this conversion factor corresponds to */
+    const double factor = units_cgs_conversion_factor(snapshot_units, units);
+    io_write_attribute_d(
+        dataset_id,
+        "Conversion factor to CGS (not including cosmological corrections)",
+        factor);
+
+    /* Note that we can't write the conversion factor including cosmological
+       corrections as an attribute because it will be different for each
+       particle. */
+
+  } else {
+
+    /* We're appending to an existing dataset */
+    dataset_id = H5Dopen(loc_id, name, H5P_DEFAULT);
+    if (dataset_id < 0) error("Failed to open existing dataset: %s", name);
+    if (H5Dset_extent(dataset_id, full_dims) < 0)
+      error("Unable to extend dataset: %s", name);
+    file_space_id = H5Dget_space(dataset_id);
+  }
+
+  /* Create memory dataspace */
+  hid_t mem_space_id = H5Screate_simple(rank, dims, NULL);
+
+  /* Select region to write in the file */
+  if (H5Sselect_hyperslab(file_space_id, H5S_SELECT_SET, offset, NULL, dims,
+                          NULL) < 0)
+    error("Failed to select region in dataset: %s", name);
+
+  /* Write the data */
+  if (H5Dwrite(dataset_id, mem_type_id, mem_space_id, file_space_id,
+               H5P_DEFAULT, data) < 0)
+    error("Failed to write dataset: %s", name);
+
+  /* Clean up */
+  H5Sclose(file_space_id);
+  H5Sclose(mem_space_id);
+  H5Dclose(dataset_id);
+}
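+
+/*
+  Illustrative call (values hypothetical): appending num_to_write float
+  3-vectors to a dataset "Coordinates" in group group_id. The scalar
+  arguments after chunk_size are lossy_compression, the compression
+  scheme, gzip_level and rank:
+
+    hsize_t dims[2] = {(hsize_t)num_to_write, 3};
+    append_dataset(snapshot_units, UNIT_CONV_LENGTH, 1.0, group_id,
+                   "Coordinates", H5T_NATIVE_FLOAT, chunk_size,
+                   0, compression_write_lossless, 0, 2, dims,
+                   num_written, buf);
+
+  On the first call (num_written == 0) the dataset is created with an
+  unlimited first dimension; later calls extend it with H5Dset_extent().
+*/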
+
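+/**
+ * @brief Open or create the HDF5 group for one particle type
+ *
+ * Also returns the number of particles of this type already written to
+ * the file and the number currently buffered and waiting to be written.
+ *
+ * @param props Properties of the lightcone
+ * @param file_id The HDF5 file to write to
+ * @param ptype The particle type index
+ * @param num_written Returns the number of particles already in the file
+ * @param num_to_write Returns the number of buffered particles
+ */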
+hid_t init_write(struct lightcone_props *props, hid_t file_id, int ptype,
+                 size_t *num_written, size_t *num_to_write) {
+
+  /* Number of particles already written to the file */
+  *num_written = props->num_particles_written_to_file[ptype];
+
+  /* Number of buffered particles */
+  *num_to_write = particle_buffer_num_elements(&props->buffer[ptype]);
+
+  /* Create or open the HDF5 group for this particle type */
+  const char *name = part_type_names[ptype];
+  hid_t group_id;
+  if (*num_written > 0) {
+    group_id = H5Gopen(file_id, name, H5P_DEFAULT);
+    if (group_id < 0) error("Failed to open existing group: %s", name);
+  } else {
+    group_id = H5Gcreate(file_id, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    if (group_id < 0) error("Failed to create new group: %s", name);
+  }
+  return group_id;
+}
+
+/**
+ * @brief Append buffered particles to the output file.
+ */
+void lightcone_write_particles(struct lightcone_props *props,
+                               const struct unit_system *internal_units,
+                               const struct unit_system *snapshot_units,
+                               int ptype, hid_t file_id) {
+
+  if (props->particle_fields[ptype].num_fields > 0) {
+
+    /* Open group and get number and offset of particles to write */
+    size_t num_written, num_to_write;
+    hid_t group_id =
+        init_write(props, file_id, ptype, &num_written, &num_to_write);
+
+    /* Get size of the data struct for this type */
+    const size_t data_struct_size = lightcone_io_struct_size(ptype);
+
+    /* Loop over output fields */
+    struct lightcone_io_field *f = props->particle_fields[ptype].first;
+    while (f) {
+
+      /* Find output field info */
+      hid_t dtype_id = io_hdf5_type(f->type);     /* HDF5 data type */
+      size_t type_size = io_sizeof_type(f->type); /* Bytes per value */
+      const size_t field_size =
+          f->dimension * type_size; /* Bytes per particle */
+      const enum lossy_compression_schemes compression_scheme =
+          f->compression; /* Compression scheme */
+
+      /* Find unit conversion factor for this quantity */
+      const double conversion_factor =
+          units_conversion_factor(internal_units, snapshot_units, f->units);
+
+      /* Allocate output buffer */
+      char *outbuf = malloc(num_to_write * field_size);
+      if (!outbuf) error("Unable to allocate lightcone output buffer");
+      char *outptr = outbuf;
+
+      /* Loop over blocks of buffered particles and copy to output array */
+      size_t num_elements;
+      struct particle_buffer_block *block = NULL;
+      char *block_data;
+      do {
+        particle_buffer_iterate(&props->buffer[ptype], &block, &num_elements,
+                                (void **)&block_data);
+        for (size_t i = 0; i < num_elements; i += 1) {
+          char *src = block_data + i * data_struct_size + f->offset;
+          char *dest = outptr;
+          memcpy(dest, src, field_size);
+          outptr += field_size;
+        }
+      } while (block);
+
+      /* Convert units if necessary */
+      if (conversion_factor != 1.0) {
+        const size_t nr_values = num_to_write * f->dimension;
+        switch (f->type) {
+          case INT: {
+            int *values = (int *)outbuf;
+            for (size_t i = 0; i < nr_values; i += 1)
+              values[i] *= conversion_factor;
+          } break;
+          case LONGLONG: {
+            long long *values = (long long *)outbuf;
+            for (size_t i = 0; i < nr_values; i += 1)
+              values[i] *= conversion_factor;
+          } break;
+          case FLOAT: {
+            float *values = (float *)outbuf;
+            for (size_t i = 0; i < nr_values; i += 1)
+              values[i] *= conversion_factor;
+          } break;
+          case DOUBLE: {
+            double *values = (double *)outbuf;
+            for (size_t i = 0; i < nr_values; i += 1)
+              values[i] *= conversion_factor;
+          } break;
+          default:
+            error("Unhandled data type");
+        }
+      }
+
+      /* Write the data */
+      const hsize_t chunk_size = props->hdf5_chunk_size;
+      hsize_t dims[] = {(hsize_t)num_to_write, (hsize_t)f->dimension};
+      int rank = 1;
+      if (f->dimension > 1) rank = 2;
+      append_dataset(snapshot_units, f->units, f->scale_factor_exponent,
+                     group_id, f->name, dtype_id, chunk_size,
+                     props->particles_lossy_compression, compression_scheme,
+                     props->particles_gzip_level, rank, dims, num_written,
+                     outbuf);
+
+      /* Free the output buffer */
+      free(outbuf);
+
+      /* Advance to next output field */
+      f = f->next;
+    }
+
+    /* All fields have been written, so close the particle type group */
+    H5Gclose(group_id);
+  }
+}
diff --git a/src/lightcone/lightcone_particle_io.h b/src/lightcone/lightcone_particle_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce21a279632e3a0243bcac9a3f513af87dd6e538
--- /dev/null
+++ b/src/lightcone/lightcone_particle_io.h
@@ -0,0 +1,278 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_LIGHTCONE_PARTICLE_IO_H
+#define SWIFT_LIGHTCONE_PARTICLE_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <hdf5.h>
+#include <string.h>
+
+/* Local headers. */
+#include "chemistry.h"
+#include "common_io.h"
+#include "error.h"
+#include "io_compression.h"
+#include "part_type.h"
+#include "stars.h"
+#include "units.h"
+
+/* Forward declarations */
+struct gpart;
+struct part;
+struct xpart;
+struct spart;
+struct bpart;
+struct lightcone_props;
+struct engine;
+
+/*
+ * Struct to describe an output field in the lightcone
+ */
+struct lightcone_io_field {
+
+  /* Name */
+  char name[FIELD_BUFFER_SIZE];
+
+  /* Type of the field */
+  enum IO_DATA_TYPE type;
+
+  /* Dimension (1D, 3D, ...) */
+  int dimension;
+
+  /* Offset to this field in the data struct  */
+  size_t offset;
+
+  /* Units of this quantity */
+  enum unit_conversion_factor units;
+
+  /* Scale-factor exponent to apply for unit conversion to physical */
+  float scale_factor_exponent;
+
+  /* Lossy compression to use for this field */
+  enum lossy_compression_schemes compression;
+
+  /* Pointer to the next field */
+  struct lightcone_io_field *next;
+};
+
+/*
+ * Struct to store a linked list of lightcone_io_field structs
+ */
+struct lightcone_io_field_list {
+
+  /* Pointer to the first field */
+  struct lightcone_io_field *first;
+
+  /* Pointer to the last field */
+  struct lightcone_io_field *last;
+
+  /* Number of fields */
+  int num_fields;
+};
+
+/**
+ * @brief Gas particle data for lightcone output
+ */
+struct lightcone_gas_data {
+  long long id;
+  double x[3];
+  float vel[3];
+  float mass;
+  float a;
+  float h;
+  float rho;
+  float temperature;
+#ifdef CHEMISTRY_EAGLE
+  float smoothed_metal_mass_fraction[chemistry_element_count];
+  float metal_mass_fraction_total;
+  float smoothed_metal_mass_fraction_total;
+#endif
+#ifdef COOLING_COLIBRE
+  double electron_density;
+  double ycompton;
+#endif
+#ifdef WITH_FOF
+  long long group_id;
+#endif
+#if defined(TRACERS_EAGLE) || defined(TRACERS_FLAMINGO)
+  float last_AGN_injection_scale_factor;
+#endif
+#ifdef STAR_FORMATION_EAGLE
+  float sfr;
+#endif
+};
+
+int lightcone_store_gas(const struct engine *e, struct lightcone_props *props,
+                        const struct gpart *gp, const struct part *p,
+                        const struct xpart *xp, const double a_cross,
+                        const double x_cross[3],
+                        struct lightcone_gas_data *data);
+
+/**
+ * @brief Dark matter particle data for lightcone output
+ */
+struct lightcone_dark_matter_data {
+  long long id;
+  double x[3];
+  float vel[3];
+  float mass;
+  float a;
+};
+
+int lightcone_store_dark_matter(const struct engine *e,
+                                struct lightcone_props *props,
+                                const struct gpart *gp, const double a_cross,
+                                const double x_cross[3],
+                                struct lightcone_dark_matter_data *data);
+
+/**
+ * @brief Star particle data for lightcone output
+ */
+struct lightcone_stars_data {
+  long long id;
+  double x[3];
+  float vel[3];
+  float mass;
+  float a;
+#ifdef WITH_FOF
+  long long group_id;
+#endif
+#ifdef STARS_EAGLE
+  float mass_init;
+  float birth_scale_factor;
+  float birth_density;
+  float luminosities[luminosity_bands_count];
+#endif
+#ifdef CHEMISTRY_EAGLE
+  float smoothed_metal_mass_fraction[chemistry_element_count];
+  float metal_mass_fraction_total;
+  float smoothed_metal_mass_fraction_total;
+#endif
+#if defined(TRACERS_EAGLE) || defined(TRACERS_FLAMINGO)
+  float last_AGN_injection_scale_factor;
+#endif
+};
+
+int lightcone_store_stars(const struct engine *e, struct lightcone_props *props,
+                          const struct gpart *gp, const struct spart *sp,
+                          const double a_cross, const double x_cross[3],
+                          struct lightcone_stars_data *data);
+
+/**
+ * @brief Black hole particle data for lightcone output
+ */
+struct lightcone_black_hole_data {
+  long long id;
+  double x[3];
+  float vel[3];
+  float mass;
+  float a;
+#ifdef BLACK_HOLES_EAGLE
+  float subgrid_mass;
+  float formation_scale_factor;
+  float accretion_rate;
+  float total_accreted_mass;
+  float last_minor_merger_scale_factor;
+  float last_major_merger_scale_factor;
+  int number_of_mergers;
+  float last_AGN_event_scale_factor;
+  int AGN_number_of_AGN_events;
+  int AGN_number_of_energy_injections;
+  float last_high_Eddington_fraction_scale_factor;
+  int cumulative_number_seeds;
+#ifdef WITH_FOF
+  long long group_id;
+#endif
+#endif
+};
+
+int lightcone_store_black_hole(const struct engine *e,
+                               struct lightcone_props *props,
+                               const struct gpart *gp, const struct bpart *bp,
+                               const double a_cross, const double x_cross[3],
+                               struct lightcone_black_hole_data *data);
+
+/**
+ * @brief Neutrino particle data for lightcone output
+ */
+struct lightcone_neutrino_data {
+  long long id;
+  double x[3];
+  float vel[3];
+  float mass;
+  float a;
+};
+
+int lightcone_store_neutrino(const struct engine *e,
+                             struct lightcone_props *props,
+                             const struct gpart *gp, const double a_cross,
+                             const double x_cross[3],
+                             struct lightcone_neutrino_data *data);
+
+void lightcone_write_particles(struct lightcone_props *props,
+                               const struct unit_system *internal_units,
+                               const struct unit_system *snapshot_units,
+                               int ptype, hid_t file_id);
+
+inline static size_t lightcone_io_struct_size(int ptype) {
+  switch (ptype) {
+    case swift_type_dark_matter:
+    case swift_type_dark_matter_background:
+      return sizeof(struct lightcone_dark_matter_data);
+    case swift_type_gas:
+      return sizeof(struct lightcone_gas_data);
+    case swift_type_stars:
+      return sizeof(struct lightcone_stars_data);
+    case swift_type_black_hole:
+      return sizeof(struct lightcone_black_hole_data);
+    case swift_type_neutrino:
+      return sizeof(struct lightcone_neutrino_data);
+    default:
+      error("Unhandled particle type");
+      return 0;
+  }
+}
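+
+/*
+  Example (illustrative): sizing a buffer for n particles of a given type,
+  where n and ptype are hypothetical local variables:
+
+    char *buf = malloc(n * lightcone_io_struct_size(ptype));
+
+  The sizes differ between particle types because the optional members
+  above are only compiled in for the matching subgrid models.
+*/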
+
+void lightcone_io_field_list_init(struct lightcone_io_field_list *list);
+void lightcone_io_field_list_clean(struct lightcone_io_field_list *list);
+void lightcone_io_field_list_append(struct lightcone_io_field_list *list,
+                                    char *name, enum IO_DATA_TYPE type,
+                                    int dimension, size_t offset,
+                                    enum unit_conversion_factor units,
+                                    float scale_factor_exponent,
+                                    char *compression);
+
+void lightcone_io_append_gas_output_fields(
+    struct lightcone_io_field_list *list);
+void lightcone_io_append_dark_matter_output_fields(
+    struct lightcone_io_field_list *list);
+void lightcone_io_append_dark_matter_background_output_fields(
+    struct lightcone_io_field_list *list);
+void lightcone_io_append_stars_output_fields(
+    struct lightcone_io_field_list *list);
+void lightcone_io_append_black_hole_output_fields(
+    struct lightcone_io_field_list *list);
+void lightcone_io_append_neutrino_output_fields(
+    struct lightcone_io_field_list *list);
+
+#endif
diff --git a/src/lightcone/lightcone_replications.c b/src/lightcone/lightcone_replications.c
new file mode 100644
index 0000000000000000000000000000000000000000..f65044814c71fb72298bf3da83404f8ac9c8ef09
--- /dev/null
+++ b/src/lightcone/lightcone_replications.c
@@ -0,0 +1,286 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#include "lightcone/lightcone_replications.h"
+
+#include "align.h"
+#include "cell.h"
+#include "error.h"
+#include "memuse.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/**
+ * @brief Comparison function for sorting replications by minimum distance
+ *
+ * @param a The first replication
+ * @param b The second replication
+ */
+static int compare_replication_rmin(const void *a, const void *b) {
+  const struct replication *rep_a = (struct replication *)a;
+  const struct replication *rep_b = (struct replication *)b;
+  if (rep_a->rmin2 < rep_b->rmin2)
+    return -1;
+  else if (rep_a->rmin2 > rep_b->rmin2)
+    return 1;
+  else
+    return 0;
+}
+
+/**
+ * @brief Make a list of periodic box replications which overlap
+ *        the specified distance range from an observer.
+ *
+ * @param replication_list Pointer to the struct to initialise.
+ * @param boxsize Size of the cubic simulation box.
+ * @param cell_width Width of the top-level cells: distances are padded by
+ *        half a cell width so that particles drifting out of their cells
+ *        are not missed.
+ * @param observer_position Location of the observer.
+ * @param lightcone_rmin Minimum distance from the observer.
+ * @param lightcone_rmax Maximum distance from the observer.
+ */
+
+void replication_list_init(struct replication_list *replication_list,
+                           double boxsize, double cell_width,
+                           double observer_position[3], double lightcone_rmin,
+                           double lightcone_rmax) {
+
+  /* Find range of replications to examine in each dimension */
+  int rep_min[3];
+  int rep_max[3];
+  for (int i = 0; i < 3; i += 1) {
+    rep_min[i] = (int)floor(
+        (observer_position[i] - lightcone_rmax - 0.5 * cell_width) / boxsize);
+    rep_max[i] = (int)floor(
+        (observer_position[i] + lightcone_rmax + 0.5 * cell_width) / boxsize);
+  }
+
+  /* On first pass just count replications */
+  for (int ipass = 0; ipass < 2; ipass += 1) {
+
+    replication_list->nrep = 0;
+
+    /* Loop over periodic replications */
+    for (int i = rep_min[0]; i <= rep_max[0]; i += 1) {
+      for (int j = rep_min[1]; j <= rep_max[1]; j += 1) {
+        for (int k = rep_min[2]; k <= rep_max[2]; k += 1) {
+
+          /* Find centre of this replication relative to observer */
+          double cx = boxsize * i + 0.5 * boxsize - observer_position[0];
+          double cy = boxsize * j + 0.5 * boxsize - observer_position[1];
+          double cz = boxsize * k + 0.5 * boxsize - observer_position[2];
+
+          /* Find distance to closest possible particle in this replication  */
+          double dx, dy, dz;
+          dx = fabs(cx) - 0.5 * boxsize - 0.5 * cell_width;
+          if (dx < 0) dx = 0;
+          dy = fabs(cy) - 0.5 * boxsize - 0.5 * cell_width;
+          if (dy < 0) dy = 0;
+          dz = fabs(cz) - 0.5 * boxsize - 0.5 * cell_width;
+          if (dz < 0) dz = 0;
+          double rep_rmin = sqrt(dx * dx + dy * dy + dz * dz);
+
+          /* Find distance to most distant possible particle in this replication
+           */
+          dx = fabs(cx) + 0.5 * boxsize + 0.5 * cell_width;
+          dy = fabs(cy) + 0.5 * boxsize + 0.5 * cell_width;
+          dz = fabs(cz) + 0.5 * boxsize + 0.5 * cell_width;
+          double rep_rmax = sqrt(dx * dx + dy * dy + dz * dz);
+
+          /* Flag if any point in this replication could be in the lightcone */
+          int in_lightcone = 1;
+
+          /* Check distance limits */
+          if (rep_rmax < lightcone_rmin || rep_rmin > lightcone_rmax)
+            in_lightcone = 0;
+
+          if (in_lightcone) {
+            /* Store replications on second pass */
+            if (ipass == 1) {
+              /* Get a pointer to the next replication */
+              const int nrep = replication_list->nrep;
+              struct replication *rep = replication_list->replication + nrep;
+              /* Store info about this replication */
+              rep->rmin2 = pow(rep_rmin, 2.0);
+              rep->rmax2 = pow(rep_rmax, 2.0);
+              rep->coord[0] = i * boxsize;
+              rep->coord[1] = j * boxsize;
+              rep->coord[2] = k * boxsize;
+            }
+            replication_list->nrep += 1;
+          }
+        } /* Next replication in z */
+      }   /* Next replication in y */
+    }     /* Next replication in x */
+
+    /* Allocate storage after first pass */
+    if (ipass == 0) {
+      const int nrep = replication_list->nrep;
+      if (swift_memalign(
+              "lightcone_replications", (void **)&replication_list->replication,
+              SWIFT_STRUCT_ALIGNMENT, sizeof(struct replication) * nrep) != 0) {
+        error("Failed to allocate lightcone replication list");
+      }
+    }
+  } /* Next pass */
+
+  /* Now sort replications by minimum distance */
+  qsort(replication_list->replication, (size_t)replication_list->nrep,
+        sizeof(struct replication), compare_replication_rmin);
+
+  /* Record the distance limits we used - may need these to refine the list
+   * later */
+  replication_list->lightcone_rmin = lightcone_rmin;
+  replication_list->lightcone_rmax = lightcone_rmax;
+}
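+
+/*
+  Worked example (illustrative numbers): for a box of side 100 with the
+  observer at its centre, lightcone_rmax = 150 and cell_width = 10 give
+  rep_min = floor((50 - 150 - 5) / 100) = -2 and
+  rep_max = floor((50 + 150 + 5) / 100) = 2 in each dimension, i.e. a
+  5x5x5 block of 125 candidate replications, which the rmin/rmax tests
+  above then prune.
+*/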
+
+/**
+ * @brief Make an empty replication list
+ *
+ * @param replication_list Pointer to the struct to initialise.
+ */
+void replication_list_init_empty(struct replication_list *replication_list) {
+
+  const int nrep = 0;
+  if (swift_memalign(
+          "lightcone_replications", (void **)&replication_list->replication,
+          SWIFT_STRUCT_ALIGNMENT, sizeof(struct replication) * nrep) != 0) {
+    error("Failed to allocate lightcone replication list");
+  }
+  replication_list->lightcone_rmin = 0.0;
+  replication_list->lightcone_rmax = 0.0;
+  replication_list->nrep = 0;
+}
+
+/**
+ * @brief Deallocate a replication list
+ *
+ * @param replication_list Pointer to the struct to deallocate.
+ */
+void replication_list_clean(struct replication_list *replication_list) {
+  swift_free("lightcone_replications", replication_list->replication);
+  replication_list->replication = NULL;
+  replication_list->nrep = 0;
+}
+
+/**
+ * @brief Write a replication list to a file as text
+ *
+ * @param replication_list The replication list
+ * @param fd The file to write to
+ */
+void replication_list_write(struct replication_list *replication_list,
+                            FILE *fd) {
+
+  for (int i = 0; i < replication_list->nrep; i += 1) {
+    fprintf(fd, "%e, %e, %e, %e, %e\n",
+            replication_list->replication[i].coord[0],
+            replication_list->replication[i].coord[1],
+            replication_list->replication[i].coord[2],
+            sqrt(replication_list->replication[i].rmin2),
+            sqrt(replication_list->replication[i].rmax2));
+  }
+}
+
+/**
+ * @brief Determine the subset of replications which overlap a #cell
+ *
+ * @param rep_in The input replication list
+ * @param cell The input cell
+ * @param observer_position The location of the observer in the box
+ * @param rep_out The output replication list
+ *
+ * Initializes rep_out, which must then be freed with
+ * replication_list_clean().
+ */
+void replication_list_subset_for_cell(const struct replication_list *rep_in,
+                                      const struct cell *cell,
+                                      const double observer_position[3],
+                                      struct replication_list *rep_out) {
+
+  /* Find centre coordinates of this cell */
+  const double cell_centre[] = {cell->loc[0] + 0.5 * cell->width[0],
+                                cell->loc[1] + 0.5 * cell->width[1],
+                                cell->loc[2] + 0.5 * cell->width[2]};
+
+  /* Find 'effective' width of this cell - particles can wander out of the cell
+   * by up to half a cell width */
+  const double cell_eff_width[] = {2.0 * cell->width[0], 2.0 * cell->width[1],
+                                   2.0 * cell->width[2]};
+
+  /* Allocate array of replications for the new list */
+  const int nrep_max = rep_in->nrep;
+  if (swift_memalign("lightcone_replications", (void **)&rep_out->replication,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     sizeof(struct replication) * nrep_max) != 0) {
+    error("Failed to allocate pruned lightcone replication list");
+  }
+
+  /* Get distance limits (squared) used to make the input list */
+  const double lightcone_rmin2 = pow(rep_in->lightcone_rmin, 2.0);
+  const double lightcone_rmax2 = pow(rep_in->lightcone_rmax, 2.0);
+
+  /* Loop over all replications */
+  rep_out->nrep = 0;
+  for (int i = 0; i < nrep_max; i += 1) {
+
+    /* Get a pointer to this input replication */
+    const struct replication *rep = rep_in->replication + i;
+
+    /* Find coordinates of centre of this replication of the cell relative to
+     * the observer */
+    double cell_rep_centre[3];
+    for (int j = 0; j < 3; j += 1) {
+      cell_rep_centre[j] =
+          rep->coord[j] + cell_centre[j] - observer_position[j];
+    }
+
+    /* Compute minimum possible distance squared from observer to this
+     * replication of this cell */
+    double cell_rmin2 = 0.0;
+    for (int j = 0; j < 3; j += 1) {
+      double dx = fabs(cell_rep_centre[j]) - 0.5 * cell_eff_width[j];
+      if (dx < 0.0) dx = 0.0;
+      cell_rmin2 += dx * dx;
+    }
+
+    /* Compute maximum possible distance squared from observer to this
+     * replication of this cell */
+    double cell_rmax2 = 0.0;
+    for (int j = 0; j < 3; j += 1) {
+      double dx = fabs(cell_rep_centre[j]) + 0.5 * cell_eff_width[j];
+      cell_rmax2 += dx * dx;
+    }
+
+    /* Decide whether this cell could contribute to this replication */
+    if (cell_rmax2 >= lightcone_rmin2 && cell_rmin2 <= lightcone_rmax2) {
+      memcpy(rep_out->replication + rep_out->nrep, rep,
+             sizeof(struct replication));
+      rep_out->nrep += 1;
+    }
+    /* Next input replication */
+  }
+
+  /* Copy the distance limits from the input list (not currently used) */
+  rep_out->lightcone_rmin = rep_in->lightcone_rmin;
+  rep_out->lightcone_rmax = rep_in->lightcone_rmax;
+}
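+
+/* A usage sketch (illustrative, names hypothetical): restrict a full
+   replication list to the replications relevant for a single cell:
+
+     struct replication_list cell_list;
+     replication_list_subset_for_cell(&full_list, c, observer, &cell_list);
+     // ... use cell_list ...
+     replication_list_clean(&cell_list);
+*/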
diff --git a/src/lightcone/lightcone_replications.h b/src/lightcone/lightcone_replications.h
new file mode 100644
index 0000000000000000000000000000000000000000..e65cbb47e6597d86df79c6f06414978a6ec5eedb
--- /dev/null
+++ b/src/lightcone/lightcone_replications.h
@@ -0,0 +1,75 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_PERIODIC_REPLICATIONS_H
+#define SWIFT_PERIODIC_REPLICATIONS_H
+
+#include <stdio.h>
+
+/* Forward declarations */
+struct cell;
+
+/* Struct to store information about one periodic replication of the simulation
+ * box */
+struct replication {
+
+  /* Minimum distance squared from the observer to any point in the replication
+   */
+  double rmin2;
+
+  /* Maximum distance squared from the observer to any point in the replication
+   */
+  double rmax2;
+
+  /* Coordinates of the replication */
+  double coord[3];
+};
+
+/* Struct to store an array of periodic replications  */
+struct replication_list {
+
+  /* Number of replications */
+  int nrep;
+
+  /* Distance limits used to make this replication list */
+  double lightcone_rmin;
+  double lightcone_rmax;
+
+  /* Array of replications with nrep elements */
+  struct replication *replication;
+};
+
+void replication_list_init(struct replication_list *replication_list,
+                           double boxsize, double cell_width,
+                           double observer_position[3], double lightcone_rmin,
+                           double lightcone_rmax);
+
+void replication_list_init_empty(struct replication_list *replication_list);
+
+void replication_list_clean(struct replication_list *replication_list);
+
+void replication_list_write(struct replication_list *replication_list,
+                            FILE *fd);
+
+void replication_list_subset_for_cell(const struct replication_list *rep_in,
+                                      const struct cell *cell,
+                                      const double observer_position[3],
+                                      struct replication_list *rep_out);
+
+#endif
diff --git a/src/lightcone/lightcone_shell.c b/src/lightcone/lightcone_shell.c
new file mode 100644
index 0000000000000000000000000000000000000000..969334f8960045513e14d9b1d8a45f1301ba0152
--- /dev/null
+++ b/src/lightcone/lightcone_shell.c
@@ -0,0 +1,1037 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <stdio.h>
+#include <stdlib.h>
+
+/* HEALPix C API */
+#ifdef HAVE_CHEALPIX
+#include <chealpix.h>
+#endif
+
+/* Local headers */
+#include "cosmology.h"
+#include "engine.h"
+#include "exchange_structs.h"
+#include "lightcone/healpix_util.h"
+#include "hydro.h"
+
+/* This object's header. */
+#include "lightcone/lightcone_shell.h"
+
+/**
+ * @brief Read in shell radii for lightcone healpix maps
+ *
+ * Allocates the output array, shell_out.
+ *
+ * @param cosmo the #cosmology structure
+ * @param radius_file name of the file with shell radii
+ * @param nr_shells returns the number of shells
+ * @param shell_out returns the array of shells
+ */
+static void read_shell_radii(const struct cosmology *cosmo,
+                             const char *radius_file, int *nr_shells,
+                             struct lightcone_shell **shell_out) {
+
+  /* Allow shell radii to be specified in several different units */
+  enum shell_units {
+    not_known = 0,
+    comoving_distance = 1,
+    redshift = 2,
+    expansion_factor = 3
+  };
+
+  FILE *fd = fopen(radius_file, "r");
+  if (!fd) error("Failed to open lightcone radius file %s", radius_file);
+
+  /* Count number of non-zero length lines */
+  size_t len = 0;
+  char *line = NULL;
+  int nr_lines = 0;
+  while (getline(&line, &len, fd) != -1 && strlen(line) > 0) nr_lines += 1;
+  rewind(fd);
+
+  /* Allocate output array */
+  struct lightcone_shell *shell =
+      malloc(sizeof(struct lightcone_shell) * (nr_lines - 1));
+
+  /* Check header */
+  enum shell_units units = not_known;
+  if (getline(&line, &len, fd) != -1) {
+    if (strcmp(line,
+               "# Minimum comoving distance, Maximum comoving distance\n") ==
+        0) {
+      units = comoving_distance;
+    } else if (strcmp(line, "# Minimum redshift, Maximum redshift\n") == 0) {
+      units = redshift;
+    } else if (strcmp(
+                   line,
+                   "# Maximum expansion factor, Minimum expansion factor\n") ==
+               0) {
+      units = expansion_factor;
+    } else {
+      error("Unrecognized header in radius file");
+    }
+  } else {
+    error("Unable to read header in radius file");
+  }
+
+  /* Read lines */
+  for (int i = 0; i < nr_lines - 1; i += 1) {
+    if (fscanf(fd, "%le, %le\n", &shell[i].rmin, &shell[i].rmax) != 2)
+      error("Failed to read line from radius file");
+  }
+  fclose(fd);
+  *nr_shells = nr_lines - 1;
+  const int nr = *nr_shells;
+  free(line);
+
+  /* Convert units */
+  switch (units) {
+    case comoving_distance:
+      /* Input is already comoving distance */
+      break;
+    case redshift:
+      /* Convert redshift to comoving distance */
+      for (int i = 0; i < nr; i += 1) {
+        const double a_at_rmin = 1.0 / (1.0 + shell[i].rmin);
+        shell[i].rmin = cosmology_get_comoving_distance(cosmo, a_at_rmin);
+        const double a_at_rmax = 1.0 / (1.0 + shell[i].rmax);
+        shell[i].rmax = cosmology_get_comoving_distance(cosmo, a_at_rmax);
+      }
+      break;
+    case expansion_factor:
+      /* Convert expansion factor to comoving distance */
+      for (int i = 0; i < nr; i += 1) {
+        shell[i].rmin = cosmology_get_comoving_distance(cosmo, shell[i].rmin);
+        shell[i].rmax = cosmology_get_comoving_distance(cosmo, shell[i].rmax);
+      }
+      break;
+    default:
+      error("unknown unit type");
+  }
+
+  /* Do some sanity checks on the radii */
+  /* All values should be monotonically increasing */
+  for (int i = 1; i < nr; i += 1) {
+    if (shell[i].rmin <= shell[i - 1].rmin)
+      error("Minimum radii should be monotonically increasing");
+    if (shell[i].rmax <= shell[i - 1].rmax)
+      error("Maximum radii should be monotonically increasing");
+  }
+
+  /* Maximum radius should be greater than minimum */
+  for (int i = 0; i < nr; i += 1)
+    if (shell[i].rmin >= shell[i].rmax)
+      error("Maximum radius should be greater than minimum");
+
+  /* Shells should not overlap */
+  for (int i = 1; i < nr; i += 1)
+    if (shell[i].rmin < shell[i - 1].rmax) error("Shells should not overlap");
+
+  /* Return pointer to array */
+  *shell_out = shell;
+}
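+
+/* An example radius file accepted above (values illustrative):
+
+     # Minimum redshift, Maximum redshift
+     0.0e+00, 5.0e-02
+     5.0e-02, 1.0e-01
+
+   Each line after the header gives the inner and outer edge of one shell,
+   parsed with the format "%le, %le". */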
+
+/**
+ * @brief Creates an array of struct lightcone_shell
+ *
+ * Returns a pointer to the newly allocated array. Each shell
+ * contains one #lightcone_map for each healpix map to be produced
+ * by this lightcone.
+ *
+ * @param cosmo the #cosmology structure
+ * @param radius_file file with the shell radii
+ * @param nr_maps number of lightcone_maps per shell
+ * @param map_type specifies the types of healpix maps to make
+ * @param nside healpix resolution parameter
+ * @param total_nr_pix number of pixels in each map
+ * @param part_type specifies which particle types update which maps
+ * @param elements_per_block size of blocks used in the update buffers
+ * @param nr_shells_out returns the number of lightcone shells in the array
+ *
+ */
+struct lightcone_shell *lightcone_shell_array_init(
+    const struct cosmology *cosmo, const char *radius_file, int nr_maps,
+    struct lightcone_map_type *map_type, int nside, pixel_index_t total_nr_pix,
+    struct lightcone_particle_type *part_type, size_t elements_per_block,
+    int *nr_shells_out) {
+
+  /* Read in the shell radii */
+  int nr_shells = 0;
+  struct lightcone_shell *shell = NULL;
+  if (engine_rank == 0)
+    read_shell_radii(cosmo, radius_file, &nr_shells, &shell);
+#ifdef WITH_MPI
+  MPI_Bcast(&nr_shells, 1, MPI_INT, 0, MPI_COMM_WORLD);
+  if (engine_rank != 0)
+    shell = malloc(sizeof(struct lightcone_shell) * nr_shells);
+  MPI_Bcast(shell, sizeof(struct lightcone_shell) * nr_shells, MPI_BYTE, 0,
+            MPI_COMM_WORLD);
+#endif
+
+  /* Compute expansion factor at shell edges */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    /* Inner edge of the shell */
+    shell[shell_nr].amax = cosmology_scale_factor_at_comoving_distance(
+        cosmo, shell[shell_nr].rmin);
+    /* Outer edge of the shell */
+    shell[shell_nr].amin = cosmology_scale_factor_at_comoving_distance(
+        cosmo, shell[shell_nr].rmax);
+  }
+
+  /* Set initial state of the lightcone shells */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1)
+    shell[shell_nr].state = shell_uninitialized;
+
+  /* Allocate lightcone_map structs for each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    shell[shell_nr].nr_maps = nr_maps;
+    shell[shell_nr].map = malloc(nr_maps * sizeof(struct lightcone_map));
+  }
+
+  int comm_rank = 0, comm_size = 1;
+#ifdef WITH_MPI
+  MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
+#endif
+
+  /* Determine how healpix maps will be distributed between MPI ranks */
+  const pixel_index_t pix_per_rank = total_nr_pix / comm_size;
+  if (pix_per_rank == 0) error("Must have healpix npix > number of MPI ranks!");
+  const pixel_index_t local_pix_offset = comm_rank * pix_per_rank;
+  pixel_index_t local_nr_pix;
+  if (comm_rank < comm_size - 1)
+    local_nr_pix = pix_per_rank;
+  else
+    local_nr_pix = total_nr_pix - (comm_size - 1) * pix_per_rank;
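+
+  /* For example (illustrative numbers): nside = 256 gives
+     total_nr_pix = 12 * 256^2 = 786432 pixels; with comm_size = 5,
+     pix_per_rank = 157286, so ranks 0-3 store 157286 pixels each and
+     rank 4 stores the remaining 157288. */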
+
+  /* Store this information in the shells */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    shell[shell_nr].nside = nside;
+    shell[shell_nr].total_nr_pix = total_nr_pix;
+    shell[shell_nr].pix_per_rank = pix_per_rank;
+    shell[shell_nr].local_nr_pix = local_nr_pix;
+    shell[shell_nr].local_pix_offset = local_pix_offset;
+  }
+
+  /* Initialize lightcone_maps for each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      lightcone_map_init(&shell[shell_nr].map[map_nr], nside, total_nr_pix,
+                         pix_per_rank, local_nr_pix, local_pix_offset,
+                         shell[shell_nr].rmin, shell[shell_nr].rmax,
+                         map_type[map_nr]);
+    }
+  }
+
+  /* Initialize data buffers for map updates - one per particle type per shell
+   */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      particle_buffer_init(&shell[shell_nr].buffer[ptype],
+                           part_type[ptype].buffer_element_size,
+                           elements_per_block, "lightcone_map_updates");
+    }
+  }
+
+  /* Return the array of shells */
+  *nr_shells_out = nr_shells;
+  return shell;
+}
+
+/**
+ * @brief Free an array of struct lightcone_shell
+ *
+ * This also cleans up the lightcone_maps in the shell and the
+ * update buffers.
+ *
+ * @param shell pointer to the array of lightcone_shells
+ * @param nr_shells number of shells in the array
+ */
+void lightcone_shell_array_free(struct lightcone_shell *shell, int nr_shells) {
+
+  /* Free the lightcone healpix maps for each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    const int nr_maps = shell[shell_nr].nr_maps;
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      lightcone_map_clean(&shell[shell_nr].map[map_nr]);
+    }
+  }
+
+  /* Free the arrays of lightcone_map structs for each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    free(shell[shell_nr].map);
+  }
+
+  /* Free the buffers associated with each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      particle_buffer_free(&shell[shell_nr].buffer[ptype]);
+    }
+  }
+
+  /* Free the array of shells */
+  free(shell);
+}
+
+/**
+ * @brief Dump the shell array to a restart file
+ *
+ * @param shell pointer to the array of lightcone_shells
+ * @param nr_shells number of shells in the array
+ * @param stream the output stream to write to
+ */
+void lightcone_shell_array_dump(const struct lightcone_shell *shell,
+                                int nr_shells, FILE *stream) {
+
+  /* Dump the array of shell structs  */
+  restart_write_blocks((void *)shell, sizeof(struct lightcone_shell), nr_shells,
+                       stream, "lightcone_shells", "lightcone_shells");
+
+  /* Dump the lightcone maps associated with each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    const int nr_maps = shell[shell_nr].nr_maps;
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      lightcone_map_struct_dump(&shell[shell_nr].map[map_nr], stream);
+    }
+  }
+}
+
+/**
+ * @brief Restore the shell array from a restart file
+ *
+ * @param stream the restart stream to read from
+ * @param nr_shells number of shells in the array
+ * @param part_type specifies which particle types update which maps
+ * @param elements_per_block size of blocks used in the update buffers
+ *
+ */
+struct lightcone_shell *lightcone_shell_array_restore(
+    FILE *stream, int nr_shells, struct lightcone_particle_type *part_type,
+    size_t elements_per_block) {
+
+  /* Restore the array of lightcone_shell structs */
+  struct lightcone_shell *shell =
+      malloc(sizeof(struct lightcone_shell) * nr_shells);
+  restart_read_blocks((void *)shell, sizeof(struct lightcone_shell), nr_shells,
+                      stream, NULL, "lightcone_shells");
+
+  /* Restore the lightcone maps associated with each shell */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    const int nr_maps = shell[shell_nr].nr_maps;
+    shell[shell_nr].map = malloc(sizeof(struct lightcone_map) * nr_maps);
+    for (int map_nr = 0; map_nr < nr_maps; map_nr += 1) {
+      lightcone_map_struct_restore(&shell[shell_nr].map[map_nr], stream);
+    }
+  }
+
+  /* Initialise the map update buffers */
+  for (int shell_nr = 0; shell_nr < nr_shells; shell_nr += 1) {
+    for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+      particle_buffer_init(&shell[shell_nr].buffer[ptype],
+                           part_type[ptype].buffer_element_size,
+                           elements_per_block, "lightcone_map_updates");
+    }
+  }
+
+  return shell;
+}
+
+/* Data shared by the mapper functions which update the healpix maps */
+struct healpix_smoothing_mapper_data {
+
+  /*! MPI rank of this process and size of the communicator */
+  int comm_rank, comm_size;
+
+  /*! Pointer to the lightcone shell we're updating */
+  struct lightcone_shell *shell;
+
+  /*! Information about the particle type we're updating */
+  struct lightcone_particle_type *part_type;
+
+  /*! Pointer to the send buffer for communication */
+  union lightcone_map_buffer_entry *sendbuf;
+
+  /*! Pointer to the projected kernel table */
+  struct projected_kernel_table *kernel_table;
+};
+
+#ifdef HAVE_CHEALPIX
+static pixel_index_t angle_to_pixel(int nside, double theta, double phi) {
+  int64_t ipring;
+  ang2pix_ring64(nside, theta, phi, &ipring);
+  return ipring;
+}
+#endif
+
+#ifdef WITH_MPI
+
+/* Information about one block of buffered map updates to be sent */
+struct buffer_block_info {
+
+  /*! Pointer to the buffer block */
+  struct particle_buffer_block *block;
+
+  /*! Number of elements from this block to go to each MPI rank */
+  size_t *count;
+
+  /*! Offsets at which to write elements in the send buffer */
+  size_t *offset;
+
+  /*! First destination rank each entry is to be sent to */
+  int *first_dest;
+
+  /*! Last destination rank each entry is to be sent to */
+  int *last_dest;
+};
+
+#ifdef HAVE_CHEALPIX
+static int pixel_to_rank(int comm_size, pixel_index_t pix_per_rank,
+                         pixel_index_t pixel) {
+  int rank = pixel / pix_per_rank;
+  if (rank >= comm_size) rank = comm_size - 1;
+  return rank;
+}
+#endif
+
+/**
+ * @brief Count elements to send to each rank from each buffer block
+ *
+ * For each buffer_block_info in the input array, this counts how
+ * many lightcone map updates are to be sent to each MPI rank.
+ * It also determines the range of MPI ranks which each update
+ * needs to be sent to. Updates must be copied to several ranks
+ * if we're smoothing the maps and the smoothing kernel overlaps parts
+ * of the healpix map which are stored on different ranks.
+ *
+ * @param map_data Pointer to an array of buffer_block_info
+ * @param num_elements Number of elements in the buffer_block_info array
+ * @param extra_data Pointer to healpix_smoothing_mapper_data struct
+ *
+ */
+static void count_elements_to_send_mapper(void *map_data, int num_elements,
+                                          void *extra_data) {
+#ifdef HAVE_CHEALPIX
+
+  /* Unpack information about the array of blocks to process */
+  struct buffer_block_info *block_info = (struct buffer_block_info *)map_data;
+
+  /* Unpack extra input parameters we need */
+  struct healpix_smoothing_mapper_data *mapper_data =
+      (struct healpix_smoothing_mapper_data *)extra_data;
+  struct lightcone_particle_type *part_type = mapper_data->part_type;
+  struct lightcone_shell *shell = mapper_data->shell;
+
+  /* Number of healpix maps we're updating */
+  const int nr_maps = part_type->nr_maps;
+
+  /* Number of MPI ranks we have */
+  const int comm_size = mapper_data->comm_size;
+
+  /* Maximum radius of a HEALPix pixel */
+  const double max_pixrad = healpix_max_pixrad(shell->nside);
+
+  /* Loop over buffer blocks to process */
+  for (int block_nr = 0; block_nr < num_elements; block_nr += 1) {
+
+    /* Find the count and offset for this block */
+    size_t *count = block_info[block_nr].count;
+    size_t *offset = block_info[block_nr].offset;
+    int *first_dest = block_info[block_nr].first_dest;
+    int *last_dest = block_info[block_nr].last_dest;
+
+    /* Get a pointer to the block itself */
+    struct particle_buffer_block *block = block_info[block_nr].block;
+
+    /* Initialise count and offset into the send buffer for this block */
+    for (int i = 0; i < comm_size; i += 1) {
+      count[i] = 0;
+      offset[i] = 0;
+    }
+
+    /* Loop over lightcone map contributions in this block */
+    union lightcone_map_buffer_entry *update_data =
+        (union lightcone_map_buffer_entry *)block->data;
+    for (size_t i = 0; i < block->num_elements; i += 1) {
+
+      /* Find the particle angular coordinates and size for this update */
+      size_t index = i * (3 + nr_maps);
+      const double theta = int_to_angle(update_data[index + 0].i);
+      const double phi = int_to_angle(update_data[index + 1].i);
+      const double radius = update_data[index + 2].f;
+
+      /* Determine which MPI ranks this contribution needs to go to */
+      pixel_index_t first_pixel, last_pixel;
+
+      /* Compute the search radius: the angle at which the kernel function
+         reaches zero */
+      const double search_radius = radius * kernel_gamma;
+      if (search_radius < max_pixrad) {
+
+        /* If the radius is small, assign the contribution to a single pixel */
+        first_pixel = last_pixel = angle_to_pixel(shell->nside, theta, phi);
+
+      } else {
+
+        /* If the radius is large we will update a range of pixels */
+        double vec[3];
+        ang2vec(theta, phi, vec);
+        pixel_index_t pix_min, pix_max;
+        healpix_query_disc_range(shell->nside, vec, search_radius, &pix_min,
+                                 &pix_max, NULL, NULL);
+        first_pixel = pix_min;
+        last_pixel = pix_max;
+      }
+
+      first_dest[i] =
+          pixel_to_rank(comm_size, shell->pix_per_rank, first_pixel);
+      last_dest[i] =
+          pixel_to_rank(comm_size, shell->pix_per_rank, last_pixel);
+
+      /* Update the counts for this block */
+      for (int dest = first_dest[i]; dest <= last_dest[i]; dest += 1)
+        count[dest] += 1;
+    }
+
+    /* Next block */
+  }
+#else
+  error("Need HEALPix C API for lightcone maps");
+#endif
+}
+
+/**
+ * @brief Store elements to send to each MPI rank from each buffer block
+ *
+ * This stores the updates to be sent to MPI ranks in order of which
+ * rank they need to be sent to. It also duplicates updates which need
+ * to go to multiple ranks.
+ *
+ * @param map_data Pointer to an array of buffer_block_info
+ * @param num_elements Number of elements in the buffer_block_info array
+ * @param extra_data Pointer to healpix_smoothing_mapper_data struct
+ *
+ */
+static void store_elements_to_send_mapper(void *map_data, int num_elements,
+                                          void *extra_data) {
+
+  /* Unpack input data */
+  struct buffer_block_info *block_info = (struct buffer_block_info *)map_data;
+  struct healpix_smoothing_mapper_data *mapper_data =
+      (struct healpix_smoothing_mapper_data *)extra_data;
+  struct lightcone_particle_type *part_type = mapper_data->part_type;
+
+  /* Find the send buffer where we will place the updates from this block */
+  union lightcone_map_buffer_entry *sendbuf = mapper_data->sendbuf;
+
+  /* Find how many elements we have per update */
+  const int nr_elements_per_update = 3 + part_type->nr_maps;
+
+  /* Loop over blocks to process on this call */
+  for (int block_nr = 0; block_nr < num_elements; block_nr += 1) {
+
+    /* Find the offset into the send buffer where we will place the
+       the first element from this block to go to each MPI rank.
+       Offset is in units of number of updates. */
+    size_t *offset = block_info[block_nr].offset;
+
+    /* Find range of MPI ranks to send each element in this block to */
+    int *first_dest_rank = block_info[block_nr].first_dest;
+    int *last_dest_rank = block_info[block_nr].last_dest;
+
+    /* Get a pointer to the block itself */
+    struct particle_buffer_block *block = block_info[block_nr].block;
+
+    /* Loop over lightcone map updates in this block */
+    union lightcone_map_buffer_entry *update_data =
+        (union lightcone_map_buffer_entry *)block->data;
+    for (size_t i = 0; i < block->num_elements; i += 1) {
+
+      /* Find the data to send for this update */
+      union lightcone_map_buffer_entry *block_data =
+          &update_data[i * nr_elements_per_update];
+
+      /* Store this contribution to the send buffer (possibly multiple
+         times) */
+      for (int rank = first_dest_rank[i]; rank <= last_dest_rank[i];
+           rank += 1) {
+
+        /* Find where in the send buffer to write the update */
+        union lightcone_map_buffer_entry *dest =
+            sendbuf + (offset[rank] * nr_elements_per_update);
+
+        /* Copy the update to the send buffer */
+        memcpy(
+            dest, block_data,
+            sizeof(union lightcone_map_buffer_entry) * nr_elements_per_update);
+        offset[rank] += 1;
+      }
+
+      /* Next element in this block */
+    }
+    /* Next block */
+  }
+}
+#endif
+
+/**
+ * @brief Mapper function for updating the healpix map
+ *
+ * map_data is a pointer to an array of union lightcone_map_buffer_entry.
+ * If there are N lightcone maps to update and M updates to apply then the
+ * array contains (3+N)*M entries. Each group of 3+N entries consists of
+ * (theta, phi, radius, value1, value2, ...) where theta and phi are the
+ * encoded angular coordinates of the particle, radius is the angular
+ * smoothing length and the values are the quantities to add to the
+ * healpix maps.
+ *
+ * @param map_data Pointer to the array of updates to apply
+ * @param num_elements Number of updates to apply
+ * @param extra_data Pointer to healpix_smoothing_mapper_data struct
+ *
+ */
+void healpix_smoothing_mapper(void *map_data, int num_elements,
+                              void *extra_data) {
+
+#ifdef HAVE_CHEALPIX
+
+  /* Unpack pointers to the lightcone shell and particle_type structs */
+  struct healpix_smoothing_mapper_data *mapper_data =
+      (struct healpix_smoothing_mapper_data *)extra_data;
+  struct lightcone_shell *shell = mapper_data->shell;
+  struct lightcone_particle_type *part_type = mapper_data->part_type;
+  struct projected_kernel_table *kernel_table = mapper_data->kernel_table;
+
+  /* Get maximum radius of any pixel in the map */
+  const double max_pixrad = healpix_max_pixrad(shell->nside);
+
+  /* Find the array of updates to apply to the healpix maps */
+  union lightcone_map_buffer_entry *update_data =
+      (union lightcone_map_buffer_entry *)map_data;
+
+  /* Find range of pixel indexes stored locally. Here we assume all maps
+     have the same number of pixels and distribution between MPI ranks */
+  if (shell->nr_maps < 1)
+    error("called on lightcone_shell which contributes to no maps");
+  pixel_index_t local_pix_offset = shell->map[0].local_pix_offset;
+  pixel_index_t local_nr_pix = shell->map[0].local_nr_pix;
+
+  /* Loop over updates to apply */
+  for (int i = 0; i < num_elements; i += 1) {
+
+    /* Find the data for this update */
+    size_t index = i * (3 + part_type->nr_maps);
+    const double theta = int_to_angle(update_data[index + 0].i);
+    const double phi = int_to_angle(update_data[index + 1].i);
+    const double radius = update_data[index + 2].f;
+    const union lightcone_map_buffer_entry *value = &update_data[index + 3];
+
+    if (radius < max_pixrad) {
+
+      /*
+        Small particles are added to the maps directly regardless of
+        whether the map is smoothed. Find the pixel index.
+      */
+      pixel_index_t global_pix = angle_to_pixel(shell->nside, theta, phi);
+
+      /* Check the pixel is stored on this MPI rank */
+      if ((global_pix >= local_pix_offset) &&
+          (global_pix < local_pix_offset + local_nr_pix)) {
+
+        /* Find local index of the pixel to update */
+        const pixel_index_t local_pix = global_pix - local_pix_offset;
+
+        /* Add this particle to all healpix maps */
+        for (int j = 0; j < part_type->nr_maps; j += 1) {
+          const int map_index = part_type->map_index[j];
+          const double buffered_value = value[j].f;
+          const double fac_inv = shell->map[map_index].buffer_scale_factor_inv;
+          const double value_to_add = buffered_value * fac_inv;
+          atomic_add_d(&shell->map[map_index].data[local_pix], value_to_add);
+        }
+      }
+
+    } else {
+
+      /*
+         Large particles are SPH smoothed onto smoothed maps and just added
+         to the appropriate pixel in un-smoothed maps.
+
+         First do the smoothed maps
+      */
+      if (part_type->nr_smoothed_maps > 0) {
+
+        /* Get array of ranges of pixels to update */
+        double part_vec[3];
+        ang2vec(theta, phi, part_vec);
+        pixel_index_t pix_min, pix_max;
+        int nr_ranges;
+        struct pixel_range *range;
+        const double search_radius = radius * kernel_gamma;
+        healpix_query_disc_range(shell->nside, part_vec, search_radius,
+                                 &pix_min, &pix_max, &nr_ranges, &range);
+
+        /* Compute the total weight of the pixels to update */
+        double total_weight = 0;
+        for (int range_nr = 0; range_nr < nr_ranges; range_nr += 1) {
+          for (pixel_index_t pix = range[range_nr].first;
+               pix <= range[range_nr].last; pix += 1) {
+
+            /* Get the vector at the centre of this pixel */
+            double pixel_vec[3];
+            pix2vec_ring64(shell->nside, pix, pixel_vec);
+
+            /* Find the angle between this pixel centre and the particle. The
+               dot product may be a tiny bit greater than one due to rounding
+               error */
+            const double dp =
+                (pixel_vec[0] * part_vec[0] + pixel_vec[1] * part_vec[1] +
+                 pixel_vec[2] * part_vec[2]);
+            const double angle = dp < 1.0 ? acos(dp) : 0.0;
+
+            /* Evaluate the kernel at this radius */
+            total_weight += projected_kernel_eval(kernel_table, angle / radius);
+          }
+        }
+
+        /* Update the pixels */
+        for (int range_nr = 0; range_nr < nr_ranges; range_nr += 1) {
+          for (pixel_index_t pix = range[range_nr].first;
+               pix <= range[range_nr].last; pix += 1) {
+
+            /* Check if this pixel is stored locally */
+            const pixel_index_t global_pix = pix;
+            if ((global_pix >= local_pix_offset) &&
+                (global_pix < local_pix_offset + local_nr_pix)) {
+
+              /* Get the vector at the centre of this pixel */
+              double pixel_vec[3];
+              pix2vec_ring64(shell->nside, pix, pixel_vec);
+
+              /* Find the angle between this pixel centre and the particle.
+                 The dot product may be a tiny bit greater than one due to
+                 rounding error */
+              const double dp =
+                  (pixel_vec[0] * part_vec[0] + pixel_vec[1] * part_vec[1] +
+                   pixel_vec[2] * part_vec[2]);
+              const double angle = dp < 1.0 ? acos(dp) : 0.0;
+
+              /* Evaluate the kernel at this radius and normalise by the
+                 total weight */
+              const double weight =
+                  projected_kernel_eval(kernel_table, angle / radius) /
+                  total_weight;
+
+              /* Find local index of the pixel to update */
+              const pixel_index_t local_pix = global_pix - local_pix_offset;
+
+              /* Update the smoothed healpix maps */
+              for (int j = 0; j < part_type->nr_smoothed_maps; j += 1) {
+                const int map_index = part_type->map_index[j];
+                const double buffered_value = value[j].f;
+                const double fac_inv =
+                    shell->map[map_index].buffer_scale_factor_inv;
+                const double value_to_add = buffered_value * fac_inv;
+                atomic_add_d(&shell->map[map_index].data[local_pix],
+                             value_to_add * weight);
+              } /* Next smoothed map */
+            }
+          } /* Next pixel in this range */
+        }   /* Next range of pixels */
+
+        /* Free the array of pixel ranges */
+        free(range);
+
+      } /* if nr_smoothed_maps > 0*/
+
+      /* Then do any un-smoothed maps */
+      if (part_type->nr_unsmoothed_maps > 0) {
+
+        /* Find the index of the pixel containing the particle */
+        pixel_index_t global_pix = angle_to_pixel(shell->nside, theta, phi);
+
+        /* Check the pixel is stored on this MPI rank */
+        if ((global_pix >= local_pix_offset) &&
+            (global_pix < local_pix_offset + local_nr_pix)) {
+
+          /* Find local index of the pixel to update */
+          const pixel_index_t local_pix = global_pix - local_pix_offset;
+
+          /* Update the un-smoothed healpix maps */
+          for (int j = part_type->nr_smoothed_maps; j < part_type->nr_maps;
+               j += 1) {
+            const int map_index = part_type->map_index[j];
+            const double buffered_value = value[j].f;
+            const double fac_inv =
+                shell->map[map_index].buffer_scale_factor_inv;
+            const double value_to_add = buffered_value * fac_inv;
+            atomic_add_d(&shell->map[map_index].data[local_pix], value_to_add);
+          }
+        }
+      } /* if part_type->nr_unsmoothed_maps > 0 */
+    }
+  } /* End loop over updates to apply */
+#else
+  error("Need HEALPix C API for lightcone maps");
+#endif
+}
+
+/**
+ * @brief Apply updates for one particle type to all lightcone maps in a shell
+ *
+ * When a particle of type ptype crosses the lightcone it generates an entry
+ * in shell->buffer[ptype] which contains the angular position and size of
+ * the particle and the values it contributes to the lightcone_maps in the
+ * shell. This function applies these buffered updates to the lightcone
+ * map pixel data.
+ *
+ * We carry out all the updates for one particle type at the same time so that
+ * we avoid repeating the healpix neighbour search for every healpix map.
+ *
+ * Applying the updates involves copying them to a send buffer then a receive
+ * buffer, so if there are a lot we process them in chunks of up to
+ * max_map_update_send_size_mb megabytes to save memory.
+ *
+ * @param shell the #lightcone_shell to update
+ * @param tp the #threadpool used to execute the updates
+ * @param part_type contains information about each particle type to be updated
+ * @param ptype index of the particle type to update
+ * @param max_map_update_send_size_mb maximum amount of data each rank sends
+ * @param kernel_table lookup table for the projected SPH kernel
+ * @param verbose if non-zero, print extra information
+ *
+ */
+void lightcone_shell_flush_map_updates_for_type(
+    struct lightcone_shell *shell, struct threadpool *tp,
+    struct lightcone_particle_type *part_type,
+    int ptype, const double max_map_update_send_size_mb,
+    struct projected_kernel_table *kernel_table, int verbose) {
+
+  int comm_rank = 0, comm_size = 1;
+#ifdef WITH_MPI
+  MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
+#endif
+
+  /* Information needed by mapper functions */
+  struct healpix_smoothing_mapper_data mapper_data;
+  mapper_data.shell = shell;
+  mapper_data.part_type = &part_type[ptype];
+  mapper_data.comm_rank = comm_rank;
+  mapper_data.comm_size = comm_size;
+  mapper_data.sendbuf = NULL;
+  mapper_data.kernel_table = kernel_table;
+
+#ifdef WITH_MPI
+
+  /* Count data blocks and ensure number of elements is in range */
+  int nr_blocks = 0;
+  struct particle_buffer *buffer = &shell->buffer[ptype];
+  struct particle_buffer_block *block = buffer->first_block;
+  while (block) {
+    if (block->num_elements > buffer->elements_per_block)
+      block->num_elements = buffer->elements_per_block;
+    nr_blocks += 1;
+    block = block->next;
+  }
+
+  /* Allocate array with counts and offsets for each block */
+  struct buffer_block_info *block_info =
+      malloc(sizeof(struct buffer_block_info) * nr_blocks);
+
+  /* Initialize array of blocks */
+  nr_blocks = 0;
+  block = buffer->first_block;
+  while (block) {
+    block_info[nr_blocks].block = block;
+    block_info[nr_blocks].count = malloc(sizeof(size_t) * comm_size);
+    block_info[nr_blocks].offset = malloc(sizeof(size_t) * comm_size);
+    block_info[nr_blocks].first_dest =
+        malloc(sizeof(int) * block->num_elements);
+    block_info[nr_blocks].last_dest = malloc(sizeof(int) * block->num_elements);
+    nr_blocks += 1;
+    block = block->next;
+  }
+
+  /* To minimize memory usage we don't process all of the blocks at once.
+     Determine the maximum number of blocks on any rank. */
+  int max_nr_blocks;
+  MPI_Allreduce(&nr_blocks, &max_nr_blocks, 1, MPI_INT, MPI_MAX,
+                MPI_COMM_WORLD);
+
+  /* Determine the maximum number of blocks to process per iteration */
+  size_t max_bytes = max_map_update_send_size_mb * 1024.0 * 1024.0;
+  int max_blocks_per_iteration =
+      max_bytes / (buffer->element_size * buffer->elements_per_block);
+  if (max_blocks_per_iteration < 1)
+    error("max_map_update_send_size_mb is too small to process even one block");
+
+  /* Determine how many iterations we need */
+  int nr_iterations = max_nr_blocks / max_blocks_per_iteration;
+  if (max_nr_blocks % max_blocks_per_iteration != 0) nr_iterations += 1;
+  if (engine_rank == 0 && nr_iterations > 0 && verbose)
+    message("will require %d iterations with %d blocks per iteration",
+            nr_iterations, max_blocks_per_iteration);
+
+  /* Loop over iterations */
+  int nr_blocks_done = 0;
+  for (int iter = 0; iter < nr_iterations; iter += 1) {
+
+    /* Find number of blocks to do on this iteration (may be zero) */
+    int nr_blocks_iter = nr_blocks - nr_blocks_done;
+    if (nr_blocks_iter > max_blocks_per_iteration)
+      nr_blocks_iter = max_blocks_per_iteration;
+
+    /* Get a pointer to the blocks to do on this iteration */
+    struct buffer_block_info *block_info_iter = block_info + nr_blocks_done;
+
+    /* For each block, count how many elements are to be sent to each MPI rank
+     */
+    threadpool_map(tp, count_elements_to_send_mapper, block_info_iter,
+                   nr_blocks_iter, sizeof(struct buffer_block_info), 1,
+                   &mapper_data);
+
+    /* Find total number of elements to go to each rank */
+    size_t *send_count = malloc(sizeof(size_t) * comm_size);
+    for (int i = 0; i < comm_size; i += 1) send_count[i] = 0;
+    for (int block_nr = 0; block_nr < nr_blocks_iter; block_nr += 1) {
+      for (int i = 0; i < comm_size; i += 1)
+        send_count[i] += block_info_iter[block_nr].count[i];
+    }
+
+    /* Find offset to the first element to go to each rank if we sort them by
+     * destination */
+    size_t *send_offset = malloc(sizeof(size_t) * comm_size);
+    send_offset[0] = 0;
+    for (int i = 1; i < comm_size; i += 1) {
+      send_offset[i] = send_offset[i - 1] + send_count[i - 1];
+    }
+
+    /* For each block, find the location in the send buffer where we need to
+       place the first element to go to each MPI rank */
+    for (int block_nr = 0; block_nr < nr_blocks_iter; block_nr += 1) {
+      for (int i = 0; i < comm_size; i += 1) {
+        if (block_nr == 0) {
+          /* This is the first block */
+          block_info_iter[block_nr].offset[i] = send_offset[i];
+        } else {
+          /* Not first, so elements are written after those of the previous
+           * block */
+          block_info_iter[block_nr].offset[i] =
+              block_info_iter[block_nr - 1].offset[i] +
+              block_info_iter[block_nr - 1].count[i];
+        }
+      }
+    }
+
+    /* Find the total number of elements to be sent */
+    size_t total_nr_send = 0;
+    for (int i = 0; i < comm_size; i += 1) total_nr_send += send_count[i];
+
+    /* Allocate the send buffer */
+    union lightcone_map_buffer_entry *sendbuf =
+        malloc(part_type[ptype].buffer_element_size * total_nr_send);
+    mapper_data.sendbuf = sendbuf;
+
+    /* Populate the send buffer */
+    threadpool_map(tp, store_elements_to_send_mapper, block_info_iter,
+                   nr_blocks_iter, sizeof(struct buffer_block_info), 1,
+                   &mapper_data);
+
+    /* Determine number of elements to receive */
+    size_t *recv_count = malloc(comm_size * sizeof(size_t));
+    MPI_Alltoall(send_count, sizeof(size_t), MPI_BYTE, recv_count,
+                 sizeof(size_t), MPI_BYTE, MPI_COMM_WORLD);
+    size_t total_nr_recv = 0;
+    for (int i = 0; i < comm_size; i += 1) total_nr_recv += recv_count[i];
+
+    /* Allocate receive buffer */
+    union lightcone_map_buffer_entry *recvbuf =
+        malloc(part_type[ptype].buffer_element_size * total_nr_recv);
+
+    /* Exchange data */
+    exchange_structs(send_count, sendbuf, recv_count, recvbuf,
+                     part_type[ptype].buffer_element_size);
+
+    /* Apply received updates to the healpix map */
+    threadpool_map(tp, healpix_smoothing_mapper, recvbuf, total_nr_recv,
+                   part_type[ptype].buffer_element_size,
+                   threadpool_auto_chunk_size, &mapper_data);
+
+    /* Tidy up */
+    free(send_count);
+    free(send_offset);
+    free(sendbuf);
+    free(recv_count);
+    free(recvbuf);
+
+    /* Advance to next set of blocks */
+    nr_blocks_done += nr_blocks_iter;
+  }
+  if (nr_blocks_done != nr_blocks)
+    error("not all map update blocks were processed");
+
+  /* We no longer need the array of blocks */
+  for (int block_nr = 0; block_nr < nr_blocks; block_nr += 1) {
+    free(block_info[block_nr].count);
+    free(block_info[block_nr].offset);
+    free(block_info[block_nr].first_dest);
+    free(block_info[block_nr].last_dest);
+  }
+  free(block_info);
+
+  /* Empty the particle buffer now that we copied the data from it */
+  particle_buffer_empty(buffer);
+
+#else
+
+  /* If not using MPI, we can update the healpix maps directly from the buffer
+   */
+  struct particle_buffer_block *block = NULL;
+  size_t num_elements;
+  union lightcone_map_buffer_entry *update_data;
+  do {
+    particle_buffer_iterate(&shell->buffer[ptype], &block, &num_elements,
+                            (void **)&update_data);
+    threadpool_map(tp, healpix_smoothing_mapper, update_data, num_elements,
+                   part_type[ptype].buffer_element_size,
+                   threadpool_auto_chunk_size, &mapper_data);
+  } while (block);
+  particle_buffer_empty(&shell->buffer[ptype]);
+
+#endif
+}
+
+/**
+ * @brief Apply buffered updates to all lightcone maps in a shell
+ *
+ * @param shell the #lightcone_shell to update
+ * @param tp the #threadpool used to execute the updates
+ * @param part_type contains information about each particle type to be updated
+ * @param max_map_update_send_size_mb maximum amount of data each rank sends
+ * @param kernel_table lookup table for the projected SPH kernel
+ * @param verbose if non-zero, print extra information
+ *
+ */
+void lightcone_shell_flush_map_updates(
+    struct lightcone_shell *shell, struct threadpool *tp,
+    struct lightcone_particle_type *part_type,
+    const double max_map_update_send_size_mb,
+    struct projected_kernel_table *kernel_table, int verbose) {
+
+  if (shell->state != shell_current)
+    error("Attempt to flush updates for non-current shell!");
+
+  for (int ptype = 0; ptype < swift_type_count; ptype += 1) {
+    if ((shell->nr_maps > 0) && (part_type[ptype].nr_maps > 0)) {
+      lightcone_shell_flush_map_updates_for_type(
+          shell, tp, part_type, ptype, max_map_update_send_size_mb,
+          kernel_table, verbose);
+    }
+  }
+}
diff --git a/src/lightcone/lightcone_shell.h b/src/lightcone/lightcone_shell.h
new file mode 100644
index 0000000000000000000000000000000000000000..d14fd8ac4e8914745c2aeba0a9b737f9b49b5fba
--- /dev/null
+++ b/src/lightcone/lightcone_shell.h
@@ -0,0 +1,182 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_LIGHTCONE_SHELL_H
+#define SWIFT_LIGHTCONE_SHELL_H
+
+/* Standard headers */
+#include <stdio.h>
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers */
+#include "cosmology.h"
+#include "error.h"
+#include "lightcone/lightcone_map.h"
+#include "lightcone/lightcone_map_types.h"
+#include "lightcone/pixel_index.h"
+#include "lightcone/projected_kernel.h"
+#include "particle_buffer.h"
+
+enum lightcone_shell_state {
+  shell_uninitialized,
+  shell_current,
+  shell_complete,
+};
+
+union lightcone_map_buffer_entry {
+  int i;
+  float f;
+};
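+
+/* Each buffered map update is stored as a sequence of these unions:
+   { theta (int-encoded angle), phi (int-encoded angle), radius (float),
+     value for map 1 (float), ..., value for map N (float) } */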
+
+/**
+ * @brief Encode an angle in the range 0 to 2pi as an int
+ *
+ * @param angle the angle to encode
+ */
+__attribute__((always_inline)) INLINE static int angle_to_int(
+    const double angle) {
+
+  if (angle < 0.0 || angle > 2 * M_PI) error("angle is out of range!");
+  const double fac = ((1 << 30) - 1) / M_PI;
+  return (int)(angle * fac);
+}
+
+/**
+ * @brief Convert an encoded angle back to a double
+ *
+ * @param i the int containing the angle
+ */
+__attribute__((always_inline)) INLINE static double int_to_angle(const int i) {
+
+  const double fac = M_PI / ((1 << 30) - 1);
+  return i * fac;
+}
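+
+/* Worked example of the encoding: with fac = ((1 << 30) - 1) / pi, an angle
+   of 2*pi maps to 2 * ((1 << 30) - 1) = 2^31 - 2, which still fits in a
+   signed 32-bit int. The round-trip resolution is therefore
+   pi / ((1 << 30) - 1), roughly 3e-9 radians. */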
+
+/**
+ * @brief Information about a particle type contributing to the lightcone
+ *
+ * For each Swift particle type we store how many lightcone maps that type
+ * contributes to and their indexes in the array of lightcone_maps structs
+ * associated with each lightcone_shell.
+ *
+ * We also record the number of bytes needed to store one update: updates
+ * consist of the angular coordinates of the particle, its angular smoothing
+ * radius, and the quantities contributed to the lightcone maps.
+ *
+ */
+struct lightcone_particle_type {
+
+  /*! Number of lightcone maps this particle type contributes to */
+  int nr_maps;
+
+  /*! Number of smoothed maps this particle type contributes to */
+  int nr_smoothed_maps;
+
+  /*! Number of un-smoothed maps this particle type contributes to */
+  int nr_unsmoothed_maps;
+
+  /*! Indices of the lightcone maps this particle type contributes to.
+    Smoothed maps will be stored first in the array. */
+  int *map_index;
+
+  /*! Amount of data to store per particle: theta, phi, radius and the value to
+   * add to each healpix map */
+  size_t buffer_element_size;
+};
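+
+/* A sketch of how buffer_element_size relates to the update layout used in
+   lightcone_shell.c: each update consists of 3 + nr_maps entries, so
+
+     buffer_element_size =
+         (3 + nr_maps) * sizeof(union lightcone_map_buffer_entry);
+*/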
+
+/**
+ * @brief Information about each lightcone shell
+ *
+ * Each shell contains one lightcone_map for each healpix map
+ * we're making. This is where the pixel data is stored while
+ * the current simulation timestep overlaps the shell's redshift
+ * range.
+ *
+ * Each shell also contains one particle_buffer per particle type,
+ * which stores the updates to be applied to the pixel data.
+ * Updates are accumulated in the buffers during each time step
+ * and applied at the end of the step.
+ *
+ */
+struct lightcone_shell {
+
+  /*! State of this shell */
+  enum lightcone_shell_state state;
+
+  /*! Inner radius of shell */
+  double rmin;
+
+  /*! Outer radius of shell */
+  double rmax;
+
+  /*! Minimum expansion factor for this shell */
+  double amin;
+
+  /*! Maximum expansion factor for this shell */
+  double amax;
+
+  /*! Number of maps associated with this shell */
+  int nr_maps;
+
+  /*! Array of lightcone maps for this shell */
+  struct lightcone_map *map;
+
+  /*! Buffers to store the map updates for each particle type */
+  struct particle_buffer buffer[swift_type_count];
+
+  /*! Healpix nside parameter */
+  int nside;
+
+  /*! Total pixels in the maps */
+  pixel_index_t total_nr_pix;
+
+  /*! Number of pixels per map stored on this node */
+  pixel_index_t local_nr_pix;
+
+  /*! Offset of the first pixel stored on this rank */
+  pixel_index_t local_pix_offset;
+
+  /*! Number of pixels per rank (last node has any extra) */
+  pixel_index_t pix_per_rank;
+};
+
+struct lightcone_shell *lightcone_shell_array_init(
+    const struct cosmology *cosmo, const char *radius_file, int nr_maps,
+    struct lightcone_map_type *map_type, int nside, pixel_index_t total_nr_pix,
+    struct lightcone_particle_type *part_type, size_t elements_per_block,
+    int *nr_shells_out);
+
+void lightcone_shell_array_free(struct lightcone_shell *shell, int nr_shells);
+
+void lightcone_shell_array_dump(const struct lightcone_shell *shell,
+                                int nr_shells, FILE *stream);
+
+struct lightcone_shell *lightcone_shell_array_restore(
+    FILE *stream, int nr_shells, struct lightcone_particle_type *part_type,
+    size_t elements_per_block);
+
+void lightcone_shell_flush_map_updates(
+    struct lightcone_shell *shell, struct threadpool *tp,
+    struct lightcone_particle_type *part_type,
+    const double max_map_update_send_size_mb,
+    struct projected_kernel_table *kernel_table, int verbose);
+
+#endif /* SWIFT_LIGHTCONE_SHELL_H */
diff --git a/src/lightcone/pixel_index.h b/src/lightcone/pixel_index.h
new file mode 100644
index 0000000000000000000000000000000000000000..85ca09dfee193f1f786a9686a83d2806d235d9c5
--- /dev/null
+++ b/src/lightcone/pixel_index.h
@@ -0,0 +1,38 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_PIXEL_INDEX_H
+#define SWIFT_PIXEL_INDEX_H
+
+#include <limits.h>
+#include <stdint.h>
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Type to use for HEALPix pixel indexes */
+typedef int64_t pixel_index_t;
+
+/* Maximum pixel index (determines maximum map size) */
+#define MAX_PIXEL_INDEX INT64_MAX
+
+/* Corresponding MPI type */
+#define MPI_PIXEL_INDEX_T MPI_INT64_T
+
+#endif
diff --git a/src/lightcone/projected_kernel.c b/src/lightcone/projected_kernel.c
new file mode 100644
index 0000000000000000000000000000000000000000..152a4067305358a70890e84082ae4cc3a85e0b00
--- /dev/null
+++ b/src/lightcone/projected_kernel.c
@@ -0,0 +1,154 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* This object's header. */
+#include "projected_kernel.h"
+
+/* Local headers */
+#include "error.h"
+#include "kernel_hydro.h"
+#include "math.h"
+
+#ifdef HAVE_LIBGSL
+#include <gsl/gsl_integration.h>
+#include <gsl/gsl_interp.h>
+#endif
+
+/**
+ * @brief Integrand used in evaluating the projected kernel
+ *
+ * See section 4.3.1 in Price et al. 2007:
+ * https://ui.adsabs.harvard.edu/abs/2007PASA...24..159P/abstract
+ *
+ * This function is used to carry out the integral in equation 30.
+ *
+ * @param qz z coordinate at which to evaluate the kernel, in units of the
+ * smoothing length h
+ * @param param Pointer to the distance in the xy plane, in units of the
+ * smoothing length h
+ */
+static double projected_kernel_integrand(double qz, void *param) {
+
+  const double qxy = *((double *)param);
+  const double q = sqrt(pow(qxy, 2.0) + pow(qz, 2.0));
+  double W;
+  kernel_eval_double(q, &W);
+  return W;
+}
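+
+/* In the notation of Price et al. 2007 (eq. 30), projected_kernel_integrate()
+   below computes
+
+     W_2D(q_xy) = int_{-qz_max}^{+qz_max} W(sqrt(q_xy^2 + q_z^2)) dq_z,
+
+   where qz_max = sqrt(kernel_gamma^2 - q_xy^2) bounds the support of the 3D
+   kernel. */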
+
+/**
+ * @brief Computes 2D projection of the 3D kernel function.
+ *
+ * Given a distance in the xy plane, we integrate along the
+ * z axis to evaluate the projected kernel.
+ *
+ * @param u The ratio of the (2D) distance to the smoothing length
+ */
+double projected_kernel_integrate(double u) {
+
+#ifdef HAVE_LIBGSL
+
+  /* Swift's hydro kernel can be evaluated with kernel_eval(u, W),
+     where u = r / h and the result is returned in W. The kernel goes
+     to zero at u = kernel_gamma. The projection is only implemented
+     for the 3D case. */
+#ifndef HYDRO_DIMENSION_3D
+  error("projected_kernel_eval() is only defined for the 3D case.");
+#endif
+
+  /* Initialise the GSL workspace */
+  const size_t workspace_size = 100000;
+  gsl_integration_workspace *space =
+      gsl_integration_workspace_alloc(workspace_size);
+
+  /* Compute the integral */
+  double result;
+  double abserr;
+  double qxy = u;
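+  /* The kernel has compact support |q| < kernel_gamma, so along the
+     line of sight the integrand is non-zero only for
+     |qz| < sqrt(kernel_gamma^2 - qxy^2) */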
+  const double qz_max = sqrt(pow(kernel_gamma, 2.0) - pow(qxy, 2.0));
+  const double qz_min = -qz_max;
+  gsl_function F = {&projected_kernel_integrand, &qxy};
+  gsl_integration_qag(&F, qz_min, qz_max, 1.0e-10, 1.0e-10, workspace_size,
+                      GSL_INTEG_GAUSS61, space, &result, &abserr);
+
+  /* Free the workspace */
+  gsl_integration_workspace_free(space);
+
+  return result;
+
+#else
+  error("Need GSL library to evaluate the projected kernel");
+  return 0.0;
+#endif
+}
+
+/**
+ * @brief Tabulate the projected kernel
+ *
+ * @param tab The projected_kernel_table struct
+ */
+void projected_kernel_init(struct projected_kernel_table *tab) {
+
+  /* Allocate storage */
+  tab->n = PROJECTED_KERNEL_NTAB;
+  tab->value = malloc(sizeof(double) * tab->n);
+  if (tab->value == NULL)
+    error("Failed to allocate projected kernel table");
+
+  /* Determine range to tabulate */
+  tab->u_max = kernel_gamma;
+  tab->du = tab->u_max / (tab->n - 1);
+  tab->inv_du = 1.0 / tab->du;
+
+  /* Evaluate the kernel at points in the table */
+  for (int i = 0; i < tab->n - 1; i += 1)
+    tab->value[i] = projected_kernel_integrate(i * tab->du);
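+  /* The kernel vanishes at u = u_max, so pin the final entry to exactly
+     zero rather than relying on the numerical integration */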
+  tab->value[tab->n - 1] = 0.0;
+}
+
+/**
+ * @brief Deallocate the projected kernel table
+ */
+void projected_kernel_clean(struct projected_kernel_table *tab) {
+  free(tab->value);
+}
+
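+/**
+ * @brief Dump the kernel and its projection to "projected_kernel.txt"
+ *
+ * Writes the 3D kernel, the tabulated projected kernel and a direct
+ * numerical integration of the projection at each sampled u, as a
+ * diagnostic of the interpolation accuracy.
+ */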
+void projected_kernel_dump(void) {
+
+  struct projected_kernel_table tab;
+  projected_kernel_init(&tab);
+
+  const int N = 5000;
+  const double du = kernel_gamma / (N - 1);
+  FILE *fd;
+
+  fd = fopen("projected_kernel.txt", "w");
+  fprintf(fd, "u, kernel, projected kernel\n");
+  for (int i = 0; i < N; i += 1) {
+    double u = i * du;
+    float kernel;
+    kernel_eval(u, &kernel);
+    double kernel_proj = projected_kernel_eval(&tab, u);
+    double kernel_proj_int = projected_kernel_integrate(u);
+
+    fprintf(fd, "%e, %e, %e, %e\n", u, (double)kernel, kernel_proj,
+            kernel_proj_int);
+  }
+
+  fclose(fd);
+  projected_kernel_clean(&tab);
+}
diff --git a/src/lightcone/projected_kernel.h b/src/lightcone/projected_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..674172c975847ad84aeff039c4f65298d6ebc2ee
--- /dev/null
+++ b/src/lightcone/projected_kernel.h
@@ -0,0 +1,69 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#ifndef SWIFT_PROJECTED_KERNEL_H
+#define SWIFT_PROJECTED_KERNEL_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers */
+#include "error.h"
+#include "inline.h"
+
+#define PROJECTED_KERNEL_NTAB 1000
+
+struct projected_kernel_table {
+  int n;
+  double du;
+  double inv_du;
+  double u_max;
+  double *value;
+};
+
+/**
+ * @brief Computes 2D projection of the 3D kernel function.
+ *
+ * This version interpolates the value from the supplied
+ * look-up table.
+ *
+ * @param tab The look-up table produced by projected_kernel_init()
+ * @param u The ratio of the (2D) distance to the smoothing length
+ */
+__attribute__((always_inline)) INLINE static double projected_kernel_eval(
+    struct projected_kernel_table *tab, double u) {
+
+  /* Check u is in range */
+  if (u >= tab->u_max) return 0.0;
+  if (u < 0.0) error("Negative u in projected kernel!");
+
+  /* Determine which interval we're in */
+  int i = u * tab->inv_du;
+
+  /* Find where we are in the interval */
+  double f = (u - i * tab->du) * tab->inv_du;
+
+  /* Linear interpolation */
+  return (1.0 - f) * tab->value[i] + f * tab->value[i + 1];
+}
+
+void projected_kernel_init(struct projected_kernel_table *tab);
+void projected_kernel_clean(struct projected_kernel_table *tab);
+void projected_kernel_dump(void);
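+
+/* Typical usage (illustrative sketch):
+ *
+ *   struct projected_kernel_table tab;
+ *   projected_kernel_init(&tab);
+ *   double w = projected_kernel_eval(&tab, u);  // u = 2D distance / h
+ *   projected_kernel_clean(&tab);
+ */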
+
+#endif
diff --git a/src/mesh_gravity_mpi.c b/src/mesh_gravity_mpi.c
index fb7c5710dcfd4b0d715cc2c97efc0c7142d3ff9d..cfa963dbdd0331a0c5e66a076c1777a27ef4fc5f 100644
--- a/src/mesh_gravity_mpi.c
+++ b/src/mesh_gravity_mpi.c
@@ -33,6 +33,7 @@
 #include "debug.h"
 #include "engine.h"
 #include "error.h"
+#include "exchange_structs.h"
 #include "lock.h"
 #include "mesh_gravity_patch.h"
 #include "mesh_gravity_sort.h"
@@ -254,106 +255,6 @@ void mesh_patches_to_sorted_array(const struct pm_mesh_patch *local_patches,
   if (count != size) error("Error flattening the mesh patches!");
 }
 
-/**
- * @brief Given an array of structs of size element_size, send
- * nr_send[i] elements to each node i. Allocates the receive
- * buffer recvbuf to the appropriate size and returns its size
- * in nr_recv_tot.
- *
- * TODO: can/should we replace this with a call to engine_do_redistribute()?
- *
- * @param nr_send Number of elements to send to each node
- * @param nr_recv Number of elements to receive from each node
- * @param sendbuf The elements to send
- * @param recvbuf The output buffer
- *
- */
-void exchange_structs(size_t *nr_send, char *sendbuf, size_t *nr_recv,
-                      char *recvbuf, size_t element_size) {
-
-#if defined(WITH_MPI) && defined(HAVE_MPI_FFTW)
-
-  /* Determine rank, number of ranks */
-  int nr_nodes, nodeID;
-  MPI_Comm_size(MPI_COMM_WORLD, &nr_nodes);
-  MPI_Comm_rank(MPI_COMM_WORLD, &nodeID);
-
-  /* Compute send offsets */
-  size_t *send_offset = (size_t *)malloc(nr_nodes * sizeof(size_t));
-  send_offset[0] = 0;
-  for (int i = 1; i < nr_nodes; i++) {
-    send_offset[i] = send_offset[i - 1] + nr_send[i - 1];
-  }
-
-  /* Compute receive offsets */
-  size_t *recv_offset = (size_t *)malloc(nr_nodes * sizeof(size_t));
-  recv_offset[0] = 0;
-  for (int i = 1; i < nr_nodes; i++) {
-    recv_offset[i] = recv_offset[i - 1] + nr_recv[i - 1];
-  }
-
-  /* Allocate request objects (one send and receive per node) */
-  MPI_Request *request =
-      (MPI_Request *)malloc(2 * sizeof(MPI_Request) * nr_nodes);
-
-  /* Make type to communicate mesh_key_value struct */
-  MPI_Datatype mesh_key_value_mpi_type;
-  if (MPI_Type_contiguous(element_size, MPI_BYTE, &mesh_key_value_mpi_type) !=
-          MPI_SUCCESS ||
-      MPI_Type_commit(&mesh_key_value_mpi_type) != MPI_SUCCESS) {
-    error("Failed to create MPI type for mesh_key_value struct.");
-  }
-
-  /*
-   * Post the send operations. This is an alltoallv really but
-   * we want to avoid the limits imposed by int counts and offsets
-   * in MPI_Alltoallv.
-   */
-  for (int i = 0; i < nr_nodes; i++) {
-    if (nr_send[i] > 0) {
-
-      /* TODO: handle very large messages */
-      if (nr_send[i] > INT_MAX)
-        error("exchange_structs() fails if nr_send > INT_MAX!");
-
-      MPI_Isend(&(sendbuf[send_offset[i] * element_size]), (int)nr_send[i],
-                mesh_key_value_mpi_type, i, 0, MPI_COMM_WORLD, &(request[i]));
-    } else {
-      request[i] = MPI_REQUEST_NULL;
-    }
-  }
-
-  /* Post the receives */
-  for (int i = 0; i < nr_nodes; i++) {
-    if (nr_recv[i] > 0) {
-
-      /* TODO: handle very large messages */
-      if (nr_recv[i] > INT_MAX)
-        error("exchange_structs() fails if nr_recv > INT_MAX!");
-
-      MPI_Irecv(&(recvbuf[recv_offset[i] * element_size]), (int)nr_recv[i],
-                mesh_key_value_mpi_type, i, 0, MPI_COMM_WORLD,
-                &(request[i + nr_nodes]));
-    } else {
-      request[i + nr_nodes] = MPI_REQUEST_NULL;
-    }
-  }
-
-  /* Wait for everything to complete */
-  MPI_Waitall(2 * nr_nodes, request, MPI_STATUSES_IGNORE);
-
-  /* Done with the MPI type */
-  MPI_Type_free(&mesh_key_value_mpi_type);
-
-  /* Tidy up */
-  free(recv_offset);
-  free(send_offset);
-  free(request);
-#else
-  error("FFTW MPI not found - unable to use distributed mesh");
-#endif
-}
-
 /**
  * @brief Convert the array of local patches to a slab-distributed 3D mesh
  *
diff --git a/src/neutrino/Default/neutrino.c b/src/neutrino/Default/neutrino.c
index b51e2521d3838b2c086bb1686e8456da9a09ee02..708fc12c44425439839e9afa4cfb83feefbad223 100644
--- a/src/neutrino/Default/neutrino.c
+++ b/src/neutrino/Default/neutrino.c
@@ -23,6 +23,10 @@
 /* Standard headers */
 #include <math.h>
 
+/* Local includes */
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_map_types.h"
+
 /* Compute the dimensionless neutrino momentum (units of kb*T).
  *
  * @param v The internal 3-velocity
@@ -339,3 +343,81 @@ void neutrino_check_cosmology(const struct space *s,
           cosmo->Omega_nu_0, Omega_particles_nu);
   }
 }
+
+/*
+  Lightcone map of neutrino mass perturbation
+*/
+
+/**
+ * @brief Determine if a particle type contributes to this map type
+ *
+ * @param ptype the particle type
+ */
+int lightcone_map_neutrino_mass_type_contributes(int ptype) {
+
+  switch (ptype) {
+    case swift_type_neutrino:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+/**
+ * @brief Make a healpix map of the neutrino mass perturbation
+ *
+ * When a neutrino particle crosses the lightcone this function
+ * should return the value to accumulate to the corresponding
+ * pixel in the healpix map.
+ *
+ * @param e the #engine structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param gp the #gpart to add to the map
+ * @param a_cross expansion factor at which the particle crosses the lightcone
+ * @param x_cross comoving coordinates at which the particle crosses the
+ * lightcone
+ */
+double lightcone_map_neutrino_mass_get_value(
+    const struct engine *e, const struct lightcone_props *lightcone_props,
+    const struct gpart *gp, const double a_cross, const double x_cross[3]) {
+
+  switch (gp->type) {
+    case swift_type_neutrino: {
+      return gp->mass;
+    } break;
+    default:
+      error("lightcone map function called on wrong particle type");
+      return -1.0; /* Prevent 'missing return' error */
+  }
+}
+
+/**
+ * @brief Return baseline value for neutrino mass lightcone maps.
+ *
+ * This is the mean neutrino density integrated over the volume of the pixel.
+ *
+ * @param c the #cosmology structure
+ * @param lightcone_props properties of the lightcone to update
+ * @param map The lightcone map
+ */
+double lightcone_map_neutrino_baseline_value(
+    const struct cosmology *c, const struct lightcone_props *lightcone_props,
+    const struct lightcone_map *map) {
+
+  /* Fetch the area of healpix pixels */
+  const double area = lightcone_props->pixel_area_steradians;
+
+  /* Fetch the inner and outer radii */
+  const double r_inner = map->r_min;
+  const double r_outer = map->r_max;
+  const double r_inner_3 = r_inner * r_inner * r_inner;
+  const double r_outer_3 = r_outer * r_outer * r_outer;
+
+  /* The volume mapped into a healpix pixel: the pixel solid angle times
+     the integral of r^2 dr over the shell, i.e. area * (r_o^3 - r_i^3) / 3 */
+  const double volume = area * (r_outer_3 - r_inner_3) / 3.0;
+
+  /* The mean comoving neutrino density at z = 0 */
+  const double rho_nu_0 = c->critical_density_0 * c->Omega_nu_0;
+
+  return rho_nu_0 * volume;
+}
diff --git a/src/neutrino/Default/neutrino.h b/src/neutrino/Default/neutrino.h
index e728274d6b2af7bd0c187ad93b3d9884c5750cb1..d160e4ad534dd064a1dc89fc0b69eb36d98a12f4 100644
--- a/src/neutrino/Default/neutrino.h
+++ b/src/neutrino/Default/neutrino.h
@@ -155,5 +155,7 @@ void neutrino_check_cosmology(const struct space *s,
                               struct swift_params *params,
                               const struct neutrino_props *neutrino_props,
                               const int rank, const int verbose);
-
+double lightcone_map_neutrino_baseline_value(
+    const struct cosmology *c, const struct lightcone_props *lightcone_props,
+    const struct lightcone_map *map);
 #endif /* SWIFT_DEFAULT_NEUTRINO_H */
diff --git a/src/neutrino/Default/neutrino_io.h b/src/neutrino/Default/neutrino_io.h
index 312b3a042ef6571847e6511245d712ef15afbb0b..40ceb01084e55b3afe397e0785b146cbd15c2dee 100644
--- a/src/neutrino/Default/neutrino_io.h
+++ b/src/neutrino/Default/neutrino_io.h
@@ -23,6 +23,9 @@
 
 /* Local includes */
 #include "fermi_dirac.h"
+#include "io_properties.h"
+#include "lightcone/lightcone.h"
+#include "lightcone/lightcone_map_types.h"
 #include "neutrino.h"
 #include "neutrino_properties.h"
 
@@ -163,4 +166,37 @@ __attribute__((always_inline)) INLINE static int neutrino_write_particles(
   return 3;
 }
 
+/*
+  Lightcone map of neutrino mass perturbation
+*/
+
+int lightcone_map_neutrino_mass_type_contributes(int ptype);
+double lightcone_map_neutrino_mass_get_value(
+    const struct engine* e, const struct lightcone_props* lightcone_props,
+    const struct gpart* gp, const double a_cross, const double x_cross[3]);
+
+static const struct lightcone_map_type neutrino_lightcone_map_types[] = {
+    {
+        .name = "NeutrinoMass",
+        .update_map = lightcone_map_neutrino_mass_get_value,
+        .ptype_contributes = lightcone_map_neutrino_mass_type_contributes,
+        .baseline_func = lightcone_map_neutrino_baseline_value,
+        .units = UNIT_CONV_MASS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+    {
+        /* NULL functions indicate end of array */
+        .name = "",
+        .update_map = NULL,
+        .ptype_contributes = NULL,
+        .baseline_func = NULL,
+        .units = UNIT_CONV_NO_UNITS,
+        .smoothing = map_unsmoothed,
+        .compression = compression_write_lossless,
+        .buffer_scale_factor = 1.0,
+    },
+};
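+
+/* Code iterating over this table stops at the first entry whose
+   update_map function is NULL. */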
+
 #endif /* SWIFT_DEFAULT_NEUTRINO_IO_H */
diff --git a/src/output_list.c b/src/output_list.c
index be06e585032e7b726f8020cb57e429456c410cbd..1c93bf6c71bf0027d1ef4aaf61886f44a66139f9 100644
--- a/src/output_list.c
+++ b/src/output_list.c
@@ -90,6 +90,8 @@ void output_list_read_file(struct output_list *output_list,
     type = OUTPUT_LIST_AGE;
   } else if (strcasecmp(line, "# Scale Factor") == 0) {
     type = OUTPUT_LIST_SCALE_FACTOR;
+  } else if (strcasecmp(line, "# Comoving Distance") == 0) {
+    type = OUTPUT_LIST_COMOVING_DISTANCE;
   } else if (strcasecmp(line, "# Redshift, Select Output") == 0) {
     type = OUTPUT_LIST_REDSHIFT;
     output_list->select_output_on = 1;
@@ -99,6 +101,9 @@ void output_list_read_file(struct output_list *output_list,
   } else if (strcasecmp(line, "# Scale Factor, Select Output") == 0) {
     type = OUTPUT_LIST_SCALE_FACTOR;
     output_list->select_output_on = 1;
+  } else if (strcasecmp(line, "# Comoving Distance, Select Output") == 0) {
+    type = OUTPUT_LIST_COMOVING_DISTANCE;
+    output_list->select_output_on = 1;
   } else if (strcasecmp(line, "# Redshift, Select Output, Label") == 0) {
     type = OUTPUT_LIST_REDSHIFT;
     output_list->select_output_on = 1;
@@ -111,12 +116,18 @@ void output_list_read_file(struct output_list *output_list,
     type = OUTPUT_LIST_SCALE_FACTOR;
     output_list->select_output_on = 1;
     output_list->alternative_labels_on = 1;
+  } else if (strcasecmp(line, "# Comoving Distance, Select Output, Label") ==
+             0) {
+    type = OUTPUT_LIST_COMOVING_DISTANCE;
+    output_list->select_output_on = 1;
+    output_list->alternative_labels_on = 1;
   } else {
     error("Unable to interpret the header (%s) in file '%s'", line, filename);
   }
 
   if (!cosmo &&
-      (type == OUTPUT_LIST_SCALE_FACTOR || type == OUTPUT_LIST_REDSHIFT))
+      (type == OUTPUT_LIST_SCALE_FACTOR || type == OUTPUT_LIST_REDSHIFT ||
+       type == OUTPUT_LIST_COMOVING_DISTANCE))
     error(
         "Unable to compute a redshift or a scale factor without cosmology. "
         "Please change the header in '%s'",
@@ -162,6 +173,9 @@ void output_list_read_file(struct output_list *output_list,
     if (cosmo && type == OUTPUT_LIST_AGE)
       *time = cosmology_get_scale_factor(cosmo, *time);
 
+    if (cosmo && type == OUTPUT_LIST_COMOVING_DISTANCE)
+      *time = cosmology_scale_factor_at_comoving_distance(cosmo, *time);
+
     /* Search to find index for select output - select_output_index is the index
      * in the select_output_names array that corresponds to this select output
      * name. */
@@ -212,6 +226,11 @@ void output_list_read_file(struct output_list *output_list,
     if ((type == OUTPUT_LIST_SCALE_FACTOR) &&
         (output_list->times[i] <= output_list->times[i - 1]))
       error("Output list not having monotonically increasing scale-factors.");
+
+    if ((type == OUTPUT_LIST_COMOVING_DISTANCE) &&
+        (output_list->times[i] <= output_list->times[i - 1]))
+      error(
+          "Output list not having monotonically decreasing comoving distance.");
   }
 
   /* set current indice to 0 */
diff --git a/src/output_list.h b/src/output_list.h
index 14c7a6a1d07be8af85c7669dc9164e5852586a89..2fa7394ad4ce69a18ccfb8c52d15c5c5630c875a 100644
--- a/src/output_list.h
+++ b/src/output_list.h
@@ -43,6 +43,7 @@ enum output_list_type {
   OUTPUT_LIST_AGE,
   OUTPUT_LIST_REDSHIFT,
   OUTPUT_LIST_SCALE_FACTOR,
+  OUTPUT_LIST_COMOVING_DISTANCE,
 };
 
 /**
diff --git a/src/particle_buffer.c b/src/particle_buffer.c
new file mode 100644
index 0000000000000000000000000000000000000000..b3e778ba5fada7610296b2214aee91f3245ecf73
--- /dev/null
+++ b/src/particle_buffer.c
@@ -0,0 +1,273 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#include "particle_buffer.h"
+
+#include "align.h"
+#include "error.h"
+#include "memuse.h"
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/**
+ * @brief Initialize a particle buffer.
+ *
+ * Stores a sequence of data objects of size element_size,
+ * allowing new elements to be appended by multiple threads
+ * simultaneously. Note that ONLY the append operation is
+ * thread safe.
+ *
+ * Objects are stored in a linked list of blocks and new blocks
+ * are allocated as needed.
+ *
+ * @param buffer The #particle_buffer
+ * @param element_size Size of a single element
+ * @param elements_per_block Number of elements to store in each block
+ * @param name Name to use when logging memory allocations
+ *
+ */
+void particle_buffer_init(struct particle_buffer *buffer, size_t element_size,
+                          size_t elements_per_block, char *name) {
+
+  buffer->element_size = element_size;
+  buffer->elements_per_block = elements_per_block;
+  buffer->first_block = NULL;
+  buffer->last_block = NULL;
+  lock_init(&buffer->lock);
+
+  int len = snprintf(buffer->name, PARTICLE_BUFFER_NAME_LENGTH, "%s", name);
+  if (len >= PARTICLE_BUFFER_NAME_LENGTH || len < 0)
+    error("Buffer name truncated or encoding error");
+}
+
+/**
+ * @brief Deallocate a particle buffer.
+ *
+ * The buffer is no longer in a usable state after this.
+ *
+ * @param buffer The #particle_buffer
+ *
+ */
+void particle_buffer_free(struct particle_buffer *buffer) {
+
+  struct particle_buffer_block *block = buffer->first_block;
+  while (block) {
+    struct particle_buffer_block *next = block->next;
+    swift_free(buffer->name, block->data);
+    free(block);
+    block = next;
+  }
+  buffer->first_block = NULL;
+  buffer->last_block = NULL;
+  if (lock_destroy(&buffer->lock) != 0)
+    error("Failed to destroy lock on particle buffer");
+}
+
+/**
+ * @brief Empty a particle buffer
+ *
+ * This leaves the buffer ready to accept new elements.
+ *
+ * @param buffer The #particle_buffer
+ *
+ */
+void particle_buffer_empty(struct particle_buffer *buffer) {
+
+  const size_t element_size = buffer->element_size;
+  const size_t elements_per_block = buffer->elements_per_block;
+  char name[PARTICLE_BUFFER_NAME_LENGTH];
+  strncpy(name, buffer->name, PARTICLE_BUFFER_NAME_LENGTH);
+  particle_buffer_free(buffer);
+  particle_buffer_init(buffer, element_size, elements_per_block, name);
+}
+
+/**
+ * @brief Allocate a new particle buffer block
+ *
+ * @param buffer The #particle_buffer
+ * @param previous_block The previous final block in the linked list
+ */
+static struct particle_buffer_block *allocate_block(
+    struct particle_buffer *buffer,
+    struct particle_buffer_block *previous_block) {
+
+  const size_t element_size = buffer->element_size;
+  const size_t elements_per_block = buffer->elements_per_block;
+
+  /* Allocate the struct */
+  struct particle_buffer_block *block =
+      malloc(sizeof(struct particle_buffer_block));
+  if (!block)
+    error("Failed to allocate new particle buffer block: %s", buffer->name);
+
+  /* Allocate data buffer */
+  char *data;
+  if (swift_memalign(buffer->name, (void **)&data, SWIFT_STRUCT_ALIGNMENT,
+                     element_size * elements_per_block) != 0) {
+    error("Failed to allocate particle buffer data block: %s", buffer->name);
+  }
+
+  /* Initialise the struct */
+  block->data = data;
+  block->num_elements = 0;
+  block->next = NULL;
+
+  if (previous_block) previous_block->next = block;
+
+  return block;
+}
+
+/**
+ * @brief Append an element to a particle buffer.
+ *
+ * May be called from multiple threads simultaneously.
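+ * A slot is reserved by atomically incrementing the current block's
+ * element counter; the buffer's lock is only taken on the slow path
+ * where a new block must be allocated.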
+ *
+ * @param buffer The #particle_buffer
+ * @param data The element to append
+ *
+ */
+void particle_buffer_append(struct particle_buffer *buffer, void *data) {
+
+  const size_t element_size = buffer->element_size;
+  const size_t elements_per_block = buffer->elements_per_block;
+
+  while (1) {
+
+    /* Find the current block (atomic because last_block may be modified by
+     * other threads) */
+    struct particle_buffer_block *block =
+        __atomic_load_n(&buffer->last_block, __ATOMIC_SEQ_CST);
+
+    /* It may be that no blocks exist yet */
+    if (!block) {
+      lock_lock(&buffer->lock);
+      /* Check no-one else allocated the first block before we got the lock */
+      if (!buffer->last_block) {
+        block = allocate_block(buffer, NULL);
+        buffer->first_block = block;
+        __atomic_thread_fence(__ATOMIC_SEQ_CST);
+        /* After this store other threads will write to the new block,
+           so all initialization must complete before this. */
+        __atomic_store_n(&buffer->last_block, block, __ATOMIC_SEQ_CST);
+      }
+      if (lock_unlock(&buffer->lock) != 0)
+        error("Failed to unlock particle buffer");
+      /* Now try again */
+      continue;
+    }
+
+    /* Find next available index in current block */
+    size_t index =
+        __atomic_fetch_add(&block->num_elements, 1, __ATOMIC_SEQ_CST);
+
+    if (index < elements_per_block) {
+      /* We reserved a valid index, so copy the data */
+      memcpy(block->data + index * element_size, data, element_size);
+      return;
+    } else {
+      /* No space left, so we need to allocate a new block */
+      lock_lock(&buffer->lock);
+      /* Check no-one else already did it before we got the lock */
+      if (!block->next) {
+        /* Allocate and initialize the new block */
+        struct particle_buffer_block *new_block = allocate_block(buffer, block);
+        __atomic_thread_fence(__ATOMIC_SEQ_CST);
+        /* After this store other threads will write to the new block,
+           so all initialization must complete before this. */
+        __atomic_store_n(&buffer->last_block, new_block, __ATOMIC_SEQ_CST);
+      }
+      if (lock_unlock(&buffer->lock) != 0)
+        error("Failed to unlock particle buffer");
+      /* A block with free space now exists, so try again */
+    }
+  }
+}
+
+/**
+ * @brief Iterate over data blocks in particle buffer.
+ *
+ * @param buffer The #particle_buffer
+ * @param block Initially null, returns pointer to next data block
+ * @param num_elements Returns number of elements in this block
+ * @param data Returns pointer to data in this block
+ *
+ */
+void particle_buffer_iterate(struct particle_buffer *buffer,
+                             struct particle_buffer_block **block,
+                             size_t *num_elements, void **data) {
+
+  if (!*block) {
+    *block = buffer->first_block;
+  } else {
+    *block = (*block)->next;
+  }
+
+  if (*block) {
+    *data = (*block)->data;
+    *num_elements = (*block)->num_elements;
+    if (*num_elements > buffer->elements_per_block)
+      *num_elements = buffer->elements_per_block;
+  } else {
+    *data = NULL;
+    *num_elements = 0;
+  }
+}
+
+/**
+ * @brief Return number of elements in particle buffer.
+ *
+ * @param buffer The #particle_buffer
+ *
+ */
+size_t particle_buffer_num_elements(struct particle_buffer *buffer) {
+
+  size_t num_elements = 0;
+  struct particle_buffer_block *block = buffer->first_block;
+  while (block) {
+    if (block->num_elements < buffer->elements_per_block) {
+      /* Non-full block, so block->num_elements is correct */
+      num_elements += block->num_elements;
+    } else {
+      /* Full block, so block->num_elements may be out of range */
+      num_elements += buffer->elements_per_block;
+    }
+    block = block->next;
+  }
+  return num_elements;
+}
+
+/**
+ * @brief Return memory used by a particle buffer.
+ *
+ * @param buffer The #particle_buffer
+ *
+ */
+size_t particle_buffer_memory_use(struct particle_buffer *buffer) {
+
+  size_t num_bytes = 0;
+  struct particle_buffer_block *block = buffer->first_block;
+  while (block) {
+    num_bytes += (buffer->elements_per_block * buffer->element_size);
+    block = block->next;
+  }
+  return num_bytes;
+}
diff --git a/src/particle_buffer.h b/src/particle_buffer.h
new file mode 100644
index 0000000000000000000000000000000000000000..08217b5afa8b54d6e454c0ef460fd0286eb09cd9
--- /dev/null
+++ b/src/particle_buffer.h
@@ -0,0 +1,60 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2021 John Helly (j.c.helly@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+#include "lock.h"
+#include "threadpool.h"
+
+#ifndef SWIFT_PARTICLE_BUFFER_H
+#define SWIFT_PARTICLE_BUFFER_H
+
+#define PARTICLE_BUFFER_NAME_LENGTH 100
+
+struct particle_buffer_block {
+  size_t num_elements;
+  char *data;
+  struct particle_buffer_block *next;
+};
+
+struct particle_buffer {
+  size_t element_size;
+  size_t elements_per_block;
+  struct particle_buffer_block *first_block;
+  struct particle_buffer_block *last_block;
+  swift_lock_type lock;
+  char name[PARTICLE_BUFFER_NAME_LENGTH];
+};
+
+void particle_buffer_init(struct particle_buffer *buffer, size_t element_size,
+                          size_t elements_per_block, char *name);
+
+void particle_buffer_free(struct particle_buffer *buffer);
+
+void particle_buffer_empty(struct particle_buffer *buffer);
+
+void particle_buffer_append(struct particle_buffer *buffer, void *data);
+
+void particle_buffer_iterate(struct particle_buffer *buffer,
+                             struct particle_buffer_block **block,
+                             size_t *num_elements, void **data);
+
+size_t particle_buffer_num_elements(struct particle_buffer *buffer);
+
+size_t particle_buffer_memory_use(struct particle_buffer *buffer);
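+
+/* Typical usage (illustrative sketch; struct my_type is a placeholder):
+ *
+ *   struct particle_buffer buf;
+ *   particle_buffer_init(&buf, sizeof(struct my_type), 1024, "my_buffer");
+ *   particle_buffer_append(&buf, &element);  // safe from multiple threads
+ *
+ *   struct particle_buffer_block *block = NULL;
+ *   size_t num;
+ *   void *data;
+ *   do {
+ *     particle_buffer_iterate(&buf, &block, &num, &data);
+ *     // process num elements at data, if block != NULL
+ *   } while (block);
+ *
+ *   particle_buffer_free(&buf);
+ */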
+
+#endif /* SWIFT_PARTICLE_BUFFER_H */
diff --git a/src/runner_drift.c b/src/runner_drift.c
index db1918de8c7012de6780e5e2afa2fd44f1c2dc16..cd5175daed80765d2416c894c6feef879c68df25 100644
--- a/src/runner_drift.c
+++ b/src/runner_drift.c
@@ -42,7 +42,7 @@ void runner_do_drift_part(struct runner *r, struct cell *c, int timer) {
 
   TIMER_TIC;
 
-  cell_drift_part(c, r->e, 0);
+  cell_drift_part(c, r->e, 0, NULL);
 
   if (timer) TIMER_TOC(timer_drift_part);
 }
@@ -58,7 +58,7 @@ void runner_do_drift_gpart(struct runner *r, struct cell *c, int timer) {
 
   TIMER_TIC;
 
-  cell_drift_gpart(c, r->e, 0);
+  cell_drift_gpart(c, r->e, 0, NULL);
 
   if (timer) TIMER_TOC(timer_drift_gpart);
 }
@@ -74,7 +74,7 @@ void runner_do_drift_spart(struct runner *r, struct cell *c, int timer) {
 
   TIMER_TIC;
 
-  cell_drift_spart(c, r->e, 0);
+  cell_drift_spart(c, r->e, 0, NULL);
 
   if (timer) TIMER_TOC(timer_drift_spart);
 }
@@ -90,7 +90,7 @@ void runner_do_drift_bpart(struct runner *r, struct cell *c, int timer) {
 
   TIMER_TIC;
 
-  cell_drift_bpart(c, r->e, 0);
+  cell_drift_bpart(c, r->e, 0, NULL);
 
   if (timer) TIMER_TOC(timer_drift_bpart);
 }
diff --git a/src/swift.h b/src/swift.h
index b112766b5fbc09bd2ef6f0fdf47242e8eecd84f7..9c22bc3fa52b183d3092015f6d5a9ea8927bf41f 100644
--- a/src/swift.h
+++ b/src/swift.h
@@ -42,6 +42,7 @@
 #include "engine.h"
 #include "entropy_floor.h"
 #include "error.h"
+#include "extra_io.h"
 #include "feedback.h"
 #include "feedback_properties.h"
 #include "fof.h"
@@ -52,6 +53,7 @@
 #include "hydro.h"
 #include "hydro_properties.h"
 #include "ic_info.h"
+#include "lightcone/lightcone_array.h"
 #include "line_of_sight.h"
 #include "lock.h"
 #include "map.h"
diff --git a/src/tools.h b/src/tools.h
index 8f7a65b7aa9acd60fe3dc6d37fe2b1589e61b133..f7672ff7be7a54ba9df710572ce5d52e3b279e85 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -23,6 +23,7 @@
 #define SWIFT_TOOL_H
 
 #include "cell.h"
+#include "error.h"
 #include "gravity_properties.h"
 #include "part.h"
 #include "physical_constants.h"
@@ -66,4 +67,11 @@ char *trim_both(char *s);
 
 void safe_checkdir(const char *dir, int create);
 
+#define check_snprintf(s, n, format, ...)                   \
+  do {                                                      \
+    int _len = snprintf(s, n, format, __VA_ARGS__);         \
+    if ((_len < 0) || (_len >= n))                          \
+      error("truncation of string with format %s", format); \
+  } while (0)
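+
+/* Example (illustrative): check_snprintf(fname, sizeof(fname), "%s.%d",
+   basename, rank) aborts via error() if the formatted string would not
+   fit in fname. */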
+
 #endif /* SWIFT_TOOL_H */
diff --git a/src/units.c b/src/units.c
index b1592442d62bbead0fd53f16f0ce15be0a77650e..77a9a33061b277184ecde79a31e591e32e7da1f2 100644
--- a/src/units.c
+++ b/src/units.c
@@ -255,6 +255,7 @@ void units_get_base_unit_exponents_array(float baseUnitsExp[5],
 
     case UNIT_CONV_FREQUENCY:
     case UNIT_CONV_SSFR:
+    case UNIT_CONV_PHOTONS_PER_TIME:
       baseUnitsExp[UNIT_TIME] = -1.f;
       break;
 
@@ -450,17 +451,32 @@ void units_get_base_unit_exponents_array(float baseUnitsExp[5],
       baseUnitsExp[UNIT_TIME] = -1.f;
       break;
 
+    case UNIT_CONV_INV_AREA:
+      baseUnitsExp[UNIT_LENGTH] = -2.f;
+      break;
+
     case UNIT_CONV_POWER_DENSITY:
       baseUnitsExp[UNIT_MASS] = 1.f;
       baseUnitsExp[UNIT_LENGTH] = -1.f;
       baseUnitsExp[UNIT_TIME] = -3.f;
       break;
 
+    case UNIT_CONV_GASOLINE_DIFF_RATE:
     case UNIT_CONV_THERMAL_DIFFUSIVITY:
       baseUnitsExp[UNIT_LENGTH] = 2.f;
       baseUnitsExp[UNIT_TIME] = -1.f;
       break;
 
+    case UNIT_CONV_NUMBER_DENSITY_PER_TIME:
+      baseUnitsExp[UNIT_LENGTH] = -3.f;
+      baseUnitsExp[UNIT_TIME] = -1.f;
+      break;
+
+    case UNIT_CONV_PHOTON_FLUX_PER_UNIT_SURFACE:
+      baseUnitsExp[UNIT_LENGTH] = -2.f;
+      baseUnitsExp[UNIT_TIME] = -1.f;
+      break;
+
     default:
       error("Invalid choice of pre-defined units");
       break;
diff --git a/src/units.h b/src/units.h
index cf3da659ee84ea7f5e849fd474e1feea73087e79..ee2c771e873aac0723921df5758c7d0df43c3105 100644
--- a/src/units.h
+++ b/src/units.h
@@ -109,11 +109,16 @@ enum unit_conversion_factor {
   UNIT_CONV_MASS_PER_UNIT_TIME,
   UNIT_CONV_VELOCITY_SQUARED,
   UNIT_CONV_INV_TIME,
+  UNIT_CONV_INV_AREA,
   UNIT_CONV_RADIATION_FLUX,
   UNIT_CONV_ENERGY_FLUX_PER_UNIT_SURFACE,
   UNIT_CONV_ENERGY_FLUX_DENSITY,
   UNIT_CONV_POWER_DENSITY,
+  UNIT_CONV_GASOLINE_DIFF_RATE,
   UNIT_CONV_THERMAL_DIFFUSIVITY,
+  UNIT_CONV_NUMBER_DENSITY_PER_TIME,
+  UNIT_CONV_PHOTONS_PER_TIME,
+  UNIT_CONV_PHOTON_FLUX_PER_UNIT_SURFACE,
 };
 
 void units_init_cgs(struct unit_system*);
diff --git a/tests/Makefile.am b/tests/Makefile.am
index af64ee60df65d72dfe365e46a5d9ac78a900b3c6..1457316480b9be5fb4da5894e59e115c9150dec3 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -15,9 +15,9 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 # Add the source directory and the non-standard paths to the included library headers to CFLAGS
-AM_CFLAGS = -I$(top_srcdir)/src $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(OPENMP_CFLAGS)
+AM_CFLAGS = -I$(top_srcdir)/src $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(OPENMP_CFLAGS) $(CHEALPIX_CFLAGS)
 
-AM_LDFLAGS = ../src/.libs/libswiftsim.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS)
+AM_LDFLAGS = ../src/.libs/libswiftsim.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS) $(CHEALPIX_LIBS)
 
 if HAVECSDS
 AM_LDFLAGS += ../csds/src/.libs/libcsds_writer.a
diff --git a/tests/test125cells.c b/tests/test125cells.c
index 2f3a647150826673aed05047a7b8053f5ef391bd..03c50577cc7671c592c97fa592131441088b5ffb 100644
--- a/tests/test125cells.c
+++ b/tests/test125cells.c
@@ -610,6 +610,10 @@ int main(int argc, char *argv[]) {
   struct runner runner;
   runner.e = &engine;
 
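+  /* The cell drift functions now take lightcone information, so the
+     test engine needs an (empty) lightcone array. */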
+  struct lightcone_array_props lightcone_array_properties;
+  lightcone_array_properties.nr_lightcones = 0;
+  engine.lightcone_array_properties = &lightcone_array_properties;
+
   /* Construct some cells */
   struct cell *cells[125];
   struct cell *inner_cells[27];
diff --git a/tests/test27cells.c b/tests/test27cells.c
index 5b0f3e8aa6b233f0bdef57fc640f7dfb995f5576..7b673ab705fbae11e5df959a4cb2cf6d4b781d72 100644
--- a/tests/test27cells.c
+++ b/tests/test27cells.c
@@ -494,6 +494,10 @@ int main(int argc, char *argv[]) {
   struct runner runner;
   runner.e = &engine;
 
+  struct lightcone_array_props lightcone_array_properties;
+  lightcone_array_properties.nr_lightcones = 0;
+  engine.lightcone_array_properties = &lightcone_array_properties;
+
   /* Construct some cells */
   struct cell *cells[27];
   struct cell *main_cell;
diff --git a/tests/test27cellsStars.c b/tests/test27cellsStars.c
index d926d67c32d6062f9fa7e117241cfc0b957524e1..75b33fc44bdaf41685bcfd336c2f39c9cbc1f04e 100644
--- a/tests/test27cellsStars.c
+++ b/tests/test27cellsStars.c
@@ -409,6 +409,10 @@ int main(int argc, char *argv[]) {
   struct runner runner;
   runner.e = &engine;
 
+  struct lightcone_array_props lightcone_array_properties;
+  lightcone_array_properties.nr_lightcones = 0;
+  engine.lightcone_array_properties = &lightcone_array_properties;
+
   /* Construct some cells */
   struct cell *cells[27];
   struct cell *main_cell;
diff --git a/tests/testPeriodicBC.c b/tests/testPeriodicBC.c
index ff90018153f6bb5c0f293a9f021553811019e677..af89b6288e89609ec217f1e42814599bf57e1e36 100644
--- a/tests/testPeriodicBC.c
+++ b/tests/testPeriodicBC.c
@@ -501,6 +501,10 @@ int main(int argc, char *argv[]) {
   cosmology_init_no_cosmo(&cosmo);
   engine.cosmology = &cosmo;
 
+  struct lightcone_array_props lightcone_array_properties;
+  lightcone_array_properties.nr_lightcones = 0;
+  engine.lightcone_array_properties = &lightcone_array_properties;
+
   /* Construct some cells */
   struct cell *cells[dim * dim * dim];
   static long long partId = 0;
diff --git a/tools/timed_functions.py b/tools/timed_functions.py
index d2976c65e487106aea77420f3f4aad20767d7014..ad1cc33e892159aa8cf2d9f58f37912f0984d2f6 100644
--- a/tools/timed_functions.py
+++ b/tools/timed_functions.py
@@ -72,4 +72,6 @@ labels = [
     ["csds_log_all_particles:", 0],
     ["csds_ensure_size:", 0],
     ["csds_init:", 0],
+    ["Applying lightcone map updates", 0],
+    ["Flushing particle buffers", 0],
 ]