diff --git a/.gitignore b/.gitignore index 1fd274009591c7ac98d71e05d536c98d4c17c485..9772f54a40e1663bf5441cb5fc8cddf03d22f127 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ examples/fof_mpi examples/*/*/*.xmf examples/*/*/*.dat examples/*/*/*.png +examples/*/*/*.pdf examples/*/*/*.mp4 examples/*/*/*.txt examples/*/*/*.rst diff --git a/README b/README index 5490ef8426c62529e6af81144e0701a409c8d4b7..ee7abd5a5709c81ecef1a89c1a651a925ce2f4a9 100644 --- a/README +++ b/README @@ -6,7 +6,7 @@ /____/ |__/|__/___/_/ /_/ SPH With Inter-dependent Fine-grained Tasking - Version : 0.8.3 + Version : 0.8.4 Website: www.swiftsim.com Twitter: @SwiftSimulation diff --git a/README.md b/README.md index b52f8595e2984f95f82f571db07a65631f7c3231..efffc9b4c43ff8f0821c4d7d49721ff7ff5949d0 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ Runtime parameters /____/ |__/|__/___/_/ /_/ SPH With Inter-dependent Fine-grained Tasking - Version : 0.8.3 + Version : 0.8.4 Website: www.swiftsim.com Twitter: @SwiftSimulation diff --git a/configure.ac b/configure.ac index 0841f1c0490506a77cc1fd1b5a2d164669a3eb91..3f284a2f6a25fbf8b51aa8f27fd84d11f4e6ff56 100644 --- a/configure.ac +++ b/configure.ac @@ -16,7 +16,7 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. # Init the project. -AC_INIT([SWIFT],[0.8.3],[https://gitlab.cosma.dur.ac.uk/swift/swiftsim]) +AC_INIT([SWIFT],[0.8.4],[https://gitlab.cosma.dur.ac.uk/swift/swiftsim]) swift_config_flags="$*" # We want to stop when given unrecognised options. No subdirs so this is safe. diff --git a/doc/RTD/source/HydroSchemes/anarchy_sph.rst b/doc/RTD/source/HydroSchemes/anarchy_sph.rst index 8d09280039c25608d3664e1a18d9b5c28d3108b6..267842936bddaf70f36c4aa4f53c27eb8c802108 100644 --- a/doc/RTD/source/HydroSchemes/anarchy_sph.rst +++ b/doc/RTD/source/HydroSchemes/anarchy_sph.rst @@ -11,10 +11,22 @@ includes: + Pressure-Energy SPH + Thermal diffusion following Price (2012) + A simplified version of the 'Inviscid SPH' artificial viscosity - (Cullen & Denhen 2010). + (Cullen & Dehnen 2010), with a Balsara switch. More information will be made available in a forthcoming publication. +The simplified version of the 'Inviscid SPH' artificial viscosity calculates +the time differential of the velocity divergence explicitly, using the value +from the previous step. We also use the Balsara switch instead of the improved +neighbour-based limiter from Cullen & Dehnen 2010, to avoid matrix calculations. + +To configure with this scheme, use + +.. code-block:: bash + + ./configure --with-hydro=anarchy-pu --with-kernel=quintic-spline --disable-hand-vec + + The scheme as-implemented in SWIFT is slightly different to the one implemented in the original EAGLE code: @@ -26,8 +38,88 @@ implemented in the original EAGLE code: + Recommended kernel changed from Wendland-C2 (with 100 Ngb) to Quintic Spline (with ~82 Ngb). +The parameters available for this scheme, and their defaults, are: + +..
code-block:: yaml + + SPH: + viscosity_alpha: 0.1 # Initial value for the alpha viscosity + viscosity_length: 0.25 # Viscosity decay length (in terms of sound-crossing time) + # These are enforced each time-step + viscosity_alpha_max: 2.0 # Maximal allowed value for the viscosity alpha + viscosity_alpha_min: 0.0 # Minimal allowed value for the viscosity alpha + + diffusion_alpha: 0.0 # Initial value for the diffusion alpha + diffusion_beta: 0.01 # Timescale to raise the diffusion coefficient over + # (decay is on the sound-crossing time) + # These are enforced each time-step + diffusion_alpha_max: 1.0 + diffusion_alpha_min: 0.0 + + +There is also a compile-time parameter, ``viscosity_beta``, which we set to +3.0. During feedback events, the viscosity is set to the compile-time +``hydro_props_default_viscosity_alpha_feedback_reset = 2.0`` and the +diffusion is set to ``hydro_props_default_diffusion_alpha_feedback_reset = +0.0``. These can be changed in ``src/hydro/AnarchyPU/hydro_parameters.h``. + + +ANARCHY-DU SPH +============== + +This is the new scheme that will be used in EAGLE-XL. This scheme includes: + ++ Durier & Dalla Vecchia (2012) time-step limiter ++ Density-Energy SPH ++ Thermal diffusion following Price (2012) ++ A simplified version of the 'Inviscid SPH' artificial viscosity + (Cullen & Dehnen 2010), with a Balsara switch ++ A diffusion limiter, used to prevent energy leakage out of EAGLE + supernovae (Borrow in prep). + +More information will be made available in a forthcoming publication. + +The simplified version of the 'Inviscid SPH' artificial viscosity calculates +the time differential of the velocity divergence explicitly, using the value +from the previous step. We also use the Balsara switch instead of the improved +neighbour-based limiter from Cullen & Dehnen 2010, to avoid matrix +calculations. + +The diffusion limiter is implemented to ensure that the diffusion is turned +off in very viscous flows and works as follows: + +.. code-block:: C + + float new_diffusion_alpha = old_diffusion_alpha; + + const float viscous_diffusion_limit = + diffusion_alpha_max * + (1.f - maximum_alpha_visc_over_ngb / viscosity_alpha_max); + + new_diffusion_alpha = min(new_diffusion_alpha, viscous_diffusion_limit); + + +The parameters available for this scheme, and their defaults, are: + +.. code-block:: yaml + + SPH: + viscosity_alpha: 0.1 # Initial value for the alpha viscosity + viscosity_length: 0.25 # Viscosity decay length (in terms of sound-crossing time) + # These are enforced each time-step + viscosity_alpha_max: 2.0 # Maximal allowed value for the viscosity alpha + viscosity_alpha_min: 0.0 # Minimal allowed value for the viscosity alpha + + diffusion_alpha: 0.0 # Initial value for the diffusion alpha + diffusion_beta: 0.25 # Timescale to raise the diffusion coefficient over + # (decay is on the sound-crossing time) + # These are enforced each time-step + diffusion_alpha_max: 1.0 + diffusion_alpha_min: 0.0 -.. code-block:: bash - - ./configure --with-hydro=anarchy-pu --with-kernel=quintic-spline --disable-hand-vec +There is also a compile-time parameter, ``viscosity_beta``, which we set to +3.0. During feedback events, the viscosity is set to the compile-time +``hydro_props_default_viscosity_alpha_feedback_reset = 2.0`` and the +diffusion is set to ``hydro_props_default_diffusion_alpha_feedback_reset = +0.0``. These can be changed in ``src/hydro/AnarchyPU/hydro_parameters.h``.
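As a point of reference for the paragraph above, the explicit time differential of the velocity divergence amounts to a first-order finite difference between steps. The fragment below is only an illustrative sketch with invented variable names; it is not the actual SWIFT source:

.. code-block:: C

   /* Illustrative only: estimate d(div v)/dt from the divergence stored
    * at the previous step. */
   const float div_v_dt = (div_v_current - div_v_previous) / dt;

   /* A strongly negative derivative signals rapid compression (an
    * approaching shock); the viscosity alpha is then raised towards
    * viscosity_alpha_max, and otherwise decays over the sound-crossing
    * time controlled by viscosity_length. */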
diff --git a/doc/RTD/source/HydroSchemes/gizmo.rst b/doc/RTD/source/HydroSchemes/gizmo.rst index bbfcae04e1abac57b1476e4533bf92e051e6769d..fe8555f9bb7318d197162b2ffd90f90cb8ebc5b4 100644 --- a/doc/RTD/source/HydroSchemes/gizmo.rst +++ b/doc/RTD/source/HydroSchemes/gizmo.rst @@ -25,3 +25,6 @@ this at compile-time with the following configuration flags: .. code-block:: bash ./configure --with-hydro="gizmo-mfm" --with-riemann-solver="hllc" + + +These schemes should be treated as experimental and not used for production runs. \ No newline at end of file diff --git a/doc/RTD/source/HydroSchemes/hopkins_sph.rst b/doc/RTD/source/HydroSchemes/hopkins_sph.rst index e4f1479230df96eabaa1fe16994960059858613b..53ae687d549651f897b7b23b629d02c0b0ecd4a5 100644 --- a/doc/RTD/source/HydroSchemes/hopkins_sph.rst +++ b/doc/RTD/source/HydroSchemes/hopkins_sph.rst @@ -9,8 +9,8 @@ Pressure-Entropy SPH :hidden: :caption: Contents: -A pressure-entropy SPH scheme is available in SWIFT, inspired by Hopkins 2013. -This includes a Monaghan AV scheme and a Balsara switch. +A Pressure-Entropy SPH scheme is available in SWIFT, inspired by Hopkins 2013. +This includes a fixed Monaghan AV scheme and a Balsara switch. .. code-block:: bash @@ -18,6 +18,15 @@ This includes a Monaghan AV scheme and a Balsara switch. ./configure --with-hydro="pressure-entropy" +The parameters available for this scheme, and their defaults, are: + +.. code-block:: yaml + + SPH: + viscosity_alpha: 0.8 # Fixed value for the alpha viscosity + + + Pressure-Energy SPH =================== @@ -29,8 +38,38 @@ scheme it includes a Monaghan AV scheme and a Balsara switch. ./configure --with-hydro="pressure-energy" -Both of the above schemes use a very simple, fixed artificial viscosity, only -the ``SPH:viscosity_alpha`` parameter has any effect for this scheme. This will -change the strength of the artificial viscosity throughout the simulation, and -has a default of 0.8. +The parameters available for this scheme, and their defaults, are: + +.. code-block:: yaml + + SPH: + viscosity_alpha: 0.8 # Fixed value for the alpha viscosity + + +There is a variant of this implementation that includes a Morris & Monaghan +(1997) variable artificial viscosity that aims to reduce dissipation away +from strong shocks. This implementation also includes a Balsara switch. +To use this scheme, configure with: + +.. code-block:: bash + + ./configure --with-hydro="pressure-energy-monaghan" + + +The parameters available for this scheme, and their defaults, are: + +.. code-block:: yaml + + SPH: + viscosity_alpha: 0.8 # Initial value for the alpha viscosity + viscosity_length: 0.25 # Viscosity decay length (in terms of sound-crossing time) + # These are enforced each time-step + viscosity_alpha_max: 2.0 # Maximal allowed value for the viscosity alpha + viscosity_alpha_min: 0.1 # Minimal allowed value for the viscosity alpha + + +There is also a compile-time parameter, ``viscosity_beta``, which we set to +3.0. During feedback events, the viscosity is set to the compile-time +``hydro_props_default_viscosity_alpha_feedback_reset = 2.0``. These can be +changed in ``src/hydro/AnarchyPU/hydro_parameters.h``. diff --git a/doc/RTD/source/Snapshots/index.rst b/doc/RTD/source/Snapshots/index.rst index 30cdc0e1281ae0420b44d88001992ccbbe588136..32317aa19b8a216659317d4ae457a2dbfd040571 100644 --- a/doc/RTD/source/Snapshots/index.rst +++ b/doc/RTD/source/Snapshots/index.rst @@ -122,6 +122,72 @@ the OWLS and EAGLE extensions).
The last column in the table gives the ``enum`` value from ``part_type.h`` corresponding to a given entry in the files. +Unit information for individual fields +-------------------------------------- + +Each particle field contains meta-data about the units and how to +convert it to CGS in physical or co-moving frames. The meta-data is in +part designed for users to directly read and in part for machine +reading of the information. Each field contains the exponent of the +scale-factor, reduced Hubble constant [#f1]_ and each of the 5 base units +that is required to convert the field values to physical CGS +units. These fields are: + ++----------------------+---------------------------------------+ +| Meta-data field name | Description | ++======================+=======================================+ +| ``U_L exponent`` | Power of the length unit | ++----------------------+---------------------------------------+ +| ``U_M exponent`` | Power of the mass unit | ++----------------------+---------------------------------------+ +| ``U_t exponent`` | Power of the time unit | ++----------------------+---------------------------------------+ +| ``U_I exponent`` | Power of the current unit | ++----------------------+---------------------------------------+ +| ``U_T exponent`` | Power of the temperature unit | ++----------------------+---------------------------------------+ +| ``a-scale exponent`` | Power of the scale-factor | ++----------------------+---------------------------------------+ +| ``h-scale exponent`` | Power of the reduced Hubble constant | ++----------------------+---------------------------------------+ + +These are used by the ``swiftsimio`` python library to read units and +we encourage users to use this meta-data directly in their automated +tools. + +As an example, the fluid densities (which are written in the co-moving +frame) have the following conversion factors: + + * ``U_L exponent``: -3 + * ``U_M exponent``: 1 + * ``a-scale exponent``: -3 + +This condensed information is stored in the string ``Expression for +physical CGS units``, which in the case of the densities would read +``a^-3 U_M U_L^-3 [ g cm^-3 ]``. The values of the ``U_x`` can be +found in the ``Units System`` group at the root of the snapshot (see +above). Note that only unit factors with non-zero exponents are +printed to this string. + +Additionally, the meta-data contains the numerical conversion factor +from the field to co-moving CGS and physical CGS assuming the units in +the ``Unit System`` group. These are: + + * ``Conversion factor to CGS (not including cosmological corrections`` + * ``Conversion factor to phyical CGS (including cosmological corrections)`` + +These are designed for the users to directly use if they don't want to +compute the individual exponents themselves. As an example, in the +case of the densities and assuming the usual system of units +(:math:`10^{10} \rm{M}_\odot`, :math:`100 \rm{km/s}`, :math:`\rm{Mpc}`) at redshift +0.1, the conversion factors are: + + * Conversion to CGS: :math:`6.76814403 \times 10^{-31}` + * Conversion to physical CGS: :math:`9.00808555 \times 10^{-31}` + +In the case of a non-cosmological simulation, these two expressions +are identical since :math:`a=1`. + Quick access to particles via hash-tables ----------------------------------------- @@ -197,3 +263,13 @@ position `[1, 1, 1]` one could use a piece of code similar to: For large simulations, this vastly reduces the amount of data that needs to be read from the disk. 
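To make the unit meta-data described above concrete, here is a minimal sketch of reading the attributes directly with ``h5py``. The file name is a placeholder, and the dataset and attribute names are taken from the documentation above; they should be checked against the snapshot at hand, so treat this as illustrative rather than canonical:

.. code-block:: python

   import h5py

   # Placeholder file name; any SWIFT snapshot will do.
   with h5py.File("output_0000.hdf5", "r") as handle:
       # Example field; the exact dataset name depends on the snapshot.
       field = handle["/PartType0/Density"]

       # Exponents documented in the table above.
       a_exp = field.attrs["a-scale exponent"]
       u_l_exp = field.attrs["U_L exponent"]
       u_m_exp = field.attrs["U_M exponent"]
       print(a_exp, u_l_exp, u_m_exp)

       # The full set of unit attributes, including the pre-computed
       # conversion factors to co-moving and physical CGS.
       print(dict(field.attrs))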
+ +Note that this is all automated in the ``swiftsimio`` python library +and we highly encourage its use. + +.. [#f1] Note that all quantities in SWIFT are always "h-free" in the + sense that they are expressed in units without any h + terms. This implies that the ``h-scale exponent`` field value + is always 0. SWIFT nevertheless includes this field to be + comprehensive and to prevent confusion with other software + packages that express their quantities with h-full units. diff --git a/doc/RTD/source/conf.py b/doc/RTD/source/conf.py index 8b9df41fc476cf719f2f50b79b26360c144d6f4e..8cda2a011e2e12d8d1184224ff77c1739e8aacf8 100644 --- a/doc/RTD/source/conf.py +++ b/doc/RTD/source/conf.py @@ -25,7 +25,7 @@ author = 'SWIFT Team' # The short X.Y version version = '0.8' # The full version, including alpha/beta/rc tags -release = '0.8.3' +release = '0.8.4' # -- General configuration --------------------------------------------------- diff --git a/examples/Cooling/FeedbackEvent_3D/.gitignore b/examples/Cooling/FeedbackEvent_3D/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..3a8485fdcf1c2638f90686598a442bfef6f50f64 --- /dev/null +++ b/examples/Cooling/FeedbackEvent_3D/.gitignore @@ -0,0 +1,2 @@ +nocool_*/* +default_*/* diff --git a/examples/Cooling/FeedbackEvent_3D/README.md b/examples/Cooling/FeedbackEvent_3D/README.md index 236d03484c0a0afe846b464aef64573788fc639e..565990dd302ccb334c67ffa234294c031b8e6d9f 100644 --- a/examples/Cooling/FeedbackEvent_3D/README.md +++ b/examples/Cooling/FeedbackEvent_3D/README.md @@ -16,4 +16,12 @@ This test emulates what the EAGLE model does to particles for feedback, i.e. + Heats a single particle to 10^7.5 K + Does _not_ switch off cooling -+ Runs to completion. \ No newline at end of file ++ Runs to completion. + + +Running Multiple Tests +---------------------- + +If you would like to run a suite of tests, try the runs.sh script. You'll +need to set the directories in the parameter file to be one higher, i.e. +../coolingtables rather than ./coolingtables. diff --git a/examples/Cooling/FeedbackEvent_3D/feedback.yml b/examples/Cooling/FeedbackEvent_3D/feedback.yml index da624834dcd3347593f5d48c849a8c434cd70392..5e4ddfc618ca64e6633772f155abbdc021cedc0e 100644 --- a/examples/Cooling/FeedbackEvent_3D/feedback.yml +++ b/examples/Cooling/FeedbackEvent_3D/feedback.yml @@ -9,20 +9,20 @@ InternalUnitSystem: # Parameters governing the time integration TimeIntegration: time_begin: 0. # The starting time of the simulation (in internal units). - time_end: 1e-3 # The end time of the simulation (in internal units). + time_end: 1e-4 # The end time of the simulation (in internal units). dt_min: 1e-9 # The minimal time-step size of the simulation (in internal units). - dt_max: 1e-3 # The maximal time-step size of the simulation (in internal units). + dt_max: 1e-4 # The maximal time-step size of the simulation (in internal units). # Parameters governing the snapshots Snapshots: basename: feedback # Common part of the name of output files time_first: 0.
# Time of the first output (in internal units) - delta_time: 1e-4 # Time difference between consecutive outputs (in internal units) + delta_time: 1e-5 # Time difference between consecutive outputs (in internal units) compression: 1 # Parameters governing the conserved quantities statistics Statistics: - delta_time: 1e-3 # Time between statistics output + delta_time: 1e-6 # Time between statistics output # Parameters for the hydrodynamics scheme SPH: @@ -75,4 +75,16 @@ EAGLECooling: H_reion_eV_p_H: 2.0 He_reion_z_centre: 3.5 He_reion_z_sigma: 0.5 - He_reion_eV_p_H: 2.0 \ No newline at end of file + He_reion_eV_p_H: 2.0 + +# Parameters for the EAGLE "equation of state" +EAGLEEntropyFloor: + Jeans_density_threshold_H_p_cm3: 0.1 # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3. + Jeans_over_density_threshold: 10. # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in. + Jeans_temperature_norm_K: 8000 # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin. + Jeans_gamma_effective: 1.3333333 # Slope the of the EAGLE Jeans limiter entropy floor + Cool_density_threshold_H_p_cm3: 1e-5 # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3. + Cool_over_density_threshold: 10. # Overdensity above which the EAGLE Cool limiter entropy floor can kick in. + Cool_temperature_norm_K: 8000 # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin. + Cool_gamma_effective: 1. # Slope the of the EAGLE Cool limiter entropy floor + diff --git a/examples/Cooling/FeedbackEvent_3D/makeIC.py b/examples/Cooling/FeedbackEvent_3D/makeIC.py index 0002dc459730c53500ab75e88d8765701e4937a2..24052cee995675742808df6cc16ae4b523389351 100644 --- a/examples/Cooling/FeedbackEvent_3D/makeIC.py +++ b/examples/Cooling/FeedbackEvent_3D/makeIC.py @@ -30,43 +30,44 @@ import numpy as np # Parameters gamma = 5.0 / 3.0 initial_density = 0.1 * mh / (cm ** 3) -initial_temperature = 1e4 * K +initial_temperature = 2550 * (5/4) * K # Equilibrium temperature at solar abundance inject_temperature = 10 ** (7.5) * K mu = 0.5 particle_mass = 1e6 * msun -unit_system = cosmo_units -file_name = "feedback.hdf5" +if __name__ == "__main__": + unit_system = cosmo_units + file_name = "feedback.hdf5" -# Read in glass file -with h5py.File("glassCube_32.hdf5", "r") as handle: - positions = handle["/PartType0/Coordinates"][:] - h = handle["PartType0/SmoothingLength"][:] * 0.3 + # Read in glass file + with h5py.File("glassCube_32.hdf5", "r") as handle: + positions = handle["/PartType0/Coordinates"][:] + h = handle["PartType0/SmoothingLength"][:] * 0.3 -number_of_particles = len(h) -side_length = (number_of_particles * particle_mass / initial_density) ** (1 / 3) -side_length.convert_to_base(unit_system) + number_of_particles = len(h) + side_length = (number_of_particles * particle_mass / initial_density) ** (1 / 3) + side_length.convert_to_base(unit_system) -print(f"Your box has a side length of {side_length}") + print(f"Your box has a side length of {side_length}") -# Find the central particle -central_particle = np.sum((positions - 0.5) ** 2, axis=1).argmin() + # Find the central particle + central_particle = np.sum((positions - 0.5) ** 2, axis=1).argmin() -# Inject the feedback into that central particle -background_internal_energy = ( - (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * initial_temperature -) -heated_internal_energy = 
(1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * inject_temperature -internal_energy = np.ones_like(h) * background_internal_energy -internal_energy[central_particle] = heated_internal_energy + # Inject the feedback into that central particle + background_internal_energy = ( + (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * initial_temperature + ) + heated_internal_energy = (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * inject_temperature + internal_energy = np.ones_like(h) * background_internal_energy + internal_energy[central_particle] = heated_internal_energy -# Now we have all the information we need to set up the initial conditions! -output = Writer(unit_system=unit_system, box_size=side_length) + # Now we have all the information we need to set up the initial conditions! + output = Writer(unit_system=unit_system, box_size=side_length) -output.gas.coordinates = positions * side_length -output.gas.velocities = np.zeros_like(positions) * cm / s -output.gas.smoothing_length = h * side_length -output.gas.internal_energy = internal_energy -output.gas.masses = np.ones_like(h) * particle_mass + output.gas.coordinates = positions * side_length + output.gas.velocities = np.zeros_like(positions) * cm / s + output.gas.smoothing_length = h * side_length + output.gas.internal_energy = internal_energy + output.gas.masses = np.ones_like(h) * particle_mass -output.write(file_name) + output.write(file_name) diff --git a/examples/Cooling/FeedbackEvent_3D/plotEnergy.py b/examples/Cooling/FeedbackEvent_3D/plotEnergy.py new file mode 100644 index 0000000000000000000000000000000000000000..6a199928b34d3d5f6ac163bfec70f9610ccafddf --- /dev/null +++ b/examples/Cooling/FeedbackEvent_3D/plotEnergy.py @@ -0,0 +1,79 @@ +""" +Plots the energy from the energy.txt file for this simulation. +""" + +import matplotlib.pyplot as plt +import numpy as np + +from swiftsimio import load + +from unyt import Gyr, erg, mh, kb + +from makeIC import gamma, initial_density, initial_temperature, inject_temperature, mu, particle_mass + +try: + plt.style.use("mnras_durham") +except: + pass + + +# Snapshot for grabbing the units. 
+snapshot = load("feedback_0000.hdf5") +units = snapshot.metadata.units +energy_units = units.mass * units.length ** 2 / (units.time ** 2) + +data = np.loadtxt("energy.txt").T + +# Assign correct units to each + +time = data[0] * units.time +mass = data[1] * units.mass +total_energy = data[2] * energy_units +kinetic_energy = data[3] * energy_units +thermal_energy = data[4] * energy_units +radiative_cool = data[8] * energy_units + +# Now we have to figure out how much energy we actually 'injected' +background_internal_energy = ( + (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * initial_temperature +) + +heated_internal_energy = (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * inject_temperature + +injected_energy = (heated_internal_energy - background_internal_energy) * particle_mass + +# Also want to remove the 'background' energy +n_parts = snapshot.metadata.n_gas +total_background_energy = background_internal_energy * n_parts * particle_mass + +# Now we can plot + +fig, ax = plt.subplots() + +ax.plot( + time.to(Gyr), + (kinetic_energy).to(erg), + label="Kinetic" +) + +ax.plot( + time.to(Gyr), + (thermal_energy - total_background_energy).to(erg), + label="Thermal" +) + +ax.plot( + time.to(Gyr), + (radiative_cool ).to(erg), + label="Lost to cooling" +) + +ax.set_xlim(0, 0.05 * Gyr) + +ax.set_xlabel("Time [Gyr]") +ax.set_ylabel("Energy [erg]") + +ax.legend() + +fig.tight_layout() +fig.savefig("Energy.pdf") \ No newline at end of file diff --git a/examples/Cooling/FeedbackEvent_3D/plotEnergyAll.py b/examples/Cooling/FeedbackEvent_3D/plotEnergyAll.py new file mode 100644 index 0000000000000000000000000000000000000000..be1ec94d93a8aad59c481a6a662a1462073baec6 --- /dev/null +++ b/examples/Cooling/FeedbackEvent_3D/plotEnergyAll.py @@ -0,0 +1,111 @@ +""" +Plots the energy from the energy.txt file for this simulation. +""" + +import matplotlib.pyplot as plt +import numpy as np + +from swiftsimio import load + +from unyt import Gyr, erg, mh, kb, Myr +from scipy.interpolate import interp1d + +from makeIC import gamma, initial_density, initial_temperature, inject_temperature, mu, particle_mass + +try: + plt.style.use("mnras_durham") +except: + pass + +time_to_plot = 25 * Myr +diffusion_parameters = [0.1 * x for x in range(11)] +plot_directory_name = "default_diffmax" + +kinetic_energy_at_time = [] +thermal_energy_at_time = [] +radiative_energy_at_time = [] + +for diffusion in diffusion_parameters: + directory_name = f"{plot_directory_name}_{diffusion:1.1f}" + +# Snapshot for grabbing the units. 
+ snapshot = load(f"{directory_name}/feedback_0000.hdf5") + units = snapshot.metadata.units + energy_units = units.mass * units.length ** 2 / (units.time ** 2) + + data = np.loadtxt(f"{directory_name}/energy.txt").T + +# Assign correct units to each + + time = data[0] * units.time + mass = data[1] * units.mass + total_energy = data[2] * energy_units + kinetic_energy = data[3] * energy_units + thermal_energy = data[4] * energy_units + radiative_cool = data[8] * energy_units + +# Now we have to figure out how much energy we actually 'injected' + background_internal_energy = ( + (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * initial_temperature + ) + + heated_internal_energy = (1.0 / (mu * mh)) * (kb / (gamma - 1.0)) * inject_temperature + + injected_energy = (heated_internal_energy - background_internal_energy) * particle_mass + +# Also want to remove the 'background' energy + n_parts = snapshot.metadata.n_gas + total_background_energy = background_internal_energy * n_parts * particle_mass + + kinetic_energy_interpolated = interp1d( + time.to(Myr), + kinetic_energy.to(erg) + ) + + thermal_energy_interpolated = interp1d( + time.to(Myr), + (thermal_energy - total_background_energy).to(erg) + ) + + radiative_cool_interpolated = interp1d( + time.to(Myr), + radiative_cool.to(erg) + ) + + kinetic_energy_at_time.append(kinetic_energy_interpolated(time_to_plot.to(Myr))) + thermal_energy_at_time.append(thermal_energy_interpolated(time_to_plot.to(Myr))) + radiative_energy_at_time.append(radiative_cool_interpolated(time_to_plot.to(Myr))) + + +# Now we can plot + + +fig, ax = plt.subplots() + +ax.plot( + diffusion_parameters, + kinetic_energy_at_time, + label="Kinetic" +) + +ax.plot( + diffusion_parameters, + thermal_energy_at_time, + label="Thermal" +) + +ax.plot( + diffusion_parameters, + radiative_energy_at_time, + label="Lost to cooling" +) + +ax.set_xlim(0, 1.0) + +ax.set_xlabel(r"Diffusion $\alpha_{\rm max}$") +ax.set_ylabel(f"Energy in component at $t=${time_to_plot} [erg]") + +ax.legend() + +fig.tight_layout() +fig.savefig("EnergyFuncDiff.pdf") diff --git a/examples/Cooling/FeedbackEvent_3D/plotSolution.py b/examples/Cooling/FeedbackEvent_3D/plotSolution.py index b9cce959883dbfd62c57b0f25c0b515b24308ab9..fe6f93996dafee2b6e81a70d4786374d86355f6f 100644 --- a/examples/Cooling/FeedbackEvent_3D/plotSolution.py +++ b/examples/Cooling/FeedbackEvent_3D/plotSolution.py @@ -29,6 +29,7 @@ from scipy import stats from unyt import cm, s, km, kpc, Pa, msun, K, keV, mh kPa = 1000 * Pa +plot_radius = 7 * kpc from swiftsimio import load @@ -155,7 +156,7 @@ log = dict( v_r=False, v_phi=False, u=False, S=False, P=False, rho=False, visc=False, diff=False ) ylim = dict( - v_r=[-8, 5], u=[3500, 5500], rho=[0.02, 0.15], visc=[0, 2.0], diff=[0, 0.25], + v_r=[-4, 25], u=[4750, 6000], rho=[0.09, 0.16], visc=[0, 2.0], diff=[0, 0.25], P=[3e-18, 10e-18], S=[-0.5e60, 4e60] ) @@ -198,7 +199,7 @@ for key, label in plot.items(): axis.set_xlabel("Radius ($r$) [kpc]", labelpad=0) axis.set_ylabel(label, labelpad=0) - axis.set_xlim(0.0, 0.7 * boxSize.to(kpc).value) + axis.set_xlim(0.0, plot_radius.to(kpc)) try: axis.set_ylim(*ylim[key]) diff --git a/examples/Cooling/FeedbackEvent_3D/run.sh b/examples/Cooling/FeedbackEvent_3D/run.sh index b91c515266eb5fcc1ed86c21651050613188fe9a..9111b8e8cf2cb9b0b333a1b97c04dac3975fac5c 100755 --- a/examples/Cooling/FeedbackEvent_3D/run.sh +++ b/examples/Cooling/FeedbackEvent_3D/run.sh @@ -5,3 +5,4 @@ # Plot the solution python plotSolution.py 5 +python plotEnergy.py diff --git 
a/examples/Cooling/FeedbackEvent_3D/runs.sh b/examples/Cooling/FeedbackEvent_3D/runs.sh new file mode 100644 index 0000000000000000000000000000000000000000..5e3853f1e196a2bedfcb32579209fd2f614cfadc --- /dev/null +++ b/examples/Cooling/FeedbackEvent_3D/runs.sh @@ -0,0 +1,31 @@ +#!/bin/bash -l + +#SBATCH -J SWIFTDiffusionCalibration +#SBATCH -N 1 +#SBATCH -o swift_diffusion.out +#SBATCH -e swift_diffusion.err +#SBATCH -p cosma +#SBATCH -A durham +#SBATCH --exclusive + +#SBATCH -t 1:00:00 + + +for diffusion_alpha_max in 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0; +do + mkdir default_diffmax_$diffusion_alpha_max + + cd default_diffmax_$diffusion_alpha_max + + ../../../swift --hydro --cooling --limiter --threads=16 --param="SPH:diffusion_alpha_max:${diffusion_alpha_max}" ../feedback.yml 2>&1 | tee output.log + + cd .. + + mkdir nocool_diffmax_$diffusion_alpha_max + + cd nocool_diffmax_$diffusion_alpha_max + + ../../../swift --hydro --temperature --limiter --threads=16 --param="SPH:diffusion_alpha_max:${diffusion_alpha_max}" ../feedback.yml 2>&1 | tee output.log + + cd .. +done diff --git a/examples/EAGLE_ICs/EAGLE_12/run.sh b/examples/EAGLE_ICs/EAGLE_12/run.sh index 12c962c29ddd21290624906dfbcca166e171203b..86fe2b1db0a7725b617656d3b52f0dce89435ea3 100755 --- a/examples/EAGLE_ICs/EAGLE_12/run.sh +++ b/examples/EAGLE_ICs/EAGLE_12/run.sh @@ -7,5 +7,5 @@ then ./getIC.sh fi -../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_12.yml 2>&1 | tee output.log +../../swift --cosmology --hydro --self-gravity --stars --black-holes --cooling --star-formation --feedback --fof --threads=16 eagle_12.yml 2>&1 | tee output.log diff --git a/examples/EAGLE_ICs/EAGLE_25/run.sh b/examples/EAGLE_ICs/EAGLE_25/run.sh index 8b317183d850d5048253fedb8db7e24edd9c02da..11e93971767690c84921396842924853fa69ab23 100755 --- a/examples/EAGLE_ICs/EAGLE_25/run.sh +++ b/examples/EAGLE_ICs/EAGLE_25/run.sh @@ -7,5 +7,5 @@ then ./getIC.sh fi -../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_25.yml 2>&1 | tee output.log +../../swift --cosmology --hydro --self-gravity --stars --black-holes --cooling --star-formation --feedback --fof --threads=16 eagle_25.yml 2>&1 | tee output.log diff --git a/examples/EAGLE_ICs/EAGLE_50/run.sh b/examples/EAGLE_ICs/EAGLE_50/run.sh index 3ef38da33fe1cc820e3ee5e7afb620e7e9109196..492ac35ebe1af20392aff879aef078c0f9bd264f 100755 --- a/examples/EAGLE_ICs/EAGLE_50/run.sh +++ b/examples/EAGLE_ICs/EAGLE_50/run.sh @@ -7,5 +7,5 @@ then ./getIC.sh fi -../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_50.yml 2>&1 | tee output.log +../../swift --cosmology --hydro --self-gravity --stars --black-holes --cooling --star-formation --feedback --fof --threads=16 eagle_50.yml 2>&1 | tee output.log diff --git a/examples/HydroTests/BlobTest_3D/makeSliceMovie.py b/examples/HydroTests/BlobTest_3D/makeSliceMovie.py new file mode 100644 index 0000000000000000000000000000000000000000..726c4ac01b9ae5b046a8bb3b6c1b9206764907bb --- /dev/null +++ b/examples/HydroTests/BlobTest_3D/makeSliceMovie.py @@ -0,0 +1,190 @@ +""" +Makes a movie of the output of the blob test. 
+ +Josh Borrow (joshua.borrow@durham.ac.uk) 2019 + +LGPLv3 +""" + +from swiftsimio import load +from swiftsimio.visualisation import slice + +from p_tqdm import p_map + +import matplotlib.pyplot as plt +import numpy as np + +from matplotlib.colors import LogNorm +from matplotlib.animation import FuncAnimation + +info_frames = 15 +start_frame = 0 +end_frame = 101 +resolution = 1024 +snapshot_name = "blob" +cmap = "Spectral_r" +text_args = dict(color="black") +# plot = "pressure" +# name = "Pressure $P$" +plot = "density" +name = "Fluid Density $\\rho$" + + +def get_image(n): + """ + Gets the image for snapshot n, and also returns the associated + SWIFT metadata object. + """ + filename = f"{snapshot_name}_{n:04d}.hdf5" + + data = load(filename) + boxsize = data.metadata.boxsize[0].value + + output = np.zeros((resolution, resolution * 4), dtype=float) + + x, y, z = data.gas.coordinates.value.T + + # This is an oblong box but we can only make squares! + for box, box_edges in enumerate([[0.0, 1.1], [0.9, 2.1], [1.9, 3.1], [2.9, 4.0]]): + mask = np.logical_and(x >= box_edges[0], x <= box_edges[1]) + masked_x = x[mask] - np.float64(box) + masked_y = y[mask] + masked_z = z[mask] + + hsml = data.gas.smoothing_length.value[mask] + + if plot == "density": + mass = data.gas.masses.value[mask] + image = slice( + x=masked_y, + y=masked_x, + z=masked_z, + m=mass, + h=hsml, + z_slice=0.5, + res=resolution, + ) + else: + quantity = getattr(data.gas, plot).value[mask] + # Need to divide out the particle density for non-projected density quantities + image = scatter( + x=masked_y, + y=masked_x, + z=masked_z, + m=quantity, + h=hsml, + z_slice=0.5, + res=resolution, + ) / scatter( + x=masked_y, + y=masked_x, + z=masked_z, + m=np.ones_like(quantity), + h=hsml, + z_slice=0.5, + res=resolution, + ) + + output[:, box * resolution : (box + 1) * resolution] = image + + return output, data.metadata + + +def get_data_dump(metadata): + """ + Gets a big data dump from the SWIFT metadata + """ + + try: + viscosity = metadata.viscosity_info + except: + viscosity = "No info" + + try: + diffusion = metadata.diffusion_info + except: + diffusion = "No info" + + output = ( + "$\\bf{Blob}$ $\\bf{Test}$\n\n" + "$\\bf{SWIFT}$\n" + + metadata.code_info + + "\n\n" + + "$\\bf{Compiler}$\n" + + metadata.compiler_info + + "\n\n" + + "$\\bf{Hydrodynamics}$\n" + + metadata.hydro_info + + "\n\n" + + "$\\bf{Viscosity}$\n" + + viscosity + + "\n\n" + + "$\\bf{Diffusion}$\n" + + diffusion + ) + + return output + + +def time_formatter(metadata): + return f"$t = {metadata.t:2.2f}$" + + +# Generate the frames and unpack our variables +images, metadata = zip(*p_map(get_image, list(range(start_frame, end_frame)))) + +# The edges are funny because of the non-periodicity. 
+central_region = images[0][ + resolution // 10 : resolution - resolution // 10, + resolution // 10 : resolution - resolution // 10, +] +norm = LogNorm(vmin=np.min(central_region), vmax=np.max(central_region), clip="black") + +fig, ax = plt.subplots(figsize=(8 * 4, 8), dpi=resolution // 8) + +fig.subplots_adjust(0, 0, 1, 1) +ax.axis("off") + +# Set up the initial state +image = ax.imshow(np.zeros_like(images[0]), norm=norm, cmap=cmap, origin="lower") + +description_text = ax.text( + 0.5, + 0.5, + get_data_dump(metadata[0]), + va="center", + ha="center", + **text_args, + transform=ax.transAxes, +) + +time_text = ax.text( + (1 - 0.025 * 0.25), + 0.975, + time_formatter(metadata[0]), + **text_args, + va="top", + ha="right", + transform=ax.transAxes, +) + +info_text = ax.text( + 0.025 * 0.25, 0.975, name, **text_args, va="top", ha="left", transform=ax.transAxes +) + + +def animate(n): + # Display just our original frames at t < 0 + if n >= 0: + image.set_array(images[n]) + description_text.set_text("") + time_text.set_text(time_formatter(metadata[n])) + + return (image,) + + +animation = FuncAnimation( + fig, animate, range(start_frame - info_frames, end_frame), interval=40 +) + +animation.save("blob_slice.mp4") diff --git a/examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py b/examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py index a52784891ab4689dcd59dc27945e573e602785f3..e40ba44dedb6e43dd25f0ce7e0b5681e61048888 100644 --- a/examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py +++ b/examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py @@ -71,7 +71,7 @@ if __name__ == "__main__": import matplotlib.pyplot as plt - filename = "kelvinhelmholtz" + filename = "kelvinHelmholtz" dpi = 512 @@ -93,7 +93,7 @@ if __name__ == "__main__": fig, ax = plt.subplots(1, 1, figsize=(1, 1), frameon=False) ax.axis("off") # Remove annoying black frame. - data_x, data_y, density = load_and_extract("kelvinhelmholtz_0000.hdf5") + data_x, data_y, density = load_and_extract("kelvinHelmholtz_0000.hdf5") x = np.linspace(0, 1, dpi) y = np.linspace(0, 1, dpi) diff --git a/examples/HydroTests/KelvinHelmholtz_2D/makeMovieSwiftsimIO.py b/examples/HydroTests/KelvinHelmholtz_2D/makeMovieSwiftsimIO.py new file mode 100644 index 0000000000000000000000000000000000000000..a86445e0a369bb3697793be72a3053d20f597e45 --- /dev/null +++ b/examples/HydroTests/KelvinHelmholtz_2D/makeMovieSwiftsimIO.py @@ -0,0 +1,102 @@ +""" +Makes a movie of the KH 2D data. + +You will need to run your movie with far higher time-resolution than usual to +get a nice movie; around 450 snapshots over 6s is required. + +Edit this file near the bottom with the number of snaps you have. + +Written by Josh Borrow (joshua.borrow@durham.ac.uk) +""" + +import os +import h5py as h5 +import numpy as np +import scipy.interpolate as si + +from swiftsimio import load +from swiftsimio.visualisation import project_gas_pixel_grid + +def load_and_extract(filename): + """ + Load the data and extract relevant info. + """ + + return load(filename) + + +def make_plot(filename, array, nx, ny, dx, dy): + """ + Load the data and plop it on the grid using nearest + neighbour searching for finding the 'correct' value of + the density. + """ + + data = load_and_extract(filename) + + mesh = project_gas_pixel_grid(data, nx) + + array.set_array(mesh) + + return array, + + +def frame(n, *args): + """ + Make a single frame. Requires the global variables plot and dpi. 
+ """ + + global plot, dpi + + fn = "{}_{:04d}.hdf5".format(filename, n) + + return make_plot(fn, plot, dpi, dpi, (0, 1), (0, 1)) + + +if __name__ == "__main__": + import matplotlib + matplotlib.use("Agg") + + from tqdm import tqdm + from matplotlib.animation import FuncAnimation + from scipy.stats import gaussian_kde + + import matplotlib.pyplot as plt + + filename = "kelvinHelmholtz" + dpi = 512 + + + # Look for the number of files in the directory. + i = 0 + while True: + if os.path.isfile("{}_{:04d}.hdf5".format(filename, i)): + i += 1 + else: + break + + if i > 10000: + raise FileNotFoundError( + "Could not find the snapshots in the directory") + + frames = tqdm(np.arange(0, i)) + + # Creation of first frame + fig, ax = plt.subplots(1, 1, figsize=(1, 1), frameon=False) + ax.axis("off") # Remove annoying black frame. + + data = load_and_extract("kelvinHelmholtz_0000.hdf5") + + + mesh = project_gas_pixel_grid(data, dpi) + + # Global variable for set_array + plot = ax.imshow(mesh, extent=[0, 1, 0, 1], animated=True, interpolation="none") + + anim = FuncAnimation(fig, frame, frames, interval=40, blit=False) + + # Remove all whitespace + fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None) + + # Actually make the movie + anim.save("khmovie.mp4", dpi=dpi, bitrate=4096) diff --git a/examples/HydroTests/KelvinHelmholtz_2D/run.sh b/examples/HydroTests/KelvinHelmholtz_2D/run.sh index 355bf052a7ad124bcb4d88254ad780a7ffa97aba..ee3cdc4f542e3be1f28ba0deff279c612b9c49b0 100755 --- a/examples/HydroTests/KelvinHelmholtz_2D/run.sh +++ b/examples/HydroTests/KelvinHelmholtz_2D/run.sh @@ -10,6 +10,6 @@ fi # Run SWIFT ../../swift --hydro --threads=4 kelvinHelmholtz.yml 2>&1 | tee output.log + # Plot the solution -python plotSolution.py 6 -python makeMovie.py +python3 makeMovieSwiftsimIO.py diff --git a/examples/HydroTests/SodShock_BCC_3D/plotSolution.py b/examples/HydroTests/SodShock_BCC_3D/plotSolution.py index dc1b15c3eac862365d4422f95a14ffe1713f91a6..365b679991e9a3a5bbb9e9d5108066c04e497c2f 100644 --- a/examples/HydroTests/SodShock_BCC_3D/plotSolution.py +++ b/examples/HydroTests/SodShock_BCC_3D/plotSolution.py @@ -35,7 +35,7 @@ from analyticSolution import analytic snap = int(sys.argv[1]) -sim = load(f"sodshock_{snap:04d}.hdf5") +sim = load(f"sodShock_{snap:04d}.hdf5") # Set up plotting stuff try: diff --git a/examples/HydroTests/SodShock_BCC_3D/sodShock.yml b/examples/HydroTests/SodShock_BCC_3D/sodShock.yml index 373a70bf6b6782d7bdb4ed91417a13270f6521e6..816af9c9ad620ce8617340d749b8ab3e61e53ec6 100644 --- a/examples/HydroTests/SodShock_BCC_3D/sodShock.yml +++ b/examples/HydroTests/SodShock_BCC_3D/sodShock.yml @@ -28,9 +28,6 @@ Statistics: SPH: resolution_eta: 1.2348 # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel). CFL_condition: 0.1 # Courant-Friedrich-Levy condition for time integration. 
- viscosity_alpha: 1.0 - viscosity_alpha_max: 1.0 - viscosity_alpha_min: 1.0 # Parameters related to the initial conditions InitialConditions: diff --git a/examples/parameter_example.yml b/examples/parameter_example.yml index 996e35e41a3a6090a11ecf30a9decb391865ed63..6aeeaad3e41498b48e215f0a761ecdffb486a118 100644 --- a/examples/parameter_example.yml +++ b/examples/parameter_example.yml @@ -338,6 +338,11 @@ EAGLEChemistry: # Parameters related to star formation models ----------------------------------------------- +# GEAR star formation model (Revaz and Jablonka 2018) +GEARStarFormation: + star_formation_efficiency: 0.01 # star formation efficiency (c_*) + maximal_temperature: 3e4 # Upper limit to the temperature of a star forming particle + # EAGLE star formation model (Schaye and Dalla Vecchia 2008) EAGLEStarFormation: EOS_density_norm_H_p_cm3: 0.1 # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3. diff --git a/src/black_holes/Default/black_holes_io.h b/src/black_holes/Default/black_holes_io.h index 41ca7c1cd0b3542959d8a7372179c7e0cb285285..5288ae595663871bb3187782712d52708fac7931 100644 --- a/src/black_holes/Default/black_holes_io.h +++ b/src/black_holes/Default/black_holes_io.h @@ -22,20 +22,6 @@ #include "black_holes_part.h" #include "io_properties.h" -INLINE static void convert_bpart_pos(const struct engine *e, - const struct bpart *bp, double *ret) { - - if (e->s->periodic) { - ret[0] = box_wrap(bp->x[0], 0.0, e->s->dim[0]); - ret[1] = box_wrap(bp->x[1], 0.0, e->s->dim[1]); - ret[2] = box_wrap(bp->x[2], 0.0, e->s->dim[2]); - } else { - ret[0] = bp->x[0]; - ret[1] = bp->x[1]; - ret[2] = bp->x[2]; - } -} - /** * @brief Specifies which b-particle fields to read from a dataset * @@ -43,17 +29,16 @@ INLINE static void convert_bpart_pos(const struct engine *e, * @param list The list of i/o properties to read. * @param num_fields The number of i/o fields to read. 
*/ -INLINE static void black_holes_read_particles(struct bpart *bparts, - struct io_props *list, - int *num_fields) { +INLINE static void black_holes_read_particles(struct bpart* bparts, + struct io_props* list, + int* num_fields) { /* Say how much we want to read */ *num_fields = 5; /* List what we want to read */ - list[0] = io_make_output_field_convert_bpart( - "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, bparts, convert_bpart_pos); - + list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY, + UNIT_CONV_LENGTH, bparts, x); list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY, UNIT_CONV_SPEED, bparts, v); list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS, @@ -64,6 +49,53 @@ INLINE static void black_holes_read_particles(struct bpart *bparts, UNIT_CONV_LENGTH, bparts, h); } +INLINE static void convert_bpart_pos(const struct engine* e, + const struct bpart* bp, double* ret) { + + if (e->s->periodic) { + ret[0] = box_wrap(bp->x[0], 0.0, e->s->dim[0]); + ret[1] = box_wrap(bp->x[1], 0.0, e->s->dim[1]); + ret[2] = box_wrap(bp->x[2], 0.0, e->s->dim[2]); + } else { + ret[0] = bp->x[0]; + ret[1] = bp->x[1]; + ret[2] = bp->x[2]; + } +} + +INLINE static void convert_bpart_vel(const struct engine* e, + const struct bpart* bp, float* ret) { + + const int with_cosmology = (e->policy & engine_policy_cosmology); + const struct cosmology* cosmo = e->cosmology; + const integertime_t ti_current = e->ti_current; + const double time_base = e->time_base; + + const integertime_t ti_beg = get_integer_time_begin(ti_current, bp->time_bin); + const integertime_t ti_end = get_integer_time_end(ti_current, bp->time_bin); + + /* Get time-step since the last kick */ + float dt_kick_grav; + if (with_cosmology) { + dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, ti_beg, ti_current); + dt_kick_grav -= + cosmology_get_grav_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2); + } else { + dt_kick_grav = (ti_current - ((ti_beg + ti_end) / 2)) * time_base; + } + + /* Extrapolate the velocites to the current time */ + const struct gpart* gp = bp->gpart; + ret[0] = gp->v_full[0] + gp->a_grav[0] * dt_kick_grav; + ret[1] = gp->v_full[1] + gp->a_grav[1] * dt_kick_grav; + ret[2] = gp->v_full[2] + gp->a_grav[2] * dt_kick_grav; + + /* Conversion from internal units to peculiar velocities */ + ret[0] *= cosmo->a_inv; + ret[1] *= cosmo->a_inv; + ret[2] *= cosmo->a_inv; +} + /** * @brief Specifies which b-particle fields to write to a dataset * @@ -71,24 +103,33 @@ INLINE static void black_holes_read_particles(struct bpart *bparts, * @param list The list of i/o properties to write. * @param num_fields The number of i/o fields to write. 
*/ -INLINE static void black_holes_write_particles(const struct bpart *bparts, - struct io_props *list, - int *num_fields) { +INLINE static void black_holes_write_particles(const struct bpart* bparts, + struct io_props* list, + int* num_fields, + int with_cosmology) { /* Say how much we want to write */ *num_fields = 5; /* List what we want to write */ - list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, - bparts, x); - list[1] = - io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, bparts, v); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, bparts, mass); + list[0] = io_make_output_field_convert_bpart( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, bparts, + convert_bpart_pos, "Co-moving position of the particles"); + + list[1] = io_make_output_field_convert_bpart( + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, bparts, convert_bpart_vel, + "Peculiar velocities of the particles. This is a * dx/dt where x is the " + "co-moving position of the particles."); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, mass, "Masses of the particles"); + list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, - bparts, id); - list[4] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - bparts, h); + 0.f, bparts, id, "Unique ID of the particles"); + + list[4] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, bparts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); #ifdef DEBUG_INTERACTIONS_BLACK_HOLES diff --git a/src/black_holes/Default/black_holes_part.h b/src/black_holes/Default/black_holes_part.h index 5d614aa873f8260b53479088e0fd25a44ce0ce82..1b7d4ac37caf100c48b504a3836166ce33c24169 100644 --- a/src/black_holes/Default/black_holes_part.h +++ b/src/black_holes/Default/black_holes_part.h @@ -21,6 +21,8 @@ #include "chemistry_struct.h" +#include "timeline.h" + /** * @brief Particle fields for the black hole particles. * diff --git a/src/black_holes/EAGLE/black_holes_io.h b/src/black_holes/EAGLE/black_holes_io.h index c02cb59fcce73feefc749ed1d0ba5fa60145419f..ea8b6f70d2847e65128cb23051ee059a1460b52d 100644 --- a/src/black_holes/EAGLE/black_holes_io.h +++ b/src/black_holes/EAGLE/black_holes_io.h @@ -22,20 +22,6 @@ #include "black_holes_part.h" #include "io_properties.h" -INLINE static void convert_bpart_pos(const struct engine *e, - const struct bpart *bp, double *ret) { - - if (e->s->periodic) { - ret[0] = box_wrap(bp->x[0], 0.0, e->s->dim[0]); - ret[1] = box_wrap(bp->x[1], 0.0, e->s->dim[1]); - ret[2] = box_wrap(bp->x[2], 0.0, e->s->dim[2]); - } else { - ret[0] = bp->x[0]; - ret[1] = bp->x[1]; - ret[2] = bp->x[2]; - } -} - /** * @brief Specifies which b-particle fields to read from a dataset * @@ -43,9 +29,9 @@ INLINE static void convert_bpart_pos(const struct engine *e, * @param list The list of i/o properties to read. * @param num_fields The number of i/o fields to read. 
*/ -INLINE static void black_holes_read_particles(struct bpart *bparts, - struct io_props *list, - int *num_fields) { +INLINE static void black_holes_read_particles(struct bpart* bparts, + struct io_props* list, + int* num_fields) { /* Say how much we want to read */ *num_fields = 6; @@ -65,6 +51,53 @@ INLINE static void black_holes_read_particles(struct bpart *bparts, UNIT_CONV_ENERGY, bparts, energy_reservoir); } +INLINE static void convert_bpart_pos(const struct engine* e, + const struct bpart* bp, double* ret) { + + if (e->s->periodic) { + ret[0] = box_wrap(bp->x[0], 0.0, e->s->dim[0]); + ret[1] = box_wrap(bp->x[1], 0.0, e->s->dim[1]); + ret[2] = box_wrap(bp->x[2], 0.0, e->s->dim[2]); + } else { + ret[0] = bp->x[0]; + ret[1] = bp->x[1]; + ret[2] = bp->x[2]; + } +} + +INLINE static void convert_bpart_vel(const struct engine* e, + const struct bpart* bp, float* ret) { + + const int with_cosmology = (e->policy & engine_policy_cosmology); + const struct cosmology* cosmo = e->cosmology; + const integertime_t ti_current = e->ti_current; + const double time_base = e->time_base; + + const integertime_t ti_beg = get_integer_time_begin(ti_current, bp->time_bin); + const integertime_t ti_end = get_integer_time_end(ti_current, bp->time_bin); + + /* Get time-step since the last kick */ + float dt_kick_grav; + if (with_cosmology) { + dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, ti_beg, ti_current); + dt_kick_grav -= + cosmology_get_grav_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2); + } else { + dt_kick_grav = (ti_current - ((ti_beg + ti_end) / 2)) * time_base; + } + + /* Extrapolate the velocites to the current time */ + const struct gpart* gp = bp->gpart; + ret[0] = gp->v_full[0] + gp->a_grav[0] * dt_kick_grav; + ret[1] = gp->v_full[1] + gp->a_grav[1] * dt_kick_grav; + ret[2] = gp->v_full[2] + gp->a_grav[2] * dt_kick_grav; + + /* Conversion from internal units to peculiar velocities */ + ret[0] *= cosmo->a_inv; + ret[1] *= cosmo->a_inv; + ret[2] *= cosmo->a_inv; +} + /** * @brief Specifies which b-particle fields to write to a dataset * @@ -72,43 +105,77 @@ INLINE static void black_holes_read_particles(struct bpart *bparts, * @param list The list of i/o properties to write. * @param num_fields The number of i/o fields to write. */ -INLINE static void black_holes_write_particles(const struct bpart *bparts, - struct io_props *list, - int *num_fields) { +INLINE static void black_holes_write_particles(const struct bpart* bparts, + struct io_props* list, + int* num_fields, + int with_cosmology) { /* Say how much we want to write */ - *num_fields = 13; + *num_fields = 12; /* List what we want to write */ list[0] = io_make_output_field_convert_bpart( - "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, bparts, convert_bpart_pos); - list[1] = - io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, bparts, v); + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, bparts, + convert_bpart_pos, "Co-moving position of the particles"); + + list[1] = io_make_output_field_convert_bpart( + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, bparts, convert_bpart_vel, + "Peculiar velocities of the particles. 
This is a * dx/dt where x is the " + "co-moving position of the particles."); + list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, bparts, mass); + io_make_output_field("DynamicalMasses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, mass, "Dynamical masses of the particles"); + list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, - bparts, id); - list[4] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - bparts, h); - list[5] = io_make_output_field("SubgridMasses", FLOAT, 1, UNIT_CONV_MASS, - bparts, subgrid_mass); - list[6] = io_make_output_field("FormationTime", FLOAT, 1, UNIT_CONV_TIME, - bparts, formation_time); - list[7] = io_make_output_field("GasDensity", FLOAT, 1, UNIT_CONV_DENSITY, - bparts, rho_gas); - list[8] = io_make_output_field("GasSoundSpeed", FLOAT, 1, UNIT_CONV_SPEED, - bparts, sound_speed_gas); - list[9] = io_make_output_field("EnergyReservoir", FLOAT, 1, UNIT_CONV_ENERGY, - bparts, energy_reservoir); - list[10] = io_make_output_field("AccretionRate", FLOAT, 1, - UNIT_CONV_MASS_PER_UNIT_TIME, bparts, - accretion_rate); - list[11] = io_make_output_field("TotalAccretedMass", FLOAT, 1, - UNIT_CONV_MASS_PER_UNIT_TIME, bparts, - total_accreted_mass); - list[12] = - io_make_output_field("CumulativeNumberSeeds", INT, 1, UNIT_CONV_NO_UNITS, - bparts, cumulative_number_seeds); + 0.f, bparts, id, "Unique ID of the particles"); + + list[4] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, bparts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[5] = io_make_output_field("SubgridMasses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, subgrid_mass, + "Subgrid masses of the particles"); + + if (with_cosmology) { + list[6] = io_make_output_field( + "FormationScaleFactors", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, bparts, + formation_scale_factor, "Scale-factors at which the BHs were formed"); + } else { + list[6] = io_make_output_field("FormationTimes", FLOAT, 1, UNIT_CONV_TIME, + 0.f, bparts, formation_time, + "Times at which the BHs were formed"); + } + + list[7] = io_make_output_field( + "GasDensities", FLOAT, 1, UNIT_CONV_DENSITY, 0.f, bparts, rho_gas, + "Co-moving densities of the gas around the particles"); + + list[8] = io_make_output_field( + "GasSoundSpeeds", FLOAT, 1, UNIT_CONV_SPEED, 1.5f * hydro_gamma_minus_one, + bparts, sound_speed_gas, + "Co-moving sound-speeds of the gas around the particles"); + + list[9] = io_make_output_field( + "EnergyReservoirs", FLOAT, 1, UNIT_CONV_ENERGY, 0.f, bparts, + energy_reservoir, + "Physcial energy contained in the feedback reservoir of the particles"); + + list[10] = io_make_output_field( + "AccretionRates", FLOAT, 1, UNIT_CONV_MASS_PER_UNIT_TIME, 0.f, bparts, + accretion_rate, + "Physical instantaneous accretion rates of the particles"); + + list[11] = io_make_output_field( + "TotalAccretedMasses", FLOAT, 1, UNIT_CONV_MASS_PER_UNIT_TIME, 0.f, + bparts, total_accreted_mass, + "Total mass accreted onto the particles since its birth"); + + list[12] = io_make_output_field( + "CumulativeNumberSeeds", INT, 1, UNIT_CONV_NO_UNITS, 0.f, bparts, + cumulative_number_seeds, + "Total number of BH seeds that have merged into this black hole"); #ifdef DEBUG_INTERACTIONS_BLACK_HOLES diff --git a/src/black_holes/EAGLE/black_holes_part.h b/src/black_holes/EAGLE/black_holes_part.h index dac48070cbcfaa9b6768d316aca1185c4f773847..cbfa0fcaf560d1c771d8595ec84ad08e9ba3608e 100644 --- a/src/black_holes/EAGLE/black_holes_part.h +++ 
b/src/black_holes/EAGLE/black_holes_part.h @@ -20,6 +20,7 @@ #define SWIFT_EAGLE_BLACK_HOLE_PART_H #include "chemistry_struct.h" +#include "timeline.h" /** * @brief Particle fields for the black hole particles. diff --git a/src/cell.c b/src/cell.c index d9ddc66b9111c5d45186abf1b140378a7c2fcbb8..0649d31b38279f3a75ec2b439f71de105425ae27 100644 --- a/src/cell.c +++ b/src/cell.c @@ -4379,13 +4379,7 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) { hydro_remove_part(p, xp); /* Remove the particle entirely */ - struct gpart *gp = p->gpart; cell_remove_part(e, c, p, xp); - - /* and it's gravity friend */ - if (gp != NULL) { - cell_remove_gpart(e, c, gp); - } } if (lock_unlock(&e->s->lock) != 0) @@ -4419,7 +4413,7 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) { if (part_is_active(p, e)) { hydro_init_part(p, &e->s->hs); chemistry_init_part(p, e->chemistry); - star_formation_init_part(p, e->star_formation); + star_formation_init_part(p, xp, e->star_formation); tracers_after_init(p, xp, e->internal_units, e->physical_constants, with_cosmology, e->cosmology, e->hydro_properties, e->cooling_func, e->time); @@ -4547,7 +4541,9 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) { if (!gpart_is_inhibited(gp, e)) { /* Remove the particle entirely */ - cell_remove_gpart(e, c, gp); + if (gp->type == swift_type_dark_matter) { + cell_remove_gpart(e, c, gp); + } } if (lock_unlock(&e->s->lock) != 0) @@ -4688,11 +4684,7 @@ void cell_drift_spart(struct cell *c, const struct engine *e, int force) { if (!spart_is_inhibited(sp, e)) { /* Remove the particle entirely */ - struct gpart *gp = sp->gpart; cell_remove_spart(e, c, sp); - - /* and it's gravity friend */ - cell_remove_gpart(e, c, gp); } if (lock_unlock(&e->s->lock) != 0) @@ -4863,11 +4855,7 @@ void cell_drift_bpart(struct cell *c, const struct engine *e, int force) { if (!bpart_is_inhibited(bp, e)) { /* Remove the particle entirely */ - struct gpart *gp = bp->gpart; cell_remove_bpart(e, c, bp); - - /* and it's gravity friend */ - cell_remove_gpart(e, c, gp); } if (lock_unlock(&e->s->lock) != 0) @@ -5327,6 +5315,9 @@ void cell_remove_part(const struct engine *e, struct cell *c, struct part *p, if (c->nodeID != e->nodeID) error("Can't remove a particle in a foreign cell."); + /* Don't remove a particle twice */ + if (p->time_bin == time_bin_inhibited) return; + /* Mark the particle as inhibited */ p->time_bin = time_bin_inhibited; @@ -5337,12 +5328,15 @@ void cell_remove_part(const struct engine *e, struct cell *c, struct part *p, p->gpart->type = swift_type_dark_matter; } - /* Un-link the part */ - p->gpart = NULL; - /* Update the space-wide counters */ const size_t one = 1; atomic_add(&e->s->nr_inhibited_parts, one); + if (p->gpart) { + atomic_add(&e->s->nr_inhibited_gparts, one); + } + + /* Un-link the part */ + p->gpart = NULL; } /** @@ -5357,6 +5351,14 @@ void cell_remove_part(const struct engine *e, struct cell *c, struct part *p, */ void cell_remove_gpart(const struct engine *e, struct cell *c, struct gpart *gp) { + + /* Quick cross-check */ + if (c->nodeID != e->nodeID) + error("Can't remove a particle in a foreign cell."); + + /* Don't remove a particle twice */ + if (gp->time_bin == time_bin_inhibited) return; + /* Quick cross-check */ if (c->nodeID != e->nodeID) error("Can't remove a particle in a foreign cell."); @@ -5385,6 +5387,9 @@ void cell_remove_spart(const struct engine *e, struct cell *c, if (c->nodeID != e->nodeID) error("Can't remove a particle in a foreign 
cell."); + /* Don't remove a particle twice */ + if (sp->time_bin == time_bin_inhibited) return; + /* Mark the particle as inhibited and stand-alone */ sp->time_bin = time_bin_inhibited; if (sp->gpart) { @@ -5393,12 +5398,15 @@ void cell_remove_spart(const struct engine *e, struct cell *c, sp->gpart->type = swift_type_dark_matter; } - /* Un-link the spart */ - sp->gpart = NULL; - /* Update the space-wide counters */ const size_t one = 1; atomic_add(&e->s->nr_inhibited_sparts, one); + if (sp->gpart) { + atomic_add(&e->s->nr_inhibited_gparts, one); + } + + /* Un-link the spart */ + sp->gpart = NULL; } /** @@ -5418,6 +5426,9 @@ void cell_remove_bpart(const struct engine *e, struct cell *c, if (c->nodeID != e->nodeID) error("Can't remove a particle in a foreign cell."); + /* Don't remove a particle twice */ + if (bp->time_bin == time_bin_inhibited) return; + /* Mark the particle as inhibited and stand-alone */ bp->time_bin = time_bin_inhibited; if (bp->gpart) { @@ -5426,12 +5437,15 @@ void cell_remove_bpart(const struct engine *e, struct cell *c, bp->gpart->type = swift_type_dark_matter; } - /* Un-link the bpart */ - bp->gpart = NULL; - /* Update the space-wide counters */ const size_t one = 1; atomic_add(&e->s->nr_inhibited_bparts, one); + if (bp->gpart) { + atomic_add(&e->s->nr_inhibited_gparts, one); + } + + /* Un-link the bpart */ + bp->gpart = NULL; } /** diff --git a/src/chemistry/EAGLE/chemistry_io.h b/src/chemistry/EAGLE/chemistry_io.h index 023791133d6fee78497f27ad741bcfcd1eeec5ce..5e671e4c74dafc515812bdbf7516b03c6bcff08e 100644 --- a/src/chemistry/EAGLE/chemistry_io.h +++ b/src/chemistry/EAGLE/chemistry_io.h @@ -58,50 +58,72 @@ INLINE static int chemistry_write_particles(const struct part* parts, struct io_props* list) { /* List what we want to write */ - list[0] = io_make_output_field("ElementAbundance", FLOAT, - chemistry_element_count, UNIT_CONV_NO_UNITS, - parts, chemistry_data.metal_mass_fraction); + list[0] = io_make_output_field( + "ElementMassFractions", FLOAT, chemistry_element_count, + UNIT_CONV_NO_UNITS, 0.f, parts, chemistry_data.metal_mass_fraction, + "Fractions of the particles' masses that are in the given element"); list[1] = io_make_output_field( - "SmoothedElementAbundance", FLOAT, chemistry_element_count, - UNIT_CONV_NO_UNITS, parts, chemistry_data.smoothed_metal_mass_fraction); + "SmoothedElementMassFractions", FLOAT, chemistry_element_count, + UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.smoothed_metal_mass_fraction, + "Smoothed fractions of the particles' masses that are " + "in the given element"); - list[2] = - io_make_output_field("Metallicity", FLOAT, 1, UNIT_CONV_NO_UNITS, parts, - chemistry_data.metal_mass_fraction_total); + list[2] = io_make_output_field( + "MetalMassFractions", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.metal_mass_fraction_total, + "Fractions of the particles' masses that are in metals"); list[3] = io_make_output_field( - "SmoothedMetallicity", FLOAT, 1, UNIT_CONV_NO_UNITS, parts, - chemistry_data.smoothed_metal_mass_fraction_total); - - list[4] = io_make_output_field("TotalMassFromSNIa", FLOAT, 1, UNIT_CONV_MASS, - parts, chemistry_data.mass_from_SNIa); - - list[5] = io_make_output_field("MetalMassFracFromSNIa", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - chemistry_data.metal_mass_fraction_from_SNIa); - - list[6] = io_make_output_field("TotalMassFromAGB", FLOAT, 1, UNIT_CONV_MASS, - parts, chemistry_data.mass_from_AGB); - - list[7] = - io_make_output_field("MetalMassFracFromAGB", FLOAT, 1, UNIT_CONV_NO_UNITS, - parts, 
chemistry_data.metal_mass_fraction_from_AGB); - - list[8] = io_make_output_field("TotalMassFromSNII", FLOAT, 1, UNIT_CONV_MASS, - parts, chemistry_data.mass_from_SNII); - - list[9] = io_make_output_field("MetalMassFracFromSNII", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - chemistry_data.metal_mass_fraction_from_SNII); - - list[10] = - io_make_output_field("IronMassFracFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, - parts, chemistry_data.iron_mass_fraction_from_SNIa); + "SmoothedMetalMassFractions", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.smoothed_metal_mass_fraction_total, + "Smoothed fractions of the particles' masses that are in metals"); + + list[4] = io_make_output_field( + "MassesFromSNIa", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + chemistry_data.mass_from_SNIa, + "Masses of gas that have been produced by SNIa stars"); + + list[5] = io_make_output_field("MetalMassFractionsFromSNIa", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.metal_mass_fraction_from_SNIa, + "Fractions of the particles' masses that are " + "in metals produced by SNIa stars"); + + list[6] = io_make_output_field( + "MassesFromAGB", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + chemistry_data.mass_from_AGB, + "Masses of gas that have been produced by AGB stars"); + + list[7] = io_make_output_field("MetalMassFractionsFromAGB", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0., parts, + chemistry_data.metal_mass_fraction_from_AGB, + "Fractions of the particles' masses that are " + "in metals produced by AGB stars"); + + list[8] = io_make_output_field( + "MassesFromSNII", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + chemistry_data.mass_from_SNII, + "Masses of gas that have been produced by SNII stars"); + + list[9] = io_make_output_field("MetalMassFractionsFromSNII", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.metal_mass_fraction_from_SNII, + "Fractions of the particles' masses that are " + "in metals produced by SNII stars"); + + list[10] = io_make_output_field("IronMassFractionsFromSNIa", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.iron_mass_fraction_from_SNIa, + "Fractions of the particles' masses that are " + "in iron produced by SNIa stars"); list[11] = io_make_output_field( - "SmoothedIronMassFracFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, parts, - chemistry_data.smoothed_iron_mass_fraction_from_SNIa); + "SmoothedIronMassFractionsFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, chemistry_data.smoothed_iron_mass_fraction_from_SNIa, + "Smoothed fractions of the particles' masses that are " + "in iron produced by SNIa stars"); return 12; } @@ -118,50 +140,72 @@ INLINE static int chemistry_write_sparticles(const struct spart* sparts, struct io_props* list) { /* List what we want to write */ - list[0] = io_make_output_field("ElementAbundance", FLOAT, - chemistry_element_count, UNIT_CONV_NO_UNITS, - sparts, chemistry_data.metal_mass_fraction); + list[0] = io_make_output_field( + "ElementMassFractions", FLOAT, chemistry_element_count, + UNIT_CONV_NO_UNITS, 0.f, sparts, chemistry_data.metal_mass_fraction, + "Fractions of the particles' masses that are in the given element"); list[1] = io_make_output_field( - "SmoothedElementAbundance", FLOAT, chemistry_element_count, - UNIT_CONV_NO_UNITS, sparts, chemistry_data.smoothed_metal_mass_fraction); + "SmoothedElementMassFractions", FLOAT, chemistry_element_count, + UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.smoothed_metal_mass_fraction, + "Smoothed fractions of the particles' masses that are " + "in the given element"); - list[2] = - 
io_make_output_field("Metallicity", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts, - chemistry_data.metal_mass_fraction_total); + list[2] = io_make_output_field( + "MetalMassFractions", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.metal_mass_fraction_total, + "Fractions of the particles' masses that are in metals"); list[3] = io_make_output_field( - "SmoothedMetallicity", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts, - chemistry_data.smoothed_metal_mass_fraction_total); - - list[4] = io_make_output_field("TotalMassFromSNIa", FLOAT, 1, UNIT_CONV_MASS, - sparts, chemistry_data.mass_from_SNIa); - - list[5] = io_make_output_field("MetalMassFracFromSNIa", FLOAT, 1, - UNIT_CONV_NO_UNITS, sparts, - chemistry_data.metal_mass_fraction_from_SNIa); - - list[6] = io_make_output_field("TotalMassFromAGB", FLOAT, 1, UNIT_CONV_MASS, - sparts, chemistry_data.mass_from_AGB); - - list[7] = - io_make_output_field("MetalMassFracFromAGB", FLOAT, 1, UNIT_CONV_NO_UNITS, - sparts, chemistry_data.metal_mass_fraction_from_AGB); - - list[8] = io_make_output_field("TotalMassFromSNII", FLOAT, 1, UNIT_CONV_MASS, - sparts, chemistry_data.mass_from_SNII); - - list[9] = io_make_output_field("MetalMassFracFromSNII", FLOAT, 1, - UNIT_CONV_NO_UNITS, sparts, - chemistry_data.metal_mass_fraction_from_SNII); - - list[10] = - io_make_output_field("IronMassFracFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, - sparts, chemistry_data.iron_mass_fraction_from_SNIa); + "SmoothedMetalMassFractions", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.smoothed_metal_mass_fraction_total, + "Smoothed fractions of the particles masses that are in metals"); + + list[4] = io_make_output_field( + "MassesFromSNIa", FLOAT, 1, UNIT_CONV_MASS, 0.f, sparts, + chemistry_data.mass_from_SNIa, + "Masses of gas that have been produced by SNIa stars"); + + list[5] = io_make_output_field("MetalMassFractionsFromSNIa", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.metal_mass_fraction_from_SNIa, + "Fractions of the particles' masses that are " + "in metals produced by SNIa stars"); + + list[6] = io_make_output_field( + "MassesFromAGB", FLOAT, 1, UNIT_CONV_MASS, 0.f, sparts, + chemistry_data.mass_from_AGB, + "Masses of gas that have been produced by AGN stars"); + + list[7] = io_make_output_field("MetalMassFractionsFromAGB", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0., sparts, + chemistry_data.metal_mass_fraction_from_AGB, + "Fractions of the particles' masses that are " + "in metals produced by AGB stars"); + + list[8] = io_make_output_field( + "MassesFromSNII", FLOAT, 1, UNIT_CONV_MASS, 0.f, sparts, + chemistry_data.mass_from_SNII, + "Masses of gas that have been produced by SNII stars"); + + list[9] = io_make_output_field("MetalMassFractionsFromSNII", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.metal_mass_fraction_from_SNII, + "Fractions of the particles' masses that are " + "in metals produced by SNII stars"); + + list[10] = io_make_output_field("IronMassFractionsFromSNIa", FLOAT, 1, + UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.iron_mass_fraction_from_SNIa, + "Fractions of the particles' masses that are " + "in iron produced by SNIa stars"); list[11] = io_make_output_field( - "SmoothedIronMassFracFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts, - chemistry_data.smoothed_iron_mass_fraction_from_SNIa); + "SmoothedIronMassFractionsFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, + sparts, chemistry_data.smoothed_iron_mass_fraction_from_SNIa, + "Smoothed fractions of the particles' masses that are " + "in iron produced by SNIa 
stars"); return 12; } @@ -178,34 +222,54 @@ INLINE static int chemistry_write_bparticles(const struct bpart* bparts, struct io_props* list) { /* List what we want to write */ - list[0] = - io_make_output_field("ElementMass", FLOAT, chemistry_element_count, - UNIT_CONV_MASS, bparts, chemistry_data.metal_mass); - - list[1] = io_make_output_field("MetalMass", FLOAT, chemistry_element_count, - UNIT_CONV_MASS, bparts, - chemistry_data.metal_mass_total); - - list[2] = io_make_output_field("MassFromSNIa", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.mass_from_SNIa); + list[0] = io_make_output_field( + "ElementMasses", FLOAT, chemistry_element_count, UNIT_CONV_MASS, 0.f, + bparts, chemistry_data.metal_mass, + "Mass contents of the BH particles in a given element"); - list[3] = io_make_output_field("MassFromSNII", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.mass_from_SNII); - - list[4] = io_make_output_field("MassFromAGB", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.mass_from_AGB); - - list[5] = io_make_output_field("MetalMassFromSNIa", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.metal_mass_from_SNIa); + list[1] = io_make_output_field( + "MetalMasses", FLOAT, chemistry_element_count, UNIT_CONV_MASS, 0.f, + bparts, chemistry_data.metal_mass_total, + "Mass contents of the BH particles in a metals"); - list[6] = io_make_output_field("MetalMassFromSNII", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.metal_mass_from_SNII); + list[2] = io_make_output_field( + "MassesFromSNIa", FLOAT, 1, UNIT_CONV_MASS, 0.f, bparts, + chemistry_data.mass_from_SNIa, + "Masses of the BH particles that have been produced by SNIa stars"); - list[7] = io_make_output_field("MetalMassFromAGB", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.metal_mass_from_AGB); + list[3] = io_make_output_field( + "MassesFromSNII", FLOAT, 1, UNIT_CONV_MASS, 0.f, bparts, + chemistry_data.mass_from_SNII, + "Masses of the BH particles that have been produced by SNII stars"); + + list[4] = io_make_output_field( + "MassesFromAGB", FLOAT, 1, UNIT_CONV_MASS, 0.f, bparts, + chemistry_data.mass_from_AGB, + "Masses of the BH particles that have been produced by AGB stars"); + + list[5] = + io_make_output_field("MetalMassesFromSNIa", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, chemistry_data.metal_mass_from_SNIa, + "Masses of the BH particles in metals that have " + "been produced by SNIa stars"); + + list[6] = + io_make_output_field("MetalMassesFromSNII", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, chemistry_data.metal_mass_from_SNII, + "Masses of the BH particles in metals that have " + "been produced by SNII stars"); - list[8] = io_make_output_field("IronMassFromSNIa", FLOAT, 1, UNIT_CONV_MASS, - bparts, chemistry_data.iron_mass_from_SNIa); + list[7] = + io_make_output_field("MetalMassesFromAGB", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, chemistry_data.metal_mass_from_AGB, + "Masses of the BH particles in metals that have " + "been produced by AGB stars"); + + list[8] = + io_make_output_field("IronMassesFromSNIa", FLOAT, 1, UNIT_CONV_MASS, 0.f, + bparts, chemistry_data.iron_mass_from_SNIa, + "Masses of the BH particles in iron that have been " + "produced by SNIa stars"); return 9; } diff --git a/src/chemistry/GEAR/chemistry_io.h b/src/chemistry/GEAR/chemistry_io.h index 7d21be4d138f40b626c29a9711166496f34b75d3..008268657fc89ae20208585cb983f4d436122201 100644 --- a/src/chemistry/GEAR/chemistry_io.h +++ b/src/chemistry/GEAR/chemistry_io.h @@ -73,16 +73,19 @@ INLINE static int chemistry_write_particles(const struct part* parts, 
struct io_props* list) { /* List what we want to write */ - list[0] = io_make_output_field( - "SmoothedElementAbundance", FLOAT, chemistry_element_count, - UNIT_CONV_NO_UNITS, parts, chemistry_data.smoothed_metal_mass_fraction); + list[0] = + io_make_output_field("SmoothedElementAbundances", FLOAT, + chemistry_element_count, UNIT_CONV_NO_UNITS, 0.f, + parts, chemistry_data.smoothed_metal_mass_fraction, + "Element abundances smoothed over the neighbors"); - list[1] = io_make_output_field("Z", FLOAT, 1, UNIT_CONV_NO_UNITS, parts, - chemistry_data.Z); + list[1] = io_make_output_field("Z", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, + chemistry_data.Z, "Temporary field"); - list[2] = io_make_output_field("ElementAbundance", FLOAT, + list[2] = io_make_output_field("ElementAbundances", FLOAT, chemistry_element_count, UNIT_CONV_NO_UNITS, - parts, chemistry_data.metal_mass_fraction); + 0.f, parts, chemistry_data.metal_mass_fraction, + "Mass fraction of each element"); return 3; } @@ -99,16 +102,19 @@ INLINE static int chemistry_write_sparticles(const struct spart* sparts, struct io_props* list) { /* List what we want to write */ - list[0] = io_make_output_field( - "SmoothedElementAbundance", FLOAT, chemistry_element_count, - UNIT_CONV_NO_UNITS, sparts, chemistry_data.smoothed_metal_mass_fraction); - - list[1] = io_make_output_field("Z", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts, - chemistry_data.Z); - - list[2] = io_make_output_field("ElementAbundance", FLOAT, - chemistry_element_count, UNIT_CONV_NO_UNITS, - sparts, chemistry_data.metal_mass_fraction); + list[0] = + io_make_output_field("SmoothedElementAbundances", FLOAT, + chemistry_element_count, UNIT_CONV_NO_UNITS, 0.f, + sparts, chemistry_data.smoothed_metal_mass_fraction, + "Element abundances smoothed over the neighbors"); + + list[1] = io_make_output_field("Z", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + chemistry_data.Z, "Temporary field"); + + list[2] = io_make_output_field( + "ElementAbundance", FLOAT, chemistry_element_count, UNIT_CONV_NO_UNITS, + 0.f, sparts, chemistry_data.metal_mass_fraction, + "Mass fraction of each element"); return 3; } diff --git a/src/collectgroup.c b/src/collectgroup.c index d17608d67a291ae412728b3fcd9ea80c192802bf..8dd9e974f41c979f4c50e84fb555f9b80db19e25 100644 --- a/src/collectgroup.c +++ b/src/collectgroup.c @@ -122,7 +122,8 @@ void collectgroup1_apply(const struct collectgroup1 *grp1, struct engine *e) { e->total_nr_cells = grp1->total_nr_cells; e->total_nr_tasks = grp1->total_nr_tasks; e->tasks_per_cell_max = grp1->tasks_per_cell_max; - e->sfh = grp1->sfh; + + star_formation_logger_add_to_accumulator(&e->sfh, &grp1->sfh); } /** diff --git a/src/common_io.c b/src/common_io.c index 124a122de7cba6b1b6fde36e0ecfea663f2acb31..945badd14cb814e5c06c5e0edf2b983669259850 100644 --- a/src/common_io.c +++ b/src/common_io.c @@ -24,10 +24,15 @@ /* This object's header. */ #include "common_io.h" +/* First common header */ +#include "engine.h" + /* Local includes. */ +#include "black_holes_io.h" #include "chemistry_io.h" -#include "engine.h" +#include "cooling_io.h" #include "error.h" +#include "fof_io.h" #include "gravity_io.h" #include "hydro.h" #include "hydro_io.h" @@ -35,9 +40,12 @@ #include "kernel_hydro.h" #include "part.h" #include "part_type.h" +#include "star_formation_io.h" #include "stars_io.h" #include "threadpool.h" +#include "tracers_io.h" #include "units.h" +#include "velociraptor_io.h" #include "version.h" /* Some standard headers. 
*/ @@ -392,8 +400,8 @@ static long long cell_count_non_inhibited_gas(const struct cell* c) { struct part* parts = c->hydro.parts; long long count = 0; for (int i = 0; i < total_count; ++i) { - if (!(parts[i].time_bin != time_bin_inhibited) && - !(parts[i].time_bin != time_bin_not_created)) { + if ((parts[i].time_bin != time_bin_inhibited) && + (parts[i].time_bin != time_bin_not_created)) { ++count; } } @@ -405,8 +413,8 @@ static long long cell_count_non_inhibited_dark_matter(const struct cell* c) { struct gpart* gparts = c->grav.parts; long long count = 0; for (int i = 0; i < total_count; ++i) { - if (!(gparts[i].time_bin != time_bin_inhibited) && - !(gparts[i].time_bin != time_bin_not_created) && + if ((gparts[i].time_bin != time_bin_inhibited) && + (gparts[i].time_bin != time_bin_not_created) && (gparts[i].type == swift_type_dark_matter)) { ++count; } @@ -419,8 +427,8 @@ static long long cell_count_non_inhibited_stars(const struct cell* c) { struct spart* sparts = c->stars.parts; long long count = 0; for (int i = 0; i < total_count; ++i) { - if (!(sparts[i].time_bin != time_bin_inhibited) && - !(sparts[i].time_bin != time_bin_not_created)) { + if ((sparts[i].time_bin != time_bin_inhibited) && + (sparts[i].time_bin != time_bin_not_created)) { ++count; } } @@ -432,8 +440,8 @@ static long long cell_count_non_inhibited_black_holes(const struct cell* c) { struct bpart* bparts = c->black_holes.parts; long long count = 0; for (int i = 0; i < total_count; ++i) { - if (!(bparts[i].time_bin != time_bin_inhibited) && - !(bparts[i].time_bin != time_bin_not_created)) { + if ((bparts[i].time_bin != time_bin_inhibited) && + (bparts[i].time_bin != time_bin_not_created)) { ++count; } } @@ -1889,12 +1897,6 @@ void io_collect_gparts_to_write( void io_check_output_fields(const struct swift_params* params, const long long N_total[3]) { - /* Create some fake particles as arguments for the writing routines */ - struct part p; - struct xpart xp; - struct spart sp; - struct gpart gp; - /* Copy N_total to array with length == 6 */ const long long nr_total[swift_type_count] = {N_total[0], N_total[1], 0, 0, N_total[2], 0}; @@ -1912,16 +1914,39 @@ void io_check_output_fields(const struct swift_params* params, switch (ptype) { case swift_type_gas: - hydro_write_particles(&p, &xp, list, &num_fields); - num_fields += chemistry_write_particles(&p, list + num_fields); + hydro_write_particles(NULL, NULL, list, &num_fields); + num_fields += chemistry_write_particles(NULL, list + num_fields); + num_fields += + cooling_write_particles(NULL, NULL, list + num_fields, NULL); + num_fields += tracers_write_particles(NULL, NULL, list + num_fields, + /*with_cosmology=*/1); + num_fields += + star_formation_write_particles(NULL, NULL, list + num_fields); + num_fields += fof_write_parts(NULL, NULL, list + num_fields); + num_fields += velociraptor_write_parts(NULL, NULL, list + num_fields); break; case swift_type_dark_matter: - darkmatter_write_particles(&gp, list, &num_fields); + darkmatter_write_particles(NULL, list, &num_fields); + num_fields += fof_write_gparts(NULL, list + num_fields); + num_fields += velociraptor_write_gparts(NULL, list + num_fields); break; case swift_type_stars: - stars_write_particles(&sp, list, &num_fields); + stars_write_particles(NULL, list, &num_fields, /*with_cosmology=*/1); + num_fields += chemistry_write_sparticles(NULL, list + num_fields); + num_fields += tracers_write_sparticles(NULL, list + num_fields, + /*with_cosmology=*/1); + num_fields += fof_write_sparts(NULL, list + num_fields); + num_fields += 
velociraptor_write_sparts(NULL, list + num_fields); + break; + + case swift_type_black_hole: + black_holes_write_particles(NULL, list, &num_fields, + /*with_cosmology=*/1); + num_fields += chemistry_write_bparticles(NULL, list + num_fields); + num_fields += fof_write_bparts(NULL, list + num_fields); + num_fields += velociraptor_write_bparts(NULL, list + num_fields); break; default: @@ -1980,13 +2005,17 @@ void io_check_output_fields(const struct swift_params* params, /** * @brief Write the output field parameters file * - * @param filename The file to write + * @param filename The file to write. */ void io_write_output_field_parameter(const char* filename) { FILE* file = fopen(filename, "w"); if (file == NULL) error("Error opening file '%s'", filename); + /* Create a fake unit system for the snapshots */ + struct unit_system snapshot_units; + units_init_cgs(&snapshot_units); + /* Loop over all particle types */ fprintf(file, "SelectOutput:\n"); for (int ptype = 0; ptype < swift_type_count; ptype++) { @@ -2000,14 +2029,37 @@ void io_write_output_field_parameter(const char* filename) { case swift_type_gas: hydro_write_particles(NULL, NULL, list, &num_fields); num_fields += chemistry_write_particles(NULL, list + num_fields); + num_fields += + cooling_write_particles(NULL, NULL, list + num_fields, NULL); + num_fields += tracers_write_particles(NULL, NULL, list + num_fields, + /*with_cosmology=*/1); + num_fields += + star_formation_write_particles(NULL, NULL, list + num_fields); + num_fields += fof_write_parts(NULL, NULL, list + num_fields); + num_fields += velociraptor_write_parts(NULL, NULL, list + num_fields); break; case swift_type_dark_matter: darkmatter_write_particles(NULL, list, &num_fields); + num_fields += fof_write_gparts(NULL, list + num_fields); + num_fields += velociraptor_write_gparts(NULL, list + num_fields); break; case swift_type_stars: - stars_write_particles(NULL, list, &num_fields); + stars_write_particles(NULL, list, &num_fields, /*with_cosmology=*/1); + num_fields += chemistry_write_sparticles(NULL, list + num_fields); + num_fields += tracers_write_sparticles(NULL, list + num_fields, + /*with_cosmology=*/1); + num_fields += fof_write_sparts(NULL, list + num_fields); + num_fields += velociraptor_write_sparts(NULL, list + num_fields); + break; + + case swift_type_black_hole: + black_holes_write_particles(NULL, list, &num_fields, + /*with_cosmology=*/1); + num_fields += chemistry_write_bparticles(NULL, list + num_fields); + num_fields += fof_write_bparts(NULL, list + num_fields); + num_fields += velociraptor_write_bparts(NULL, list + num_fields); break; default: @@ -2020,8 +2072,17 @@ void io_write_output_field_parameter(const char* filename) { fprintf(file, " # Particle Type %s\n", part_type_names[ptype]); /* Write all the fields of this particle type */ - for (int i = 0; i < num_fields; ++i) - fprintf(file, " %s_%s: 1\n", list[i].name, part_type_names[ptype]); + for (int i = 0; i < num_fields; ++i) { + + char buffer[FIELD_BUFFER_SIZE] = {0}; + units_cgs_conversion_string(buffer, &snapshot_units, list[i].units, + list[i].scale_factor_exponent); + + fprintf(file, + " %s_%s: %*d \t # %s. 
::: Conversion to physical CGS: %s\n", + list[i].name, part_type_names[ptype], + (int)(28 - strlen(list[i].name)), 1, list[i].description, buffer); + } fprintf(file, "\n"); } diff --git a/src/common_io.h b/src/common_io.h index c93414d1ffca8f7ead7a1ff29d387967f82a9cb2..03437d1a38a09e59e6abd4b9ca762f181ffc3731 100644 --- a/src/common_io.h +++ b/src/common_io.h @@ -27,7 +27,8 @@ #include "part_type.h" #include "units.h" -#define FIELD_BUFFER_SIZE 200 +#define FIELD_BUFFER_SIZE 64 +#define DESCRIPTION_BUFFER_SIZE 256 #define PARTICLE_GROUP_BUFFER_SIZE 50 #define FILENAME_BUFFER_SIZE 150 #define IO_BUFFER_ALIGNMENT 1024 diff --git a/src/cooling/Compton/cooling_io.h b/src/cooling/Compton/cooling_io.h index 8fa3944ff78e7592da3978ee9465451c96e1d533..1f137e18ca3c62c1083a8139cbb636b0b934d81d 100644 --- a/src/cooling/Compton/cooling_io.h +++ b/src/cooling/Compton/cooling_io.h @@ -64,9 +64,9 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list, const struct cooling_function_data* cooling) { - list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1, - UNIT_CONV_TEMPERATURE, parts, - xparts, convert_part_T); + list[0] = io_make_output_field_convert_part( + "Temperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, parts, xparts, + convert_part_T, "Temperatures of the gas particles"); return 1; } diff --git a/src/cooling/EAGLE/cooling_io.h b/src/cooling/EAGLE/cooling_io.h index 5508153afc094d84383893f55ac0362a6d427b24..a57e4451d8e350e1f16a64318b510495ed77e811 100644 --- a/src/cooling/EAGLE/cooling_io.h +++ b/src/cooling/EAGLE/cooling_io.h @@ -63,9 +63,9 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list, const struct cooling_function_data* cooling) { - list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1, - UNIT_CONV_TEMPERATURE, parts, - xparts, convert_part_T); + list[0] = io_make_output_field_convert_part( + "Temperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, parts, xparts, + convert_part_T, "Temperatures of the gas particles"); return 1; } diff --git a/src/cooling/const_du/cooling_io.h b/src/cooling/const_du/cooling_io.h index a60aa5d282d0a244f206f74827f0c1979d3bcb75..8c82d0e3f7b134fc1fa1ee12f81474e1223912cb 100644 --- a/src/cooling/const_du/cooling_io.h +++ b/src/cooling/const_du/cooling_io.h @@ -73,9 +73,9 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list, const struct cooling_function_data* cooling) { - list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1, - UNIT_CONV_TEMPERATURE, parts, - xparts, convert_part_T); + list[0] = io_make_output_field_convert_part( + "Temperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, parts, xparts, + convert_part_T, "Temperatures of the gas particles"); return 1; } diff --git a/src/cooling/const_lambda/cooling_io.h b/src/cooling/const_lambda/cooling_io.h index 2e2ba799ab51a73c610701499ef61f1b398e42c5..65f47dbbec97c51a8d59de58a4dd458cf1c366bf 100644 --- a/src/cooling/const_lambda/cooling_io.h +++ b/src/cooling/const_lambda/cooling_io.h @@ -72,12 +72,14 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list, const struct cooling_function_data* cooling) { - list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1, - 
UNIT_CONV_TEMPERATURE, parts, - xparts, convert_part_T); + list[0] = io_make_output_field_convert_part( + "Temperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, parts, xparts, + convert_part_T, "Temperatures of the gas particles"); - list[1] = io_make_output_field("RadiatedEnergy", FLOAT, 1, UNIT_CONV_ENERGY, - xparts, cooling_data.radiated_energy); + list[1] = io_make_output_field( + "RadiatedEnergies", FLOAT, 1, UNIT_CONV_ENERGY, 0.f, xparts, + cooling_data.radiated_energy, + "Thermal energies radiated by the cooling mechanism"); return 2; } diff --git a/src/cooling/grackle/cooling.h b/src/cooling/grackle/cooling.h index 1abbe5d3827726bde34502cd2b6a1d18cd309950..57f4b146c451557365e08d260ddd45276844af9c 100644 --- a/src/cooling/grackle/cooling.h +++ b/src/cooling/grackle/cooling.h @@ -697,6 +697,17 @@ __attribute__((always_inline)) INLINE static void cooling_cool_part( xp->cooling_data.radiated_energy -= hydro_get_mass(p) * cooling_du_dt * dt; } +/** + * @brief Compute the temperature of a #part based on the cooling function. + * + * @param phys_const #phys_const data structure. + * @param hydro_props The properties of the hydro scheme. + * @param us The internal system of units. + * @param cosmo #cosmology data structure. + * @param cooling #cooling_function_data struct. + * @param p #part data. + * @param xp Pointer to the #xpart data. + */ static INLINE float cooling_get_temperature( const struct phys_const* restrict phys_const, const struct hydro_props* restrict hydro_props, @@ -704,9 +715,30 @@ static INLINE float cooling_get_temperature( const struct cosmology* restrict cosmo, const struct cooling_function_data* restrict cooling, const struct part* restrict p, const struct xpart* restrict xp) { + // TODO use the grackle library + + /* Physical constants */ + const double m_H = phys_const->const_proton_mass; + const double k_B = phys_const->const_boltzmann_k; - error("This function needs implementing!!"); - return 0.; + /* Gas properties */ + const double T_transition = hydro_props->hydrogen_ionization_temperature; + const double mu_neutral = hydro_props->mu_neutral; + const double mu_ionised = hydro_props->mu_ionised; + + /* Particle temperature */ + const double u = hydro_get_physical_internal_energy(p, xp, cosmo); + + /* Temperature over mean molecular weight */ + const double T_over_mu = hydro_gamma_minus_one * u * m_H / k_B; + + /* Are we above or below the HII -> HI transition? */ + if (T_over_mu > (T_transition + 1.) / mu_ionised) + return T_over_mu * mu_ionised; + else if (T_over_mu < (T_transition - 1.) 
/ mu_neutral) + return T_over_mu * mu_neutral; + else + return T_transition; } /** diff --git a/src/cooling/grackle/cooling_io.h b/src/cooling/grackle/cooling_io.h index 3905cafd05fb8e15ddf33f4ea688d6144698df73..18712f4cf6c74253bfed3bd36f1a4484b9e50943 100644 --- a/src/cooling/grackle/cooling_io.h +++ b/src/cooling/grackle/cooling_io.h @@ -63,23 +63,29 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( #if COOLING_GRACKLE_MODE >= 1 /* List what we want to write */ - list[0] = io_make_output_field("HI", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HI_frac); + list[0] = + io_make_output_field("HI", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HI_frac, "HI mass fraction"); - list[1] = io_make_output_field("HII", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HII_frac); + list[1] = + io_make_output_field("HII", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HII_frac, "HII mass fraction"); - list[2] = io_make_output_field("HeI", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HeI_frac); + list[2] = + io_make_output_field("HeI", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HeI_frac, "HeI mass fraction"); - list[3] = io_make_output_field("HeII", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HeII_frac); + list[3] = + io_make_output_field("HeII", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HeII_frac, "HeII mass fraction"); - list[4] = io_make_output_field("HeIII", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HeIII_frac); + list[4] = + io_make_output_field("HeIII", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HeIII_frac, "HeIII mass fraction"); - list[5] = io_make_output_field("e", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.e_frac); + list[5] = + io_make_output_field("e", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.e_frac, "free electron mass fraction"); num += 6; #endif @@ -87,14 +93,17 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( #if COOLING_GRACKLE_MODE >= 2 list += num; - list[0] = io_make_output_field("HM", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HM_frac); + list[0] = + io_make_output_field("HM", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HM_frac, "H- mass fraction"); - list[1] = io_make_output_field("H2I", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.H2I_frac); + list[1] = + io_make_output_field("H2I", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.H2I_frac, "H2I mass fraction"); - list[2] = io_make_output_field("H2II", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.H2II_frac); + list[2] = + io_make_output_field("H2II", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.H2II_frac, "H2II mass fraction"); num += 3; #endif @@ -102,14 +111,17 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( #if COOLING_GRACKLE_MODE >= 3 list += num; - list[0] = io_make_output_field("DI", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.DI_frac); + list[0] = + io_make_output_field("DI", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.DI_frac, "DI mass fraction"); - list[1] = io_make_output_field("DII", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.DII_frac); + list[1] = + io_make_output_field("DII", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.DII_frac, "DII mass fraction"); - list[2] = io_make_output_field("HDI", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts, - cooling_data.HDI_frac); + list[2] = + io_make_output_field("HDI", 
FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + cooling_data.HDI_frac, "HDI mass fraction"); num += 3; #endif diff --git a/src/cooling/none/cooling_io.h b/src/cooling/none/cooling_io.h index 16b4b4ca29f8ebd325decc25420d7db617e1e4ef..0c551f0f6617a57b85672654dd633e4f90afc8dd 100644 --- a/src/cooling/none/cooling_io.h +++ b/src/cooling/none/cooling_io.h @@ -62,9 +62,9 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list, const struct cooling_function_data* cooling) { - list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1, - UNIT_CONV_TEMPERATURE, parts, - xparts, convert_part_T); + list[0] = io_make_output_field_convert_part( + "Temperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, parts, xparts, + convert_part_T, "Temperature of the particles"); return 1; } diff --git a/src/engine.c b/src/engine.c index 8f14cfd0088cff9d47bee38662a5aeff357dd8e0..33f94e9c16475b5281cdb5948e6b31e1026c7d46 100644 --- a/src/engine.c +++ b/src/engine.c @@ -5062,6 +5062,11 @@ void engine_init(struct engine *e, struct space *s, struct swift_params *params, parser_get_opt_param_double(params, "FOF:delta_time", -1.); } + /* Initialize the star formation history structure */ + if (e->policy & engine_policy_star_formation) { + star_formation_logger_accumulator_init(&e->sfh); + } + engine_init_output_lists(e, params); } diff --git a/src/engine.h b/src/engine.h index 8296933561b5f7664904d439ec6ca20f1e37a4b8..967a430871648ac4cb91f390f8b85675a314d3a8 100644 --- a/src/engine.h +++ b/src/engine.h @@ -237,7 +237,7 @@ struct engine { long long b_updates_since_rebuild; /* Star formation logger information */ - struct star_formation_history sfh; + struct star_formation_history_accumulator sfh; /* Properties of the previous step */ int step_props; diff --git a/src/equation_of_state/planetary/tillotson.h b/src/equation_of_state/planetary/tillotson.h index 7522ba7b17e0e1bf734b853a89aada0375072ddc..9e2fa9dd9eb50ca8e8d3ba2e62ddb765f29cb2b1 100644 --- a/src/equation_of_state/planetary/tillotson.h +++ b/src/equation_of_state/planetary/tillotson.h @@ -55,9 +55,9 @@ INLINE static void set_Til_iron(struct Til_params *mat, mat->b = 1.5f; mat->A = 1.28e11f; mat->B = 1.05e11f; - mat->u_0 = 9.5e9f; - mat->u_iv = 2.4e9f; - mat->u_cv = 8.67e9f; + mat->u_0 = 9.5e6f; + mat->u_iv = 2.4e6f; + mat->u_cv = 8.67e6f; mat->alpha = 5.0f; mat->beta = 5.0f; mat->eta_min = 0.0f; @@ -72,9 +72,9 @@ INLINE static void set_Til_granite(struct Til_params *mat, mat->b = 1.3f; mat->A = 1.8e10f; mat->B = 1.8e10f; - mat->u_0 = 1.6e10f; - mat->u_iv = 3.5e9f; - mat->u_cv = 1.8e10f; + mat->u_0 = 1.6e7f; + mat->u_iv = 3.5e6f; + mat->u_cv = 1.8e7f; mat->alpha = 5.0f; mat->beta = 5.0f; mat->eta_min = 0.0f; @@ -89,9 +89,9 @@ INLINE static void set_Til_water(struct Til_params *mat, mat->b = 0.15f; mat->A = 2.18e9f; mat->B = 1.325e10f; - mat->u_0 = 7.0e9f; - mat->u_iv = 4.19e8f; - mat->u_cv = 2.69e9f; + mat->u_0 = 7.0e6f; + mat->u_iv = 4.19e5f; + mat->u_cv = 2.69e6f; mat->alpha = 10.0f; mat->beta = 5.0f; mat->eta_min = 0.925f; diff --git a/src/fof_io.h b/src/fof_io.h index e6e91c577ad2676cc9bb2d731b2250c9c05cafd4..24edefed3f0e04d40b9d6a1d444c8942426e4f41 100644 --- a/src/fof_io.h +++ b/src/fof_io.h @@ -60,9 +60,10 @@ INLINE static int fof_write_parts(const struct part* parts, #ifdef WITH_FOF - list[0] = io_make_output_field_convert_part("GroupIDs", LONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, xparts, - convert_part_group_id); + list[0] = io_make_output_field_convert_part( 
+ "FOFGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_part_group_id, + "Friends-Of-Friends ID of the group the particles belong to"); return 1; #else return 0; @@ -82,8 +83,10 @@ INLINE static int fof_write_gparts(const struct gpart* gparts, #ifdef WITH_FOF - list[0] = io_make_output_field("GroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, - gparts, fof_data.group_id); + list[0] = io_make_output_field( + "FOFGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, gparts, + fof_data.group_id, + "Friends-Of-Friends ID of the group the particles belong to"); return 1; #else @@ -104,9 +107,10 @@ INLINE static int fof_write_sparts(const struct spart* sparts, #ifdef WITH_FOF - list[0] = io_make_output_field_convert_spart("GroupIDs", LONGLONG, 1, - UNIT_CONV_NO_UNITS, sparts, - convert_spart_group_id); + list[0] = io_make_output_field_convert_spart( + "FOFGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + convert_spart_group_id, + "Friends-Of-Friends ID of the group the particles belong to"); return 1; #else return 0; @@ -126,9 +130,10 @@ INLINE static int fof_write_bparts(const struct bpart* bparts, #ifdef WITH_FOF - list[0] = io_make_output_field_convert_bpart("GroupIDs", LONGLONG, 1, - UNIT_CONV_NO_UNITS, bparts, - convert_bpart_group_id); + list[0] = io_make_output_field_convert_bpart( + "FOFGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, bparts, + convert_bpart_group_id, + "Friends-Of-Friends ID of the group the particles belong to"); return 1; #else return 0; diff --git a/src/gravity/Default/gravity_io.h b/src/gravity/Default/gravity_io.h index 1ba3899e7ecc346227c10679bb8b704937c625b2..fea193e92cfe738880ef146b199253c7300501b4 100644 --- a/src/gravity/Default/gravity_io.h +++ b/src/gravity/Default/gravity_io.h @@ -108,13 +108,20 @@ INLINE static void darkmatter_write_particles(const struct gpart* gparts, /* List what we want to write */ list[0] = io_make_output_field_convert_gpart( - "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, gparts, convert_gpart_pos); + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, gparts, + convert_gpart_pos, "Co-moving position of the particles"); + list[1] = io_make_output_field_convert_gpart( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, gparts, convert_gpart_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, gparts, mass); - list[3] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, gparts, id_or_neg_offset); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, gparts, convert_gpart_vel, + "Peculiar velocities of the stars. 
This is a * dx/dt where x is the " + "co-moving position of the particles."); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + gparts, mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, gparts, + id_or_neg_offset, "Unique ID of the particles"); } #endif /* SWIFT_DEFAULT_GRAVITY_IO_H */ diff --git a/src/gravity/Potential/gravity_io.h b/src/gravity/Potential/gravity_io.h index 6aa4cbb4786af99ac372564ed67f4ce77c08f25c..e43bf4fd9ad57b293f4e8567f048dd08d3c89d59 100644 --- a/src/gravity/Potential/gravity_io.h +++ b/src/gravity/Potential/gravity_io.h @@ -115,7 +115,7 @@ INLINE static void darkmatter_write_particles(const struct gpart* gparts, io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, gparts, mass); list[3] = io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, gparts, id_or_neg_offset); - list[4] = io_make_output_field("Potential", FLOAT, 1, UNIT_CONV_POTENTIAL, + list[4] = io_make_output_field("Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, gparts, potential); } diff --git a/src/hydro/AnarchyDU/hydro.h b/src/hydro/AnarchyDU/hydro.h index 53c66e316136b172f7610dd6f066c6e841421ced..ac4ed6c2f8176669aa7364109f95de08ddbf722c 100644 --- a/src/hydro/AnarchyDU/hydro.h +++ b/src/hydro/AnarchyDU/hydro.h @@ -616,6 +616,7 @@ __attribute__((always_inline)) INLINE static void hydro_reset_gradient( struct part *restrict p) { p->viscosity.v_sig = 2.f * p->force.soundspeed; + p->force.alpha_visc_max_ngb = p->viscosity.alpha; } /** @@ -774,11 +775,23 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force( new_diffusion_alpha += alpha_diff_dt * dt_alpha; /* Consistency checks to ensure min < alpha < max */ - new_diffusion_alpha = - min(new_diffusion_alpha, hydro_props->diffusion.alpha_max); new_diffusion_alpha = max(new_diffusion_alpha, hydro_props->diffusion.alpha_min); + /* Now we limit in viscous flows; remove diffusion there. If we + * don't do that, then we end up diffusing energy away in supernovae. + * This is an EAGLE-specific fix. We limit based on the maximal + * viscous alpha over our neighbours in an attempt to keep diffusion + * low near to supernovae sites. 
*/ + + /* This also enforces alpha_diff < alpha_diff_max */ + + const float viscous_diffusion_limit = + hydro_props->diffusion.alpha_max * + (1.f - p->force.alpha_visc_max_ngb / hydro_props->viscosity.alpha_max); + + new_diffusion_alpha = min(new_diffusion_alpha, viscous_diffusion_limit); + p->diffusion.alpha = new_diffusion_alpha; } diff --git a/src/hydro/AnarchyDU/hydro_iact.h b/src/hydro/AnarchyDU/hydro_iact.h index cba945ecae8cbf2971b3d0d810cd565cb8ecc8eb..19489f7460753b15961603e68c22c039f6de27d3 100644 --- a/src/hydro/AnarchyDU/hydro_iact.h +++ b/src/hydro/AnarchyDU/hydro_iact.h @@ -227,6 +227,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_gradient( const float delta_u_factor = (pi->u - pj->u) * r_inv; pi->diffusion.laplace_u += pj->mass * delta_u_factor * wi_dx / pj->rho; pj->diffusion.laplace_u -= pi->mass * delta_u_factor * wj_dx / pi->rho; + + /* Set the maximal alpha from the previous step over the neighbours + * (this is used to limit the diffusion in hydro_prepare_force) */ + const float alpha_i = pi->viscosity.alpha; + const float alpha_j = pj->viscosity.alpha; + pi->force.alpha_visc_max_ngb = max(pi->force.alpha_visc_max_ngb, alpha_j); + pj->force.alpha_visc_max_ngb = max(pj->force.alpha_visc_max_ngb, alpha_i); } /** @@ -289,6 +296,11 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_gradient( const float delta_u_factor = (pi->u - pj->u) * r_inv; pi->diffusion.laplace_u += pj->mass * delta_u_factor * wi_dx / pj->rho; + + /* Set the maximal alpha from the previous step over the neighbours + * (this is used to limit the diffusion in hydro_prepare_force) */ + const float alpha_j = pj->viscosity.alpha; + pi->force.alpha_visc_max_ngb = max(pi->force.alpha_visc_max_ngb, alpha_j); } /** @@ -398,9 +410,9 @@ __attribute__((always_inline)) INLINE static void runner_iact_force( /* Diffusion term */ const float alpha_diff = 0.5f * (pi->diffusion.alpha + pj->diffusion.alpha); - const float v_diff = - alpha_diff * sqrtf(0.5f * fabsf(pressurei - pressurej) / rho_ij) + - fabsf(fac_mu * r_inv * dvdr_Hubble); + const float v_diff = alpha_diff * 0.5f * + (sqrtf(2.f * fabsf(pressurei - pressurej) / rho_ij) + + fabsf(fac_mu * r_inv * dvdr_Hubble)); /* wi_dx + wj_dx / 2 is F_ij */ const float diff_du_term = v_diff * (pi->u - pj->u) * (wi_dr / rhoi + wj_dr / rhoj); @@ -520,9 +532,9 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force( /* Diffusion term */ const float alpha_diff = 0.5f * (pi->diffusion.alpha + pj->diffusion.alpha); - const float v_diff = - alpha_diff * sqrtf(0.5f * fabsf(pressurei - pressurej) / rho_ij) + - fabsf(fac_mu * r_inv * dvdr_Hubble); + const float v_diff = alpha_diff * 0.5f * + (sqrtf(2.f * fabsf(pressurei - pressurej) / rho_ij) + + fabsf(fac_mu * r_inv * dvdr_Hubble)); /* wi_dx + wj_dx / 2 is F_ij */ const float diff_du_term = v_diff * (pi->u - pj->u) * (wi_dr / rhoi + wj_dr / rhoj); diff --git a/src/hydro/AnarchyDU/hydro_io.h b/src/hydro/AnarchyDU/hydro_io.h index db9995c3fe8a5089415dc9152a44a655ec97f0f3..d78d16c35bf4ebb561116e1a504d24b248104206 100644 --- a/src/hydro/AnarchyDU/hydro_io.h +++ b/src/hydro/AnarchyDU/hydro_io.h @@ -161,38 +161,53 @@ INLINE static void hydro_write_particles(const struct part* parts, struct io_props* list, int* num_fields) { - *num_fields = 12; - + *num_fields = 11; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + 
"Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. * hydro_gamma_minus_one, parts, u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + list[7] = io_make_output_field_convert_part( - "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_P); - list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); - list[10] = io_make_output_field_convert_part("Viscosity", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - xparts, convert_viscosity); - list[11] = io_make_output_field_convert_part("Diffusion", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - xparts, convert_diffusion); + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Co-moving entropies per unit mass of the particles"); + + list[8] = io_make_output_field_convert_part( + "Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + xparts, convert_P, "Co-moving pressures of the particles"); + + list[9] = io_make_output_field_convert_part( + "ViscosityParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_viscosity, "Visosity coefficient (alpha_visc) of the particles"); + + list[10] = io_make_output_field_convert_part( + "DiffusionParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_diffusion, "Diffusion coefficient (alpha_diff) of the particles"); } /** diff --git a/src/hydro/AnarchyDU/hydro_part.h b/src/hydro/AnarchyDU/hydro_part.h index c30f778b80d91d576052082c6e849fa9d1a4f38e..4b4cc187a96f5111389cde8c50f535a545e9e7f5 100644 --- a/src/hydro/AnarchyDU/hydro_part.h +++ b/src/hydro/AnarchyDU/hydro_part.h @@ -185,6 +185,9 @@ struct part { /*! Balsara switch */ float balsara; + /*! 
Maximal alpha (viscosity) over neighbours */ + float alpha_visc_max_ngb; + } force; }; diff --git a/src/hydro/AnarchyPU/hydro_io.h b/src/hydro/AnarchyPU/hydro_io.h index e1525cc99db8074a99c8d28a73918adb7b7b5319..499ef5dc2d79f86f6f76c119b1b421260cf25cea 100644 --- a/src/hydro/AnarchyPU/hydro_io.h +++ b/src/hydro/AnarchyPU/hydro_io.h @@ -165,35 +165,55 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 12; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, - parts, pressure_bar); - list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); - list[10] = io_make_output_field_convert_part("Viscosity", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - xparts, convert_viscosity); - list[11] = io_make_output_field_convert_part("Diffusion", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - xparts, convert_diffusion); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. 
* hydro_gamma_minus_one, parts, u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + + list[7] = io_make_output_field( + "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + pressure_bar, "Co-moving smoothed pressures of the particles"); + + list[8] = io_make_output_field_convert_part( + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Co-moving entropies per unit mass of the particles"); + + list[9] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, "Gravitational potentials of the particles"); + + list[10] = io_make_output_field_convert_part( + "ViscosityParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_viscosity, "Visosity coefficient (alpha_visc) of the particles"); + + list[11] = io_make_output_field_convert_part( + "DiffusionParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_diffusion, "Diffusion coefficient (alpha_diff) of the particles"); } /** diff --git a/src/hydro/Default/hydro_io.h b/src/hydro/Default/hydro_io.h index 7b668fac1700738fb83199900dbb171ac913084f..5d923837765c3d4759db37cd67badb40d75763f3 100644 --- a/src/hydro/Default/hydro_io.h +++ b/src/hydro/Default/hydro_io.h @@ -164,35 +164,56 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 12; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. 
* hydro_gamma_minus_one, parts, u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + list[7] = io_make_output_field_convert_part( - "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_P); - list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); - list[10] = io_make_output_field_convert_part("Viscosity", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - xparts, convert_viscosity); - list[11] = io_make_output_field_convert_part("Diffusion", FLOAT, 1, - UNIT_CONV_NO_UNITS, parts, - xparts, convert_diffusion); + "Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + xparts, convert_P, "Co-moving pressures of the particles"); + + list[8] = io_make_output_field_convert_part( + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Co-moving entropies per unit mass of the particles"); + + list[9] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, + "Co-moving gravitational potential at position of the particles"); + + list[10] = io_make_output_field_convert_part( + "ViscosityParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_viscosity, "Visosity coefficient (alpha_visc) of the particles"); + + list[11] = io_make_output_field_convert_part( + "DiffusionParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_diffusion, "Diffusion coefficient (alpha_diff) of the particles"); } /** diff --git a/src/hydro/Gadget2/hydro_io.h b/src/hydro/Gadget2/hydro_io.h index 54154ce970a9fccf0f2f6324d86faaebed392555..9715558be39c853ec5a96262d95cdf4fe92309fa 100644 --- a/src/hydro/Gadget2/hydro_io.h +++ b/src/hydro/Gadget2/hydro_io.h @@ -146,30 +146,48 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 10; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. 
This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + list[4] = io_make_output_field( - "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, parts, entropy); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field_convert_part("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, - parts, xparts, convert_part_u); + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + entropy, "Co-moving entropies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + + list[7] = io_make_output_field_convert_part( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. * hydro_gamma_minus_one, parts, xparts, convert_part_u, + "Co-moving thermal energies per unit mass of the particles"); + list[8] = io_make_output_field_convert_part( - "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_part_P); + "Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + xparts, convert_part_P, "Co-moving pressures of the particles"); - list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + list[9] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, + "Co-moving gravitational potential at position of the particles"); #ifdef DEBUG_INTERACTIONS_SPH diff --git a/src/hydro/Gadget2/hydro_part.h b/src/hydro/Gadget2/hydro_part.h index 7a8844d560561dae80c676a0b5bb72b34416d080..853d2adf17bc069434562fa96ddb881f760f6830 100644 --- a/src/hydro/Gadget2/hydro_part.h +++ b/src/hydro/Gadget2/hydro_part.h @@ -155,6 +155,9 @@ struct part { /*! Black holes information (e.g. 
swallowing ID) */ struct black_holes_part_data black_holes_data; + /* Additional data used by the star formation */ + struct star_formation_part_data sf_data; + /* Time-step length */ timebin_t time_bin; diff --git a/src/hydro/GizmoMFM/hydro_io.h b/src/hydro/GizmoMFM/hydro_io.h index 1f956edf3fdc31990c6aba254603ea69a98238eb..b1d51cff90de52285abcd63c519aac911dcba7c2 100644 --- a/src/hydro/GizmoMFM/hydro_io.h +++ b/src/hydro/GizmoMFM/hydro_io.h @@ -188,33 +188,52 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 11; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - - list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, - conserved.mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field_convert_part("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, - parts, xparts, convert_u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = + io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 1.f, parts, + conserved.mass, "Co-moving masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field_convert_part( + "InternalEnergy", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3.f * hydro_gamma_minus_one, parts, xparts, convert_u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + list[7] = io_make_output_field_convert_part( - "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY, parts, xparts, convert_A); - list[8] = - io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, P); + "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY, 0.f, parts, xparts, convert_A, + "Co-moving entropies of the particles"); + + list[8] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, + 3.f * hydro_gamma, parts, P, + "Co-moving pressures of the particles"); + list[9] = io_make_output_field_convert_part( - "TotEnergy", FLOAT, 1, UNIT_CONV_ENERGY, parts, xparts, convert_Etot); + "TotalEnergies", FLOAT, 1, UNIT_CONV_ENERGY, 3.f * hydro_gamma_minus_one, + parts, xparts, convert_Etot, "Total (co-moving) energy of the particles"); - list[10] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + list[10] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + 
convert_part_potential, "Gravitational potentials of the particles"); } /** diff --git a/src/hydro/GizmoMFV/hydro_io.h b/src/hydro/GizmoMFV/hydro_io.h index 92e4378f071cb71678929716be86588a3405f40e..7288df0c510ea48489353b5be4ef9d3f252d5f59 100644 --- a/src/hydro/GizmoMFV/hydro_io.h +++ b/src/hydro/GizmoMFV/hydro_io.h @@ -184,37 +184,55 @@ INLINE static void hydro_write_particles(const struct part* parts, const struct xpart* xparts, struct io_props* list, int* num_fields) { - *num_fields = 11; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - - list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, - conserved.mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field_convert_part("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, - parts, xparts, convert_u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, - primitives.rho); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = + io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 1.f, parts, + conserved.mass, "Co-moving masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field_convert_part( + "InternalEnergy", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3.f * hydro_gamma_minus_one, parts, xparts, convert_u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, primitives.rho, + "Co-moving mass densities of the particles"); + list[7] = io_make_output_field_convert_part( - "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY, parts, xparts, convert_A); + "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY, 0.f, parts, xparts, convert_A, + "Co-moving entropies of the particles"); + list[8] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, - parts, primitives.P); + 3.f * hydro_gamma, parts, primitives.P, + "Co-moving pressures of the particles"); + list[9] = io_make_output_field_convert_part( - "TotEnergy", FLOAT, 1, UNIT_CONV_ENERGY, parts, xparts, convert_Etot); + "TotalEnergies", FLOAT, 1, UNIT_CONV_ENERGY, 3.f * hydro_gamma_minus_one, + parts, xparts, convert_Etot, "Total (co-moving) energy of the particles"); - list[10] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + list[10] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, "Gravitational potentials of the particles"); } /** diff --git 
a/src/hydro/Minimal/hydro_io.h b/src/hydro/Minimal/hydro_io.h index c6e36e32d6176c6968e70c7ba689b6651a2d1c18..a32e2c1a87c3219640ed94d56725632539121dd8 100644 --- a/src/hydro/Minimal/hydro_io.h +++ b/src/hydro/Minimal/hydro_io.h @@ -157,33 +157,46 @@ INLINE static void hydro_write_particles(const struct part* parts, struct io_props* list, int* num_fields) { - *num_fields = 10; + *num_fields = 9; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[8] = io_make_output_field_convert_part( - "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_P); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. 
* hydro_gamma_minus_one, parts, u, + "Co-moving thermal energies per unit mass of the particles"); - list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + + list[7] = io_make_output_field_convert_part( + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Co-moving entropies per unit mass of the particles"); + + list[8] = io_make_output_field_convert_part( + "Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + xparts, convert_P, "Co-moving pressures of the particles"); } /** diff --git a/src/hydro/Planetary/hydro.h b/src/hydro/Planetary/hydro.h index 7139f811a6e868d9eafdbbf4628b03c156aa459e..ee9aa95d5082e4bf21e8ac1ebf6710530638a974 100644 --- a/src/hydro/Planetary/hydro.h +++ b/src/hydro/Planetary/hydro.h @@ -601,8 +601,8 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force( /* Compute the "grad h" term */ const float rho_inv = 1.f / p->rho; float rho_dh = p->density.rho_dh; - /* Ignore changing-kernel effects when h is h_max */ - if (p->h == hydro_props->h_max) { + /* Ignore changing-kernel effects when h ~= h_max */ + if (p->h > 0.9999f * hydro_props->h_max) { rho_dh = 0.f; } const float grad_h_term = @@ -869,7 +869,9 @@ hydro_set_init_internal_energy(struct part *p, float u_init) { __attribute__((always_inline)) INLINE static void hydro_remove_part( const struct part *p, const struct xpart *xp) { + printf("Removed particle id=%lld \n", p->id); printParticle_single(p, xp); + fflush(stdout); } #endif /* SWIFT_PLANETARY_HYDRO_H */ diff --git a/src/hydro/Planetary/hydro_debug.h b/src/hydro/Planetary/hydro_debug.h index 306f7526404599a051f83dc1b61886ed2aa5b69e..6d0a226f49ab9d1b57f00cba646c7cb38eae180e 100644 --- a/src/hydro/Planetary/hydro_debug.h +++ b/src/hydro/Planetary/hydro_debug.h @@ -42,7 +42,7 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle( "v_full=[%.3g, %.3g, %.3g], a=[%.3g, %.3g, %.3g], \n " "m=%.3g, u=%.3g, du/dt=%.3g, P=%.3g, c_s=%.3g, \n " "v_sig=%.3g, h=%.3g, dh/dt=%.3g, wcount=%.3g, rho=%.3g, \n " - "dh_drho=%.3g, time_bin=%d, wakeup=%d mat_id=%d \n", + "dh_drho=%.3g, time_bin=%d, wakeup=%d, mat_id=%d \n", p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0], xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2], p->mass, p->u, p->u_dt, hydro_get_comoving_pressure(p), diff --git a/src/hydro/Planetary/hydro_io.h b/src/hydro/Planetary/hydro_io.h index 64f229be3087fcb225b2b934b613f2eda0d84eba..6dd84b3e1b00beda160b4b51109b544ac0ad8b86 100644 --- a/src/hydro/Planetary/hydro_io.h +++ b/src/hydro/Planetary/hydro_io.h @@ -71,7 +71,7 @@ INLINE static void hydro_read_particles(struct part* parts, UNIT_CONV_ACCELERATION, parts, a_hydro); list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL, UNIT_CONV_DENSITY, parts, rho); - list[8] = io_make_input_field("MaterialID", INT, 1, COMPULSORY, + list[8] = io_make_input_field("MaterialIDs", INT, 1, COMPULSORY, UNIT_CONV_NO_UNITS, parts, mat_id); } @@ -163,31 +163,38 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 11; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", 
DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Positions of the particles"); list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[8] = io_make_output_field("MaterialID", INT, 1, UNIT_CONV_NO_UNITS, - parts, mat_id); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, "Velocities of the particles"); + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Smoothing lengths (FWHM of the kernel) of the particles"); + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. * hydro_gamma_minus_one, parts, u, + "Thermal energies per unit mass of the particles"); + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, "Densities of the particles"); + list[7] = io_make_output_field_convert_part( + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Entropies per unit mass of the particles"); + list[8] = + io_make_output_field("MaterialIDs", INT, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, mat_id, "Material IDs of the particles"); list[9] = io_make_output_field_convert_part( - "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_P); - list[10] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + "Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + xparts, convert_P, "Pressures of the particles"); + list[10] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, 0.f, parts, xparts, + convert_part_potential, "Gravitational potentials of the particles"); } /** diff --git a/src/hydro/PressureEnergy/hydro_io.h b/src/hydro/PressureEnergy/hydro_io.h index 3e645803ebc3cbdbc361e73d776824e2ea7906fa..e093fe628e5dc8f546fe9be8133692e95b30af2c 100644 --- a/src/hydro/PressureEnergy/hydro_io.h +++ b/src/hydro/PressureEnergy/hydro_io.h @@ -156,29 +156,47 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 10; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 
3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, - parts, pressure_bar); - list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. * hydro_gamma_minus_one, parts, u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + + list[7] = io_make_output_field( + "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + pressure_bar, "Co-moving smoothed pressures of the particles"); + + list[8] = io_make_output_field_convert_part( + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Co-moving entropies per unit mass of the particles"); + + list[9] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, "Gravitational potentials of the particles"); } /** diff --git a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h index fe44864b2c360373a3eff676775de6e9fac96266..d89dc36ad018f89114a8e80eb2abb663e211d762 100644 --- a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h +++ b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h @@ -157,31 +157,51 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 11; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field("InternalEnergy", 
FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, - parts, pressure_bar); - list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1, - UNIT_CONV_ENTROPY_PER_UNIT_MASS, - parts, xparts, convert_S); - list[9] = io_make_output_field("Viscosity", FLOAT, 1, UNIT_CONV_NO_UNITS, - parts, alpha); - list[10] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of the particles. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[4] = io_make_output_field( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. * hydro_gamma_minus_one, parts, u, + "Co-moving thermal energies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + + list[7] = io_make_output_field( + "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + pressure_bar, "Co-moving smoothed pressures of the particles"); + + list[8] = io_make_output_field_convert_part( + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + xparts, convert_S, "Co-moving entropies per unit mass of the particles"); + + list[9] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, "Gravitational potentials of the particles"); + + list[10] = io_make_output_field_convert_part( + "ViscosityParameters", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, parts, xparts, + convert_viscosity, "Viscosity coefficient (alpha_visc) of the particles"); } /** diff --git a/src/hydro/PressureEntropy/hydro_io.h b/src/hydro/PressureEntropy/hydro_io.h index 5c0bb71d3dcfe77619d18115e27fbafbac3facec..d7591fbdea1576cf6fec645c7a9842f2197b60a3 100644 --- a/src/hydro/PressureEntropy/hydro_io.h +++ b/src/hydro/PressureEntropy/hydro_io.h @@ -157,31 +157,51 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 11; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field_convert_part( - "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, parts, xparts, + convert_part_vel, + "Peculiar velocities of 
the stars. This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, parts, + mass, "Masses of the particles"); + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + list[4] = io_make_output_field( - "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, parts, entropy); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = - io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho); - list[7] = io_make_output_field_convert_part("InternalEnergy", FLOAT, 1, - UNIT_CONV_ENERGY_PER_UNIT_MASS, - parts, xparts, convert_u); + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY_PER_UNIT_MASS, 0.f, parts, + entropy, "Co-moving entropies per unit mass of the particles"); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, rho, + "Co-moving mass densities of the particles"); + + list[7] = io_make_output_field_convert_part( + "InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, + 3. * hydro_gamma_minus_one, parts, xparts, convert_u, + "Co-moving thermal energies per unit mass of the particles"); + list[8] = io_make_output_field_convert_part( - "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_P); - list[9] = io_make_output_field("WeightedDensity", FLOAT, 1, UNIT_CONV_DENSITY, - parts, rho_bar); - list[10] = io_make_output_field_convert_part("Potential", FLOAT, 1, - UNIT_CONV_POTENTIAL, parts, - xparts, convert_part_potential); + "Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, 3.f * hydro_gamma, parts, + xparts, convert_P, "Co-moving smoothed pressures of the particles"); + + list[9] = io_make_output_field( + "WeightedDensity", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, parts, rho_bar, + "Co-moving pressure-weighted densities of the particles"); + + list[10] = io_make_output_field_convert_part( + "Potentials", FLOAT, 1, UNIT_CONV_POTENTIAL, -1.f, parts, xparts, + convert_part_potential, "Gravitational potentials of the particles"); } /** diff --git a/src/hydro/Shadowswift/hydro_io.h b/src/hydro/Shadowswift/hydro_io.h index 1f6bb86e62c6a3359d1242328775c6e4067ef8f2..410422331469f980b398139f18ef09d995e0f655 100644 --- a/src/hydro/Shadowswift/hydro_io.h +++ b/src/hydro/Shadowswift/hydro_io.h @@ -138,34 +138,56 @@ INLINE static void hydro_write_particles(const struct part* parts, *num_fields = 13; /* List what we want to write */ - list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3, - UNIT_CONV_LENGTH, parts, xparts, - convert_part_pos); + list[0] = io_make_output_field_convert_part( + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, parts, xparts, + convert_part_pos, "Co-moving positions of the particles"); + list[1] = io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, primitives.v); + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, conserved.mass); - list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - parts, h); - list[4] = io_make_output_field_convert_part("InternalEnergy", FLOAT, 1, + + list[3] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, parts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + 
list[4] = io_make_output_field_convert_part("InternalEnergies", FLOAT, 1, UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, xparts, convert_u); - list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1, - UNIT_CONV_NO_UNITS, parts, id); - list[6] = io_make_output_field("Acceleration", FLOAT, 3, - UNIT_CONV_ACCELERATION, parts, a_hydro); - list[7] = io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, - primitives.rho); - list[8] = io_make_output_field("Volume", FLOAT, 1, UNIT_CONV_VOLUME, parts, - cell.volume); - list[9] = io_make_output_field("GradDensity", FLOAT, 3, UNIT_CONV_DENSITY, - parts, primitives.gradients.rho); + + list[5] = + io_make_output_field("ParticleIDs", ULONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + parts, id, "Unique IDs of the particles"); + + list[6] = io_make_output_field("Accelerations", FLOAT, 3, + UNIT_CONV_ACCELERATION, 1.f, parts, a_hydro, + "Accelerations of the particles (does not " + "work in non-cosmological runs)."); + + list[7] = io_make_output_field("Densities", FLOAT, 1, UNIT_CONV_DENSITY, -3.f, + parts, primitives.rho, + "Co-moving mass densities of the particles"); + + list[8] = io_make_output_field("Volumes", FLOAT, 1, UNIT_CONV_VOLUME, 3.f, + parts, cell.volume, + "Co-moving volumes of the particles"); + + list[9] = io_make_output_field("GradDensities", FLOAT, 3, UNIT_CONV_DENSITY, + 1.f, parts, primitives.gradients.rho, + "Density gradients of the particles"); + list[10] = io_make_output_field_convert_part( - "Entropy", FLOAT, 1, UNIT_CONV_ENTROPY, parts, xparts, convert_A); - list[11] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, - parts, primitives.P); + "Entropies", FLOAT, 1, UNIT_CONV_ENTROPY, 1.f, parts, xparts, convert_A, + "Co-moving entropies of the particles"); + + list[11] = io_make_output_field("Pressures", FLOAT, 1, UNIT_CONV_PRESSURE, + 3.f * hydro_gamma, parts, primitives.P, + "Co-moving pressures of the particles"); + list[12] = io_make_output_field_convert_part( - "TotEnergy", FLOAT, 1, UNIT_CONV_ENERGY, parts, xparts, convert_Etot); + "TotalEnergies", FLOAT, 1, UNIT_CONV_ENERGY, 3.f * hydro_gamma_minus_one, + parts, xparts, convert_Etot, "Total (co-moving) energy of the particles"); } /** diff --git a/src/io_properties.h b/src/io_properties.h index 7ddc8e241f6f114196e25bda2f737253f6fa9338..07d29c3ce4450d77237b34411ceb5a40bcecc136 100644 --- a/src/io_properties.h +++ b/src/io_properties.h @@ -24,6 +24,7 @@ /* Local includes. 
*/ #include "common_io.h" +#include "error.h" #include "inline.h" #include "part.h" @@ -85,6 +86,9 @@ struct io_props { /* Name */ char name[FIELD_BUFFER_SIZE]; + /* Description of the variable to write to the field's meta-data */ + char description[DESCRIPTION_BUFFER_SIZE]; + /* Type of the field */ enum IO_DATA_TYPE type; @@ -97,6 +101,9 @@ struct io_props { /* Units of the quantity */ enum unit_conversion_factor units; + /* Scale-factor exponent to apply for unit conversion to physical */ + float scale_factor_exponent; + /* Pointer to the field of the first particle in the array */ char* field; @@ -205,9 +212,10 @@ INLINE static struct io_props io_make_input_field_( /** * @brief Constructs an #io_props from its parameters */ -#define io_make_output_field(name, type, dim, units, part, field) \ - io_make_output_field_(name, type, dim, units, (char*)(&(part[0]).field), \ - sizeof(part[0])) +#define io_make_output_field(name, type, dim, units, a_exponent, part, field, \ + desc) \ + io_make_output_field_(name, type, dim, units, a_exponent, \ + (char*)(&(part[0]).field), sizeof(part[0]), desc) /** * @brief Construct an #io_props from its parameters @@ -216,23 +224,32 @@ INLINE static struct io_props io_make_input_field_( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param field Pointer to the field of the first particle * @param partSize The size in byte of the particle + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, char* field, size_t partSize) { + enum unit_conversion_factor units, float a_exponent, char* field, + size_t partSize, const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.field = field; r.partSize = partSize; r.conversion = 0; @@ -243,10 +260,11 @@ INLINE static struct io_props io_make_output_field_( /** * @brief Constructs an #io_props (with conversion) from its parameters */ -#define io_make_output_field_convert_part(name, type, dim, units, part, xpart, \ - convert) \ - io_make_output_field_convert_part_##type( \ - name, type, dim, units, sizeof(part[0]), part, xpart, convert) +#define io_make_output_field_convert_part(name, type, dim, units, a_exponent, \ + part, xpart, convert, desc) \ + io_make_output_field_convert_part_##type(name, type, dim, units, a_exponent, \ + sizeof(part[0]), part, xpart, \ + convert, desc) /** * @brief Construct an #io_props from its parameters @@ -255,27 +273,36 @@ INLINE static struct io_props io_make_output_field_( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. 
* @param partSize The size in byte of the particle * @param parts The particle array * @param xparts The xparticle array * @param functionPtr The function used to convert a particle to an int + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_part_INT( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t partSize, + enum unit_conversion_factor units, float a_exponent, size_t partSize, const struct part* parts, const struct xpart* xparts, - conversion_func_part_int functionPtr) { + conversion_func_part_int functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = partSize; r.parts = parts; r.xparts = xparts; @@ -292,27 +319,36 @@ INLINE static struct io_props io_make_output_field_convert_part_INT( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param partSize The size in byte of the particle * @param parts The particle array * @param xparts The xparticle array * @param functionPtr The function used to convert a particle to a float + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_part_FLOAT( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t partSize, + enum unit_conversion_factor units, float a_exponent, size_t partSize, const struct part* parts, const struct xpart* xparts, - conversion_func_part_float functionPtr) { + conversion_func_part_float functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = partSize; r.parts = parts; r.xparts = xparts; @@ -329,27 +365,36 @@ INLINE static struct io_props io_make_output_field_convert_part_FLOAT( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param partSize The size in byte of the particle * @param parts The particle array * @param xparts The xparticle array * @param functionPtr The function used to convert a particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. 
*/ INLINE static struct io_props io_make_output_field_convert_part_DOUBLE( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t partSize, + enum unit_conversion_factor units, float a_exponent, size_t partSize, const struct part* parts, const struct xpart* xparts, - conversion_func_part_double functionPtr) { + conversion_func_part_double functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = partSize; r.parts = parts; r.xparts = xparts; @@ -366,27 +411,36 @@ INLINE static struct io_props io_make_output_field_convert_part_DOUBLE( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param partSize The size in byte of the particle * @param parts The particle array * @param xparts The xparticle array * @param functionPtr The function used to convert a particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_part_LONGLONG( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t partSize, + enum unit_conversion_factor units, float a_exponent, size_t partSize, const struct part* parts, const struct xpart* xparts, - conversion_func_part_long_long functionPtr) { + conversion_func_part_long_long functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = partSize; r.parts = parts; r.xparts = xparts; @@ -399,10 +453,11 @@ INLINE static struct io_props io_make_output_field_convert_part_LONGLONG( /** * @brief Constructs an #io_props (with conversion) from its parameters */ -#define io_make_output_field_convert_gpart(name, type, dim, units, gpart, \ - convert) \ - io_make_output_field_convert_gpart_##type(name, type, dim, units, \ - sizeof(gpart[0]), gpart, convert) +#define io_make_output_field_convert_gpart(name, type, dim, units, a_exponent, \ + gpart, convert, desc) \ + io_make_output_field_convert_gpart_##type(name, type, dim, units, \ + a_exponent, sizeof(gpart[0]), \ + gpart, convert, desc) /** * @brief Construct an #io_props from its parameters @@ -453,17 +508,24 @@ INLINE static struct io_props io_make_output_field_convert_gpart_INT( */ INLINE static struct io_props io_make_output_field_convert_gpart_FLOAT( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t gpartSize, - const struct gpart* gparts, conversion_func_gpart_float functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t gpartSize, + const struct gpart* gparts, conversion_func_gpart_float functionPtr, + 
const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = gpartSize; r.gparts = gparts; r.conversion = 1; @@ -479,25 +541,34 @@ INLINE static struct io_props io_make_output_field_convert_gpart_FLOAT( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param gpartSize The size in byte of the particle * @param gparts The particle array * @param functionPtr The function used to convert a g-particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_gpart_DOUBLE( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t gpartSize, - const struct gpart* gparts, conversion_func_gpart_double functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t gpartSize, + const struct gpart* gparts, conversion_func_gpart_double functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = gpartSize; r.gparts = gparts; r.conversion = 1; @@ -513,25 +584,34 @@ INLINE static struct io_props io_make_output_field_convert_gpart_DOUBLE( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param gpartSize The size in byte of the particle * @param gparts The particle array * @param functionPtr The function used to convert a g-particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. 
*/ INLINE static struct io_props io_make_output_field_convert_gpart_LONGLONG( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t gpartSize, - const struct gpart* gparts, conversion_func_gpart_long_long functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t gpartSize, + const struct gpart* gparts, conversion_func_gpart_long_long functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = gpartSize; r.gparts = gparts; r.conversion = 1; @@ -543,10 +623,11 @@ INLINE static struct io_props io_make_output_field_convert_gpart_LONGLONG( /** * @brief Constructs an #io_props (with conversion) from its parameters */ -#define io_make_output_field_convert_spart(name, type, dim, units, spart, \ - convert) \ - io_make_output_field_convert_spart_##type(name, type, dim, units, \ - sizeof(spart[0]), spart, convert) +#define io_make_output_field_convert_spart(name, type, dim, units, a_exponent, \ + spart, convert, desc) \ + io_make_output_field_convert_spart_##type(name, type, dim, units, \ + a_exponent, sizeof(spart[0]), \ + spart, convert, desc) /** * @brief Construct an #io_props from its parameters @@ -555,6 +636,7 @@ INLINE static struct io_props io_make_output_field_convert_gpart_LONGLONG( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param spartSize The size in byte of the particle * @param sparts The particle array * @param functionPtr The function used to convert a g-particle to a float @@ -597,17 +679,24 @@ INLINE static struct io_props io_make_output_field_convert_spart_INT( */ INLINE static struct io_props io_make_output_field_convert_spart_FLOAT( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t spartSize, - const struct spart* sparts, conversion_func_spart_float functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t spartSize, + const struct spart* sparts, conversion_func_spart_float functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = spartSize; r.sparts = sparts; r.conversion = 1; @@ -623,25 +712,34 @@ INLINE static struct io_props io_make_output_field_convert_spart_FLOAT( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param spartSize The size in byte of the particle * @param sparts The particle array * @param functionPtr The function used to convert a s-particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. 
Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_spart_DOUBLE( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t spartSize, - const struct spart* sparts, conversion_func_spart_double functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t spartSize, + const struct spart* sparts, conversion_func_spart_double functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = spartSize; r.sparts = sparts; r.conversion = 1; @@ -657,25 +755,34 @@ INLINE static struct io_props io_make_output_field_convert_spart_DOUBLE( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param spartSize The size in byte of the particle * @param sparts The particle array * @param functionPtr The function used to convert a s-particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_spart_LONGLONG( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t spartSize, - const struct spart* sparts, conversion_func_spart_long_long functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t spartSize, + const struct spart* sparts, conversion_func_spart_long_long functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = spartSize; r.sparts = sparts; r.conversion = 1; @@ -687,44 +794,11 @@ INLINE static struct io_props io_make_output_field_convert_spart_LONGLONG( /** * @brief Constructs an #io_props (with conversion) from its parameters */ -#define io_make_output_field_convert_bpart(name, type, dim, units, bpart, \ - convert) \ - io_make_output_field_convert_bpart_##type(name, type, dim, units, \ - sizeof(bpart[0]), bpart, convert) - -/** - * @brief Construct an #io_props from its parameters - * - * @param name Name of the field to read - * @param type The type of the data - * @param dimension Dataset dimension (1D, 3D, ...) - * @param units The units of the dataset - * @param bpartSize The size in byte of the particle - * @param bparts The particle array - * @param functionPtr The function used to convert a b-particle to a int - * - * Do not call this function directly. Use the macro defined above. 
- */ -INLINE static struct io_props io_make_output_field_convert_bpart_INT( - const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t bpartSize, - const struct bpart* bparts, conversion_func_bpart_int functionPtr) { - - struct io_props r; - bzero(&r, sizeof(struct io_props)); - - strcpy(r.name, name); - r.type = type; - r.dimension = dimension; - r.importance = UNUSED; - r.units = units; - r.partSize = bpartSize; - r.bparts = bparts; - r.conversion = 1; - r.convert_bpart_i = functionPtr; - - return r; -} +#define io_make_output_field_convert_bpart(name, type, dim, units, a_exponent, \ + bpart, convert, desc) \ + io_make_output_field_convert_bpart_##type(name, type, dim, units, \ + a_exponent, sizeof(bpart[0]), \ + bpart, convert, desc) /** * @brief Construct an #io_props from its parameters @@ -733,25 +807,34 @@ INLINE static struct io_props io_make_output_field_convert_bpart_INT( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param bpartSize The size in byte of the particle * @param bparts The particle array * @param functionPtr The function used to convert a g-particle to a float + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_bpart_FLOAT( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t bpartSize, - const struct bpart* bparts, conversion_func_bpart_float functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t bpartSize, + const struct bpart* bparts, conversion_func_bpart_float functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = bpartSize; r.bparts = bparts; r.conversion = 1; @@ -767,25 +850,34 @@ INLINE static struct io_props io_make_output_field_convert_bpart_FLOAT( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param bpartSize The size in byte of the particle * @param bparts The particle array * @param functionPtr The function used to convert a s-particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. 
*/ INLINE static struct io_props io_make_output_field_convert_bpart_DOUBLE( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t bpartSize, - const struct bpart* bparts, conversion_func_bpart_double functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t bpartSize, + const struct bpart* bparts, conversion_func_bpart_double functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = bpartSize; r.bparts = bparts; r.conversion = 1; @@ -801,25 +893,34 @@ INLINE static struct io_props io_make_output_field_convert_bpart_DOUBLE( * @param type The type of the data * @param dimension Dataset dimension (1D, 3D, ...) * @param units The units of the dataset + * @param a_exponent Exponent of the scale-factor to convert to physical units. * @param bpartSize The size in byte of the particle * @param bparts The particle array * @param functionPtr The function used to convert a s-particle to a double + * @param description Description of the field added to the meta-data. * * Do not call this function directly. Use the macro defined above. */ INLINE static struct io_props io_make_output_field_convert_bpart_LONGLONG( const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, - enum unit_conversion_factor units, size_t bpartSize, - const struct bpart* bparts, conversion_func_bpart_long_long functionPtr) { + enum unit_conversion_factor units, float a_exponent, size_t bpartSize, + const struct bpart* bparts, conversion_func_bpart_long_long functionPtr, + const char description[DESCRIPTION_BUFFER_SIZE]) { struct io_props r; bzero(&r, sizeof(struct io_props)); strcpy(r.name, name); + if (strlen(description) == 0) { + sprintf(r.description, "No description given"); + } else { + strcpy(r.description, description); + } r.type = type; r.dimension = dimension; r.importance = UNUSED; r.units = units; + r.scale_factor_exponent = a_exponent; r.partSize = bpartSize; r.bparts = bparts; r.conversion = 1; diff --git a/src/parallel_io.c b/src/parallel_io.c index 3b43e623784fd60735ac759e89fcf5536094076f..929e0bb43ac15526bffe66a9563e38e5ea7af9a4 100644 --- a/src/parallel_io.c +++ b/src/parallel_io.c @@ -427,18 +427,18 @@ void prepareArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, if (h_data < 0) error("Error while creating dataspace '%s'.", props.name); /* Write unit conversion factors for this data set */ - char buffer[FIELD_BUFFER_SIZE]; - units_cgs_conversion_string(buffer, snapshot_units, props.units); + char buffer[FIELD_BUFFER_SIZE] = {0}; + units_cgs_conversion_string(buffer, snapshot_units, props.units, + props.scale_factor_exponent); float baseUnitsExp[5]; units_get_base_unit_exponents_array(baseUnitsExp, props.units); - const float a_factor_exp = units_a_factor(snapshot_units, props.units); io_write_attribute_f(h_data, "U_M exponent", baseUnitsExp[UNIT_MASS]); io_write_attribute_f(h_data, "U_L exponent", baseUnitsExp[UNIT_LENGTH]); io_write_attribute_f(h_data, "U_t exponent", baseUnitsExp[UNIT_TIME]); io_write_attribute_f(h_data, "U_I exponent", baseUnitsExp[UNIT_CURRENT]); io_write_attribute_f(h_data, "U_T exponent", baseUnitsExp[UNIT_TEMPERATURE]); - 
io_write_attribute_f(h_data, "h-scale exponent", 0); - io_write_attribute_f(h_data, "a-scale exponent", a_factor_exp); + io_write_attribute_f(h_data, "h-scale exponent", 0.f); + io_write_attribute_f(h_data, "a-scale exponent", props.scale_factor_exponent); io_write_attribute_s(h_data, "Expression for physical CGS units", buffer); /* Write the actual number this conversion factor corresponds to */ @@ -450,8 +450,16 @@ void prepareArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, factor); io_write_attribute_d( h_data, - "Conversion factor to phyical CGS (including cosmological corrections)", - factor * pow(e->cosmology->a, a_factor_exp)); + "Conversion factor to physical CGS (including cosmological corrections)", + factor * pow(e->cosmology->a, props.scale_factor_exponent)); + +#ifdef SWIFT_DEBUG_CHECKS + if (strlen(props.description) == 0) + error("Invalid (empty) description of the field '%s'", props.name); +#endif + + /* Write the full description */ + io_write_attribute_s(h_data, "Description", props.description); /* Add a line to the XMF */ if (xmfFile != NULL) @@ -1009,6 +1017,7 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6], const int with_cosmology = e->policy & engine_policy_cosmology; const int with_cooling = e->policy & engine_policy_cooling; const int with_temperature = e->policy & engine_policy_temperature; + const int with_fof = e->policy & engine_policy_fof; #ifdef HAVE_VELOCIRAPTOR const int with_stf = (e->policy & engine_policy_structure_finding) && (e->s->gpart_group_data != NULL); @@ -1207,7 +1216,9 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6], with_cosmology); num_fields += star_formation_write_particles(parts, xparts, list + num_fields); - num_fields += fof_write_parts(parts, xparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts, xparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts(parts, xparts, list + num_fields); @@ -1216,7 +1227,9 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6], case swift_type_dark_matter: darkmatter_write_particles(gparts, list, &num_fields); - num_fields += fof_write_gparts(gparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_gparts(gparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts(e->s->gpart_group_data, list + num_fields); @@ -1224,20 +1237,24 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6], break; case swift_type_stars: - stars_write_particles(sparts, list, &num_fields); + stars_write_particles(sparts, list, &num_fields, with_cosmology); num_fields += chemistry_write_sparticles(sparts, list + num_fields); num_fields += tracers_write_sparticles(sparts, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_sparts(sparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_sparts(sparts, list + num_fields); } break; case swift_type_black_hole: - black_holes_write_particles(bparts, list, &num_fields); + black_holes_write_particles(bparts, list, &num_fields, with_cosmology); num_fields += chemistry_write_bparticles(bparts, list + num_fields); - num_fields += fof_write_bparts(bparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_bparts(bparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts, list + num_fields); } @@ 
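Each dataset written by the routines above now carries an explicit scale-factor exponent and a conversion factor that already folds in the cosmological correction. Below is a minimal, hedged sketch (not part of the patch) of how a post-processing tool could read these attributes back with the plain HDF5 C API; the snapshot name ``snap_0000.hdf5`` and the dataset path ``/PartType0/Masses`` are illustrative placeholders.

.. code-block:: c

   /* Sketch only: read the per-dataset unit meta-data written above.
    * File name and dataset path are placeholders, not taken from the patch. */
   #include <hdf5.h>
   #include <stdio.h>

   int main(void) {

     const hid_t h_file = H5Fopen("snap_0000.hdf5", H5F_ACC_RDONLY, H5P_DEFAULT);
     const hid_t h_data = H5Dopen(h_file, "/PartType0/Masses", H5P_DEFAULT);

     /* Exponent of the scale-factor attached to this field */
     float a_exp = 0.f;
     hid_t h_attr = H5Aopen(h_data, "a-scale exponent", H5P_DEFAULT);
     H5Aread(h_attr, H5T_NATIVE_FLOAT, &a_exp);
     H5Aclose(h_attr);

     /* Conversion factor evaluated at the snapshot's scale-factor */
     double to_cgs = 0.;
     h_attr = H5Aopen(
         h_data,
         "Conversion factor to physical CGS (including cosmological corrections)",
         H5P_DEFAULT);
     H5Aread(h_attr, H5T_NATIVE_DOUBLE, &to_cgs);
     H5Aclose(h_attr);

     printf("a-scale exponent: %f, multiply by %e for physical CGS\n", a_exp,
            to_cgs);

     H5Dclose(h_data);
     H5Fclose(h_file);
     return 0;
   }

The new free-text ``Description`` attribute sits next to these on the same dataset and can be inspected with any generic HDF5 browser.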
-1311,6 +1328,7 @@ void write_output_parallel(struct engine* e, const char* baseName, const int with_cosmology = e->policy & engine_policy_cosmology; const int with_cooling = e->policy & engine_policy_cooling; const int with_temperature = e->policy & engine_policy_temperature; + const int with_fof = e->policy & engine_policy_fof; #ifdef HAVE_VELOCIRAPTOR const int with_stf = (e->policy & engine_policy_structure_finding) && (e->s->gpart_group_data != NULL); @@ -1540,7 +1558,9 @@ void write_output_parallel(struct engine* e, const char* baseName, num_fields += cooling_write_particles( parts, xparts, list + num_fields, e->cooling_func); } - num_fields += fof_write_parts(parts, xparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts, xparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts(parts, xparts, list + num_fields); @@ -1579,8 +1599,10 @@ void write_output_parallel(struct engine* e, const char* baseName, cooling_write_particles(parts_written, xparts_written, list + num_fields, e->cooling_func); } - num_fields += - fof_write_parts(parts_written, xparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts_written, xparts_written, + list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts( parts_written, xparts_written, list + num_fields); @@ -1598,7 +1620,9 @@ void write_output_parallel(struct engine* e, const char* baseName, /* This is a DM-only run without inhibited particles */ Nparticles = Ntot; darkmatter_write_particles(gparts, list, &num_fields); - num_fields += fof_write_gparts(gparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_gparts(gparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts(e->s->gpart_group_data, list + num_fields); @@ -1631,7 +1655,9 @@ void write_output_parallel(struct engine* e, const char* baseName, /* Select the fields to write */ darkmatter_write_particles(gparts_written, list, &num_fields); - num_fields += fof_write_gparts(gparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_gparts(gparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts(gpart_group_data_written, list + num_fields); @@ -1644,11 +1670,13 @@ void write_output_parallel(struct engine* e, const char* baseName, /* No inhibted particles: easy case */ Nparticles = Nstars; - stars_write_particles(sparts, list, &num_fields); + stars_write_particles(sparts, list, &num_fields, with_cosmology); num_fields += chemistry_write_sparticles(sparts, list + num_fields); num_fields += tracers_write_sparticles(sparts, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_sparts(sparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_sparts(sparts, list + num_fields); } @@ -1668,11 +1696,14 @@ void write_output_parallel(struct engine* e, const char* baseName, Nstars_written); /* Select the fields to write */ - stars_write_particles(sparts_written, list, &num_fields); + stars_write_particles(sparts_written, list, &num_fields, + with_cosmology); num_fields += chemistry_write_sparticles(sparts, list + num_fields); num_fields += tracers_write_sparticles(sparts, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_sparts(sparts_written, list + num_fields); + } if (with_stf) { num_fields 
+= velociraptor_write_sparts(sparts_written, list + num_fields); @@ -1685,9 +1716,12 @@ void write_output_parallel(struct engine* e, const char* baseName, /* No inhibted particles: easy case */ Nparticles = Nblackholes; - black_holes_write_particles(bparts, list, &num_fields); + black_holes_write_particles(bparts, list, &num_fields, + with_cosmology); num_fields += chemistry_write_bparticles(bparts, list + num_fields); - num_fields += fof_write_bparts(bparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_bparts(bparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts, list + num_fields); } @@ -1707,9 +1741,12 @@ void write_output_parallel(struct engine* e, const char* baseName, Nblackholes_written); /* Select the fields to write */ - black_holes_write_particles(bparts_written, list, &num_fields); + black_holes_write_particles(bparts_written, list, &num_fields, + with_cosmology); num_fields += chemistry_write_bparticles(bparts, list + num_fields); - num_fields += fof_write_bparts(bparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_bparts(bparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts_written, list + num_fields); diff --git a/src/runner.c b/src/runner.c index 26282a0fcd9ec284a918c66dc11fca2731ed635d..05dc33e97e74901bd87d1fe4da4259b6d8aa29e0 100644 --- a/src/runner.c +++ b/src/runner.c @@ -2276,7 +2276,7 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) { /* Re-initialise everything */ hydro_init_part(p, hs); chemistry_init_part(p, chemistry); - star_formation_init_part(p, star_formation); + star_formation_init_part(p, xp, star_formation); tracers_after_init(p, xp, e->internal_units, e->physical_constants, with_cosmology, e->cosmology, e->hydro_properties, e->cooling_func, e->time); @@ -3876,10 +3876,10 @@ void runner_do_gas_swallow(struct runner *r, struct cell *c, int timer) { * by another thread before we do the deed. */ if (!part_is_inhibited(p, e)) { - /* Finally, remove the gas particle from the system */ - struct gpart *gp = p->gpart; + /* Finally, remove the gas particle from the system + * Recall that the gpart associated with it is also removed + * at the same time. */ cell_remove_part(e, c, p, xp); - cell_remove_gpart(e, c, gp); } if (lock_unlock(&e->s->lock) != 0) @@ -3920,9 +3920,7 @@ void runner_do_gas_swallow(struct runner *r, struct cell *c, int timer) { if (!part_is_inhibited(p, e)) { /* Finally, remove the gas particle from the system */ - struct gpart *gp = p->gpart; cell_remove_part(e, c, p, xp); - cell_remove_gpart(e, c, gp); } if (lock_unlock(&e->s->lock) != 0) @@ -4096,10 +4094,10 @@ void runner_do_bh_swallow(struct runner *r, struct cell *c, int timer) { message("BH %lld removing BH particle %lld", bp->id, cell_bp->id); - /* Finally, remove the gas particle from the system */ - struct gpart *cell_gp = cell_bp->gpart; + /* Finally, remove the gas particle from the system + * Recall that the gpart associated with it is also removed + * at the same time. 
*/ cell_remove_bpart(e, c, cell_bp); - cell_remove_gpart(e, c, cell_gp); } /* In any case, prevent the particle from being re-swallowed */ @@ -4130,9 +4128,7 @@ void runner_do_bh_swallow(struct runner *r, struct cell *c, int timer) { bp->id, cell_bp->id); /* Finally, remove the gas particle from the system */ - struct gpart *cell_gp = cell_bp->gpart; cell_remove_bpart(e, c, cell_bp); - cell_remove_gpart(e, c, cell_gp); found = 1; break; diff --git a/src/runner_doiact.h b/src/runner_doiact.h index 6caa287cf726f85778ef5abdc184acaf759e8b0e..1a39c8d49f2c4234c04982e255705f06ec1c5d38 100644 --- a/src/runner_doiact.h +++ b/src/runner_doiact.h @@ -884,7 +884,6 @@ void DOSELF_SUBSET(struct runner *r, struct cell *restrict ci, const int count_i = ci->hydro.count; struct part *restrict parts_j = ci->hydro.parts; - /* Loop over the parts in ci. */ for (int pid = 0; pid < count; pid++) { diff --git a/src/serial_io.c b/src/serial_io.c index 851e6ede0e8b1dc0d40e8adc031760e299f155d8..0d367b1cf423d0471a0f6235fa3a1c6877e9f279 100644 --- a/src/serial_io.c +++ b/src/serial_io.c @@ -312,18 +312,18 @@ void prepareArray(const struct engine* e, hid_t grp, char* fileName, props.dimension, props.type); /* Write unit conversion factors for this data set */ - char buffer[FIELD_BUFFER_SIZE]; - units_cgs_conversion_string(buffer, snapshot_units, props.units); + char buffer[FIELD_BUFFER_SIZE] = {0}; + units_cgs_conversion_string(buffer, snapshot_units, props.units, + props.scale_factor_exponent); float baseUnitsExp[5]; units_get_base_unit_exponents_array(baseUnitsExp, props.units); - const float a_factor_exp = units_a_factor(snapshot_units, props.units); io_write_attribute_f(h_data, "U_M exponent", baseUnitsExp[UNIT_MASS]); io_write_attribute_f(h_data, "U_L exponent", baseUnitsExp[UNIT_LENGTH]); io_write_attribute_f(h_data, "U_t exponent", baseUnitsExp[UNIT_TIME]); io_write_attribute_f(h_data, "U_I exponent", baseUnitsExp[UNIT_CURRENT]); io_write_attribute_f(h_data, "U_T exponent", baseUnitsExp[UNIT_TEMPERATURE]); - io_write_attribute_f(h_data, "h-scale exponent", 0); - io_write_attribute_f(h_data, "a-scale exponent", a_factor_exp); + io_write_attribute_f(h_data, "h-scale exponent", 0.f); + io_write_attribute_f(h_data, "a-scale exponent", props.scale_factor_exponent); io_write_attribute_s(h_data, "Expression for physical CGS units", buffer); /* Write the actual number this conversion factor corresponds to */ @@ -335,8 +335,16 @@ void prepareArray(const struct engine* e, hid_t grp, char* fileName, factor); io_write_attribute_d( h_data, - "Conversion factor to phyical CGS (including cosmological corrections)", - factor * pow(e->cosmology->a, a_factor_exp)); + "Conversion factor to physical CGS (including cosmological corrections)", + factor * pow(e->cosmology->a, props.scale_factor_exponent)); + +#ifdef SWIFT_DEBUG_CHECKS + if (strlen(props.description) == 0) + error("Invalid (empty) description of the field '%s'", props.name); +#endif + + /* Write the full description */ + io_write_attribute_s(h_data, "Description", props.description); /* Close everything */ H5Pclose(h_prop); @@ -838,6 +846,7 @@ void write_output_serial(struct engine* e, const char* baseName, const int with_cosmology = e->policy & engine_policy_cosmology; const int with_cooling = e->policy & engine_policy_cooling; const int with_temperature = e->policy & engine_policy_temperature; + const int with_fof = e->policy & engine_policy_fof; #ifdef HAVE_VELOCIRAPTOR const int with_stf = (e->policy & engine_policy_structure_finding) && (e->s->gpart_group_data != 
NULL); @@ -1173,7 +1182,9 @@ void write_output_serial(struct engine* e, const char* baseName, num_fields += cooling_write_particles( parts, xparts, list + num_fields, e->cooling_func); } - num_fields += fof_write_parts(parts, xparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts, xparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts(parts, xparts, list + num_fields); @@ -1212,8 +1223,10 @@ void write_output_serial(struct engine* e, const char* baseName, cooling_write_particles(parts_written, xparts_written, list + num_fields, e->cooling_func); } - num_fields += fof_write_parts(parts_written, xparts_written, - list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts_written, xparts_written, + list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts( parts_written, xparts_written, list + num_fields); @@ -1232,7 +1245,10 @@ void write_output_serial(struct engine* e, const char* baseName, /* This is a DM-only run without inhibited particles */ Nparticles = Ntot; darkmatter_write_particles(gparts, list, &num_fields); - num_fields += fof_write_gparts(gparts_written, list + num_fields); + if (with_fof) { + num_fields += + fof_write_gparts(gparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts(e->s->gpart_group_data, list + num_fields); @@ -1266,7 +1282,10 @@ void write_output_serial(struct engine* e, const char* baseName, /* Select the fields to write */ darkmatter_write_particles(gparts_written, list, &num_fields); - num_fields += fof_write_gparts(gparts_written, list + num_fields); + if (with_fof) { + num_fields += + fof_write_gparts(gparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts( gpart_group_data_written, list + num_fields); @@ -1279,12 +1298,14 @@ void write_output_serial(struct engine* e, const char* baseName, /* No inhibted particles: easy case */ Nparticles = Nstars; - stars_write_particles(sparts, list, &num_fields); + stars_write_particles(sparts, list, &num_fields, with_cosmology); num_fields += chemistry_write_sparticles(sparts, list + num_fields); num_fields += tracers_write_sparticles(sparts, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_sparts(sparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_sparts(sparts, list + num_fields); @@ -1305,12 +1326,16 @@ void write_output_serial(struct engine* e, const char* baseName, Nstars_written); /* Select the fields to write */ - stars_write_particles(sparts_written, list, &num_fields); + stars_write_particles(sparts_written, list, &num_fields, + with_cosmology); num_fields += chemistry_write_sparticles(sparts_written, list + num_fields); num_fields += tracers_write_sparticles( sparts_written, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts_written, list + num_fields); + if (with_fof) { + num_fields += + fof_write_sparts(sparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_sparts(sparts_written, list + num_fields); @@ -1323,10 +1348,13 @@ void write_output_serial(struct engine* e, const char* baseName, /* No inhibted particles: easy case */ Nparticles = Nblackholes; - black_holes_write_particles(bparts, list, &num_fields); + black_holes_write_particles(bparts, list, &num_fields, + with_cosmology); num_fields += chemistry_write_bparticles(bparts, list + num_fields); - 
num_fields += fof_write_bparts(bparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_bparts(bparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts, list + num_fields); @@ -1347,10 +1375,14 @@ void write_output_serial(struct engine* e, const char* baseName, Nblackholes_written); /* Select the fields to write */ - black_holes_write_particles(bparts_written, list, &num_fields); + black_holes_write_particles(bparts_written, list, &num_fields, + with_cosmology); num_fields += chemistry_write_bparticles(bparts, list + num_fields); - num_fields += fof_write_bparts(bparts_written, list + num_fields); + if (with_fof) { + num_fields += + fof_write_bparts(bparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts_written, list + num_fields); diff --git a/src/single_io.c b/src/single_io.c index 20d7f98e1bbbe6761b15378508f50104dd14f92d..76a5630120df5e946093e9f046503f73bce706b6 100644 --- a/src/single_io.c +++ b/src/single_io.c @@ -324,18 +324,18 @@ void writeArray(const struct engine* e, hid_t grp, char* fileName, props.dimension, props.type); /* Write unit conversion factors for this data set */ - char buffer[FIELD_BUFFER_SIZE]; - units_cgs_conversion_string(buffer, snapshot_units, props.units); + char buffer[FIELD_BUFFER_SIZE] = {0}; + units_cgs_conversion_string(buffer, snapshot_units, props.units, + props.scale_factor_exponent); float baseUnitsExp[5]; units_get_base_unit_exponents_array(baseUnitsExp, props.units); - const float a_factor_exp = units_a_factor(snapshot_units, props.units); io_write_attribute_f(h_data, "U_M exponent", baseUnitsExp[UNIT_MASS]); io_write_attribute_f(h_data, "U_L exponent", baseUnitsExp[UNIT_LENGTH]); io_write_attribute_f(h_data, "U_t exponent", baseUnitsExp[UNIT_TIME]); io_write_attribute_f(h_data, "U_I exponent", baseUnitsExp[UNIT_CURRENT]); io_write_attribute_f(h_data, "U_T exponent", baseUnitsExp[UNIT_TEMPERATURE]); - io_write_attribute_f(h_data, "h-scale exponent", 0); - io_write_attribute_f(h_data, "a-scale exponent", a_factor_exp); + io_write_attribute_f(h_data, "h-scale exponent", 0.f); + io_write_attribute_f(h_data, "a-scale exponent", props.scale_factor_exponent); io_write_attribute_s(h_data, "Expression for physical CGS units", buffer); /* Write the actual number this conversion factor corresponds to */ @@ -347,8 +347,16 @@ void writeArray(const struct engine* e, hid_t grp, char* fileName, factor); io_write_attribute_d( h_data, - "Conversion factor to phyical CGS (including cosmological corrections)", - factor * pow(e->cosmology->a, a_factor_exp)); + "Conversion factor to physical CGS (including cosmological corrections)", + factor * pow(e->cosmology->a, props.scale_factor_exponent)); + +#ifdef SWIFT_DEBUG_CHECKS + if (strlen(props.description) == 0) + error("Invalid (empty) description of the field '%s'", props.name); +#endif + + /* Write the full description */ + io_write_attribute_s(h_data, "Description", props.description); /* Free and close everything */ swift_free("writebuff", temp); @@ -700,6 +708,7 @@ void write_output_single(struct engine* e, const char* baseName, const int with_cosmology = e->policy & engine_policy_cosmology; const int with_cooling = e->policy & engine_policy_cooling; const int with_temperature = e->policy & engine_policy_temperature; + const int with_fof = e->policy & engine_policy_fof; #ifdef HAVE_VELOCIRAPTOR const int with_stf = (e->policy & engine_policy_structure_finding) && (e->s->gpart_group_data != NULL); @@ -972,7 +981,9 @@ 
void write_output_single(struct engine* e, const char* baseName, num_fields += cooling_write_particles( parts, xparts, list + num_fields, e->cooling_func); } - num_fields += fof_write_parts(parts, xparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts, xparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts(parts, xparts, list + num_fields); @@ -1011,8 +1022,10 @@ void write_output_single(struct engine* e, const char* baseName, cooling_write_particles(parts_written, xparts_written, list + num_fields, e->cooling_func); } - num_fields += - fof_write_parts(parts_written, xparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_parts(parts_written, xparts_written, + list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_parts( parts_written, xparts_written, list + num_fields); @@ -1030,7 +1043,9 @@ void write_output_single(struct engine* e, const char* baseName, /* This is a DM-only run without inhibited particles */ N = Ntot; darkmatter_write_particles(gparts, list, &num_fields); - num_fields += fof_write_gparts(gparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_gparts(gparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts(e->s->gpart_group_data, list + num_fields); @@ -1063,7 +1078,9 @@ void write_output_single(struct engine* e, const char* baseName, /* Select the fields to write */ darkmatter_write_particles(gparts_written, list, &num_fields); - num_fields += fof_write_gparts(gparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_gparts(gparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_gparts(gpart_group_data_written, list + num_fields); @@ -1076,11 +1093,13 @@ void write_output_single(struct engine* e, const char* baseName, /* No inhibted particles: easy case */ N = Nstars; - stars_write_particles(sparts, list, &num_fields); + stars_write_particles(sparts, list, &num_fields, with_cosmology); num_fields += chemistry_write_sparticles(sparts, list + num_fields); num_fields += tracers_write_sparticles(sparts, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts, list + num_fields); + if (with_fof) { + num_fields += fof_write_sparts(sparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_sparts(sparts, list + num_fields); } @@ -1100,12 +1119,15 @@ void write_output_single(struct engine* e, const char* baseName, Nstars_written); /* Select the fields to write */ - stars_write_particles(sparts_written, list, &num_fields); + stars_write_particles(sparts_written, list, &num_fields, + with_cosmology); num_fields += chemistry_write_sparticles(sparts_written, list + num_fields); num_fields += tracers_write_sparticles( sparts_written, list + num_fields, with_cosmology); - num_fields += fof_write_sparts(sparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_sparts(sparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_sparts(sparts_written, list + num_fields); @@ -1118,9 +1140,12 @@ void write_output_single(struct engine* e, const char* baseName, /* No inhibted particles: easy case */ N = Nblackholes; - black_holes_write_particles(bparts, list, &num_fields); + black_holes_write_particles(bparts, list, &num_fields, + with_cosmology); num_fields += chemistry_write_bparticles(bparts, list + num_fields); - num_fields += fof_write_bparts(bparts, list + num_fields); + if (with_fof) { + 
num_fields += fof_write_bparts(bparts, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts, list + num_fields); } @@ -1140,10 +1165,13 @@ void write_output_single(struct engine* e, const char* baseName, Nblackholes_written); /* Select the fields to write */ - black_holes_write_particles(bparts_written, list, &num_fields); + black_holes_write_particles(bparts_written, list, &num_fields, + with_cosmology); num_fields += chemistry_write_bparticles(bparts_written, list + num_fields); - num_fields += fof_write_bparts(bparts_written, list + num_fields); + if (with_fof) { + num_fields += fof_write_bparts(bparts_written, list + num_fields); + } if (with_stf) { num_fields += velociraptor_write_bparts(bparts_written, list + num_fields); diff --git a/src/space.c b/src/space.c index 5dd39d2061e93437eadc7e0a240d7f52e38b24f8..250af5efafa506408c9fd91c1adffb7cd96a1a21 100644 --- a/src/space.c +++ b/src/space.c @@ -3947,7 +3947,7 @@ void space_synchronize_particle_positions_mapper(void *map_data, int nr_gparts, else if (gp->type == swift_type_gas) { - /* Get it's gassy friend */ + /* Get its gassy friend */ struct part *p = &s->parts[-gp->id_or_neg_offset]; struct xpart *xp = &s->xparts[-gp->id_or_neg_offset]; @@ -3965,7 +3965,7 @@ void space_synchronize_particle_positions_mapper(void *map_data, int nr_gparts, else if (gp->type == swift_type_stars) { - /* Get it's stellar friend */ + /* Get its stellar friend */ struct spart *sp = &s->sparts[-gp->id_or_neg_offset]; /* Synchronize positions */ @@ -3978,7 +3978,7 @@ void space_synchronize_particle_positions_mapper(void *map_data, int nr_gparts, else if (gp->type == swift_type_black_hole) { - /* Get it's black hole friend */ + /* Get its black hole friend */ struct bpart *bp = &s->bparts[-gp->id_or_neg_offset]; /* Synchronize positions */ @@ -4365,7 +4365,7 @@ void space_init_parts_mapper(void *restrict map_data, int count, for (int k = 0; k < count; k++) { hydro_init_part(&parts[k], hs); chemistry_init_part(&parts[k], e->chemistry); - star_formation_init_part(&parts[k], e->star_formation); + star_formation_init_part(&parts[k], &xparts[k], e->star_formation); tracers_after_init(&parts[k], &xparts[k], e->internal_units, e->physical_constants, with_cosmology, e->cosmology, e->hydro_properties, e->cooling_func, e->time); diff --git a/src/star_formation.c b/src/star_formation.c index 698a64cc636dd79f00feac3f6cc88bf519fe09c1..60cff1e2e68feaf7e71705b5079294ec478fad42 100644 --- a/src/star_formation.c +++ b/src/star_formation.c @@ -24,6 +24,7 @@ #include "part.h" #include "restart.h" #include "star_formation.h" +#include "star_formation_io.h" #include "units.h" /** diff --git a/src/star_formation/EAGLE/star_formation.h b/src/star_formation/EAGLE/star_formation.h index 1fc6531656c94c566e1ffac502b5023e09094872..55f1c8d4271b16f63ce00e6d6cac9eaee5a778a6 100644 --- a/src/star_formation/EAGLE/star_formation.h +++ b/src/star_formation/EAGLE/star_formation.h @@ -690,6 +690,7 @@ star_formation_first_init_part(const struct phys_const* restrict phys_const, * @param data The global star_formation information. 
*/ __attribute__((always_inline)) INLINE static void star_formation_init_part( - struct part* restrict p, const struct star_formation* data) {} + struct part* restrict p, struct xpart* restrict xp, + const struct star_formation* data) {} #endif /* SWIFT_EAGLE_STAR_FORMATION_H */ diff --git a/src/star_formation/EAGLE/star_formation_io.h b/src/star_formation/EAGLE/star_formation_io.h index cee96326e458d0581af6e62e452ac433dcf407bd..f8bf57145955d41f7ae0ecd4141651a24f1c2727 100644 --- a/src/star_formation/EAGLE/star_formation_io.h +++ b/src/star_formation/EAGLE/star_formation_io.h @@ -38,8 +38,11 @@ __attribute__((always_inline)) INLINE static int star_formation_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list) { - list[0] = - io_make_output_field("SFR", FLOAT, 1, UNIT_CONV_SFR, xparts, sf_data.SFR); + list[0] = io_make_output_field( + "StarFormationRates", FLOAT, 1, UNIT_CONV_SFR, 0.f, xparts, sf_data.SFR, + "If positive, star formation rates of the particles. If negative, stores " + "the last time/scale-factor at which the gas particle was star-forming. " + "If zero, the particle was never star-forming."); return 1; } diff --git a/src/star_formation/EAGLE/star_formation_logger.h b/src/star_formation/EAGLE/star_formation_logger.h index d634c876e52d45588ed0b93c0afc09731317c037..a843f63ce518aa9be6b1d389d42cf71c8608b903 100644 --- a/src/star_formation/EAGLE/star_formation_logger.h +++ b/src/star_formation/EAGLE/star_formation_logger.h @@ -87,6 +87,28 @@ INLINE static void star_formation_logger_add( sf_update->SFR_inactive += sf_add->SFR_inactive; } +/** + * @brief add a star formation history struct to the engine star formation + * history accumulator struct + * + * @param sf_add the star formation accumulator struct which we want to add to + * the star formation history + * @param sf_update the star formation structure which we want to update + */ +INLINE static void star_formation_logger_add_to_accumulator( + struct star_formation_history_accumulator *sf_update, + const struct star_formation_history *sf_add) { + + /* Update the SFH structure */ + sf_update->new_stellar_mass += sf_add->new_stellar_mass; + + sf_update->SFR_active += sf_add->SFR_active; + + sf_update->SFRdt_active += sf_add->SFRdt_active; + + sf_update->SFR_inactive += sf_add->SFR_inactive; +} + /** * @brief Initialize the star formation history structure in the #engine * @@ -105,6 +127,24 @@ INLINE static void star_formation_logger_init( sfh->SFR_inactive = 0.f; } +/** + * @brief Initialize the star formation history structure in the #engine + * + * @param sfh The pointer to the star formation history structure + */ +INLINE static void star_formation_logger_accumulator_init( + struct star_formation_history_accumulator *sfh) { + + /* Initialize the collecting SFH structure to zero */ + sfh->new_stellar_mass = 0.f; + + sfh->SFR_active = 0.f; + + sfh->SFRdt_active = 0.f; + + sfh->SFR_inactive = 0.f; +} + /** * @brief Write the final SFH to a file * @@ -117,7 +157,7 @@ INLINE static void star_formation_logger_init( */ INLINE static void star_formation_logger_write_to_log_file( FILE *fp, const double time, const double a, const double z, - const struct star_formation_history sf, const int step) { + const struct star_formation_history_accumulator sf, const int step) { /* Calculate the total SFR */ const float totalSFR = sf.SFR_active + sf.SFR_inactive; diff --git a/src/star_formation/EAGLE/star_formation_logger_struct.h b/src/star_formation/EAGLE/star_formation_logger_struct.h index 
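The new description string for the EAGLE ``StarFormationRates`` field above packs three states into one number. As a reading aid only, here is a small hypothetical helper (not part of the patch; the function name and the assumption that the magnitude of a negative value is the stored time or scale-factor are mine) that decodes the convention:

.. code-block:: c

   #include <stdio.h>

   /* Hypothetical post-processing helper: decode one value of the
    * "StarFormationRates" gas field according to the description above. */
   void print_sfr_state(const float sfr, const int with_cosmology) {

     if (sfr > 0.f) {
       /* Positive: the particle is star-forming and this is its SFR */
       printf("star-forming, SFR = %e (snapshot units)\n", sfr);
     } else if (sfr < 0.f) {
       /* Negative: the magnitude records when it last formed stars */
       if (with_cosmology)
         printf("last star-forming at scale-factor a = %e\n", -sfr);
       else
         printf("last star-forming at time t = %e (snapshot units)\n", -sfr);
     } else {
       /* Exactly zero: the particle was never star-forming */
       printf("never star-forming\n");
     }
   }

   int main(void) {
     print_sfr_state(1.2e-4f, /*with_cosmology=*/1); /* arbitrary example value */
     return 0;
   }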
2a23659c4d931735d1b82a6143b3d9f871f7137a..c03a00c97ead46f552350a43574c5bbe7ac6df1b 100644 --- a/src/star_formation/EAGLE/star_formation_logger_struct.h +++ b/src/star_formation/EAGLE/star_formation_logger_struct.h @@ -34,4 +34,21 @@ struct star_formation_history { float SFRdt_active; }; +/* Starformation history struct for the engine. + Allows to integrate in time some values. + Nothing to do in EAGLE => copy of star_formation_history */ +struct star_formation_history_accumulator { + /*! Total new stellar mass */ + float new_stellar_mass; + + /*! SFR of all particles */ + float SFR_inactive; + + /*! SFR of active particles */ + float SFR_active; + + /*! SFR*dt of active particles */ + float SFRdt_active; +}; + #endif /* SWIFT_EAGLE_STAR_FORMATION_LOGGER_STRUCT_H */ diff --git a/src/star_formation/EAGLE/star_formation_struct.h b/src/star_formation/EAGLE/star_formation_struct.h index 41247e160a3eddbc9184c59b67cfa2a1d7259a05..8caac49d4b57652c5db9ae93e3789dc690e6d23f 100644 --- a/src/star_formation/EAGLE/star_formation_struct.h +++ b/src/star_formation/EAGLE/star_formation_struct.h @@ -29,4 +29,6 @@ struct star_formation_xpart_data { float SFR; }; +struct star_formation_part_data {}; + #endif /* SWIFT_EAGLE_STAR_FORMATION_STRUCT_H */ diff --git a/src/star_formation/GEAR/star_formation.h b/src/star_formation/GEAR/star_formation.h index c479feb5c66328f9fab8bf62593ca66b6658b79e..ac423a51865609460e870f65b1eeeb266182e2ef 100644 --- a/src/star_formation/GEAR/star_formation.h +++ b/src/star_formation/GEAR/star_formation.h @@ -1,6 +1,7 @@ /******************************************************************************* * This file is part of SWIFT. - * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl) + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * 2019 Fabien Jeanquartier (fabien.jeanquartier@epfl.ch) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -20,20 +21,24 @@ #define SWIFT_GEAR_STAR_FORMATION_H /* Local includes */ +#include "cooling.h" #include "cosmology.h" +#include "engine.h" #include "entropy_floor.h" #include "error.h" #include "hydro_properties.h" #include "parser.h" #include "part.h" #include "physical_constants.h" +#include "random.h" +#include "star_formation_struct.h" #include "units.h" /** * @brief Calculate if the gas has the potential of becoming * a star. * - * No star formation should occur, so return 0. + * Use the star formation criterion given by eq. 3 in Revaz & Jablonka 2018. * * @param starform the star formation law properties to use. * @param p the gas particles. 
@@ -46,7 +51,7 @@ * */ INLINE static int star_formation_is_star_forming( - const struct part* restrict p, const struct xpart* restrict xp, + struct part* restrict p, struct xpart* restrict xp, const struct star_formation* starform, const struct phys_const* phys_const, const struct cosmology* cosmo, const struct hydro_props* restrict hydro_props, @@ -54,14 +59,43 @@ INLINE static int star_formation_is_star_forming( const struct cooling_function_data* restrict cooling, const struct entropy_floor_properties* restrict entropy_floor) { - return 0; + const float temperature = cooling_get_temperature(phys_const, hydro_props, us, + cosmo, cooling, p, xp); + + const float temperature_max = starform->maximal_temperature; + + /* Check the temperature criterion */ + if (temperature > temperature_max) { + return 0; + } + + /* Get the required variables */ + const float sigma2 = p->sf_data.sigma2; + const float n_jeans_2_3 = starform->n_jeans_2_3; + + const float h = p->h; + const float density = hydro_get_physical_density(p, cosmo); + + // TODO use GRACKLE */ + const float mu = hydro_props->mu_neutral; + + /* Compute the density criterion */ + const float coef = + M_PI_4 / (phys_const->const_newton_G * n_jeans_2_3 * h * h); + const float density_criterion = + coef * (hydro_gamma * phys_const->const_boltzmann_k * temperature / + (mu * phys_const->const_proton_mass) + + sigma2); + + /* Check the density criterion */ + return density > density_criterion; } /** - * @brief Compute the star-formation rate of a given particle and store - * it into the #xpart. + * @brief Compute the star-formation rate of a given particle. * - * Nothing to do here. + * Nothing to do here. Everything is done in + * #star_formation_should_convert_to_star. * * @param p #part. * @param xp the #xpart. @@ -71,15 +105,15 @@ INLINE static int star_formation_is_star_forming( * @param dt_star The time-step of this particle. */ INLINE static void star_formation_compute_SFR( - const struct part* restrict p, struct xpart* restrict xp, + struct part* restrict p, struct xpart* restrict xp, const struct star_formation* starform, const struct phys_const* phys_const, const struct cosmology* cosmo, const double dt_star) {} /** * @brief Decides whether a particle should be converted into a * star or not. - * - * No SF should occur, so return 0. + + * Compute the star formation rate from eq. 4 in Revaz & Jablonka 2012. * * @param p The #part. * @param xp The #xpart. @@ -89,18 +123,38 @@ INLINE static void star_formation_compute_SFR( * @return 1 if a conversion should be done, 0 otherwise. */ INLINE static int star_formation_should_convert_to_star( - const struct part* p, const struct xpart* xp, - const struct star_formation* starform, const struct engine* e, - const double dt_star) { + struct part* p, struct xpart* xp, const struct star_formation* starform, + const struct engine* e, const double dt_star) { + + const struct phys_const* phys_const = e->physical_constants; + const struct cosmology* cosmo = e->cosmology; + + /* Check that we are running a full time step */ + if (dt_star == 0.) { + return 0; + } + + /* Get a few variables */ + const float G = phys_const->const_newton_G; + const float density = hydro_get_physical_density(p, cosmo); + + /* Compute the probability */ + const float inv_free_fall_time = + sqrtf(density * 32.f * G * 0.33333333f * M_1_PI); + const float prob = 1.f - exp(-starform->star_formation_efficiency * + inv_free_fall_time * dt_star); + + /* Roll the dice... 
*/ + const float random_number = + random_unit_interval(p->id, e->ti_current, random_number_star_formation); - return 0; + /* Can we form a star? */ + return random_number < prob; } /** * @brief Update the SF properties of a particle that is not star forming. * - * Nothing to do here. - * * @param p The #part. * @param xp The #xpart. * @param e The #engine. @@ -115,8 +169,6 @@ INLINE static void star_formation_update_part_not_SFR( * @brief Copies the properties of the gas particle over to the * star particle. * - * Nothing to do here. - * * @param e The #engine * @param p the gas particles. * @param xp the additional properties of the gas particles. @@ -133,21 +185,33 @@ INLINE static void star_formation_copy_properties( const struct phys_const* phys_const, const struct hydro_props* restrict hydro_props, const struct unit_system* restrict us, - const struct cooling_function_data* restrict cooling) {} + const struct cooling_function_data* restrict cooling) { -/** - * @brief initialization of the star formation law - * - * @param parameter_file The parsed parameter file - * @param phys_const Physical constants in internal units - * @param us The current internal system of units - * @param starform the star formation law properties to initialize - * - */ -INLINE static void starformation_init_backend( - struct swift_params* parameter_file, const struct phys_const* phys_const, - const struct unit_system* us, const struct hydro_props* hydro_props, - const struct star_formation* starform) {} + /* Store the current mass */ + sp->mass = hydro_get_mass(p); + sp->birth.mass = sp->mass; + + /* Store either the birth_scale_factor or birth_time depending */ + if (with_cosmology) { + sp->birth_scale_factor = cosmo->a; + } else { + sp->birth_time = e->time; + } + + // TODO copy only metals + /* Store the chemistry struct in the star particle */ + sp->chemistry_data = p->chemistry_data; + + /* Store the tracers data */ + sp->tracers_data = xp->tracers_data; + + /* Store the birth density in the star particle */ + sp->birth.density = hydro_get_physical_density(p, cosmo); + + /* Store the birth temperature*/ + sp->birth.temperature = cooling_get_temperature(phys_const, hydro_props, us, + cosmo, cooling, p, xp); +} /** * @brief Prints the used parameters of the star formation law @@ -156,7 +220,6 @@ INLINE static void starformation_init_backend( */ INLINE static void starformation_print_backend( const struct star_formation* starform) { - message("Star formation law is 'GEAR'"); } @@ -164,12 +227,22 @@ INLINE static void starformation_print_backend( * @brief Finishes the density calculation. * * @param p The particle to act upon - * @param cd The global star_formation information. + * @param xp The extended particle data to act upon + * @param sf The global star_formation information. * @param cosmo The current cosmological model. */ __attribute__((always_inline)) INLINE static void star_formation_end_density( - struct part* restrict p, const struct star_formation* cd, - const struct cosmology* cosmo) {} + struct part* restrict p, const struct star_formation* sf, + const struct cosmology* cosmo) { + + // TODO move into pressure floor + /* To finish the turbulence estimation we devide by the density */ + p->sf_data.sigma2 /= + pow_dimension(p->h) * hydro_get_physical_density(p, cosmo); + + /* Add the cosmological factor */ + p->sf_data.sigma2 *= cosmo->a * cosmo->a; +} /** * @brief Sets all particle fields to sensible values when the #part has 0 ngbs. 
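Spelling out what the GEAR routines above compute may help: a gas particle is eligible for star formation when it is colder than ``maximal_temperature`` and denser than a Jeans-like threshold, and an eligible particle is then converted stochastically with a probability set by its free-fall time. In the notation of the code (``NJeans`` = N_Jeans, ``star_formation_efficiency`` = epsilon_star):

.. math::

   \rho > \frac{\pi}{4\, G\, N_{\rm Jeans}^{2/3}\, h^2}
          \left(\frac{\gamma\, k_{\rm B}\, T}{\mu\, m_{\rm p}} + \sigma^2\right),
   \qquad
   p_\star = 1 - \exp\!\left(-\epsilon_\star\, \frac{\Delta t}{t_{\rm ff}}\right),
   \qquad
   t_{\rm ff} = \sqrt{\frac{3\pi}{32\, G\, \rho}} .

Here h is the particle's smoothing length, sigma^2 the local turbulence estimate accumulated in the density loop, and the conversion is only attempted on full time-steps (the ``dt_star == 0`` early return).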
@@ -183,39 +256,46 @@ __attribute__((always_inline)) INLINE static void star_formation_part_has_no_neighbours(struct part* restrict p, struct xpart* restrict xp, const struct star_formation* cd, - const struct cosmology* cosmo) {} + const struct cosmology* cosmo) { + + // TODO move into pressure floor + /* If part has 0 neighbours, the estimation of turbulence is 0 */ + p->sf_data.sigma2 = 0.f; +} /** * @brief Sets the star_formation properties of the (x-)particles to a valid * start state. * - * Nothing to do here. - * + * @param p Pointer to the particle data. + * @param xp Pointer to extended particle data + * @param data The global star_formation information. + */ +__attribute__((always_inline)) INLINE static void star_formation_init_part( + struct part* restrict p, struct xpart* restrict xp, + const struct star_formation* data) { + p->sf_data.sigma2 = 0.f; +} + +/** + * @brief Sets the star_formation properties of the (x-)particles to a valid + * start state. * @param phys_const The physical constant in internal units. * @param us The unit system. * @param cosmo The current cosmological model. * @param data The global star_formation information used for this run. * @param p Pointer to the particle data. - * @param xp Pointer to the extended particle data. */ __attribute__((always_inline)) INLINE static void star_formation_first_init_part(const struct phys_const* restrict phys_const, const struct unit_system* restrict us, const struct cosmology* restrict cosmo, const struct star_formation* data, - const struct part* restrict p, - struct xpart* restrict xp) {} + struct part* restrict p, + struct xpart* restrict xp) { -/** - * @brief Sets the star_formation properties of the (x-)particles to a valid - * start state. - * - * Nothing to do here. - * - * @param p Pointer to the particle data. - * @param data The global star_formation information. - */ -__attribute__((always_inline)) INLINE static void star_formation_init_part( - struct part* restrict p, const struct star_formation* data) {} + /* Nothing special here */ + star_formation_init_part(p, xp, data); +} #endif /* SWIFT_GEAR_STAR_FORMATION_H */ diff --git a/src/star_formation/GEAR/star_formation_iact.h b/src/star_formation/GEAR/star_formation_iact.h index 749b608068650a27cbe4c9a0ca4126d2740337f3..7325b92af2840b317cf1a924a1e509b34bdffba3 100644 --- a/src/star_formation/GEAR/star_formation_iact.h +++ b/src/star_formation/GEAR/star_formation_iact.h @@ -1,6 +1,7 @@ /******************************************************************************* * This file is part of SWIFT. - * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * 2019 Fabien Jeanquartier (fabien.jeanquartier@epfl.ch) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -28,6 +29,8 @@ * @brief do star_formation computation after the runner_iact_density (symmetric * version) * + * Compute the velocity dispersion follow eq. 2 in Revaz & Jablonka 2018. + * * @param r2 Comoving square distance between the two particles. * @param dx Comoving vector separating both particles (pi - pj). * @param hi Comoving smoothing-length of particle i. 
@@ -39,7 +42,28 @@ */ __attribute__((always_inline)) INLINE static void runner_iact_star_formation( float r2, const float *dx, float hi, float hj, struct part *restrict pi, - struct part *restrict pj, float a, float H) {} + struct part *restrict pj, float a, float H) { + + float wi; + float wj; + /* Evaluation of the SPH kernel */ + kernel_eval(sqrt(r2) / hi, &wi); + kernel_eval(sqrt(r2) / hj, &wj); + + /* Delta v */ + float dv[3] = {pi->v[0] - pj->v[0], pi->v[1] - pj->v[1], pi->v[2] - pj->v[2]}; + + /* Norms at power 2 */ + const float norm_v2 = dv[0] * dv[0] + dv[1] * dv[1] + dv[2] * dv[2]; + const float norm_x2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]; + + /* Compute the velocity dispersion */ + const float sigma2 = norm_v2 + H * norm_x2; + + /* Compute the velocity dispersion */ + pi->sf_data.sigma2 += sigma2 * wi * hydro_get_mass(pj); + pj->sf_data.sigma2 += sigma2 * wj * hydro_get_mass(pi); +} /** * @brief do star_formation computation after the runner_iact_density (non @@ -58,6 +82,23 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_star_formation(float r2, const float *dx, float hi, float hj, struct part *restrict pi, const struct part *restrict pj, float a, - float H) {} + float H) { + float wi; + /* Evaluation of the SPH kernel */ + kernel_eval(sqrt(r2) / hi, &wi); + + /* Delta v */ + float dv[3] = {pi->v[0] - pj->v[0], pi->v[1] - pj->v[1], pi->v[2] - pj->v[2]}; + + /* Norms at power 2 */ + const float norm_v2 = dv[0] * dv[0] + dv[1] * dv[1] + dv[2] * dv[2]; + const float norm_x2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]; + + /* Compute the velocity dispersion */ + const float sigma2 = norm_v2 + H * norm_x2; + + /* Compute the velocity dispersion */ + pi->sf_data.sigma2 += sigma2 * wi * hydro_get_mass(pj); +} #endif /* SWIFT_GEAR_STAR_FORMATION_IACT_H */ diff --git a/src/star_formation/GEAR/star_formation_io.h b/src/star_formation/GEAR/star_formation_io.h index 6ef04c49c4abcd00175aaa164271628a9ff89360..3e021f7844c1deaeca40d7144d6f7b69cb6c2bdb 100644 --- a/src/star_formation/GEAR/star_formation_io.h +++ b/src/star_formation/GEAR/star_formation_io.h @@ -1,6 +1,7 @@ /******************************************************************************* * This file is part of SWIFT. - * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl) + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * 2019 Fabien Jeanquartier (fabien.jeanquartier@epfl.ch) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -37,8 +38,40 @@ __attribute__((always_inline)) INLINE static int star_formation_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list) { - + /* Nothing to write here */ return 0; } +/** + * @brief initialization of the star formation law + * + * @param parameter_file The parsed parameter file + * @param phys_const Physical constants in internal units + * @param us The current internal system of units + * @param starform the star formation law properties to initialize + * + */ +INLINE static void starformation_init_backend( + struct swift_params* parameter_file, const struct phys_const* phys_const, + const struct unit_system* us, const struct hydro_props* hydro_props, + struct star_formation* starform) { + + // TODO move into pressure floor + starform->n_jeans_2_3 = + parser_get_param_float(parameter_file, "GEARStarFormation:NJeans"); + starform->n_jeans_2_3 = pow(starform->n_jeans_2_3, 2. 
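For reference, the estimator accumulated in the two interaction functions above and normalised in ``star_formation_end_density`` is, transcribing the code directly,

.. math::

   \sigma_i^2 = \frac{a^2}{h_i^d\, \rho_i} \sum_j m_j\,
                W\!\left(\frac{|\mathbf{x}_{ij}|}{h_i}\right)
                \left(|\mathbf{v}_{ij}|^2 + H\, |\mathbf{x}_{ij}|^2\right),

with W the dimensionless kernel, d the number of spatial dimensions, rho_i the physical density, and v_ij, x_ij the stored velocity and position differences between the particle pair.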
/ 3.); + + /* Star formation efficiency */ + starform->star_formation_efficiency = parser_get_param_double( + parameter_file, "GEARStarFormation:star_formation_efficiency"); + + /* Maximum temperature for star formation */ + starform->maximal_temperature = parser_get_param_double( + parameter_file, "GEARStarFormation:maximal_temperature"); + + /* Apply unit change */ + starform->maximal_temperature *= + units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE); +} + #endif /* SWIFT_STAR_FORMATION_GEAR_IO_H */ diff --git a/src/star_formation/GEAR/star_formation_logger.h b/src/star_formation/GEAR/star_formation_logger.h index 5b9e033d21d3d202f3c289d1dd6a843ba17fa524..84475909c0f524a4f930a48dbcc3b0943719f8b0 100644 --- a/src/star_formation/GEAR/star_formation_logger.h +++ b/src/star_formation/GEAR/star_formation_logger.h @@ -1,6 +1,7 @@ /******************************************************************************* * This file is part of SWIFT. - * Copyright (c) 2019 Folkert Nobels (nobels@strw.leidenuniv.nl) + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * 2019 Fabien Jeanquartier (fabien.jeanquartier@epfl.ch) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -27,16 +28,28 @@ #include "hydro.h" #include "part.h" #include "star_formation_logger_struct.h" +#include "units.h" /** - * @brief Update the stellar mass in the current cell after creating - * the new star particle spart sp + * @brief Update the stellar quantities in the current cell after creating + * the new star particle spart sp. * + * @param time_step, the current time step of the simulation * @param sp new created star particle * @param sf the star_formation_history struct of the current cell */ INLINE static void star_formation_logger_log_new_spart( - struct spart *sp, struct star_formation_history *sf) {} + const struct spart *sp, struct star_formation_history *sf) { + + /* Add mass of created sparticle to the total stellar mass in this cell */ + sf->new_stellar_mass += sp->mass; + + /* Increase the number of stars */ + sf->number_new_stars += 1; + + /* No need to deal with the integrated quantities, only the engine's one is + * updated */ +} /** * @brief Initialize the star formation history struct in the case the cell is @@ -45,41 +58,42 @@ INLINE static void star_formation_logger_log_new_spart( * @param sf the star_formation_history struct we want to initialize */ INLINE static void star_formation_logger_log_inactive_cell( - struct star_formation_history *sf) {} + struct star_formation_history *sf) { -/** - * @brief add a star formation history struct to an other star formation history - * struct - * - * @param sf_add the star formation struct which we want to add to the star - * formation history - * @param sf_update the star formation structure which we want to update - */ -INLINE static void star_formation_logger_add( - struct star_formation_history *sf_update, - const struct star_formation_history *sf_add) {} + /* Initialize the stellar mass to zero */ + sf->new_stellar_mass = 0.f; + /* initialize number of stars to zero*/ + sf->number_new_stars = 0; +} /** * @brief Initialize the star formation history structure in the #engine * * @param sfh The pointer to the star formation history structure */ INLINE static void star_formation_logger_init( - struct star_formation_history *sfh) {} + struct star_formation_history *sfh) { + /* Initialize the collecting SFH structure to zero */ + sfh->new_stellar_mass = 
0.f; + sfh->number_new_stars = 0; +} /** - * @brief Write the final SFH to a file + * @brief add a star formation history struct to an other star formation history + * struct * - * @param fp The file to write to. - * @param time the simulation time (time since Big Bang) in internal units. - * @param a the scale factor. - * @param z the redshift. - * @param sf the #star_formation_history struct. - * @param step The time-step of the simulation. + * @param sf_add the star formation struct which we want to add to the star + * formation history + * @param sf_update the star formation structure which we want to update */ -INLINE static void star_formation_logger_write_to_log_file( - FILE *fp, const double time, const double a, const double z, - const struct star_formation_history sf, const int step) {} +INLINE static void star_formation_logger_add( + struct star_formation_history *sf_update, + const struct star_formation_history *sf_add) { + + /* Update the SFH structure */ + sf_update->number_new_stars += sf_add->number_new_stars; + sf_update->new_stellar_mass += sf_add->new_stellar_mass; +} /** * @brief Initialize the SFH logger file @@ -90,13 +104,47 @@ INLINE static void star_formation_logger_write_to_log_file( */ INLINE static void star_formation_logger_init_log_file( FILE *fp, const struct unit_system *restrict us, - const struct phys_const *phys_const) {} + const struct phys_const *phys_const) { + + /* Write some general text to the logger file */ + fprintf(fp, "# Star Formation History Logger file\n"); + fprintf(fp, "######################################################\n"); + fprintf(fp, "# The quantities are all given in internal physical units!\n"); + fprintf(fp, "#\n"); + fprintf(fp, "# (0) Simulation step\n"); + fprintf(fp, + "# (1) Time since Big Bang (cosmological run), Time since start of " + "the simulation (non-cosmological run).\n"); + fprintf(fp, "# Unit = %e seconds\n", us->UnitTime_in_cgs); + fprintf(fp, "# Unit = %e yr or %e Myr\n", 1.f / phys_const->const_year, + 1.f / phys_const->const_year / 1e6); + fprintf(fp, "# (2) Scale factor (no unit)\n"); + fprintf(fp, "# (3) Redshift (no unit)\n"); + fprintf(fp, + "# (4) Total number of stars formed in the simulation (no unit)\n"); + fprintf(fp, "# (5) Total stellar mass formed in the simulation.\n"); + fprintf(fp, "# Unit = %e gram\n", us->UnitMass_in_cgs); + fprintf(fp, "# Unit = %e solar mass\n", + 1.f / phys_const->const_solar_mass); + fprintf(fp, + "# (6) Number of stars formed in the current time step (no unit).\n"); + fprintf(fp, "# (7) Mass of stars formed in the current time step.\n"); + fprintf(fp, "# Unit = %e gram\n", us->UnitMass_in_cgs); + fprintf(fp, "# Unit = %e solar mass\n", + 1.f / phys_const->const_solar_mass); + fprintf(fp, + "# (0) (1) (2) (3) (4) " + " (5) (6) (7)\n"); +} /** * @brief Add the SFR tracer to the total active SFR of this cell * + * Nothing to do here + * * @param p the #part * @param xp the #xpart + * * @param sf the SFH logger struct * @param dt_star The length of the time-step in physical internal units. 
*/ @@ -108,6 +156,8 @@ INLINE static void star_formation_logger_log_active_part( * @brief Add the SFR tracer to the total inactive SFR of this cell as long as * the SFR tracer is larger than 0 * + * Nothing to do here + * * @param p the #part * @param xp the #xpart * @param sf the SFH logger struct @@ -116,4 +166,57 @@ INLINE static void star_formation_logger_log_inactive_part( const struct part *p, const struct xpart *xp, struct star_formation_history *sf) {} +/** + * @brief add a star formation history struct to an other star formation history + * struct + * + * @param sf_add the star formation struct which we want to add to the star + * formation history + * @param sf_update the star formation structure which we want to update + */ +INLINE static void star_formation_logger_add_to_accumulator( + struct star_formation_history_accumulator *sf_update, + const struct star_formation_history *sf_add) { + + /* Update the SFH structure */ + sf_update->number_new_stars = sf_add->number_new_stars; + sf_update->new_stellar_mass = sf_add->new_stellar_mass; + sf_update->total_number_stars += sf_add->number_new_stars; + sf_update->total_stellar_mass += sf_add->new_stellar_mass; +} + +/** + * @brief Write the final SFH to a file + * + * @param fp The file to write to. + * @param time the simulation time (time since Big Bang) in internal units. + * @param a the scale factor. + * @param z the redshift. + * @param sf the #star_formation_history struct. + * @param step The time-step of the simulation. + */ +INLINE static void star_formation_logger_write_to_log_file( + FILE *fp, const double time, const double a, const double z, + struct star_formation_history_accumulator sf, const int step) { + + fprintf(fp, "%6d %16e %12.7f %14e %14ld %14e %14ld %14e\n", step, time, a, z, + sf.total_number_stars, sf.total_stellar_mass, sf.number_new_stars, + sf.new_stellar_mass); +} + +/** + * @brief Initialize the star formation history struct in the #engine when + * starting the simulation. + * + * @param sfh the star_formation_history struct we want to initialize + */ +INLINE static void star_formation_logger_accumulator_init( + struct star_formation_history_accumulator *sfh) { + /* Initialize all values to zero */ + sfh->new_stellar_mass = 0.f; + sfh->number_new_stars = 0; + sfh->total_number_stars = 0; + sfh->total_stellar_mass = 0.f; +} + #endif /* SWIFT_GEAR_STARFORMATION_LOGGER_H */ diff --git a/src/star_formation/GEAR/star_formation_logger_struct.h b/src/star_formation/GEAR/star_formation_logger_struct.h index 04b5dfdc038f7b684cfb1f2079d13eb312624b3f..63e3af06a7cd5af375662a1ba28dc5000a69dc3f 100644 --- a/src/star_formation/GEAR/star_formation_logger_struct.h +++ b/src/star_formation/GEAR/star_formation_logger_struct.h @@ -1,6 +1,7 @@ /******************************************************************************* * This file is part of SWIFT. - * Copyright (c) 2019 Folkert Nobels (nobels@strw.leidenuniv.nl) + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * 2019 Fabien Jeanquartier (fabien.jeanquartier@epfl.ch) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -19,7 +20,33 @@ #ifndef SWIFT_GEAR_STAR_FORMATION_LOGGER_STRUCT_H #define SWIFT_GEAR_STAR_FORMATION_LOGGER_STRUCT_H -/* Starformation history struct */ -struct star_formation_history {}; +/** + * Structure containing the star formation information from the cells. + */ +struct star_formation_history { + /*! 
Stellar mass created in the current timestep */ + float new_stellar_mass; -#endif /* SWIFT_NONE_STAR_FORMATION_STRUCT_H */ + /*! Number of stars created in this timestep */ + long int number_new_stars; +}; + +/** + * Structure containing the global star formation information (including time + * integrated variables). + */ +struct star_formation_history_accumulator { + /*! Total stellar mass from the begining of the simulation */ + float total_stellar_mass; + + /*! Total number of stars */ + long int total_number_stars; + + /*! Stellar mass created in the current timestep */ + float new_stellar_mass; + + /*! Number of stars created in this timestep */ + long int number_new_stars; +}; + +#endif /* SWIFT_GEAR_STAR_FORMATION_LOGGER_STRUCT_H */ diff --git a/src/star_formation/GEAR/star_formation_struct.h b/src/star_formation/GEAR/star_formation_struct.h index 9b4e216fd2955f29d89dade6ee46c0e1af715cdb..2e9a7548f83ca6ae9bb78ee7bcf4be69a0a31489 100644 --- a/src/star_formation/GEAR/star_formation_struct.h +++ b/src/star_formation/GEAR/star_formation_struct.h @@ -25,7 +25,27 @@ */ struct star_formation_xpart_data {}; -/* Starformation struct */ -struct star_formation {}; +struct star_formation_part_data { + // TODO move it to the pressure floor + /*! Estimation of local turbulence (squared) */ + float sigma2; +}; + +/** + * @brief Global star formation properties + */ +struct star_formation { + + // TODO move it to pressure floor + /*! Number of particle required to resolved the Jeans criterion (at power 2/3) + */ + float n_jeans_2_3; + + /*! Maximal temperature for forming a star */ + float maximal_temperature; + + /*! Star formation efficiency */ + float star_formation_efficiency; +}; #endif /* SWIFT_GEAR_STAR_FORMATION_STRUCT_H */ diff --git a/src/star_formation/none/star_formation.h b/src/star_formation/none/star_formation.h index dfe645718d689841f89cf592194d435af299a642..3275e1a4c43ce232d73dbb2f331c1c6e81118ec1 100644 --- a/src/star_formation/none/star_formation.h +++ b/src/star_formation/none/star_formation.h @@ -219,6 +219,7 @@ star_formation_first_init_part(const struct phys_const* restrict phys_const, * @param data The global star_formation information. 
*/ __attribute__((always_inline)) INLINE static void star_formation_init_part( - struct part* restrict p, const struct star_formation* data) {} + struct part* restrict p, struct xpart* restrict xp, + const struct star_formation* data) {} #endif /* SWIFT_NONE_STAR_FORMATION_H */ diff --git a/src/star_formation/none/star_formation_logger.h b/src/star_formation/none/star_formation_logger.h index b4e6987c03d295348fc8c22d66cb20d10e54378c..552df0c6cae533d1eb2678cb23dd675e0a058715 100644 --- a/src/star_formation/none/star_formation_logger.h +++ b/src/star_formation/none/star_formation_logger.h @@ -59,6 +59,18 @@ INLINE static void star_formation_logger_add( struct star_formation_history *sf_update, const struct star_formation_history *sf_add) {} +/** + * @brief add a star formation history accumulator struct to an other star + * formation history struct + * + * @param sf_add the star formation accumulator struct which we want to add to + * the star formation history + * @param sf_update the star formation structure which we want to update + */ +INLINE static void star_formation_logger_add_to_accumulator( + struct star_formation_history_accumulator *sf_update, + const struct star_formation_history *sf_add) {} + /** * @brief Initialize the star formation history structure in the #engine * @@ -67,6 +79,14 @@ INLINE static void star_formation_logger_add( INLINE static void star_formation_logger_init( struct star_formation_history *sfh) {} +/** + * @brief Initialize the star formation history structure in the #engine + * + * @param sfh The pointer to the star formation history structure + */ +INLINE static void star_formation_logger_accumulator_init( + struct star_formation_history_accumulator *sfh) {} + /** * @brief Write the final SFH to a file * @@ -74,12 +94,12 @@ INLINE static void star_formation_logger_init( * @param time the simulation time (time since Big Bang) in internal units. * @param a the scale factor. * @param z the redshift. - * @param sf the #star_formation_history struct. + * @param sf the #star_formation_history_accumulator struct. * @param step The time-step of the simulation. */ INLINE static void star_formation_logger_write_to_log_file( FILE *fp, const double time, const double a, const double z, - const struct star_formation_history sf, const int step) {} + const struct star_formation_history_accumulator sf, const int step) {} /** * @brief Initialize the SFH logger file diff --git a/src/star_formation/none/star_formation_logger_struct.h b/src/star_formation/none/star_formation_logger_struct.h index 9efda271da96faf2088169fd75d0e3c01247a429..b60c64f2eb47894db828a1bde0ca20803892c7db 100644 --- a/src/star_formation/none/star_formation_logger_struct.h +++ b/src/star_formation/none/star_formation_logger_struct.h @@ -22,4 +22,10 @@ /* Starformation history struct */ struct star_formation_history {}; +/* Starformation history accumulator struct. + This structure is only defined in the engine and + allows the user to integrate some quantities over time. 
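The `none` variants shown here and below follow SWIFT's usual pattern for disabled subgrid models: every struct is empty and every hook is an inline function with an empty body, so the engine-side call sites compile unchanged and the calls disappear after inlining. A schematic of that pattern, not the actual SWIFT headers (note that empty structs are a GNU C extension, as in the real code):

.. code-block:: C

    #include <stdio.h>

    /* Empty data: costs no space when the model is switched off
       (empty structs are a GNU C extension). */
    struct star_formation_history {};
    struct star_formation_history_accumulator {};

    /* No-op hooks: the compiler inlines and removes them entirely. */
    static inline void logger_init(struct star_formation_history *sfh) {}
    static inline void logger_log_active_part(struct star_formation_history *sfh) {}

    int main(void) {
      struct star_formation_history sfh;

      /* The engine-side call sites stay identical for every model choice. */
      logger_init(&sfh);
      logger_log_active_part(&sfh);

      printf("size of the disabled logger struct: %zu bytes\n", sizeof(sfh));
      return 0;
    }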
+*/ +struct star_formation_history_accumulator {}; + #endif /* SWIFT_NONE_STAR_FORMATION_STRUCT_H */ diff --git a/src/star_formation/none/star_formation_struct.h b/src/star_formation/none/star_formation_struct.h index 27a2adaf83d0a02a0d08e7eef8b45bea630689e4..2f5241a58caf1ca70fa98a40d467c8ff5a3237f7 100644 --- a/src/star_formation/none/star_formation_struct.h +++ b/src/star_formation/none/star_formation_struct.h @@ -25,4 +25,10 @@ */ struct star_formation_xpart_data {}; +/** + * @brief Star-formation-related properties stored in the particle + * data. + */ +struct star_formation_part_data {}; + #endif /* SWIFT_NONE_STAR_FORMATION_STRUCT_H */ diff --git a/src/stars.h b/src/stars.h index dd8390e0206580fc2a07a08e51bb69c6ee5ab5ed..dea6e07a87cabd7d1778ec2a850be4f3b16b04b0 100644 --- a/src/stars.h +++ b/src/stars.h @@ -29,6 +29,9 @@ #elif defined(STARS_EAGLE) #include "./stars/EAGLE/stars.h" #include "./stars/EAGLE/stars_iact.h" +#elif defined(STARS_GEAR) +#include "./stars/GEAR/stars.h" +#include "./stars/GEAR/stars_iact.h" #else #error "Invalid choice of star model" #endif diff --git a/src/stars/Default/stars_io.h b/src/stars/Default/stars_io.h index a8ec1cfa55728f9ca8a348d8fd6ec07d06b72185..2a824aaa9954f48de8653b5a0b6c6a3b839aa2c9 100644 --- a/src/stars/Default/stars_io.h +++ b/src/stars/Default/stars_io.h @@ -22,20 +22,6 @@ #include "io_properties.h" #include "stars_part.h" -INLINE static void convert_spart_pos(const struct engine *e, - const struct spart *sp, double *ret) { - - if (e->s->periodic) { - ret[0] = box_wrap(sp->x[0], 0.0, e->s->dim[0]); - ret[1] = box_wrap(sp->x[1], 0.0, e->s->dim[1]); - ret[2] = box_wrap(sp->x[2], 0.0, e->s->dim[2]); - } else { - ret[0] = sp->x[0]; - ret[1] = sp->x[1]; - ret[2] = sp->x[2]; - } -} - /** * @brief Specifies which s-particle fields to read from a dataset * @@ -63,6 +49,53 @@ INLINE static void stars_read_particles(struct spart *sparts, UNIT_CONV_LENGTH, sparts, h); } +INLINE static void convert_spart_pos(const struct engine *e, + const struct spart *sp, double *ret) { + + if (e->s->periodic) { + ret[0] = box_wrap(sp->x[0], 0.0, e->s->dim[0]); + ret[1] = box_wrap(sp->x[1], 0.0, e->s->dim[1]); + ret[2] = box_wrap(sp->x[2], 0.0, e->s->dim[2]); + } else { + ret[0] = sp->x[0]; + ret[1] = sp->x[1]; + ret[2] = sp->x[2]; + } +} + +INLINE static void convert_spart_vel(const struct engine *e, + const struct spart *sp, float *ret) { + + const int with_cosmology = (e->policy & engine_policy_cosmology); + const struct cosmology *cosmo = e->cosmology; + const integertime_t ti_current = e->ti_current; + const double time_base = e->time_base; + + const integertime_t ti_beg = get_integer_time_begin(ti_current, sp->time_bin); + const integertime_t ti_end = get_integer_time_end(ti_current, sp->time_bin); + + /* Get time-step since the last kick */ + float dt_kick_grav; + if (with_cosmology) { + dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, ti_beg, ti_current); + dt_kick_grav -= + cosmology_get_grav_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2); + } else { + dt_kick_grav = (ti_current - ((ti_beg + ti_end) / 2)) * time_base; + } + + /* Extrapolate the velocites to the current time */ + const struct gpart *gp = sp->gpart; + ret[0] = gp->v_full[0] + gp->a_grav[0] * dt_kick_grav; + ret[1] = gp->v_full[1] + gp->a_grav[1] * dt_kick_grav; + ret[2] = gp->v_full[2] + gp->a_grav[2] * dt_kick_grav; + + /* Conversion from internal units to peculiar velocities */ + ret[0] *= cosmo->a_inv; + ret[1] *= cosmo->a_inv; + ret[2] *= cosmo->a_inv; +} + /** * @brief Specifies 
which s-particle fields to write to a dataset * @@ -71,23 +104,31 @@ INLINE static void stars_read_particles(struct spart *sparts, * @param num_fields The number of i/o fields to write. */ INLINE static void stars_write_particles(const struct spart *sparts, - struct io_props *list, - int *num_fields) { + struct io_props *list, int *num_fields, + int with_cosmology) { /* Say how much we want to write */ *num_fields = 5; /* List what we want to write */ list[0] = io_make_output_field_convert_spart( - "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, sparts, convert_spart_pos); - list[1] = - io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, sparts, v); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, sparts, mass); + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, sparts, + convert_spart_pos, "Co-moving position of the particles"); + + list[1] = io_make_output_field_convert_spart( + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, sparts, convert_spart_vel, + "Peculiar velocities of the particles. This is a * dx/dt where x is the " + "co-moving position of the particles."); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + sparts, mass, "Masses of the particles"); + list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, - sparts, id); - list[4] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - sparts, h); + 0.f, sparts, id, "Unique ID of the particles"); + + list[4] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, sparts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); #ifdef DEBUG_INTERACTIONS_STARS diff --git a/src/stars/EAGLE/stars_io.h b/src/stars/EAGLE/stars_io.h index cfacd52106398f435e56a9a2a67d1016726e2295..8e3f6056921936fdcf86337af165e130cc6e02eb 100644 --- a/src/stars/EAGLE/stars_io.h +++ b/src/stars/EAGLE/stars_io.h @@ -23,20 +23,6 @@ #include "io_properties.h" #include "stars_part.h" -INLINE static void convert_spart_pos(const struct engine *e, - const struct spart *sp, double *ret) { - - if (e->s->periodic) { - ret[0] = box_wrap(sp->x[0], 0.0, e->s->dim[0]); - ret[1] = box_wrap(sp->x[1], 0.0, e->s->dim[1]); - ret[2] = box_wrap(sp->x[2], 0.0, e->s->dim[2]); - } else { - ret[0] = sp->x[0]; - ret[1] = sp->x[1]; - ret[2] = sp->x[2]; - } -} - /** * @brief Specifies which s-particle fields to read from a dataset * @@ -66,6 +52,53 @@ INLINE static void stars_read_particles(struct spart *sparts, sparts, mass_init); } +INLINE static void convert_spart_pos(const struct engine *e, + const struct spart *sp, double *ret) { + + if (e->s->periodic) { + ret[0] = box_wrap(sp->x[0], 0.0, e->s->dim[0]); + ret[1] = box_wrap(sp->x[1], 0.0, e->s->dim[1]); + ret[2] = box_wrap(sp->x[2], 0.0, e->s->dim[2]); + } else { + ret[0] = sp->x[0]; + ret[1] = sp->x[1]; + ret[2] = sp->x[2]; + } +} + +INLINE static void convert_spart_vel(const struct engine *e, + const struct spart *sp, float *ret) { + + const int with_cosmology = (e->policy & engine_policy_cosmology); + const struct cosmology *cosmo = e->cosmology; + const integertime_t ti_current = e->ti_current; + const double time_base = e->time_base; + + const integertime_t ti_beg = get_integer_time_begin(ti_current, sp->time_bin); + const integertime_t ti_end = get_integer_time_end(ti_current, sp->time_bin); + + /* Get time-step since the last kick */ + float dt_kick_grav; + if (with_cosmology) { + dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, ti_beg, ti_current); + dt_kick_grav -= + 
cosmology_get_grav_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2); + } else { + dt_kick_grav = (ti_current - ((ti_beg + ti_end) / 2)) * time_base; + } + + /* Extrapolate the velocites to the current time */ + const struct gpart *gp = sp->gpart; + ret[0] = gp->v_full[0] + gp->a_grav[0] * dt_kick_grav; + ret[1] = gp->v_full[1] + gp->a_grav[1] * dt_kick_grav; + ret[2] = gp->v_full[2] + gp->a_grav[2] * dt_kick_grav; + + /* Conversion from internal units to peculiar velocities */ + ret[0] *= cosmo->a_inv; + ret[1] *= cosmo->a_inv; + ret[2] *= cosmo->a_inv; +} + /** * @brief Specifies which s-particle fields to write to a dataset * @@ -74,34 +107,65 @@ INLINE static void stars_read_particles(struct spart *sparts, * @param num_fields The number of i/o fields to write. */ INLINE static void stars_write_particles(const struct spart *sparts, - struct io_props *list, - int *num_fields) { + struct io_props *list, int *num_fields, + const int with_cosmology) { /* Say how much we want to write */ *num_fields = 10; /* List what we want to write */ list[0] = io_make_output_field_convert_spart( - "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, sparts, convert_spart_pos); - list[1] = - io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, sparts, v); - list[2] = - io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, sparts, mass); + "Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1.f, sparts, + convert_spart_pos, "Co-moving position of the particles"); + + list[1] = io_make_output_field_convert_spart( + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, sparts, convert_spart_vel, + "Peculiar velocities of the particles. This is a * dx/dt where x is the " + "co-moving position of the particles."); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + sparts, mass, + "Masses of the particles at the current point " + "in time (i.e. 
after stellar losses"); + list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, - sparts, id); - list[4] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, - sparts, h); - list[5] = io_make_output_field("BirthDensity", FLOAT, 1, UNIT_CONV_DENSITY, - sparts, birth_density); - list[6] = io_make_output_field("InitialMasses", FLOAT, 1, UNIT_CONV_MASS, - sparts, mass_init); - list[7] = io_make_output_field("BirthTime", FLOAT, 1, UNIT_CONV_TIME, sparts, - birth_time); - list[8] = io_make_output_field("FeedbackEnergyFraction", FLOAT, 1, - UNIT_CONV_NO_UNITS, sparts, f_E); + 0.f, sparts, id, "Unique ID of the particles"); + + list[4] = io_make_output_field( + "SmoothingLengths", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, sparts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + list[5] = io_make_output_field( + "BirthDensities", FLOAT, 1, UNIT_CONV_DENSITY, 0.f, sparts, birth_density, + "Physical densities at the time of birth of the gas particles that " + "turned into stars (note that " + "we store the physical density at the birth redshift, no conversion is " + "needed)"); + + list[6] = io_make_output_field("InitialMasses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + sparts, mass_init, + "Masses of the star particles at birth time"); + + if (with_cosmology) { + list[7] = io_make_output_field( + "BirthScaleFactors", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + birth_scale_factor, "Scale-factors at which the stars were born"); + } else { + list[7] = io_make_output_field("BirthTimes", FLOAT, 1, UNIT_CONV_TIME, 0.f, + sparts, birth_time, + "Times at which the stars were born"); + } + + list[8] = io_make_output_field( + "FeedbackEnergyFractions", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, f_E, + "Fractions of the canonical feedback energy that was used for the stars' " + "SNII feedback events"); + list[9] = - io_make_output_field("BirthTemperature", FLOAT, 1, UNIT_CONV_TEMPERATURE, - sparts, birth_temperature); + io_make_output_field("BirthTemperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, + 0.f, sparts, birth_temperature, + "Temperatures at the time of birth of the gas " + "particles that turned into stars"); } /** diff --git a/src/stars/GEAR/stars.h b/src/stars/GEAR/stars.h new file mode 100644 index 0000000000000000000000000000000000000000..467aaa164ba9762d85f8dfae85db86ff76ae779e --- /dev/null +++ b/src/stars/GEAR/stars.h @@ -0,0 +1,172 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ +#ifndef SWIFT_GEAR_STARS_H +#define SWIFT_GEAR_STARS_H + +#include <float.h> +#include "minmax.h" + +/** + * @brief Computes the gravity time-step of a given star particle. 
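The new `convert_spart_vel` helpers in the hunks above extrapolate the star's velocity from its last half-kick to the output time using the gravitational acceleration, then multiply by 1/a to report a peculiar velocity. The same arithmetic in isolation, with made-up numbers and a plain floating-point `dt_kick_grav` standing in for SWIFT's integer timeline and cosmological kick factors:

.. code-block:: C

    #include <stdio.h>

    int main(void) {
      /* State carried by the gravity particle (illustrative values). */
      const float v_full[3] = {100.f, -20.f, 5.f}; /* velocity at the last kick */
      const float a_grav[3] = {0.2f, 0.0f, -0.1f}; /* gravitational acceleration */

      /* Time elapsed since the middle of the particle's kick interval; in SWIFT
       * this comes from the integer timeline or the cosmological kick factor. */
      const float dt_kick_grav = 0.5f;

      /* Current scale factor. */
      const float a = 0.8f;
      const float a_inv = 1.f / a;

      float v_out[3];
      for (int k = 0; k < 3; ++k) {
        /* Extrapolate the velocity to the output time... */
        v_out[k] = v_full[k] + a_grav[k] * dt_kick_grav;
        /* ...and convert to a peculiar velocity, a * dx/dt. */
        v_out[k] *= a_inv;
      }

      printf("peculiar velocity: [%g, %g, %g]\n", v_out[0], v_out[1], v_out[2]);
      return 0;
    }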
+ * + * @param sp Pointer to the s-particle data. + */ +__attribute__((always_inline)) INLINE static float stars_compute_timestep( + const struct spart* const sp) { + + return FLT_MAX; +} + +/** + * @brief Initialises the s-particles for the first time + * + * This function is called only once just after the ICs have been + * read in to do some conversions. + * + * @param sp The particle to act upon + * @param stars_properties The properties of the stellar model. + */ +__attribute__((always_inline)) INLINE static void stars_first_init_spart( + struct spart* sp, const struct stars_props* stars_properties) { + + sp->time_bin = 0; +} + +/** + * @brief Prepares a s-particle for its interactions + * + * @param sp The particle to act upon + */ +__attribute__((always_inline)) INLINE static void stars_init_spart( + struct spart* sp) { + +#ifdef DEBUG_INTERACTIONS_STARS + for (int i = 0; i < MAX_NUM_OF_NEIGHBOURS_STARS; ++i) + sp->ids_ngbs_density[i] = -1; + sp->num_ngb_density = 0; +#endif + + sp->density.wcount = 0.f; + sp->density.wcount_dh = 0.f; +} + +/** + * @brief Predict additional particle fields forward in time when drifting + * + * @param sp The particle + * @param dt_drift The drift time-step for positions. + */ +__attribute__((always_inline)) INLINE static void stars_predict_extra( + struct spart* restrict sp, float dt_drift) {} + +/** + * @brief Sets the values to be predicted in the drifts to their values at a + * kick time + * + * @param sp The particle. + */ +__attribute__((always_inline)) INLINE static void stars_reset_predicted_values( + struct spart* restrict sp) {} + +/** + * @brief Finishes the calculation of (non-gravity) forces acting on stars + * + * @param sp The particle to act upon + */ +__attribute__((always_inline)) INLINE static void stars_end_feedback( + struct spart* sp) {} + +/** + * @brief Kick the additional variables + * + * @param sp The particle to act upon + * @param dt The time-step for this kick + */ +__attribute__((always_inline)) INLINE static void stars_kick_extra( + struct spart* sp, float dt) {} + +/** + * @brief Finishes the calculation of density on stars + * + * @param sp The particle to act upon + * @param cosmo The current cosmological model. + */ +__attribute__((always_inline)) INLINE static void stars_end_density( + struct spart* sp, const struct cosmology* cosmo) { + + /* Some smoothing length multiples. */ + const float h = sp->h; + const float h_inv = 1.0f / h; /* 1/h */ + const float h_inv_dim = pow_dimension(h_inv); /* 1/h^d */ + const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^(d+1) */ + + /* Finish the calculation by inserting the missing h-factors */ + sp->density.wcount *= h_inv_dim; + sp->density.wcount_dh *= h_inv_dim_plus_one; +} + +/** + * @brief Sets all particle fields to sensible values when the #spart has 0 + * ngbs. + * + * @param sp The particle to act upon + * @param cosmo The current cosmological model. + */ +__attribute__((always_inline)) INLINE static void stars_spart_has_no_neighbours( + struct spart* restrict sp, const struct cosmology* cosmo) { + + /* Some smoothing length multiples. */ + const float h = sp->h; + const float h_inv = 1.0f / h; /* 1/h */ + const float h_inv_dim = pow_dimension(h_inv); /* 1/h^d */ + + /* Re-set problematic values */ + sp->density.wcount = kernel_root * h_inv_dim; + sp->density.wcount_dh = 0.f; +} + +/** + * @brief Reset acceleration fields of a particle + * + * This is the equivalent of hydro_reset_acceleration. 
+ * We do not compute the acceleration on star, therefore no need to use it. + * + * @param p The particle to act upon + */ +__attribute__((always_inline)) INLINE static void stars_reset_feedback( + struct spart* restrict p) { + +#ifdef DEBUG_INTERACTIONS_STARS + for (int i = 0; i < MAX_NUM_OF_NEIGHBOURS_STARS; ++i) + p->ids_ngbs_force[i] = -1; + p->num_ngb_force = 0; +#endif +} + +/** + * @brief Initializes constants related to stellar evolution, initializes imf, + * reads and processes yield tables + * + * @param params swift_params parameters structure + * @param stars stars_props data structure + */ +inline static void stars_evolve_init(struct swift_params* params, + struct stars_props* restrict stars) {} + +#endif /* SWIFT_GEAR_STARS_H */ diff --git a/src/stars/GEAR/stars_debug.h b/src/stars/GEAR/stars_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..41953367f6c8771ffd7460cee493c8330ecd874b --- /dev/null +++ b/src/stars/GEAR/stars_debug.h @@ -0,0 +1,31 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ +#ifndef SWIFT_GEAR_STARS_DEBUG_H +#define SWIFT_GEAR_STARS_DEBUG_H + +__attribute__((always_inline)) INLINE static void stars_debug_particle( + const struct spart* p) { + printf( + "x=[%.3e,%.3e,%.3e], " + "v_full=[%.3e,%.3e,%.3e] p->mass=%.3e \n t_begin=%d, t_end=%d\n", + p->x[0], p->x[1], p->x[2], p->v_full[0], p->v_full[1], p->v_full[2], + p->mass, p->ti_begin, p->ti_end); +} + +#endif /* SWIFT_GEAR_STARS_DEBUG_H */ diff --git a/src/stars/GEAR/stars_iact.h b/src/stars/GEAR/stars_iact.h new file mode 100644 index 0000000000000000000000000000000000000000..c7bda43fc0c66fcc890e6b05bacf871edfd10b5a --- /dev/null +++ b/src/stars/GEAR/stars_iact.h @@ -0,0 +1,94 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
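The GEAR star density machinery above splits the work in the usual SPH way: the neighbour loop only accumulates the dimensionless weights `wcount` and `wcount_dh`, and `stars_end_density` inserts the missing powers of 1/h afterwards. The sketch below reproduces that split in 3-D; the kernel shape and the neighbour distances are placeholders, not SWIFT's `kernel_deval`.

.. code-block:: C

    #include <stdio.h>

    /* Placeholder kernel: w(u) = (1 - u)^3 for u < 1, zero outside. */
    static void kernel_eval(float u, float *w, float *dw_du) {
      if (u < 1.f) {
        const float t = 1.f - u;
        *w = t * t * t;
        *dw_du = -3.f * t * t;
      } else {
        *w = 0.f;
        *dw_du = 0.f;
      }
    }

    int main(void) {
      const float h = 0.5f;                          /* smoothing length */
      const float r[4] = {0.1f, 0.2f, 0.3f, 0.45f};  /* fake neighbour distances */

      float wcount = 0.f, wcount_dh = 0.f;
      const float h_inv = 1.f / h;

      /* Density loop: accumulate dimensionless weights only. */
      for (int j = 0; j < 4; ++j) {
        const float u = r[j] * h_inv;
        float w, dw_du;
        kernel_eval(u, &w, &dw_du);
        wcount += w;
        wcount_dh -= (3.f * w + u * dw_du); /* 3 = hydro_dimension */
      }

      /* "End density": insert the missing h-factors, as in stars_end_density(). */
      const float h_inv_dim = h_inv * h_inv * h_inv;      /* 1/h^3 */
      const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^4 */
      wcount *= h_inv_dim;
      wcount_dh *= h_inv_dim_plus_one;

      printf("wcount = %g, wcount_dh = %g\n", wcount, wcount_dh);
      return 0;
    }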
+ * + ******************************************************************************/ +#ifndef SWIFT_GEAR_STARS_IACT_H +#define SWIFT_GEAR_STARS_IACT_H + +/** + * @brief Density interaction between two particles (non-symmetric). + * + * @param r2 Comoving square distance between the two particles. + * @param dx Comoving vector separating both particles (pi - pj). + * @param hi Comoving smoothing-length of particle i. + * @param hj Comoving smoothing-length of particle j. + * @param si First sparticle. + * @param pj Second particle (not updated). + * @param a Current scale factor. + * @param H Current Hubble parameter. + */ +__attribute__((always_inline)) INLINE static void +runner_iact_nonsym_stars_density(const float r2, const float *dx, + const float hi, const float hj, + struct spart *restrict si, + const struct part *restrict pj, const float a, + const float H) { + + float wi, wi_dx; + + /* Get r and 1/r. */ + const float r_inv = 1.0f / sqrtf(r2); + const float r = r2 * r_inv; + + /* Compute the kernel function */ + const float hi_inv = 1.0f / hi; + const float ui = r * hi_inv; + kernel_deval(ui, &wi, &wi_dx); + + /* Compute contribution to the number of neighbours */ + si->density.wcount += wi; + si->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx); + +#ifdef DEBUG_INTERACTIONS_STARS + /* Update ngb counters */ + if (si->num_ngb_density < MAX_NUM_OF_NEIGHBOURS_STARS) + si->ids_ngbs_density[si->num_ngb_density] = pj->id; + + /* Update ngb counters */ + ++si->num_ngb_density; +#endif +} + +/** + * @brief Feedback interaction between two particles (non-symmetric). + * + * @param r2 Comoving square distance between the two particles. + * @param dx Comoving vector separating both particles (pi - pj). + * @param hi Comoving smoothing-length of particle i. + * @param hj Comoving smoothing-length of particle j. + * @param si First sparticle. + * @param pj Second particle (not updated). + * @param a Current scale factor. + * @param H Current Hubble parameter. + */ +__attribute__((always_inline)) INLINE static void +runner_iact_nonsym_stars_feedback(const float r2, const float *dx, + const float hi, const float hj, + struct spart *restrict si, + struct part *restrict pj, const float a, + const float H) { +#ifdef DEBUG_INTERACTIONS_STARS + /* Update ngb counters */ + if (si->num_ngb_force < MAX_NUM_OF_NEIGHBOURS_STARS) + si->ids_ngbs_force[si->num_ngb_force] = pj->id; + + /* Update ngb counters */ + ++si->num_ngb_force; +#endif +} + +#endif diff --git a/src/stars/GEAR/stars_io.h b/src/stars/GEAR/stars_io.h new file mode 100644 index 0000000000000000000000000000000000000000..ebd72aa50a4194bf8c6f747e55d265ace0550c35 --- /dev/null +++ b/src/stars/GEAR/stars_io.h @@ -0,0 +1,248 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ +#ifndef SWIFT_GEAR_STARS_IO_H +#define SWIFT_GEAR_STARS_IO_H + +#include "io_properties.h" +#include "stars_part.h" + +/** + * @brief Specifies which s-particle fields to read from a dataset + * + * @param sparts The s-particle array. + * @param list The list of i/o properties to read. + * @param num_fields The number of i/o fields to read. + */ +INLINE static void stars_read_particles(struct spart *sparts, + struct io_props *list, + int *num_fields) { + + /* Say how much we want to read */ + *num_fields = 5; + + /* List what we want to read */ + list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY, + UNIT_CONV_LENGTH, sparts, x); + list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY, + UNIT_CONV_SPEED, sparts, v); + list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS, + sparts, mass); + list[3] = io_make_input_field("ParticleIDs", LONGLONG, 1, COMPULSORY, + UNIT_CONV_NO_UNITS, sparts, id); + list[4] = io_make_input_field("SmoothingLength", FLOAT, 1, OPTIONAL, + UNIT_CONV_LENGTH, sparts, h); +} + +/** + * @brief Specifies which s-particle fields to write to a dataset + * + * @param sparts The s-particle array. + * @param list The list of i/o properties to write. + * @param num_fields The number of i/o fields to write. + * @param with_cosmology Is it a cosmological run? + */ +INLINE static void stars_write_particles(const struct spart *sparts, + struct io_props *list, int *num_fields, + const int with_cosmology) { + + /* Say how much we want to write */ + *num_fields = 9; + + /* List what we want to write */ + list[0] = + io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH, 1., + sparts, x, "Co-moving positions of the particles"); + + list[1] = io_make_output_field( + "Velocities", FLOAT, 3, UNIT_CONV_SPEED, 0.f, sparts, v, + "Peculiar velocities of the stars. 
This is (a * dx/dt) where x is the " + "co-moving positions of the particles"); + + list[2] = io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + sparts, mass, "Masses of the particles"); + + list[3] = + io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, + sparts, id, "Unique IDs of the particles"); + + list[4] = io_make_output_field( + "SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH, 1.f, sparts, h, + "Co-moving smoothing lengths (FWHM of the kernel) of the particles"); + + if (with_cosmology) { + list[5] = io_make_output_field( + "BirthScaleFactors", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + birth_scale_factor, "Scale-factors at which the stars were born"); + } else { + list[5] = io_make_output_field("BirthTimes", FLOAT, 1, UNIT_CONV_TIME, 0.f, + sparts, birth_time, + "Times at which the stars were born"); + } + + list[6] = io_make_output_field( + "BirthDensities", FLOAT, 1, UNIT_CONV_DENSITY, 0.f, sparts, birth.density, + "Physical densities at the time of birth of the gas particles that " + "turned into stars (note that " + "we store the physical density at the birth redshift, no conversion is " + "needed)"); + + list[7] = + io_make_output_field("BirthTemperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, + 0.f, sparts, birth.temperature, + "Temperatures at the time of birth of the gas " + "particles that turned into stars"); + + list[8] = io_make_output_field("BirthMasses", FLOAT, 1, UNIT_CONV_MASS, 0.f, + sparts, birth.mass, + "Masses of the star particles at birth time"); + +#ifdef DEBUG_INTERACTIONS_STARS + + list += *num_fields; + *num_fields += 4; + + list[0] = io_make_output_field("Num_ngb_density", INT, 1, UNIT_CONV_NO_UNITS, + sparts, num_ngb_density); + list[1] = io_make_output_field("Num_ngb_force", INT, 1, UNIT_CONV_NO_UNITS, + sparts, num_ngb_force); + list[2] = io_make_output_field("Ids_ngb_density", LONGLONG, + MAX_NUM_OF_NEIGHBOURS_STARS, + UNIT_CONV_NO_UNITS, sparts, ids_ngbs_density); + list[3] = io_make_output_field("Ids_ngb_force", LONGLONG, + MAX_NUM_OF_NEIGHBOURS_STARS, + UNIT_CONV_NO_UNITS, sparts, ids_ngbs_force); +#endif +} + +/** + * @brief Initialize the global properties of the stars scheme. + * + * By default, takes the values provided by the hydro. + * + * @param sp The #stars_props. + * @param phys_const The physical constants in the internal unit system. + * @param us The internal unit system. + * @param params The parsed parameters. + * @param p The already read-in properties of the hydro scheme. + * @param cosmo The cosmological model.
+ */ +INLINE static void stars_props_init(struct stars_props *sp, + const struct phys_const *phys_const, + const struct unit_system *us, + struct swift_params *params, + const struct hydro_props *p, + const struct cosmology *cosmo) { + + /* Kernel properties */ + sp->eta_neighbours = parser_get_opt_param_float( + params, "Stars:resolution_eta", p->eta_neighbours); + + /* Tolerance for the smoothing length Newton-Raphson scheme */ + sp->h_tolerance = + parser_get_opt_param_float(params, "Stars:h_tolerance", p->h_tolerance); + + /* Get derived properties */ + sp->target_neighbours = pow_dimension(sp->eta_neighbours) * kernel_norm; + const float delta_eta = sp->eta_neighbours * (1.f + sp->h_tolerance); + sp->delta_neighbours = + (pow_dimension(delta_eta) - pow_dimension(sp->eta_neighbours)) * + kernel_norm; + + /* Number of iterations to converge h */ + sp->max_smoothing_iterations = parser_get_opt_param_int( + params, "Stars:max_ghost_iterations", p->max_smoothing_iterations); + + /* Time integration properties */ + const float max_volume_change = + parser_get_opt_param_float(params, "Stars:max_volume_change", -1); + if (max_volume_change == -1) + sp->log_max_h_change = p->log_max_h_change; + else + sp->log_max_h_change = logf(powf(max_volume_change, hydro_dimension_inv)); +} + +/** + * @brief Print the global properties of the stars scheme. + * + * @param sp The #stars_props. + */ +INLINE static void stars_props_print(const struct stars_props *sp) { + + /* Now stars */ + message("Stars kernel: %s with eta=%f (%.2f neighbours).", kernel_name, + sp->eta_neighbours, sp->target_neighbours); + + message("Stars relative tolerance in h: %.5f (+/- %.4f neighbours).", + sp->h_tolerance, sp->delta_neighbours); + + message( + "Stars integration: Max change of volume: %.2f " + "(max|dlog(h)/dt|=%f).", + pow_dimension(expf(sp->log_max_h_change)), sp->log_max_h_change); + + message("Maximal iterations in ghost task set to %d", + sp->max_smoothing_iterations); +} + +#if defined(HAVE_HDF5) +INLINE static void stars_props_print_snapshot(hid_t h_grpstars, + const struct stars_props *sp) { + + io_write_attribute_s(h_grpstars, "Kernel function", kernel_name); + io_write_attribute_f(h_grpstars, "Kernel target N_ngb", + sp->target_neighbours); + io_write_attribute_f(h_grpstars, "Kernel delta N_ngb", sp->delta_neighbours); + io_write_attribute_f(h_grpstars, "Kernel eta", sp->eta_neighbours); + io_write_attribute_f(h_grpstars, "Smoothing length tolerance", + sp->h_tolerance); + io_write_attribute_f(h_grpstars, "Volume log(max(delta h))", + sp->log_max_h_change); + io_write_attribute_f(h_grpstars, "Volume max change time-step", + pow_dimension(expf(sp->log_max_h_change))); + io_write_attribute_i(h_grpstars, "Max ghost iterations", + sp->max_smoothing_iterations); +} +#endif + +/** + * @brief Write a #stars_props struct to the given FILE as a stream of bytes. + * + * @param p the struct + * @param stream the file stream + */ +INLINE static void stars_props_struct_dump(const struct stars_props *p, + FILE *stream) { + restart_write_blocks((void *)p, sizeof(struct stars_props), 1, stream, + "starsprops", "stars props"); +} + +/** + * @brief Restore a stars_props struct from the given FILE as a stream of + * bytes. 
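The `stars_props_init` function above derives its working quantities from a handful of inputs: the target neighbour number from the resolution `eta` and the kernel norm, a tolerance band from `h_tolerance`, and the maximal change of log(h) per step from a maximal volume change. The same arithmetic without the parameter-file plumbing; the kernel norm and the input values are placeholders, not the values of any particular SWIFT configuration:

.. code-block:: C

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      /* Placeholder inputs; in SWIFT they come from the parameter file or
       * default to the hydro values. */
      const float eta_neighbours = 1.2348f; /* resolution eta */
      const float h_tolerance = 1e-4f;      /* Newton-Raphson tolerance on h */
      const float max_volume_change = 1.4f; /* max volume change per step */
      const float kernel_norm = 25.49f;     /* placeholder; depends on the kernel */

      /* Target number of neighbours and the tolerance band around it
       * (pow_dimension(x) is x^3 in 3-D). */
      const float eta3 = eta_neighbours * eta_neighbours * eta_neighbours;
      const float target_neighbours = eta3 * kernel_norm;
      const float delta_eta = eta_neighbours * (1.f + h_tolerance);
      const float delta_neighbours =
          (delta_eta * delta_eta * delta_eta - eta3) * kernel_norm;

      /* Maximal allowed change of log(h) per time-step, derived from the
       * maximal volume change (volume ~ h^3 in 3-D). */
      const float log_max_h_change = logf(powf(max_volume_change, 1.f / 3.f));

      printf("target ngb = %.2f (+/- %.4f), max |dlog(h)| = %f\n",
             target_neighbours, delta_neighbours, log_max_h_change);
      return 0;
    }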
+ * + * @param p the struct + * @param stream the file stream + */ +INLINE static void stars_props_struct_restore(const struct stars_props *p, + FILE *stream) { + restart_read_blocks((void *)p, sizeof(struct stars_props), 1, stream, NULL, + "stars props"); +} + +#endif /* SWIFT_GEAR_STAR_IO_H */ diff --git a/src/stars/GEAR/stars_part.h b/src/stars/GEAR/stars_part.h new file mode 100644 index 0000000000000000000000000000000000000000..bf68a580ef009f5a814fa17123301b5585d6084c --- /dev/null +++ b/src/stars/GEAR/stars_part.h @@ -0,0 +1,154 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Coypright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ +#ifndef SWIFT_GEAR_STAR_PART_H +#define SWIFT_GEAR_STAR_PART_H + +/* Some standard headers. */ +#include <stdlib.h> + +/* Read additional subgrid models */ +#include "chemistry_struct.h" +#include "feedback_struct.h" +#include "tracers_struct.h" + +/** + * @brief Particle fields for the star particles. + * + * All quantities related to gravity are stored in the associate #gpart. + */ +struct spart { + + /*! Particle ID. */ + long long id; + + /*! Pointer to corresponding gravity part. */ + struct gpart* gpart; + + /*! Particle position. */ + double x[3]; + + /* Offset between current position and position at last tree rebuild. */ + float x_diff[3]; + + /* Offset between current position and position at last tree rebuild. */ + float x_diff_sort[3]; + + /*! Particle velocity. */ + float v[3]; + + /*! Star mass */ + float mass; + + /* Particle cutoff radius. */ + float h; + + /*! Union for the birth time and birth scale factor */ + union { + + /*! Birth time */ + float birth_time; + + /*! Birth scale factor */ + float birth_scale_factor; + }; + + /*! Particle time bin */ + timebin_t time_bin; + + struct { + + /* Number of neighbours. */ + float wcount; + + /* Number of neighbours spatial derivative. */ + float wcount_dh; + + } density; + + struct { + /*! birth density*/ + float density; + + /*! birth temperature*/ + float temperature; + + /*! birth mass */ + float mass; + } birth; + + /*! Feedback structure */ + struct feedback_spart_data feedback_data; + + /*! Tracer structure */ + struct tracers_xpart_data tracers_data; + + /*! Chemistry structure */ + struct chemistry_part_data chemistry_data; + +#ifdef SWIFT_DEBUG_CHECKS + + /* Time of the last drift */ + integertime_t ti_drift; + + /* Time of the last kick */ + integertime_t ti_kick; + +#endif + +#ifdef DEBUG_INTERACTIONS_STARS + /*! Number of interactions in the density SELF and PAIR */ + int num_ngb_density; + + /*! List of interacting particles in the density SELF and PAIR */ + long long ids_ngbs_density[MAX_NUM_OF_NEIGHBOURS_STARS]; + + /*! 
Number of interactions in the force SELF and PAIR */ + int num_ngb_force; + + /*! List of interacting particles in the force SELF and PAIR */ + long long ids_ngbs_force[MAX_NUM_OF_NEIGHBOURS_STARS]; +#endif + +} SWIFT_STRUCT_ALIGN; + +/** + * @brief Contains all the constants and parameters of the stars scheme + */ +struct stars_props { + + /*! Resolution parameter */ + float eta_neighbours; + + /*! Target weightd number of neighbours (for info only)*/ + float target_neighbours; + + /*! Smoothing length tolerance */ + float h_tolerance; + + /*! Tolerance on neighbour number (for info only)*/ + float delta_neighbours; + + /*! Maximal number of iterations to converge h */ + int max_smoothing_iterations; + + /*! Maximal change of h over one time-step */ + float log_max_h_change; +}; + +#endif /* SWIFT_GEAR_STAR_PART_H */ diff --git a/src/task.c b/src/task.c index 03b860b0087f30b207920da73fdc8108fdd9b289..312b0574e104acf64eb3ad61e1a715fe47d0d457 100644 --- a/src/task.c +++ b/src/task.c @@ -994,7 +994,8 @@ void task_dump_all(struct engine *e, int step) { * Note that when running under MPI all the tasks can be summed into this single * file. In the fuller, human readable file, the statistics included are the * number of task of each type/subtype followed by the minimum, maximum, mean - * and total time, in millisec and then the fixed costs value. + * and total time taken and the same numbers for the start of the task, + * in millisec and then the fixed costs value. * * If header is set, only the fixed costs value is written into the output * file in a format that is suitable for inclusion in SWIFT (as @@ -1011,16 +1012,22 @@ void task_dump_stats(const char *dumpfile, struct engine *e, int header, /* Need arrays for sum, min and max across all types and subtypes. */ double sum[task_type_count][task_subtype_count]; + double tsum[task_type_count][task_subtype_count]; double min[task_type_count][task_subtype_count]; + double tmin[task_type_count][task_subtype_count]; double max[task_type_count][task_subtype_count]; + double tmax[task_type_count][task_subtype_count]; int count[task_type_count][task_subtype_count]; for (int j = 0; j < task_type_count; j++) { for (int k = 0; k < task_subtype_count; k++) { sum[j][k] = 0.0; + tsum[j][k] = 0.0; count[j][k] = 0; min[j][k] = DBL_MAX; + tmin[j][k] = DBL_MAX; max[j][k] = 0.0; + tmax[j][k] = 0.0; } } @@ -1028,21 +1035,28 @@ void task_dump_stats(const char *dumpfile, struct engine *e, int header, for (int l = 0; l < e->sched.nr_tasks; l++) { int type = e->sched.tasks[l].type; - /* Skip implicit tasks, tasks that didn't run and MPI send/recv as these - * are not interesting (or meaningfully measured). */ - if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0 && - type != task_type_send && type != task_type_recv) { + /* Skip implicit tasks, tasks that didn't run. 
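The `task_dump_stats` changes above keep, per task type/subtype, min/max/sum statistics not only of the task durations but also of the start times (`tic`), so the dump can report when tasks of each kind first and last started within a step. A reduced sketch of that accumulation over a flat list of (tic, toc) pairs; the task list is fabricated:

.. code-block:: C

    #include <float.h>
    #include <stdio.h>

    struct fake_task {
      double tic, toc; /* start and end time of the task (arbitrary clock) */
    };

    int main(void) {
      const struct fake_task tasks[] = {
          {10.0, 14.0}, {11.0, 12.5}, {13.0, 20.0}, {15.0, 15.5}};
      const int n = (int)(sizeof(tasks) / sizeof(tasks[0]));

      /* Duration statistics and start-time statistics. */
      double sum = 0.0, min = DBL_MAX, max = 0.0;
      double tsum = 0.0, tmin = DBL_MAX, tmax = 0.0;
      int count = 0;

      for (int i = 0; i < n; ++i) {
        const double dt = tasks[i].toc - tasks[i].tic;
        const double tic = tasks[i].tic;

        sum += dt;
        tsum += tic;
        count++;

        if (dt < min) min = dt;
        if (dt > max) max = dt;
        if (tic < tmin) tmin = tic;
        if (tic > tmax) tmax = tic;
      }

      /* Mean duration and mean start time, as printed per task type in the diff. */
      printf("ntasks %d min %g max %g sum %g mean %g mintic %g maxtic %g meantic %g\n",
             count, min, max, sum, sum / count, tmin, tmax, tsum / count);
      return 0;
    }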
*/ + if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0) { int subtype = e->sched.tasks[l].subtype; double dt = e->sched.tasks[l].toc - e->sched.tasks[l].tic; sum[type][subtype] += dt; + + double tic = (double)e->sched.tasks[l].tic; + tsum[type][subtype] += tic; count[type][subtype] += 1; if (dt < min[type][subtype]) { min[type][subtype] = dt; } + if (tic < tmin[type][subtype]) { + tmin[type][subtype] = tic; + } if (dt > max[type][subtype]) { max[type][subtype] = dt; } + if (tic > tmax[type][subtype]) { + tmax[type][subtype] = tic; + } total[0] += dt; } } @@ -1056,6 +1070,10 @@ void task_dump_stats(const char *dumpfile, struct engine *e, int header, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task sums"); + res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : tsum), tsum, size, + MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); + if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task tsums"); + res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : count), count, size, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task counts"); @@ -1064,10 +1082,18 @@ void task_dump_stats(const char *dumpfile, struct engine *e, int header, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task minima"); + res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : tmin), tmin, size, + MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); + if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task minima"); + res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : max), max, size, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task maxima"); + res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : tmax), tmax, size, + MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); + if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task maxima"); + res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : total), total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task total time"); @@ -1081,29 +1107,36 @@ void task_dump_stats(const char *dumpfile, struct engine *e, int header, fprintf(dfile, "/* use as src/partition_fixed_costs.h */\n"); fprintf(dfile, "#define HAVE_FIXED_COSTS 1\n"); } else { - fprintf(dfile, "# task ntasks min max sum mean percent fixed_cost\n"); + fprintf(dfile, + "# task ntasks min max sum mean percent mintic maxtic" + " meantic fixed_cost\n"); } for (int j = 0; j < task_type_count; j++) { const char *taskID = taskID_names[j]; for (int k = 0; k < task_subtype_count; k++) { if (sum[j][k] > 0.0) { - double mean = sum[j][k] / (double)count[j][k]; - double perc = 100.0 * sum[j][k] / total[0]; /* Fixed cost is in .1ns as we want to compare between runs in * some absolute units. 
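Under MPI the per-rank statistic arrays above are merged onto rank 0 with `MPI_Reduce`, passing `MPI_IN_PLACE` as the send buffer on the root so the result lands in the array that already holds rank 0's contribution. A minimal example of that idiom, reducing one minimum and one maximum across ranks (build with an MPI wrapper such as `mpicc`):

.. code-block:: C

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char *argv[]) {
      MPI_Init(&argc, &argv);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      /* Each rank's local contribution (here just a function of the rank). */
      double local_min = 10.0 + rank;
      double local_max = 10.0 + rank;

      /* On the root, send and receive buffers are the same variable, so pass
       * MPI_IN_PLACE as the send buffer; other ranks pass their data normally. */
      MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &local_min), &local_min, 1,
                 MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
      MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &local_max), &local_max, 1,
                 MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

      if (rank == 0)
        printf("global min = %g, global max = %g\n", local_min, local_max);

      MPI_Finalize();
      return 0;
    }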
*/ + double mean = sum[j][k] / (double)count[j][k]; int fixed_cost = (int)(clocks_from_ticks(mean) * 10000.f); if (header) { fprintf(dfile, "repartition_costs[%d][%d] = %10d; /* %s/%s */\n", j, k, fixed_cost, taskID, subtaskID_names[k]); } else { + double perc = 100.0 * sum[j][k] / total[0]; + double mintic = tmin[j][k] - e->tic_step; + double maxtic = tmax[j][k] - e->tic_step; + double meantic = tsum[j][k] / (double)count[j][k] - e->tic_step; fprintf(dfile, - "%15s/%-10s %10d %14.4f %14.4f %14.4f %14.4f %14.4f %10d\n", + "%15s/%-10s %10d %14.4f %14.4f %14.4f %14.4f %14.4f" + " %14.4f %14.4f %14.4f %10d\n", taskID, subtaskID_names[k], count[j][k], clocks_from_ticks(min[j][k]), clocks_from_ticks(max[j][k]), clocks_from_ticks(sum[j][k]), clocks_from_ticks(mean), perc, - fixed_cost); + clocks_from_ticks(mintic), clocks_from_ticks(maxtic), + clocks_from_ticks(meantic), fixed_cost); } } } diff --git a/src/tracers/EAGLE/tracers_io.h b/src/tracers/EAGLE/tracers_io.h index 038cc1c8d3f92c2105d5b5c3ead958f60486ce9f..e058a02749a86f12838268af1aa719c9bc0cdeeb 100644 --- a/src/tracers/EAGLE/tracers_io.h +++ b/src/tracers/EAGLE/tracers_io.h @@ -54,20 +54,23 @@ __attribute__((always_inline)) INLINE static int tracers_write_particles( const struct part* parts, const struct xpart* xparts, struct io_props* list, const int with_cosmology) { - list[0] = io_make_output_field("Maximal Temperature", FLOAT, 1, - UNIT_CONV_TEMPERATURE, xparts, - tracers_data.maximum_temperature); + list[0] = io_make_output_field( + "MaximalTemperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, xparts, + tracers_data.maximum_temperature, + "Maximal temperatures ever reached by the particles"); if (with_cosmology) { list[1] = io_make_output_field( - "Maximal Temperature scale-factor", FLOAT, 1, UNIT_CONV_NO_UNITS, - xparts, tracers_data.maximum_temperature_scale_factor); + "MaximalTemperatureScaleFactors", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, + xparts, tracers_data.maximum_temperature_scale_factor, + "Scale-factors at which the maximal temperature was reached"); } else { - list[1] = io_make_output_field("Maximal Temperature time", FLOAT, 1, - UNIT_CONV_NO_UNITS, xparts, - tracers_data.maximum_temperature_time); + list[1] = io_make_output_field( + "MaximalTemperatureTimes", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, xparts, + tracers_data.maximum_temperature_time, + "Times at which the maximal temperature was reached"); } return 2; @@ -77,22 +80,27 @@ __attribute__((always_inline)) INLINE static int tracers_write_sparticles( const struct spart* sparts, struct io_props* list, const int with_cosmology) { - list[0] = io_make_output_field("Maximal Temperature", FLOAT, 1, - UNIT_CONV_TEMPERATURE, sparts, - tracers_data.maximum_temperature); + list[0] = io_make_output_field( + "MaximalTemperatures", FLOAT, 1, UNIT_CONV_TEMPERATURE, 0.f, sparts, + tracers_data.maximum_temperature, + "Maximal temperatures ever reached by the particles before they got " + "converted to stars"); if (with_cosmology) { list[1] = io_make_output_field( - "Maximal Temperature scale-factor", FLOAT, 1, UNIT_CONV_NO_UNITS, - sparts, tracers_data.maximum_temperature_scale_factor); + "MaximalTemperatureScaleFactors", FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, + sparts, tracers_data.maximum_temperature_scale_factor, + "Scale-factors at which the maximal temperature was reached"); } else { - list[1] = io_make_output_field("Maximal Temperature time", FLOAT, 1, - UNIT_CONV_NO_UNITS, sparts, - tracers_data.maximum_temperature_time); + list[1] = io_make_output_field( + "MaximalTemperatureTimes", 
FLOAT, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + tracers_data.maximum_temperature_time, + "Times at which the maximal temperature was reached"); } return 2; } + #endif /* SWIFT_TRACERS_EAGLE_IO_H */ diff --git a/src/units.c b/src/units.c index 807640a1d2b5844e721fa5b2815acd84e968efba..58422bfffe6605daed4a4eae4dd0c2755006b2e9 100644 --- a/src/units.c +++ b/src/units.c @@ -427,31 +427,19 @@ float units_h_factor(const struct unit_system* us, return units_general_h_factor(us, baseUnitsExp); } -/** - * @brief Returns the scaling factor exponentiation for a given unit - * @param us The system of units in use - * @param unit The unit to convert - */ -float units_a_factor(const struct unit_system* us, - enum unit_conversion_factor unit) { - float baseUnitsExp[5] = {0.f}; - - units_get_base_unit_exponents_array(baseUnitsExp, unit); - - return units_general_a_factor(us, baseUnitsExp); -} - /** * @brief Returns a string containing the exponents of the base units making up * the conversion factors */ void units_cgs_conversion_string(char* buffer, const struct unit_system* us, - enum unit_conversion_factor unit) { + enum unit_conversion_factor unit, + float scale_factor_exponent) { float baseUnitsExp[5] = {0.f}; units_get_base_unit_exponents_array(baseUnitsExp, unit); - units_general_cgs_conversion_string(buffer, us, baseUnitsExp); + units_general_cgs_conversion_string(buffer, us, baseUnitsExp, + scale_factor_exponent); } /** @@ -490,22 +478,6 @@ float units_general_h_factor(const struct unit_system* us, return factor_exp; } -/** - * @brief Returns the scaling factor exponentiation for a given unit (expressed - * in terms of the 5 fundamental units) - * @param us The unit system used - * @param baseUnitsExponents The exponent of each base units required to form - * the desired quantity. See conversionFactor() for a working example - */ -float units_general_a_factor(const struct unit_system* us, - const float baseUnitsExponents[5]) { - float factor_exp = 0.f; - - factor_exp += baseUnitsExponents[UNIT_LENGTH]; - - return factor_exp; -} - /** * @brief Returns a string containing the exponents of the base units making up * the conversion factors (expressed in terms of the 5 fundamental units) @@ -521,9 +493,10 @@ float units_general_a_factor(const struct unit_system* us, */ void units_general_cgs_conversion_string(char* buffer, const struct unit_system* us, - const float baseUnitsExponents[5]) { - char temp[32]; - const double a_exp = units_general_a_factor(us, baseUnitsExponents); + const float baseUnitsExponents[5], + float scale_factor_exponent) { + char temp[32] = {0}; + const double a_exp = scale_factor_exponent; const double h_exp = 0.; /* There are no h-factors in SWIFT outputs. 
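The `units_cgs_conversion_string` change above stops inferring the scale-factor exponent from the unit's length dimension and instead takes it as an explicit argument, matching the per-field a-exponents now passed to `io_make_output_field`. A rough sketch of how such an expression string can be assembled from base-unit exponents plus an explicit a-exponent; the unit symbols and formatting are illustrative, not SWIFT's exact output:

.. code-block:: C

    #include <stdio.h>

    /* Order of the base units in the exponent array (mass, length, time,
     * current, temperature), following the usual 5-unit convention. */
    static const char *const base_symbol[5] = {"U_M", "U_L", "U_t", "U_I", "U_T"};

    static void conversion_string(char *buffer, size_t len,
                                  const float base_exponents[5],
                                  float scale_factor_exponent) {
      size_t used = 0;
      buffer[0] = '\0';

      /* Scale-factor part first (there are no h-factors in the outputs). */
      if (scale_factor_exponent != 0.f)
        used += (size_t)snprintf(buffer + used, len - used, "a^%g ",
                                 (double)scale_factor_exponent);

      /* Then one factor per base unit with a non-zero exponent. */
      for (int i = 0; i < 5; ++i)
        if (base_exponents[i] != 0.f)
          used += (size_t)snprintf(buffer + used, len - used, "%s^%g ",
                                   base_symbol[i], (double)base_exponents[i]);

      if (used == 0) snprintf(buffer, len, "[ - ] (dimensionless)");
    }

    int main(void) {
      /* A co-moving density: mass^1 length^-3, carrying an a^-3 factor. */
      const float density_exponents[5] = {1.f, -3.f, 0.f, 0.f, 0.f};
      char buf[128];
      conversion_string(buf, sizeof(buf), density_exponents, -3.f);
      printf("co-moving density units: %s\n", buf);
      return 0;
    }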
*/ /* Check whether we are unitless or not */ diff --git a/src/units.h b/src/units.h index b1eeeb33d63e3aee072d67b09768b7d3aa508f06..4769fa80edbe0c10fd24e064528251e551282153 100644 --- a/src/units.h +++ b/src/units.h @@ -125,10 +125,6 @@ float units_general_h_factor(const struct unit_system* us, const float baseUnitsExponents[5]); float units_h_factor(const struct unit_system* us, enum unit_conversion_factor unit); -float units_general_a_factor(const struct unit_system* us, - const float baseUnitsExponents[5]); -float units_a_factor(const struct unit_system* us, - enum unit_conversion_factor unit); /* Conversion to CGS */ double units_general_cgs_conversion_factor(const struct unit_system* us, @@ -137,9 +133,11 @@ double units_cgs_conversion_factor(const struct unit_system* us, enum unit_conversion_factor unit); void units_general_cgs_conversion_string(char* buffer, const struct unit_system* us, - const float baseUnitsExponents[5]); + const float baseUnitsExponents[5], + float scale_factor_exponent); void units_cgs_conversion_string(char* buffer, const struct unit_system* us, - enum unit_conversion_factor unit); + enum unit_conversion_factor unit, + float scale_factor_exponent); /* Conversion between systems */ double units_general_conversion_factor(const struct unit_system* from, diff --git a/src/velociraptor_io.h b/src/velociraptor_io.h index d535e54815139e243b9a3bc40ec8dd4de2af1ac1..446fc3131bfc638a0b37307110a2e802c3dd01ea 100644 --- a/src/velociraptor_io.h +++ b/src/velociraptor_io.h @@ -61,8 +61,9 @@ __attribute__((always_inline)) INLINE static int velociraptor_write_parts( struct io_props* list) { list[0] = io_make_output_field_convert_part( - "GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS, parts, xparts, - velociraptor_convert_part_groupID); + "VELOCIraptorGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, parts, + xparts, velociraptor_convert_part_groupID, + "Group IDs of the particles in the VELOCIraptor catalogue"); return 1; } @@ -70,8 +71,9 @@ __attribute__((always_inline)) INLINE static int velociraptor_write_parts( __attribute__((always_inline)) INLINE static int velociraptor_write_gparts( const struct velociraptor_gpart_data* group_data, struct io_props* list) { - list[0] = io_make_output_field("GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS, - group_data, groupID); + list[0] = io_make_output_field( + "VELOCIraptorGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, group_data, + groupID, "Group IDs of the particles in the VELOCIraptor catalogue"); return 1; } @@ -80,8 +82,9 @@ __attribute__((always_inline)) INLINE static int velociraptor_write_sparts( const struct spart* sparts, struct io_props* list) { list[0] = io_make_output_field_convert_spart( - "GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS, sparts, - velociraptor_convert_spart_groupID); + "VELOCIraptorGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, sparts, + velociraptor_convert_spart_groupID, + "Group IDs of the particles in the VELOCIraptor catalogue"); return 1; } @@ -90,8 +93,9 @@ __attribute__((always_inline)) INLINE static int velociraptor_write_bparts( const struct bpart* bparts, struct io_props* list) { list[0] = io_make_output_field_convert_bpart( - "GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS, bparts, - velociraptor_convert_bpart_groupID); + "VELOCIraptorGroupIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS, 0.f, bparts, + velociraptor_convert_bpart_groupID, + "Group IDs of the particles in the VELOCIraptor catalogue"); return 1; } diff --git a/tests/testSelectOutput.c b/tests/testSelectOutput.c index 
53d65adea152269c54cb5befcdb2970780cf063d..0be250e48cb2748093c8cd8c1381303c3060c9ba 100644 --- a/tests/testSelectOutput.c +++ b/tests/testSelectOutput.c @@ -138,6 +138,7 @@ int main(int argc, char *argv[]) { /* pseudo initialization of the engine */ message("Initialization of the engine."); struct engine e; + sprintf(e.run_name, "Select Output Test"); select_output_engine_init(&e, &s, &cosmo, &param_file, &cooling, &hydro_properties); diff --git a/tests/testSelectOutput.py b/tests/testSelectOutput.py index aec7f4671fb2768acde768fd9929168559ebb3cb..97e1c865d133b161c5661c7ac63f728065461bd6 100644 --- a/tests/testSelectOutput.py +++ b/tests/testSelectOutput.py @@ -39,8 +39,8 @@ if "Coordinates" not in part0: if "Masses" not in part0: raise Exception("`Masses` not present in HDF5 but should be written") -if "Density" not in part0: - raise Exception("`Density` not present in HDF5 but should be written") +if "Densities" not in part0: + raise Exception("`Densities` not present in HDF5 but should be written") # check error detection
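A small aside on the test change above: `run_name` is a fixed-size character array in the engine, so when the name is not a short compile-time constant a bounded write is the safer habit. A tiny illustration, with an arbitrary buffer size rather than the engine's actual field size:

.. code-block:: C

    #include <stdio.h>

    int main(void) {
      /* Stand-in for a fixed-size name field; the size is arbitrary here. */
      char run_name[64];

      /* snprintf never writes past the buffer and always NUL-terminates. */
      snprintf(run_name, sizeof(run_name), "%s", "Select Output Test");

      printf("run name: %s\n", run_name);
      return 0;
    }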