diff --git a/doc/RTD/source/Neutrinos/index.rst b/doc/RTD/source/Neutrinos/index.rst
index ddcd49844b04dc7211d5f4870be8322691abe2ab..47da4e0db10473216a25356639c679b4598320df 100644
--- a/doc/RTD/source/Neutrinos/index.rst
+++ b/doc/RTD/source/Neutrinos/index.rst
@@ -32,7 +32,8 @@ neutrino mass specified in the cosmology and generate new velocities based
 on the homogeneous (unperturbed) Fermi-Dirac distribution. In this case,
 placeholder neutrino particles should be provided in the initial
 conditions with arbitrary masses and velocities, distributed
-uniformly in the box.
+uniformly in the box. Placeholders can be spawned with the Python
+script ``tools/spawn_neutrinos.py``.
 
 Relativistic Drift
 ------------------
@@ -79,4 +80,51 @@ to species :math:`i = \ell\; \% \;N_\nu\in[0,N_\nu-1]`.
 The sampled Fermi-Dirac speeds and neutrino masses are written into the
 snapshot files as ``SampledSpeeds`` and ``MicroscopicMasses``.
 
+Mesh Neutrinos
+--------------
+
+There are two additional implementations of neutrino physics. The first
+is an option to only apply the delta-f weighting scheme on the mesh. In
+this case, particle neutrinos participate like dark matter in the remaining
+gravity calculations. This mode can be activated with
+``Neutrino:use_delta_f_mesh_only``.
+
+The second option is an implementation of the linear response method,
+once again on the mesh only, which requires a separate data file with
+transfer functions. Example settings in the parameter file for this mode
+are:
+
+.. code:: YAML
+
+  Neutrino:
+    use_linear_response: 1                     # Option to use the linear response method
+    transfer_functions_filename: perturb.hdf5  # For linear response neutrinos, path to an hdf5 file with transfer functions, redshifts, and wavenumbers
+    dataset_redshifts: Redshifts               # For linear response neutrinos, name of the dataset with the redshifts (a vector of length N_z)
+    dataset_wavenumbers: Wavenumbers           # For linear response neutrinos, name of the dataset with the wavenumbers (a vector of length N_k)
+    dataset_delta_cdm: Functions/d_cdm         # For linear response neutrinos, name of the dataset with the cdm density transfer function (N_z x N_k)
+    dataset_delta_baryon: Functions/d_b        # For linear response neutrinos, name of the dataset with the baryon density transfer function (N_z x N_k)
+    dataset_delta_nu: Functions/d_ncdm[0]      # For linear response neutrinos, name of the dataset with the neutrino density transfer function (N_z x N_k)
+    fixed_bg_density: 1                        # For linear response neutrinos, whether to use a fixed present-day background density
+
+In this example, the code reads an HDF5 file "perturb.hdf5" with transfer
+functions. The file must contain a vector with redshifts of length :math:`N_z`,
+a vector with wavenumbers of length :math:`N_k`, and three arrays with dimensions
+:math:`N_z \times N_k` of density transfer functions for cdm, baryons, and
+neutrinos respectively. It is recommended to store the units of the wavenumbers
+as an attribute at "Units/Unit length in cgs (U_L)". The ``fixed_bg_density``
+flag determines whether the linear response scales as :math:`\Omega_\nu(a)`
+or the present-day value :math:`\Omega_{\nu,0}`, either of which may be
+appropriate depending on the particle initial conditions. An HDF5 file
+can be generated using classy with the script ``tools/create_perturb_file.py``.
+
+The linear response mode currently only supports degenerate mass models
+with a single neutrino transfer function.
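+
+For reference, a transfer function file with the layout described above
+could be written with ``h5py`` along the following lines (a schematic
+sketch only: the arrays ``z``, ``k``, ``d_cdm``, ``d_b`` and ``d_nu`` stand
+in for real data, e.g. as computed with classy, and the Mpc length unit is
+just an example):
+
+.. code:: python
+
+  import h5py
+  import numpy as np
+
+  # Placeholder data: N_z redshifts (descending), N_k wavenumbers (ascending)
+  # and N_z x N_k transfer functions.
+  z = np.linspace(99.0, 0.0, 128)
+  k = np.logspace(-5.0, 2.0, 256)
+  d_cdm = np.zeros((z.size, k.size))
+  d_b = np.zeros((z.size, k.size))
+  d_nu = np.zeros((z.size, k.size))
+
+  with h5py.File("perturb.hdf5", "w") as f:
+      f.create_dataset("Redshifts", data=z)
+      f.create_dataset("Wavenumbers", data=k)
+      funcs = f.create_group("Functions")
+      funcs.create_dataset("d_cdm", data=d_cdm)
+      funcs.create_dataset("d_b", data=d_b)
+      funcs.create_dataset("d_ncdm[0]", data=d_nu)
+      # Recommended: record the length unit of the wavenumbers (here 1 Mpc)
+      units = f.create_group("Units")
+      units.attrs["Unit length in cgs (U_L)"] = 3.08567758e24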
+ +Background Neutrinos Only +------------------------- + +It is also possible to run without neutrino perturbations, even when +specifying neutrinos in the background cosmology. This mode can be +activated with ``Neutrino:use_model_none``. + .. [#f1] Currently, it is not guaranteed that a particle ID is unique. diff --git a/examples/main.c b/examples/main.c index 8a8620e3af06fe713e6254b6d326db2ae278ce6a..1539c7dfe685578fc9499275abe0ee094eeb4575 100644 --- a/examples/main.c +++ b/examples/main.c @@ -98,6 +98,7 @@ int main(int argc, char *argv[]) { struct stars_props stars_properties; struct sink_props sink_properties; struct neutrino_props neutrino_properties; + struct neutrino_response neutrino_response; struct feedback_props feedback_properties; struct rt_props rt_properties; struct entropy_floor_properties entropy_floor; @@ -1286,11 +1287,10 @@ int main(int argc, char *argv[]) { /* Do we have neutrino DM particles? */ const int with_neutrinos = N_total[swift_type_neutrino] > 0; - /* Initialise the neutrino properties if we have neutrino particles */ + /* Initialise the neutrino properties */ bzero(&neutrino_properties, sizeof(struct neutrino_props)); - if (with_neutrinos) - neutrino_props_init(&neutrino_properties, &prog_const, &us, params, - &cosmo); + neutrino_props_init(&neutrino_properties, &prog_const, &us, params, &cosmo, + with_neutrinos); /* Initialize the space with these data. */ if (myrank == 0) clocks_gettime(&tic); @@ -1319,6 +1319,13 @@ int main(int argc, char *argv[]) { with_external_gravity, with_baryon_particles, with_DM_particles, with_neutrinos, with_DM_background_particles, periodic, s.dim); + /* Initialize the neutrino response if used */ + bzero(&neutrino_response, sizeof(struct neutrino_response)); + if (neutrino_properties.use_linear_response) + neutrino_response_init(&neutrino_response, params, &us, s.dim, &cosmo, + &neutrino_properties, &gravity_properties, myrank, + verbose); + /* Initialise the external potential properties */ bzero(&potential, sizeof(struct external_potential)); if (with_external_gravity) @@ -1441,9 +1448,9 @@ int main(int argc, char *argv[]) { &reparttype, &us, &prog_const, &cosmo, &hydro_properties, &entropy_floor, &gravity_properties, &stars_properties, &black_holes_properties, &sink_properties, &neutrino_properties, - &feedback_properties, &rt_properties, &mesh, &potential, - &cooling_func, &starform, &chemistry, &fof_properties, - &los_properties, &ics_metadata); + &neutrino_response, &feedback_properties, &rt_properties, &mesh, + &potential, &cooling_func, &starform, &chemistry, + &fof_properties, &los_properties, &ics_metadata); engine_config(/*restart=*/0, /*fof=*/0, &e, params, nr_nodes, myrank, nr_threads, nr_pool_threads, with_aff, talking, restart_file); @@ -1503,7 +1510,10 @@ int main(int argc, char *argv[]) { /* Check that the matter content matches the cosmology given in the * parameter file. */ if (with_cosmology && with_self_gravity && !dry_run) { - const int check_neutrinos = !neutrino_properties.use_delta_f; + /* Only check neutrino particle masses if we have neutrino particles + * and if the masses are stored unweighted. 
*/ + const int check_neutrinos = + s.with_neutrinos && !neutrino_properties.use_delta_f; space_check_cosmology(&s, &cosmo, with_hydro, myrank, check_neutrinos); neutrino_check_cosmology(&s, &cosmo, &prog_const, params, &neutrino_properties, myrank, verbose); @@ -1793,6 +1803,8 @@ int main(int argc, char *argv[]) { /* Clean everything */ if (with_verbose_timers) timers_close_file(); if (with_cosmology) cosmology_clean(e.cosmology); + if (e.neutrino_properties->use_linear_response) + neutrino_response_clean(e.neutrino_response); if (with_self_gravity) pm_mesh_clean(e.mesh); if (with_stars) stars_props_clean(e.stars_properties); if (with_cooling || with_temperature) cooling_clean(e.cooling_func); diff --git a/examples/main_fof.c b/examples/main_fof.c index 7bc544d58d1413cfbb80421b533b3d59af657dea..039b464e2b4c6f98895b7274af41def79aca6441 100644 --- a/examples/main_fof.c +++ b/examples/main_fof.c @@ -650,8 +650,8 @@ int main(int argc, char *argv[]) { /*hydro_properties=*/NULL, /*entropy_floor=*/NULL, &gravity_properties, /*stars_properties=*/NULL, /*black_holes_properties=*/NULL, /*sink_properties=*/NULL, &neutrino_properties, - /*feedback_properties=*/NULL, /*rt_properties=*/NULL, &mesh, - /*potential=*/NULL, + /*neutrino_response=*/NULL, /*feedback_properties=*/NULL, + /*rt_properties=*/NULL, &mesh, /*potential=*/NULL, /*cooling_func=*/NULL, /*starform=*/NULL, /*chemistry=*/NULL, &fof_properties, /*los_properties=*/NULL, &ics_metadata); engine_config(/*restart=*/0, /*fof=*/1, &e, params, nr_nodes, myrank, diff --git a/examples/parameter_example.yml b/examples/parameter_example.yml index be79c2fd669562aa9d89a2fe1c2c171d4e081026..b4b7f9cafbb557e34b4ed475653d94a683e5437f 100644 --- a/examples/parameter_example.yml +++ b/examples/parameter_example.yml @@ -646,11 +646,21 @@ EAGLEAGN: merger_max_distance_ratio: 3.0 # Maximal distance over which two BHs can merge, in units of the softening length. minimum_timestep_Myr: 1.0 # Minimum time-step size for BHs in Mega-years. The time-step size is computed based on the accretion rate and this serves as a minimum. Defaults to FLT_MAX. 
-# Parameters related to the neutrino particles ---------------------------------- +# Parameters related to the neutrinos -------------------------------------------- Neutrino: use_delta_f: 1 # Use the delta-f method for shot noise reduction + use_delta_f_mesh_only: 0 # Use the delta-f method, but only in the mesh gravity calculation and outputs generate_ics: 0 # Automatically generate neutrino velocities at the start of the run - neutrino_seed: 1234 # Seed used in the delta-f weighting step for Fermi-Dirac momentum generation + neutrino_seed: 0 # Seed used in the delta-f weighting step for Fermi-Dirac momentum generation + use_linear_response: 0 # Option to use the linear response method + transfer_functions_filename: perturb.hdf5 # For linear response neutrinos, path to an hdf5 file with transfer functions, redshifts, and wavenumbers + dataset_redshifts: Redshifts # For linear response neutrinos, name of the dataset with the redshifts (a vector of length N_z) + dataset_wavenumbers: Wavenumbers # For linear response neutrinos, name of the dataset with the wavenumbers (a vector of length N_k) + dataset_delta_cdm: Functions/d_cdm # For linear response neutrinos, name of the dataset with the cdm density transfer function (N_z x N_k) + dataset_delta_baryon: Functions/d_b # For linear response neutrinos, name of the dataset with the baryon density transfer function (N_z x N_k) + dataset_delta_nu: Functions/d_ncdm[0] # For linear response neutrinos, name of the dataset with the neutrino density transfer function (N_z x N_k) + fixed_bg_density: 1 # For linear response neutrinos, whether to use a fixed present-day background density + use_model_none: 0 # Option to use no neutrino model # Parameters related to the sink particles --------------------------------------- diff --git a/src/Makefile.am b/src/Makefile.am index 35d1064fa3a8a956d690d0498d61a354f3044d1e..ce041d3473b15c25ccb0294b07b78edb3d6f080c 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -146,7 +146,7 @@ AM_SOURCES += fof.c fof_catalogue_io.c AM_SOURCES += hashmap.c pressure_floor.c AM_SOURCES += mesh_gravity.c mesh_gravity_mpi.c mesh_gravity_patch.c mesh_gravity_sort.c AM_SOURCES += runner_neutrino.c -AM_SOURCES += neutrino/Default/fermi_dirac.c neutrino/Default/neutrino.c +AM_SOURCES += neutrino/Default/fermi_dirac.c neutrino/Default/neutrino.c neutrino/Default/neutrino_response.c AM_SOURCES += rt_parameters.c AM_SOURCES += hdf5_object_to_blob.c ic_info.c AM_SOURCES += $(QLA_COOLING_SOURCES) $(QLA_EAGLE_COOLING_SOURCES) @@ -395,6 +395,7 @@ nobase_noinst_HEADERS += sink.h sink_io.h sink_properties.h nobase_noinst_HEADERS += neutrino.h neutrino_properties.h neutrino_io.h nobase_noinst_HEADERS += neutrino/Default/neutrino.h neutrino/Default/relativity.h neutrino/Default/fermi_dirac.h nobase_noinst_HEADERS += neutrino/Default/neutrino_properties.h neutrino/Default/neutrino_io.h +nobase_noinst_HEADERS += neutrino/Default/neutrino_response.h # Sources and special flags for the gravity library libgrav_la_SOURCES = runner_doiact_grav.c diff --git a/src/engine.c b/src/engine.c index 4ebeef328534f41e7feda764e2b690d330492abf..9c6625ccf54826da7d8d7ce06e4a0998cad5551f 100644 --- a/src/engine.c +++ b/src/engine.c @@ -2819,8 +2819,9 @@ void engine_init( const struct entropy_floor_properties *entropy_floor, struct gravity_props *gravity, struct stars_props *stars, const struct black_holes_props *black_holes, const struct sink_props *sinks, - const struct neutrino_props *neutrinos, struct feedback_props *feedback, - struct rt_props *rt, struct 
pm_mesh *mesh, + const struct neutrino_props *neutrinos, + struct neutrino_response *neutrino_response, + struct feedback_props *feedback, struct rt_props *rt, struct pm_mesh *mesh, const struct external_potential *potential, struct cooling_function_data *cooling_func, const struct star_formation *starform, @@ -2931,6 +2932,7 @@ void engine_init( e->black_holes_properties = black_holes; e->sink_properties = sinks; e->neutrino_properties = neutrinos; + e->neutrino_response = neutrino_response; e->mesh = mesh; e->external_potential = potential; e->cooling_func = cooling_func; @@ -3433,6 +3435,7 @@ void engine_struct_dump(struct engine *e, FILE *stream) { black_holes_struct_dump(e->black_holes_properties, stream); sink_struct_dump(e->sink_properties, stream); neutrino_struct_dump(e->neutrino_properties, stream); + neutrino_response_struct_dump(e->neutrino_response, stream); chemistry_struct_dump(e->chemistry, stream); #ifdef WITH_FOF fof_struct_dump(e->fof_properties, stream); @@ -3568,6 +3571,11 @@ void engine_struct_restore(struct engine *e, FILE *stream) { neutrino_struct_restore(neutrino_properties, stream); e->neutrino_properties = neutrino_properties; + struct neutrino_response *neutrino_response = + (struct neutrino_response *)malloc(sizeof(struct neutrino_response)); + neutrino_response_struct_restore(neutrino_response, stream); + e->neutrino_response = neutrino_response; + struct chemistry_global_data *chemistry = (struct chemistry_global_data *)malloc( sizeof(struct chemistry_global_data)); diff --git a/src/engine.h b/src/engine.h index 4ab913686c7cc00008297d9f8df46a5cb286d0d5..7d72b6d3e62855e31ef15f37cfdc99dd66a55e82 100644 --- a/src/engine.h +++ b/src/engine.h @@ -472,6 +472,9 @@ struct engine { /* Properties of the neutrino model */ const struct neutrino_props *neutrino_properties; + /* The linear neutrino response */ + struct neutrino_response *neutrino_response; + /* Properties of the self-gravity scheme */ struct gravity_props *gravity_properties; @@ -611,8 +614,9 @@ void engine_init( const struct entropy_floor_properties *entropy_floor, struct gravity_props *gravity, struct stars_props *stars, const struct black_holes_props *black_holes, const struct sink_props *sinks, - const struct neutrino_props *neutrinos, struct feedback_props *feedback, - struct rt_props *rt, struct pm_mesh *mesh, + const struct neutrino_props *neutrinos, + struct neutrino_response *neutrino_response, + struct feedback_props *feedback, struct rt_props *rt, struct pm_mesh *mesh, const struct external_potential *potential, struct cooling_function_data *cooling_func, const struct star_formation *starform, diff --git a/src/engine_config.c b/src/engine_config.c index 48c78a9357612840f5f7eec4d352971bbf303676..15956aacab688512e20dde6a401ff90305ad08c0 100644 --- a/src/engine_config.c +++ b/src/engine_config.c @@ -183,45 +183,6 @@ void engine_config(int restart, int fof, struct engine *e, /* Welcome message */ if (e->nodeID == 0) message("Running simulation '%s'.", e->run_name); - if (!restart && e->total_nr_neutrino_gparts > 0) { - /* For diagnostics, collect the range of neutrino masses in eV */ - float neutrino_mass_min = FLT_MAX; - float neutrino_mass_max = -FLT_MAX; - - if (e->s->nr_gparts > 0) { - for (size_t k = 0; k < e->s->nr_gparts; k++) { - if (e->s->gparts[k].type == swift_type_neutrino) { - struct gpart *gp = &e->s->gparts[k]; - float neutrino_mass = gp->mass; - if (neutrino_mass > neutrino_mass_max) - neutrino_mass_max = neutrino_mass; - if (neutrino_mass < neutrino_mass_min) - neutrino_mass_min = 
neutrino_mass; - } - } - - float min_max_mass[2] = {neutrino_mass_min, neutrino_mass_max}; - -#ifdef WITH_MPI - min_max_mass[1] = -min_max_mass[1]; - - MPI_Allreduce(MPI_IN_PLACE, min_max_mass, 2, MPI_FLOAT, MPI_MIN, - MPI_COMM_WORLD); - - min_max_mass[1] = -min_max_mass[1]; -#endif - - if (e->nodeID == 0) { - float mass_mult = e->neutrino_mass_conversion_factor; - float deg_nu_tot = e->cosmology->deg_nu_tot; - message("Neutrino mass multiplier: %.5e eV / U_M", mass_mult); - message("Neutrino simulation particle masses in range: [%.4f, %.4f] eV", - min_max_mass[0] * mass_mult / deg_nu_tot, - min_max_mass[1] * mass_mult / deg_nu_tot); - } - } - } - /* Get the number of queues */ int nr_queues = parser_get_opt_param_int(params, "Scheduler:nr_queues", e->nr_threads); diff --git a/src/mesh_gravity.c b/src/mesh_gravity.c index c0ea3238cb79882a80e2363b064e242cb127bd55..d19c70bebe47e438ed876ce26f069dfd2723e08d 100644 --- a/src/mesh_gravity.c +++ b/src/mesh_gravity.c @@ -39,6 +39,7 @@ #include "kernel_long_gravity.h" #include "mesh_gravity_mpi.h" #include "mesh_gravity_patch.h" +#include "neutrino.h" #include "part.h" #include "restart.h" #include "row_major_id.h" @@ -131,10 +132,12 @@ __attribute__((always_inline)) INLINE static void CIC_set( * @param N the size of the mesh along one axis. * @param fac The width of a mesh cell. * @param dim The dimensions of the simulation box. + * @param nu_model Struct with neutrino constants */ INLINE static void gpart_to_mesh_CIC(const struct gpart* gp, double* rho, const int N, const double fac, - const double dim[3]) { + const double dim[3], + const struct neutrino_model* nu_model) { /* Box wrap the multipole's position */ const double pos_x = box_wrap(gp->x[0], 0., dim[0]); @@ -166,10 +169,16 @@ INLINE static void gpart_to_mesh_CIC(const struct gpart* gp, double* rho, if (k < 0 || k >= N) error("Invalid gpart position in z"); #endif + /* Compute weight (for neutrino delta-f weighting) */ + double weight = 1.0; + if (gp->type == swift_type_neutrino) + gpart_neutrino_weight_mesh_only(gp, nu_model, &weight); + const double mass = gp->mass; + const double value = mass * weight; /* CIC ! */ - CIC_set(rho, N, i, j, k, tx, ty, tz, dx, dy, dz, mass); + CIC_set(rho, N, i, j, k, tx, ty, tz, dx, dy, dz, value); } /** @@ -181,9 +190,11 @@ INLINE static void gpart_to_mesh_CIC(const struct gpart* gp, double* rho, * @param N the size of the mesh along one axis. * @param fac The width of a mesh cell. * @param dim The dimensions of the simulation box. 
+ * @param nu_model Struct with neutrino constants */ void cell_gpart_to_mesh_CIC(const struct cell* c, double* rho, const int N, - const double fac, const double dim[3]) { + const double fac, const double dim[3], + const struct neutrino_model* nu_model) { const int gcount = c->grav.count; const struct gpart* gparts = c->grav.parts; @@ -191,7 +202,7 @@ void cell_gpart_to_mesh_CIC(const struct cell* c, double* rho, const int N, /* Assign all the gpart of that cell to the mesh */ for (int i = 0; i < gcount; ++i) { if (gparts[i].time_bin == time_bin_inhibited) continue; - gpart_to_mesh_CIC(&gparts[i], rho, N, fac, dim); + gpart_to_mesh_CIC(&gparts[i], rho, N, fac, dim, nu_model); } } @@ -207,6 +218,7 @@ struct cic_mapper_data { double fac; double dim[3]; float const_G; + struct neutrino_model* nu_model; }; void gpart_to_mesh_CIC_mapper(void* map_data, int num, void* extra) { @@ -216,13 +228,14 @@ void gpart_to_mesh_CIC_mapper(void* map_data, int num, void* extra) { const int N = data->N; const double fac = data->fac; const double dim[3] = {data->dim[0], data->dim[1], data->dim[2]}; + const struct neutrino_model* nu_model = data->nu_model; /* Pointer to the chunk to be processed */ const struct gpart* gparts = (const struct gpart*)map_data; for (int i = 0; i < num; ++i) { if (gparts[i].time_bin == time_bin_inhibited) continue; - gpart_to_mesh_CIC(&gparts[i], rho, N, fac, dim); + gpart_to_mesh_CIC(&gparts[i], rho, N, fac, dim, nu_model); } } @@ -242,6 +255,7 @@ void cell_gpart_to_mesh_CIC_mapper(void* map_data, int num, void* extra) { const int N = data->N; const double fac = data->fac; const double dim[3] = {data->dim[0], data->dim[1], data->dim[2]}; + const struct neutrino_model* nu_model = data->nu_model; /* Pointer to the chunk to be processed */ int* local_cells = (int*)map_data; @@ -258,7 +272,7 @@ void cell_gpart_to_mesh_CIC_mapper(void* map_data, int num, void* extra) { const struct cell* c = &cells[local_cells[i]]; /* Assign this cell's content to the mesh */ - cell_gpart_to_mesh_CIC(c, rho, N, fac, dim); + cell_gpart_to_mesh_CIC(c, rho, N, fac, dim, nu_model); } } @@ -735,6 +749,18 @@ void compute_potential_distributed(struct pm_mesh* mesh, const struct space* s, tic = getticks(); + /* If using linear response neutrinos, apply to local slice of the MPI mesh */ + if (s->e->neutrino_properties->use_linear_response) { + neutrino_response_compute(s, mesh, tp, frho_slice, local_0_start, local_n0, + verbose); + + if (verbose) + message("Applying neutrino response took %.3f %s.", + clocks_from_ticks(getticks() - tic), clocks_getunit()); + + tic = getticks(); + } + /* Carry out the reverse MPI Fourier transform */ fftw_plan mpi_inverse_plan = fftw_mpi_plan_dft_c2r_3d( N, N, N, frho_slice, rho_slice, MPI_COMM_WORLD, @@ -842,6 +868,12 @@ void compute_potential_global(struct pm_mesh* mesh, const struct space* s, /* Zero everything */ bzero(rho, N * N * N * sizeof(double)); + /* Gather some neutrino constants if using delta-f weighting on the mesh */ + struct neutrino_model nu_model; + bzero(&nu_model, sizeof(struct neutrino_model)); + if (s->e->neutrino_properties->use_delta_f_mesh_only) + gather_neutrino_consts(s, &nu_model); + /* Gather the mesh shared information to be used by the threads */ struct cic_mapper_data data; data.cells = s->cells_top; @@ -853,6 +885,7 @@ void compute_potential_global(struct pm_mesh* mesh, const struct space* s, data.dim[1] = dim[1]; data.dim[2] = dim[2]; data.const_G = 0.f; + data.nu_model = &nu_model; if (nr_local_cells == 0) { @@ -916,6 +949,18 @@ void 
compute_potential_global(struct pm_mesh* mesh, const struct space* s, tic = getticks(); + /* If using linear response neutrinos, apply the response to the mesh */ + if (s->e->neutrino_properties->use_linear_response) { + neutrino_response_compute(s, mesh, tp, frho, /*slice_offset=*/0, + /*slice_width=*/N, verbose); + + if (verbose) + message("Applying neutrino response took %.3f %s.", + clocks_from_ticks(getticks() - tic), clocks_getunit()); + + tic = getticks(); + } + /* Fourier transform to come back from magic-land */ fftw_execute(inverse_plan); diff --git a/src/mesh_gravity_mpi.c b/src/mesh_gravity_mpi.c index f1078b4afdefbaaf3924739d2f0b970afeed8f34..fb7c5710dcfd4b0d715cc2c97efc0c7142d3ff9d 100644 --- a/src/mesh_gravity_mpi.c +++ b/src/mesh_gravity_mpi.c @@ -36,6 +36,7 @@ #include "lock.h" #include "mesh_gravity_patch.h" #include "mesh_gravity_sort.h" +#include "neutrino.h" #include "part.h" #include "periodic.h" #include "row_major_id.h" @@ -51,14 +52,16 @@ * * @param N The size of the mesh * @param fac Inverse of the cell size - * @param c The #cell containing the particles. - * @param map The hashmap in which to store the results - * @param lock A lock used to prevent concurrent access to map + * @param dim The dimensions of the simulation box. + * @param cell The #cell containing the particles. + * @param patch The local mesh patch + * @param nu_model Struct with neutrino constants * */ void accumulate_cell_to_local_patch(const int N, const double fac, const double *dim, const struct cell *cell, - struct pm_mesh_patch *patch) { + struct pm_mesh_patch *patch, + const struct neutrino_model *nu_model) { /* If the cell is empty, then there's nothing to do (and the code to find the extent of the cell would fail) */ @@ -99,9 +102,15 @@ void accumulate_cell_to_local_patch(const int N, const double fac, const int jj = j - patch->mesh_min[1]; const int kk = k - patch->mesh_min[2]; + /* Compute weight (for neutrino delta-f weighting) */ + double weight = 1.0; + if (gp->type == swift_type_neutrino) + gpart_neutrino_weight_mesh_only(gp, nu_model, &weight); + /* Accumulate contributions to the local mesh patch */ const double mass = gp->mass; - pm_mesh_patch_CIC_set(patch, ii, jj, kk, tx, ty, tz, dx, dy, dz, mass); + const double value = mass * weight; + pm_mesh_patch_CIC_set(patch, ii, jj, kk, tx, ty, tz, dx, dy, dz, value); } } @@ -116,6 +125,7 @@ struct accumulate_mapper_data { int N; double fac; double dim[3]; + struct neutrino_model *nu_model; }; /** @@ -135,6 +145,7 @@ void accumulate_cell_to_local_patches_mapper(void *map_data, int num, const int N = data->N; const double fac = data->fac; const double dim[3] = {data->dim[0], data->dim[1], data->dim[2]}; + const struct neutrino_model *nu_model = data->nu_model; /* Pointer to the chunk to be processed */ int *local_cells = (int *)map_data; @@ -150,7 +161,7 @@ void accumulate_cell_to_local_patches_mapper(void *map_data, int num, const struct cell *c = &cells[local_cells[i]]; /* Assign this cell's content to the mesh */ - accumulate_cell_to_local_patch(N, fac, dim, c, &local_patches[i]); + accumulate_cell_to_local_patch(N, fac, dim, c, &local_patches[i], nu_model); } } @@ -176,6 +187,12 @@ void mpi_mesh_accumulate_gparts_to_local_patches( const int nr_local_cells = s->nr_local_cells; const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]}; + /* Gather some neutrino constants if using delta-f weighting on the mesh */ + struct neutrino_model nu_model; + bzero(&nu_model, sizeof(struct neutrino_model)); + if 
(s->e->neutrino_properties->use_delta_f_mesh_only) + gather_neutrino_consts(s, &nu_model); + /* Use the threadpool to parallelize over cells */ struct accumulate_mapper_data data; data.cells = s->cells_top; @@ -186,6 +203,7 @@ void mpi_mesh_accumulate_gparts_to_local_patches( data.dim[0] = dim[0]; data.dim[1] = dim[1]; data.dim[2] = dim[2]; + data.nu_model = &nu_model; threadpool_map(tp, accumulate_cell_to_local_patches_mapper, (void *)local_cells, nr_local_cells, sizeof(int), threadpool_auto_chunk_size, (void *)&data); diff --git a/src/neutrino/Default/neutrino.c b/src/neutrino/Default/neutrino.c index b6e70a0f6381ec23fb4169d580c73109997f9ced..b51e2521d3838b2c086bb1686e8456da9a09ee02 100644 --- a/src/neutrino/Default/neutrino.c +++ b/src/neutrino/Default/neutrino.c @@ -23,6 +23,103 @@ /* Standard headers */ #include <math.h> +/* Compute the dimensionless neutrino momentum (units of kb*T). + * + * @param v The internal 3-velocity + * @param m_eV The neutrino mass in electron-volts + * @param fac Conversion factor = 1. / (speed_of_light * T_nu_eV) + */ +INLINE static double neutrino_momentum(const float v[3], const double m_eV, + const double fac) { + + float v2 = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]; + float vmag = sqrtf(v2); + double p = vmag * fac * m_eV; + return p; +} + +/** + * @brief Gather neutrino constants + * + * @param s The #space for this run. + * @param nm Struct with neutrino constants + */ +void gather_neutrino_consts(const struct space *s, struct neutrino_model *nm) { + nm->use_delta_f_mesh_only = s->e->neutrino_properties->use_delta_f_mesh_only; + nm->M_nu_eV = s->e->cosmology->M_nu_eV; + nm->deg_nu = s->e->cosmology->deg_nu; + nm->N_nu = s->e->cosmology->N_nu; + nm->fac = 1.0 / (s->e->physical_constants->const_speed_light_c * + s->e->cosmology->T_nu_0_eV); + nm->inv_mass_factor = 1. / s->e->neutrino_mass_conversion_factor; + nm->neutrino_seed = s->e->neutrino_properties->neutrino_seed; +} + +/** + * @brief Compute delta-f weight of a neutrino particle, but *only* when using + * the delta-f method exclusively on the mesh (otherwise the mass is already + * weighted). + * + * @param gp The #gpart. + * @param nm Properties of the neutrino model + * @param weight The resulting weight (output) + */ +void gpart_neutrino_weight_mesh_only(const struct gpart *gp, + const struct neutrino_model *nm, + double *weight) { + /* Anything to do? */ + if (!nm->use_delta_f_mesh_only) return; + + /* Use a particle id dependent seed */ + const long long seed = gp->id_or_neg_offset + nm->neutrino_seed; + + /* Compute the initial dimensionless momentum from the seed */ + const double pi = neutrino_seed_to_fermi_dirac(seed); + + /* The neutrino mass and degeneracy (we cycle based on the seed) */ + const double m_eV = neutrino_seed_to_mass(nm->N_nu, nm->M_nu_eV, seed); + + /* Compute the current dimensionless momentum */ + double p = neutrino_momentum(gp->v_full, m_eV, nm->fac); + + /* Compute the initial and current background phase-space density */ + double fi = fermi_dirac_density(pi); + double f = fermi_dirac_density(p); + *weight = 1.0 - f / fi; +} + +/** + * @brief Compute the mass and delta-f weight of a neutrino particle + * + * @param gp The #gpart. 
+ * @param nm Properties of the neutrino model + * @param mass The mass (output) + * @param weight The resulting weight (output) + */ +void gpart_neutrino_mass_weight(const struct gpart *gp, + const struct neutrino_model *nm, double *mass, + double *weight) { + + /* Use a particle id dependent seed */ + const long long seed = gp->id_or_neg_offset + nm->neutrino_seed; + + /* Compute the initial dimensionless momentum from the seed */ + const double pi = neutrino_seed_to_fermi_dirac(seed); + + /* The neutrino mass and degeneracy (we cycle based on the seed) */ + const double m_eV = neutrino_seed_to_mass(nm->N_nu, nm->M_nu_eV, seed); + const double deg = neutrino_seed_to_degeneracy(nm->N_nu, nm->deg_nu, seed); + *mass = deg * m_eV * nm->inv_mass_factor; + + /* Compute the current dimensionless momentum */ + const double p = neutrino_momentum(gp->v_full, m_eV, nm->fac); + + /* Compute the initial and current background phase-space density */ + const double fi = fermi_dirac_density(pi); + const double f = fermi_dirac_density(p); + *weight = 1.0 - f / fi; +} + /** * @brief Compute diagnostics for the neutrino delta-f method, including * the mean squared weight. @@ -42,7 +139,10 @@ void compute_neutrino_diagnostics( const struct neutrino_props *neutrino_properties, const int rank, double *r, double *I_df, double *mass_tot) { - if (!neutrino_properties->use_delta_f) { + int use_df = neutrino_properties->use_delta_f; + int use_df_mesh = neutrino_properties->use_delta_f_mesh_only; + + if (!use_df && !use_df_mesh) { error("Neutrino diagnostics only defined when using the delta-f method."); } @@ -165,19 +265,33 @@ void neutrino_check_cosmology(const struct space *s, const struct neutrino_props *neutrino_props, const int rank, const int verbose) { - /* Check that we are not missing any neutrino particles */ - if (!s->with_neutrinos) { - int use_df = parser_get_opt_param_int(params, "Neutrino:use_delta_f", 0); - int genics = parser_get_opt_param_int(params, "Neutrino:generate_ics", 0); - if (use_df || genics) - error( - "Running without neutrino particles, but specified a neutrino " - "model in the parameter file."); + /* Check that we have neutrino particles if and only if we need them */ + int use_df = neutrino_props->use_delta_f; + int use_df_mesh = neutrino_props->use_delta_f_mesh_only; + int use_linres = neutrino_props->use_linear_response; + int use_none = neutrino_props->use_model_none; + int genics = neutrino_props->generate_ics; + int with_neutrinos = s->with_neutrinos; + + if ((use_df || use_df_mesh || genics) && !with_neutrinos) { + error( + "Running without neutrino particles, but specified a neutrino " + "model that requires them."); + } else if ((use_linres || use_none) && with_neutrinos) { + error( + "Running with neutrino particles, but specified a neutrino " + "model that is incompatible with particles."); + } else if (cosmo->Omega_nu_0 > 0. && !(use_linres || use_none) && + !with_neutrinos) { + error( + "Running without neutrino particles, but specified neutrinos " + "in the background cosmology and not using a neutrino model that does " + "not use particles."); } - /* We are done if the delta-f method is not used, since in that case the - * total mass has already been checked in space_check_cosmology. */ - if (!neutrino_props->use_delta_f) return; + /* We are done if the delta-f method is not used, since the total mass + * has otherwise already been checked in space_check_cosmology. 
*/ + if (!use_df && !use_df_mesh) return; /* Compute neutrino diagnostics, including the total mass */ double r, I_df, total_mass; diff --git a/src/neutrino/Default/neutrino.h b/src/neutrino/Default/neutrino.h index d2fd637943ba80e0a2a83a957528c5b82154e3a5..e728274d6b2af7bd0c187ad93b3d9884c5750cb1 100644 --- a/src/neutrino/Default/neutrino.h +++ b/src/neutrino/Default/neutrino.h @@ -26,11 +26,33 @@ #include "../../engine.h" #include "fermi_dirac.h" #include "neutrino_properties.h" +#include "neutrino_response.h" #include "relativity.h" /* Riemann function zeta(3) */ #define M_ZETA_3 1.2020569031595942853997 +/** + * @brief Shared information for delta-f neutrino weighting of a cell. + */ +struct neutrino_model { + char use_delta_f_mesh_only; + double *M_nu_eV; + double *deg_nu; + int N_nu; + double fac; + double inv_mass_factor; + long long neutrino_seed; +}; + +void gather_neutrino_consts(const struct space *s, struct neutrino_model *nm); +void gpart_neutrino_weight_mesh_only(const struct gpart *gp, + const struct neutrino_model *nm, + double *weight); +void gpart_neutrino_mass_weight(const struct gpart *gp, + const struct neutrino_model *nm, double *mass, + double *weight); + /* Compute the ratio of macro particle mass in internal mass units to * the mass of one microscopic neutrino in eV. * diff --git a/src/neutrino/Default/neutrino_io.h b/src/neutrino/Default/neutrino_io.h index c51220ec0f54f6e2b28db1ca3918643bac4bc00d..312b3a042ef6571847e6511245d712ef15afbb0b 100644 --- a/src/neutrino/Default/neutrino_io.h +++ b/src/neutrino/Default/neutrino_io.h @@ -38,7 +38,8 @@ INLINE static void convert_gpart_vi(const struct engine* e, const struct gpart* gp, float* ret) { /* When we are running with the delta-f method, resample the momentum */ - if (e->neutrino_properties->use_delta_f) { + if (e->neutrino_properties->use_delta_f || + e->neutrino_properties->use_delta_f_mesh_only) { /* Retrieve physical constants, including the neutrino mass array */ const double a_scale = e->cosmology->a; const double N_nu = e->cosmology->N_nu; @@ -82,6 +83,7 @@ INLINE static void convert_gpart_mnu(const struct engine* e, /* Resample if running with the delta-f method or neutrino ic generation */ if (e->neutrino_properties->use_delta_f || + e->neutrino_properties->use_delta_f_mesh_only || e->neutrino_properties->generate_ics) { /* Use a particle id dependent seed (sum of global seed and ID) */ @@ -106,6 +108,34 @@ INLINE static void convert_gpart_mnu(const struct engine* e, ret[0] = micro_mass * eV_mass; } +/** + * @brief Obtain the statistical delta-f weight of a neutrino + * + * @param e The engine of the run + * @param gp The neutrino gpart in question + * @param ret Output + */ +INLINE static void convert_gpart_weight(const struct engine* e, + const struct gpart* gp, double* ret) { + + /* Resample if running with the delta-f method or neutrino ic generation */ + if (e->neutrino_properties->use_delta_f || + e->neutrino_properties->use_delta_f_mesh_only) { + + /* Gather neutrino constants */ + struct neutrino_model nu_model; + gather_neutrino_consts(e->s, &nu_model); + + /* Compute the weight */ + double mass, weight; + gpart_neutrino_mass_weight(gp, &nu_model, &mass, &weight); + + ret[0] = weight; + } else { + ret[0] = 1.0; + } +} + /** * @brief Specifies which particle fields to write to a dataset * @@ -126,7 +156,11 @@ __attribute__((always_inline)) INLINE static int neutrino_write_particles( "MicroscopicMasses", DOUBLE, 1, UNIT_CONV_MASS, 0.f, gparts, convert_gpart_mnu, "Microscopic masses of individual 
neutrino particles"); - return 2; + list[2] = io_make_output_field_convert_gpart( + "Weights", DOUBLE, 1, UNIT_CONV_NO_UNITS, 0.f, gparts, + convert_gpart_weight, "Statistical weights of neutrino particles"); + + return 3; } #endif /* SWIFT_DEFAULT_NEUTRINO_IO_H */ diff --git a/src/neutrino/Default/neutrino_properties.h b/src/neutrino/Default/neutrino_properties.h index 8dbc4cb568086fea6e8dc6210ff4a276e2fb4416..24add9619555f70aca486b237a8ccd21618ab3a5 100644 --- a/src/neutrino/Default/neutrino_properties.h +++ b/src/neutrino/Default/neutrino_properties.h @@ -29,11 +29,20 @@ struct neutrino_props { /* Whether to run with the delta-f method for neutrino weighting */ char use_delta_f; + /* Whether to run with the delta-f method on the mesh only */ + char use_delta_f_mesh_only; + + /* Whether to run without any neutrino perturbations */ + char use_model_none; + /* Whether to generate random neutrino velocities in the initial conditions */ char generate_ics; /* Random seed for the neutrino weighting task */ long long neutrino_seed; + + /* Whether to use the linear respose method */ + char use_linear_response; }; /** @@ -44,17 +53,35 @@ struct neutrino_props { * @param us The internal unit system. * @param params The parsed parameters. * @param cosmo The cosmological model. + * @param with_neutrinos Are we running with neutrino particles? */ INLINE static void neutrino_props_init(struct neutrino_props *np, const struct phys_const *phys_const, const struct unit_system *us, struct swift_params *params, - const struct cosmology *cosmo) { + const struct cosmology *cosmo, + const int with_neutrinos) { - np->use_delta_f = parser_get_param_int(params, "Neutrino:use_delta_f"); - np->generate_ics = parser_get_param_int(params, "Neutrino:generate_ics"); + np->use_delta_f = parser_get_opt_param_int(params, "Neutrino:use_delta_f", 0); + np->use_delta_f_mesh_only = + parser_get_opt_param_int(params, "Neutrino:use_delta_f_mesh_only", 0); + np->use_model_none = + parser_get_opt_param_int(params, "Neutrino:use_model_none", 0); + np->generate_ics = + parser_get_opt_param_int(params, "Neutrino:generate_ics", 0); np->neutrino_seed = parser_get_opt_param_longlong(params, "Neutrino:neutrino_seed", 0); + np->use_linear_response = + parser_get_opt_param_int(params, "Neutrino:use_linear_response", 0); + + int number_of_models = 0; + number_of_models += np->use_delta_f; + number_of_models += np->use_delta_f_mesh_only; + number_of_models += np->use_model_none; + number_of_models += np->use_linear_response; + + if (number_of_models > 1) + error("Cannot use multiple neutrino implementations concurrently."); } /** diff --git a/src/neutrino/Default/neutrino_response.c b/src/neutrino/Default/neutrino_response.c new file mode 100644 index 0000000000000000000000000000000000000000..d44a9a62c17cf92576026884e519cc70d0f0533c --- /dev/null +++ b/src/neutrino/Default/neutrino_response.c @@ -0,0 +1,635 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Copyright (c) 2021 Willem Elbers (whe@willemelbers.com) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+#include "parser.h"
+
+#ifdef HAVE_LIBGSL
+#include <gsl/gsl_interp2d.h>
+#include <gsl/gsl_math.h>
+#include <gsl/gsl_spline2d.h>
+#endif
+
+/* Local headers */
+#include "neutrino.h"
+
+/*! Number of time steps in the neutrino density interpolation table */
+const hsize_t timestep_length = 10000;
+/*! Number of wavenumbers in the neutrino density interpolation table */
+const hsize_t wavenumber_length = 10000;
+
+/**
+ * @brief Determine the length of an HDF5 dataset
+ *
+ * @param h_file The file object
+ * @param title The title of the dataset
+ * @param length (Output) The vector length
+ */
+void read_vector_length(hid_t h_file, char title[PARSER_MAX_LINE_SIZE],
+                        hsize_t *length) {
+
+  /* Open the dataset */
+  hid_t h_data = H5Dopen2(h_file, title, H5P_DEFAULT);
+
+  /* Open the dataspace and fetch the dimensions */
+  hid_t h_space = H5Dget_space(h_data);
+
+  /* Fetch the rank and size of the dataset */
+  int ndims = H5Sget_simple_extent_ndims(h_space);
+  hsize_t dims[ndims];
+  H5Sget_simple_extent_dims(h_space, dims, NULL);
+
+  /* Close the dataspace */
+  H5Sclose(h_space);
+
+  /* Verify that this is a vector and return the length */
+  if (ndims != 1 || dims[0] <= 0) error("We expected a non-empty vector.");
+  length[0] = dims[0];
+
+  /* Close the dataset */
+  H5Dclose(h_data);
+}
+
+/**
+ * @brief Read transfer function from an HDF5 dataset with expected size N_z*N_k
+ *
+ * @param h_file The file object
+ * @param title The title of the dataset
+ * @param dest (Output) Memory for the transfer function data
+ * @param N_z Expected number of timesteps
+ * @param N_k Expected number of wavenumbers
+ */
+void read_transfer_function(hid_t h_file, char title[PARSER_MAX_LINE_SIZE],
+                            double *dest, hsize_t N_z, hsize_t N_k) {
+
+  /* Open the dataset */
+  hid_t h_data = H5Dopen2(h_file, title, H5P_DEFAULT);
+
+  /* Open the dataspace and fetch the dimensions */
+  hid_t h_space = H5Dget_space(h_data);
+  int ndims = H5Sget_simple_extent_ndims(h_space);
+  hsize_t dims[ndims];
+  H5Sget_simple_extent_dims(h_space, dims, NULL);
+
+  /* Close the dataspace */
+  H5Sclose(h_space);
+
+  /* Verify the dimensions */
+  if (ndims != 2) error("We expected a rank-2 tensor.");
+  if (dims[0] != N_z)
+    error("Number of redshifts does not match array dimensions.");
+  if (dims[1] != N_k)
+    error("Number of wavenumbers does not match array dimensions.");
+
+  /* Read out the data */
+  hid_t h_err =
+      H5Dread(h_data, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dest);
+  if (h_err < 0) error("Error reading dataset '%s'.\n", title);
+
+  /* Close the dataset */
+  H5Dclose(h_data);
+}
+
+/**
+ * @brief Initialize the #neutrino_response object by reading an HDF5 file
+ * with transfer functions and pre-computing a transfer function ratio.
+ *
+ * @param numesh The #neutrino_response to be initialized
+ * @param params The parsed parameter file.
+ * @param us The system of units used internally.
+ * @param dim Spatial dimensions of the domain.
+ * @param c The #cosmology used for this run.
+ * @param np The #neutrino_props used for this run.
+ * @param gp The #gravity_props used for this run.
+ * @param verbose Are we talkative ?
+ */ +void neutrino_response_init(struct neutrino_response *numesh, + struct swift_params *params, + const struct unit_system *us, const double dim[3], + const struct cosmology *c, + const struct neutrino_props *np, + const struct gravity_props *gp, int rank, + int verbose) { + + /* Do we need to do anything? */ + if (!np->use_linear_response) return; + + /* Check that we have a degenerate neutrino mass spectrum */ + if (c->N_nu > 1) + error( + "Non-degenerate neutrino mass spectra not supported with the linear " + "response method."); + +#ifdef HAVE_LIBGSL + + /* Parse file name parameter */ + char filename[PARSER_MAX_LINE_SIZE]; + parser_get_param_string(params, "Neutrino:transfer_functions_filename", + filename); + + /* Titles of the redshift, wavenumber, and transfer functions datasets */ + char z_name[PARSER_MAX_LINE_SIZE]; + char k_name[PARSER_MAX_LINE_SIZE]; + char d_cdm_name[PARSER_MAX_LINE_SIZE]; + char d_b_name[PARSER_MAX_LINE_SIZE]; + char d_ncdm_name[PARSER_MAX_LINE_SIZE]; + parser_get_param_string(params, "Neutrino:dataset_redshifts", z_name); + parser_get_param_string(params, "Neutrino:dataset_wavenumbers", k_name); + parser_get_param_string(params, "Neutrino:dataset_delta_cdm", d_cdm_name); + parser_get_param_string(params, "Neutrino:dataset_delta_baryon", d_b_name); + parser_get_param_string(params, "Neutrino:dataset_delta_nu", d_ncdm_name); + + /* Read additional parameters */ + numesh->fixed_bg_density = + parser_get_param_int(params, "Neutrino:fixed_bg_density"); + + if (rank == 0 && verbose) + message("Reading transfer functions file '%s'", filename); + + /* Open the hdf5 file */ + hid_t h_file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + hid_t h_data, h_err, h_status; + + /* Obtain the number of redshifts and wavenumbers in the file */ + hsize_t N_z; // redshifts + hsize_t N_k; // wavenumbers + read_vector_length(h_file, z_name, &N_z); + read_vector_length(h_file, k_name, &N_k); + + /* The transfer function should be a rank-2 array of size (N_z, N_k) */ + const hsize_t tf_size = N_z * N_k; + + /* Allocate temporary memory for the transfer functions */ + double *delta_cdm; + double *delta_b; + double *delta_ncdm; + double *ncdm_over_cb; + if ((delta_cdm = (double *)swift_malloc("delta_cdm", + sizeof(double) * tf_size)) == NULL) + error("Failed to allocate memory for delta_cdm."); + if ((delta_b = (double *)swift_malloc("delta_b", sizeof(double) * tf_size)) == + NULL) + error("Failed to allocate memory for delta_b."); + if ((delta_ncdm = (double *)swift_malloc("delta_ncdm", + sizeof(double) * tf_size)) == NULL) + error("Failed to allocate memory for delta_ncdm."); + if ((ncdm_over_cb = (double *)swift_malloc("ncdm_over_cb", + sizeof(double) * tf_size)) == NULL) + error("Failed to allocate memory for ncdm_over_cb."); + + /* Read the necessary transfer functions */ + read_transfer_function(h_file, d_cdm_name, delta_cdm, N_z, N_k); + read_transfer_function(h_file, d_b_name, delta_b, N_z, N_k); + read_transfer_function(h_file, d_ncdm_name, delta_ncdm, N_z, N_k); + + /* Compute the background mass ratio of cdm to cb (= cdm + baryon) */ + const double f_cdm = c->Omega_cdm / (c->Omega_cdm + c->Omega_b); + + /* Compute the transfer function ratio */ + for (hsize_t i = 0; i < N_z; i++) { + for (hsize_t j = 0; j < N_k; j++) { + /* Fetch the data */ + double d_cdm = delta_cdm[N_k * i + j]; + double d_b = delta_b[N_k * i + j]; + double d_ncdm = delta_ncdm[N_k * i + j]; + + /* Weighted baryon-cdm density */ + double d_cb = f_cdm * d_cdm + (1.0 - f_cdm) * d_b; + + /* Store the 
ratio */ + ncdm_over_cb[N_k * i + j] = d_ncdm / d_cb; + } + } + + /* Free temporary transfer function memory */ + swift_free("delta_cdm", delta_cdm); + swift_free("delta_b", delta_b); + swift_free("delta_ncdm", delta_ncdm); + + /* The length unit used by the transfer function file */ + double UnitLengthCGS; + + /* Check if a Units group exists */ + h_status = H5Eset_auto1(NULL, NULL); // turn off error printing + h_status = H5Gget_objinfo(h_file, "/Units", 0, NULL); + + /* If the group exists */ + if (h_status == 0) { + hid_t h_attr, h_grp; + + /* Open the Units group */ + h_grp = H5Gopen(h_file, "Units", H5P_DEFAULT); + + /* Read the length unit in CGS units */ + h_attr = H5Aopen(h_grp, "Unit length in cgs (U_L)", H5P_DEFAULT); + h_err = H5Aread(h_attr, H5T_NATIVE_DOUBLE, &UnitLengthCGS); + H5Aclose(h_attr); + + /* Close the Units group */ + H5Gclose(h_grp); + } else { + /* Assume the internal unit system */ + UnitLengthCGS = us->UnitLength_in_cgs; + } + + /* Determine the range of wavenumbers needed for this simulation */ + const double boxlen = dim[0]; + const int mesh_size = gp->mesh_size; + const double k_margin = 1.1; // safety margin around the edges + const double k_min = 2.0 * M_PI / boxlen / k_margin; + const double k_max = sqrt(3.0) * k_min * mesh_size * k_margin * k_margin; + const double k_unit = UnitLengthCGS / us->UnitLength_in_cgs; + + /* Determine the range of redshifts needed for this simulation */ + const double a_min = c->a_begin; + const double a_max = c->a_end; + const double z_max = 1.0 / a_min - 1.0; + const double z_min = 1.0 / a_max - 1.0; + + /* Allocate temporary memory for the redshifts and wavenumbers */ + double *redshifts; + double *wavenumbers; + if ((redshifts = (double *)swift_malloc("redshifts", sizeof(double) * N_z)) == + NULL) + error("Failed to allocate memory for redshifts."); + if ((wavenumbers = + (double *)swift_malloc("wavenumbers", sizeof(double) * N_k)) == NULL) + error("Failed to allocate memory for wavenumbers."); + + /* Open the redshifts dataset, read the data, then close it */ + h_data = H5Dopen2(h_file, z_name, H5P_DEFAULT); + h_err = H5Dread(h_data, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + redshifts); + if (h_err < 0) error("Error reading dataset '%s'.\n", z_name); + H5Dclose(h_data); + + /* Open the wavenumbers dataset, read the data, then close it */ + h_data = H5Dopen2(h_file, k_name, H5P_DEFAULT); + h_err = H5Dread(h_data, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, + wavenumbers); + if (h_err < 0) error("Error reading dataset '%s'.\n", k_name); + H5Dclose(h_data); + + /* Ensure that the data is ascending in time and wavenumber */ + for (hsize_t i = 1; i < N_z; i++) + if (redshifts[i] > redshifts[i - 1]) error("Redshifts not descending."); + for (hsize_t i = 1; i < N_k; i++) + if (wavenumbers[i] < wavenumbers[i - 1]) + error("Wavenumbers not ascending."); + + /* Ensure that we have data covering the required domain */ + if (redshifts[0] < z_max || redshifts[N_z - 1] > z_min) + error("Redshifts do not cover the interval (%g, %g)", z_min, z_max); + if (wavenumbers[0] * k_unit > k_min || wavenumbers[N_k - 1] * k_unit < k_max) + error("Wavenumbers do not cover the interval (%g, %g)", k_min, k_max); + + if (rank == 0 && verbose) { + message("We have transfer functions covering the domain:"); + message("(k_min, k_max) = (%g, %g) U_L^-1", k_min, k_max); + message("(a_min, a_max) = (%g, %g)", a_min, a_max); + } + + /* We will remap the data such that it just covers the required domain + * with a constant log spacing, 
allowing for faster interpolation. + * We only use the slower GSL interpolation for the remapping. */ + + /* Determine the dimensions of the remapped data */ + const hsize_t remap_tf_size = timestep_length * wavenumber_length; + + /* Determine the constant log spacing and bounding values */ + const double log_a_min = log(a_min); + const double log_a_max = log(a_max); + const double log_k_min = log(k_min); + const double log_k_max = log(k_max); + const double delta_log_a = (log_a_max - log_a_min) / timestep_length; + const double delta_log_k = (log_k_max - log_k_min) / wavenumber_length; + + /* Store the remapped bounding values and log spacing */ + numesh->log_a_min = log_a_min; + numesh->log_a_max = log_a_max; + numesh->log_k_min = log_k_min; + numesh->log_k_max = log_k_max; + numesh->delta_log_a = delta_log_a; + numesh->delta_log_k = delta_log_k; + + /* Allocate temporary memory for the log of scale factors and wavenumbers */ + double *log_scale_factors; + double *log_wavenumbers; + if ((log_scale_factors = (double *)swift_malloc( + "log_scale_factors", sizeof(double) * N_z)) == NULL) + error("Failed to allocate memory for log_scale_factors."); + if ((log_wavenumbers = (double *)swift_malloc("log_wavenumbers", + sizeof(double) * N_k)) == NULL) + error("Failed to allocate memory for log_wavenumbers."); + + /* Convert units and compute logarithms */ + for (hsize_t i = 0; i < N_z; i++) { + log_scale_factors[i] = -log(1.0 + redshifts[i]); + } + for (hsize_t i = 0; i < N_k; i++) { + log_wavenumbers[i] = log(wavenumbers[i] * k_unit); + } + + /* Free the temporary buffers for the original redshifts and wavenumbers */ + swift_free("redshifts", redshifts); + swift_free("wavenumbers", wavenumbers); + + /* Initialize GSL interpolation */ + /* NB: GSL uses column major indices, but we use row major. This is why we + * feed the transpose into spline2d_init. */ + const gsl_interp2d_type *T = gsl_interp2d_bilinear; + gsl_spline2d *spline = gsl_spline2d_alloc(T, N_k, N_z); + gsl_interp_accel *a_acc = gsl_interp_accel_alloc(); + gsl_interp_accel *k_acc = gsl_interp_accel_alloc(); + gsl_spline2d_init(spline, log_wavenumbers, log_scale_factors, ncdm_over_cb, + N_k, N_z); + + /* Allocate memory for the transfer function ratio with constant spacing */ + if ((numesh->pt_density_ratio = (double *)swift_malloc( + "numesh.pt_density_ratio", sizeof(double) * remap_tf_size)) == NULL) + error("Failed to allocate memory for numesh.pt_density_ratio."); + + /* Remap the transfer function ratio */ + for (hsize_t i = 0; i < timestep_length; i++) { + for (hsize_t j = 0; j < wavenumber_length; j++) { + double log_a = log_a_min + i * delta_log_a; + double log_k = log_k_min + j * delta_log_k; + numesh->pt_density_ratio[wavenumber_length * i + j] = + gsl_spline2d_eval(spline, log_k, log_a, k_acc, a_acc); + } + } + + /* Store the array size */ + numesh->tf_size = wavenumber_length * timestep_length; + + /* Clean up GSL interpolation */ + gsl_spline2d_free(spline); + gsl_interp_accel_free(a_acc); + gsl_interp_accel_free(k_acc); + + /* Free the temporary buffers */ + swift_free("log_scale_factors", log_scale_factors); + swift_free("log_wavenumbers", log_wavenumbers); + swift_free("ncdm_over_cb", ncdm_over_cb); +#else + error("No GSL library found. Cannot remap the transfer functions."); +#endif +} + +void neutrino_response_clean(struct neutrino_response *numesh) { + swift_free("numesh.pt_density_ratio", numesh->pt_density_ratio); +} + +/** + * @brief Shared information about the neutrino response used by the threads. 
+ */ +struct neutrino_response_tp_data { + + /* Mesh properties */ + int N; + fftw_complex *frho; + double boxlen; + int slice_offset; + int slice_width; + + /* Interpolation properties */ + double inv_delta_log_k; + double log_k_min; + int a_index; + double u_a; + + /* Background and perturbed density ratios */ + double bg_density_ratio; + const double *pt_density_ratio; +}; + +/** + * @brief Mapper function for the application of the linear neutrino response. + * + * @param map_data The array of the density field Fourier transform. + * @param num The number of elements to iterate on (along the x-axis). + * @param extra The properties of the neutrino mesh. + */ +void neutrino_response_apply_neutrino_response_mapper(void *map_data, + const int num, + void *extra) { + + struct neutrino_response_tp_data *data = + (struct neutrino_response_tp_data *)extra; + + /* Unpack the mesh properties */ + fftw_complex *const frho = data->frho; + const int N = data->N; + const int N_half = N / 2; + const double delta_k = 2.0 * M_PI / data->boxlen; + + /* Unpack the interpolation properties */ + const hsize_t a_index = data->a_index; + const double u_a = data->u_a; + const double inv_delta_log_k = data->inv_delta_log_k; + const double log_k_min = data->log_k_min; + const int N_k = wavenumber_length; + + /* Unpack the density ratios (background & perturbation) */ + const double bg_density_ratio = data->bg_density_ratio; + const double *pt_density_ratio = data->pt_density_ratio; + + /* Find what slice of the full mesh is stored on this MPI rank */ + const int slice_offset = data->slice_offset; + + /* Range of x coordinates in the full mesh handled by this call */ + const int x_start = ((fftw_complex *)map_data - frho) + slice_offset; + const int x_end = x_start + num; + + /* Loop over the x range corresponding to this thread */ + for (int x = x_start; x < x_end; x++) { + for (int y = 0; y < N; y++) { + for (int z = 0; z < N_half + 1; z++) { + + /* Compute the wavevector (U_L^-1) */ + const double k_x = (x > N_half) ? (x - N) * delta_k : x * delta_k; + const double k_y = (y > N_half) ? (y - N) * delta_k : y * delta_k; + const double k_z = (z > N_half) ? (z - N) * delta_k : z * delta_k; + const double k2 = k_x * k_x + k_y * k_y + k_z * k_z; + + /* Skip the DC mode */ + if (k2 == 0.) 
continue;
+
+        /* Interpolate along the k-axis */
+        const double log_k = 0.5 * log(k2);
+        const double log_k_steps = (log_k - log_k_min) * inv_delta_log_k;
+        const hsize_t k_index = (hsize_t)log_k_steps;
+        const double u_k = log_k_steps - k_index;
+
+        /* Retrieve the bounding values */
+        const double T11 = pt_density_ratio[N_k * a_index + k_index];
+        const double T21 = pt_density_ratio[N_k * a_index + k_index + 1];
+        const double T12 = pt_density_ratio[N_k * (a_index + 1) + k_index];
+        const double T22 = pt_density_ratio[N_k * (a_index + 1) + k_index + 1];
+
+        /* Bilinear interpolation of the transfer function ratio */
+        double pt_ratio_interp = (1.0 - u_a) * ((1.0 - u_k) * T11 + u_k * T21) +
+                                 u_a * ((1.0 - u_k) * T12 + u_k * T22);
+        double correction = 1.0 + pt_ratio_interp * bg_density_ratio;
+
+#ifdef SWIFT_DEBUG_CHECKS
+        if (u_k < 0 || u_a < 0 || u_k > 1 || u_a > 1 ||
+            k_index > wavenumber_length || a_index > timestep_length)
+          error("Interpolation out of bounds error: %g %g %g %g %llu %llu\n",
+                u_k, u_a, sqrt(k2), pt_ratio_interp, k_index, a_index);
+#endif
+
+        /* Apply to the mesh */
+        const int index =
+            N * (N_half + 1) * (x - slice_offset) + (N_half + 1) * y + z;
+        frho[index][0] *= correction;
+        frho[index][1] *= correction;
+      }
+    }
+  }
+}
+
+/**
+ * @brief Apply the linear neutrino response to the Fourier transform of the
+ * gravitational potential.
+ *
+ * @param s The current #space
+ * @param mesh The #pm_mesh used to store the potential
+ * @param tp The #threadpool object used for parallelisation
+ * @param frho The NxNx(N/2) complex array of the Fourier transform of the
+ * density field
+ * @param slice_offset The x coordinate of the start of the slice on this MPI
+ * rank
+ * @param slice_width The width of the local slice on this MPI rank
+ * @param verbose Are we talkative?
+ */
+void neutrino_response_compute(const struct space *s, struct pm_mesh *mesh,
+                               struct threadpool *tp, fftw_complex *frho,
+                               const int slice_offset, const int slice_width,
+                               int verbose) {
+#ifdef HAVE_FFTW
+
+  const struct cosmology *c = s->e->cosmology;
+  struct neutrino_response *numesh = s->e->neutrino_response;
+
+  /* Grid size */
+  const int N = mesh->N;
+  const double boxlen = mesh->dim[0];
+
+  /* Calculate the background neutrino density */
+  const double a = numesh->fixed_bg_density ?
+  const double Omega_nu = cosmology_get_neutrino_density(c, a);
+  const double Omega_m = c->Omega_cdm + c->Omega_b;  // does not include nu's
+  /* The comoving density is (Omega_nu * a^-4) * a^3 = Omega_nu / a */
+  const double bg_density_ratio = (Omega_nu / a) / Omega_m;
+
+  /* Transfer function bounds and spacing */
+  const double inv_delta_log_a = 1.0 / numesh->delta_log_a;
+  const double inv_delta_log_k = 1.0 / numesh->delta_log_k;
+  const double log_a_min = numesh->log_a_min;
+  const double log_k_min = numesh->log_k_min;
+
+  /* Interpolate along the a-axis */
+  const double log_a = log(c->a);
+  const double log_a_steps = (log_a - log_a_min) * inv_delta_log_a;
+  hsize_t a_index = (hsize_t)log_a_steps;
+  double u_a = log_a_steps - a_index;
+
+  /* Perform bounds checks for a */
+  if (log_a_steps < 0) {
+    a_index = 0;
+    u_a = 0;
+  } else if (a_index > timestep_length - 2) {
+    a_index = timestep_length - 2;
+    u_a = 1.0;
+  }
+
+  /* Some common factors */
+  struct neutrino_response_tp_data data;
+  data.frho = frho;
+  data.N = N;
+  data.boxlen = boxlen;
+  data.slice_offset = slice_offset;
+  data.slice_width = slice_width;
+  data.inv_delta_log_k = inv_delta_log_k;
+  data.log_k_min = log_k_min;
+  data.a_index = a_index;
+  data.u_a = u_a;
+  data.bg_density_ratio = bg_density_ratio;
+  data.pt_density_ratio = numesh->pt_density_ratio;
+
+  /* Parallelize the application of the neutrino linear response by using the
+     threadpool to split the x-axis loop over the threads. The array is
+     N x N x (N/2 + 1): each thread deals with a range
+     [i_min, i_max[ x N x (N/2 + 1). */
+  threadpool_map(tp, neutrino_response_apply_neutrino_response_mapper, frho,
+                 slice_width, sizeof(fftw_complex), threadpool_auto_chunk_size,
+                 &data);
+
+  /* Correct singularity at (0,0,0) */
+  if (slice_offset == 0 && slice_width > 0) {
+    frho[0][0] = 0.;
+    frho[0][1] = 0.;
+  }
+
+#else
+  error("No FFTW library found. Cannot compute periodic long-range forces.");
+#endif
+}
+
+/**
+ * @brief Write a neutrino response struct to the given FILE as a stream of
+ * bytes.
+ *
+ * @param numesh the struct
+ * @param stream the file stream
+ */
+void neutrino_response_struct_dump(const struct neutrino_response *numesh,
+                                   FILE *stream) {
+  restart_write_blocks((void *)numesh, sizeof(struct neutrino_response), 1,
+                       stream, "numesh", "neutrino mesh");
+
+  /* Store the perturbation data */
+  if (numesh->tf_size > 0) {
+    restart_write_blocks((double *)numesh->pt_density_ratio, sizeof(double),
+                         numesh->tf_size, stream, "pt_density_ratio",
+                         "pt_density_ratio");
+  }
+}
+
+/**
+ * @brief Restore a neutrino response struct from the given FILE as a stream
+ * of bytes.
+ * + * @param numesh the struct + * @param stream the file stream + */ +void neutrino_response_struct_restore(struct neutrino_response *numesh, + FILE *stream) { + restart_read_blocks((void *)numesh, sizeof(struct neutrino_response), 1, + stream, NULL, "neutrino mesh"); + + /* Restore the perturbation data */ + if (numesh->tf_size > 0) { + numesh->pt_density_ratio = (double *)swift_malloc( + "numesh.pt_density_ratio", numesh->tf_size * sizeof(double)); + restart_read_blocks((double *)numesh->pt_density_ratio, sizeof(double), + numesh->tf_size, stream, NULL, "pt_density_ratio"); + } +} \ No newline at end of file diff --git a/src/neutrino/Default/neutrino_response.h b/src/neutrino/Default/neutrino_response.h new file mode 100644 index 0000000000000000000000000000000000000000..161fc0e7db423c669364650627cfe60a6ba00109 --- /dev/null +++ b/src/neutrino/Default/neutrino_response.h @@ -0,0 +1,82 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Copyright (c) 2021 Willem Elbers (whe@willemelbers.com) + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ + +#ifndef SWIFT_DEFAULT_NEUTRINO_RESPONSE_H +#define SWIFT_DEFAULT_NEUTRINO_RESPONSE_H + +#ifdef HAVE_FFTW +#include <fftw3.h> +#endif + +#include "cosmology.h" +#include "neutrino_properties.h" +#include "physical_constants.h" +#include "units.h" + +/** + * @brief Structure for handling the linear neutrino response on the mesh + */ +struct neutrino_response { + + /*! Logarithm of minimum scale factor for which we have transfer functions */ + double log_a_min; + + /*! Logarithm of maximum scale factor for which we have transfer functions */ + double log_a_max; + + /*! Logarithm of minimum wavenumber for which we have transfer functions */ + double log_k_min; + + /*! Logarithm of maximum wavenumber for which we have transfer functions */ + double log_k_max; + + /*! Spacing of log scale factor for transfer function interpolation */ + double delta_log_a; + + /*! Spacing of log wavenumber for transfer function interpolation */ + double delta_log_k; + + /*! Table of the neutrino overdensity to cdm & baryon overdensity ratio */ + double *pt_density_ratio; + + /*! Size of the transfer function ratio table */ + hsize_t tf_size; + + /*! 
Whether to use a fixed present-day background density */ + char fixed_bg_density; +}; + +void neutrino_response_init(struct neutrino_response *numesh, + struct swift_params *params, + const struct unit_system *us, const double dim[3], + const struct cosmology *c, + const struct neutrino_props *np, + const struct gravity_props *gp, int rank, + int verbose); +void neutrino_response_clean(struct neutrino_response *numesh); +void neutrino_response_compute(const struct space *s, struct pm_mesh *mesh, + struct threadpool *tp, fftw_complex *frho, + const int slice_offset, const int slice_width, + int verbose); +void neutrino_response_struct_dump(const struct neutrino_response *numesh, + FILE *stream); +void neutrino_response_struct_restore(struct neutrino_response *numesh, + FILE *stream); + +#endif /* SWIFT_DEFAULT_NEUTRINO_RESPONSE_H */ diff --git a/src/runner_neutrino.c b/src/runner_neutrino.c index 14de108e50acc2c822614561ed8f65786f4690be..a771cfc115047536e23dbd99838fb83a77fd9168 100644 --- a/src/runner_neutrino.c +++ b/src/runner_neutrino.c @@ -33,20 +33,6 @@ #include "engine.h" #include "timers.h" -/* Compute the dimensionless neutrino momentum (units of kb*T). - * - * @param v The internal 3-velocity - * @param m_eV The neutrino mass in electron-volts - * @param fac Conversion factor = 1. / (speed_of_light * T_nu_eV) - */ -INLINE static double neutrino_momentum(float v[3], double m_eV, double fac) { - - float v2 = v[0] * v[0] + v[1] * v[1] + v[2] * v[2]; - float vmag = sqrtf(v2); - double p = vmag * fac * m_eV; - return p; -} - /** * @brief Weight the active neutrino particles in a cell using the delta-f * method. @@ -71,14 +57,8 @@ void runner_do_neutrino_weighting(struct runner *r, struct cell *c, int timer) { error("Phase space weighting without cosmology not implemented."); /* Retrieve physical and cosmological constants */ - const double c_vel = e->physical_constants->const_speed_light_c; - const double *m_eV_array = e->cosmology->M_nu_eV; - const double *deg_array = e->cosmology->deg_nu; - const int N_nu = e->cosmology->N_nu; - const double T_eV = e->cosmology->T_nu_0_eV; - const double fac = 1.0 / (c_vel * T_eV); - const double inv_mass_factor = 1. / e->neutrino_mass_conversion_factor; - const long long neutrino_seed = e->neutrino_properties->neutrino_seed; + struct neutrino_model nu_model; + gather_neutrino_consts(e->s, &nu_model); /* Recurse? 
*/ if (c->split) { @@ -95,24 +75,9 @@ void runner_do_neutrino_weighting(struct runner *r, struct cell *c, int timer) { if (!(gp->type == swift_type_neutrino && gpart_is_starting(gp, e))) continue; - /* Use a particle id dependent seed */ - const long long seed = gp->id_or_neg_offset + neutrino_seed; - - /* Compute the initial dimensionless momentum from the seed */ - const double pi = neutrino_seed_to_fermi_dirac(seed); - - /* The neutrino mass and degeneracy (we cycle based on the seed) */ - const double m_eV = neutrino_seed_to_mass(N_nu, m_eV_array, seed); - const double deg = neutrino_seed_to_degeneracy(N_nu, deg_array, seed); - const double mass = deg * m_eV * inv_mass_factor; - - /* Compute the current dimensionless momentum */ - double p = neutrino_momentum(gp->v_full, m_eV, fac); - - /* Compute the initial and current background phase-space density */ - double fi = fermi_dirac_density(pi); - double f = fermi_dirac_density(p); - double weight = 1.0 - f / fi; + /* Compute the mass and delta-f weight */ + double mass, weight; + gpart_neutrino_mass_weight(gp, &nu_model, &mass, &weight); /* Set the statistically weighted mass */ gp->mass = mass * weight; diff --git a/src/space.c b/src/space.c index cece68be2ca0124e98505a84bfcbf3f4bcc6fe34..b9395ce1052cb24f62299dfcc5ba6a6ecc489229 100644 --- a/src/space.c +++ b/src/space.c @@ -49,7 +49,6 @@ #include "kernel_hydro.h" #include "lock.h" #include "minmax.h" -#include "neutrino/Default/fermi_dirac.h" #include "proxy.h" #include "restart.h" #include "rt.h" @@ -1943,6 +1942,7 @@ void space_generate_gas(struct space *s, const struct cosmology *cosmo, * @param cosmo The current cosmology model. * @param with_hydro Are we running with hydro switched on? * @param rank The MPI rank of this #space. + * @param check_neutrinos Should neutrino masses be checked? 
*/ void space_check_cosmology(struct space *s, const struct cosmology *cosmo, const int with_hydro, const int rank, diff --git a/tools/create_perturb_file.py b/tools/create_perturb_file.py new file mode 100755 index 0000000000000000000000000000000000000000..52f32ca1235eff5a263593aba0109d2c85c84f2a --- /dev/null +++ b/tools/create_perturb_file.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 + +# Utility to create an HDF5 file with cosmological transfer functions using +# CLASS (python package classy) + +import numpy as np +import h5py +from classy import Class + +# Name of the perturbations file to be create +fname = "perturbations.hdf5" + +# Open the file +f = h5py.File("perturbations.hdf5", mode="w") + +# Cosmological parameters +h = 0.681 +Omega_b = 0.0486 +Omega_cdm = 0.2560110606 +A_s = 2.0993736148e-09 +n_s = 0.967 + +# Neutrino and radiation parameters +T_cmb = 2.7255 +T_ncdm = 0.71611 +N_ur = 2.0308 +N_ncdm = 1 +deg_ncdm = [1] +m_ncdm = [0.06] + +# Maximum wavenumber and redshift +kmax = 30.0 +zmax = 1e3 +amin = 1.0 / (zmax + 1) + +# CLASS output distance unit +Mpc_cgs = 3.085677581282e24 + +# CLASS parameters +params = { + "h": h, + "Omega_b": Omega_b, + "Omega_cdm": Omega_cdm, + "T_cmb": T_cmb, + "N_ncdm": N_ncdm, + "N_ur": N_ur, + "T_ncdm": T_ncdm, + "deg_ncdm": "".join(str(x) + "," for x in deg_ncdm)[:-1], + "m_ncdm": "".join(str(x) + "," for x in m_ncdm)[:-1], + "A_s": A_s, + "n_s": n_s, + "output": "dTk, vTk", + "z_max_pk": zmax, + "P_k_max_1/Mpc": kmax, + "reio_parametrization": "reio_none", + "YHe": "BBN", + "k_output_values": kmax, + "k_per_decade_for_pk": 100, +} + +print("Running CLASS") + +# Run CLASS +model = Class() +model.set(params) +model.compute() + +# Extract wavenumbers and prepare redshifts +k = model.get_transfer(0)["k (h/Mpc)"] * h +a = np.exp(np.arange(0, np.log(amin), -0.01))[::-1] +z = 1.0 / a - 1.0 +nk = len(k) +nz = len(z) + +print("We have", nk, "wavenumbers and", nz, "redshifts") + +keys = model.get_transfer(0).keys() + +print("Available transfer functions:") +print(keys) + +# Prepare dictionary +pt = {} +for key in keys: + pt[key] = np.zeros((nz, nk)) + +# Extract transfer functions +for i in range(nz): + pti = model.get_transfer(z[i]) + for key in pti: + pt[key][i, :] = pti[key] + +# Export the perturbations file +f.create_group("Functions") +f["Redshifts"] = z +f["Wavenumbers"] = k +f.create_group("Units") +f["Units"].attrs["Unit length in cgs (U_L)"] = Mpc_cgs + +# Write the perturbations +for key in keys: + f["Functions/" + key.replace("/", "\\")] = pt[key] + +# Close the file +f.close() + +print("Done.") diff --git a/tools/spawn_neutrinos.py b/tools/spawn_neutrinos.py new file mode 100755 index 0000000000000000000000000000000000000000..4f454a0acf8ed46d3f86a8774964f23f2bda70c3 --- /dev/null +++ b/tools/spawn_neutrinos.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 + +# Utility to spawn placeholder neutrinos in an existing initial conditions file. +# The neutrinos will be distributed uniformly in space and with with zero mass +# and velocity. + +import h5py +import numpy as np +import sys + +# This script does not need to be changed for different particle numbers. 
+
+# Usage: ./spawn_neutrinos.py filename
+
+# Constants
+Mpc_cgs = 3.08567758e24
+Default_N_nu_per100Mpc = 72  # 72^3 neutrinos for a (100 Mpc)^3 box
+Default_nr_neutrinos_per_Mpc3 = (Default_N_nu_per100Mpc / 100.0) ** 3
+
+# Read command line arguments
+if len(sys.argv) <= 1 or sys.argv[1] == "--help" or sys.argv[1] == "-h":
+    print("Usage: ./spawn_neutrinos.py filename (use -h to show this message)")
+    exit(0)
+
+# Open the hdf5 file
+fname = sys.argv[1]
+f = h5py.File(fname, "r+")
+print("Operating on '" + fname + "'")
+print("")
+
+# Check the unit system
+if "Units" in f.keys() and "Unit length in cgs (U_L)" in f["Units"].attrs.keys():
+    Length_Unit = f["Units"].attrs["Unit length in cgs (U_L)"] / Mpc_cgs  # Mpc
+else:
+    Length_Unit = 1.0  # Mpc
+
+# Extract the box dimensions and volume
+L = f["Header"].attrs["BoxSize"] / Length_Unit  # Mpc
+V = L ** 3 if np.isscalar(L) else np.prod(L)  # Mpc^3
+if not np.isscalar(L) and len(L) != 3:
+    raise ValueError("Box size must be a scalar or a vector of length 3")
+
+# Check that the file does not have any neutrinos
+nparts = f["Header"].attrs["NumPart_Total"]
+while len(nparts) < 7:
+    nparts = np.append(nparts, 0)
+if nparts[6] != 0 or "PartType6" in f.keys():
+    raise IOError("This file already has neutrinos.")
+
+# Compute the default number of neutrinos (round to nearest cubic number)
+Default_N_nu = round((Default_nr_neutrinos_per_Mpc3 * V) ** (1.0 / 3.0))
+Default_Nr_neutrinos = int(Default_N_nu ** 3)
+
+print("The box dimensions are " + str(L) + " Mpc.")
+print(
+    "The default number of neutrinos is "
+    + "%g" % Default_N_nu_per100Mpc
+    + "^3 per (100 Mpc)^3."
+)
+print(
+    "The default number of neutrinos is "
+    + "%g" % Default_N_nu
+    + "^3 = "
+    + str(Default_Nr_neutrinos)
+    + "."
+)
+print("")
+
+# Request the number of neutrino particles to be spawned (with default option)
+Nr_neutrinos = int(
+    input(
+        "Enter the number of neutrinos (default " + "%d" % Default_Nr_neutrinos + "): "
+    )
+    or "%d" % Default_Nr_neutrinos
+)
+
+nparts[6] = Nr_neutrinos
+
+print("")
+print("The number of particles per type will be:")
+print("{:25s}: {:12d}".format("Gas", nparts[0]))
+print("{:25s}: {:12d}".format("Dark Matter", nparts[1]))
+print("{:25s}: {:12d}".format("Background Dark Matter", nparts[2]))
+print("{:25s}: {:12d}".format("Sinks", nparts[3]))
+print("{:25s}: {:12d}".format("Stars", nparts[4]))
+print("{:25s}: {:12d}".format("Black Holes", nparts[5]))
+print("{:25s}: {:12d}".format("Neutrinos", nparts[6]))
+print("")
+
+firstID = int(nparts[0:6].sum() + 1)
+print("The particle ID of the first neutrino will be: " + str(firstID))
+print("")
+
+confirm = input("Enter y to confirm: ")
+if confirm != "y":
+    print("Not confirmed. Done for now.")
+    exit(0)
+
+print("")
+print("Generating particle data...")
+
+# Generate particle data
+x = np.random.uniform(0, L, (Nr_neutrinos, 3)) * Length_Unit
+v = np.zeros((Nr_neutrinos, 3))
+m = np.zeros(Nr_neutrinos)
+ids = np.arange(firstID, firstID + Nr_neutrinos)
+
+print("Writing particle data...")
+
+# Store the particle data
+f.create_group("PartType6")
+f["PartType6/Coordinates"] = x
+f["PartType6/Velocities"] = v
+f["PartType6/Masses"] = m
+f["PartType6/ParticleIDs"] = ids
+
+# Update the header
+f["Header"].attrs["NumPart_Total"] = nparts
+
+print("All done.")
+
+# And close
+f.close()