Commit 3e46f750 authored by Matthieu Schaller's avatar Matthieu Schaller

Added a command-line option '-d' to execute a dry run, which reads the parameter file and opens the ICs to check whether everything is OK.
parent 91a413a2
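Before the diff itself, a minimal, self-contained C sketch of the pattern this commit introduces (this is not the SWIFT source; the option handling and messages are simplified and illustrative only): a '-d' flag parsed with getopt sets a dry_run flag, the steps that can fail (parameter parsing, IC metadata, memory allocation) are still exercised, and the program exits before any particle I/O or time integration.

```c
/* Minimal sketch of the dry-run gating pattern added by this commit.
 * NOT the SWIFT sources; names and messages are illustrative only. */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[]) {
  int dry_run = 0;
  int c;

  /* Parse the command line; '-d' switches on the dry run. */
  while ((c = getopt(argc, argv, "d")) != -1) {
    if (c == 'd') dry_run = 1;
  }

  /* Both a real run and a dry run read the parameter file and allocate
   * the particle arrays, so errors in those steps show up in either mode. */
  printf("Reading parameters and allocating particle arrays...\n");

  if (dry_run) {
    /* Stop before any particle I/O or time integration. */
    printf("Dry run complete: parameter and IC files look valid.\n");
    return 0;
  }

  printf("Reading particles and starting time integration...\n");
  return 0;
}
```

With the changes below, a command line along the lines of `swift -s -d parameter_example.yml` would then validate the parameter file, open the ICs, and allocate memory, but exit before reading the particles or starting the time integration (the flags other than '-d' depend on the run being set up).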
@@ -50,6 +50,15 @@ void print_help_message() {
printf("\nUsage: swift [OPTION] PARAMFILE\n\n");
printf("Valid options are:\n");
printf(" %2s %8s %s\n", "-c", "", "Run with cosmological time integration");
printf(
" %2s %8s %s\n", "-d", "",
"Dry run. Read the parameter file, allocate memory, but do not read ");
printf(
" %2s %8s %s\n", "", "",
"the particles from the ICs. Exit before the start of time integration.");
printf(" %2s %8s %s\n", "", "",
"Allows the user to check the validity of the parameter and IC files as "
"well as memory limits.");
printf(" %2s %8s %s\n", "-e", "",
"Enable floating-point exceptions (debugging mode)");
printf(" %2s %8s %s\n", "-f", "{int}",
@@ -59,12 +68,14 @@ void print_help_message() {
printf(" %2s %8s %s\n", "-G", "", "Run with self-gravity");
printf(" %2s %8s %s\n", "-s", "", "Run with SPH");
printf(" %2s %8s %s\n", "-v", "[12]",
"Increase the level of verbosity 1: MPI-rank 0 writes "
"2: All MPI-ranks write");
"Increase the level of verbosity 1: MPI-rank 0 writes ");
printf(" %2s %8s %s\n", "", "", "2: All MPI-ranks write");
printf(" %2s %8s %s\n", "-y", "{int}",
"Time-step frequency at which task graphs are dumped");
printf(" %2s %8s %s\n", "-h", "", "Print this help message and exit");
printf("\nSee the file example.yml for an example of parameter file.\n");
printf(
"\nSee the file parameter_example.yml for an example of "
"parameter file.\n");
}
/**
@@ -122,6 +133,7 @@ int main(int argc, char *argv[]) {
/* Welcome to SWIFT, you made the right choice */
if (myrank == 0) greetings();
int dry_run = 0;
int dump_tasks = 0;
int with_cosmology = 0;
int with_external_gravity = 0;
@@ -134,10 +146,13 @@ int main(int argc, char *argv[]) {
/* Parse the parameters */
int c;
while ((c = getopt(argc, argv, "cef:gGhsv:y")) != -1) switch (c) {
while ((c = getopt(argc, argv, "cdef:gGhsv:y")) != -1) switch (c) {
case 'c':
with_cosmology = 1;
break;
case 'd':
dry_run = 1;
break;
case 'e':
with_fp_exceptions = 1;
break;
@@ -145,7 +160,7 @@ int main(int argc, char *argv[]) {
if (sscanf(optarg, "%llu", &cpufreq) != 1) {
if (myrank == 0) printf("Error parsing CPU frequency (-f).\n");
if (myrank == 0) print_help_message();
exit(1);
return 1;
}
break;
case 'g':
@@ -156,7 +171,7 @@ int main(int argc, char *argv[]) {
break;
case 'h':
if (myrank == 0) print_help_message();
exit(0);
return 0;
case 's':
with_hydro = 1;
break;
@@ -164,19 +179,19 @@ int main(int argc, char *argv[]) {
if (sscanf(optarg, "%d", &verbose) != 1) {
if (myrank == 0) printf("Error parsing verbosity level (-v).\n");
if (myrank == 0) print_help_message();
exit(1);
return 1;
}
break;
case 'y':
if (sscanf(optarg, "%d", &dump_tasks) != 1) {
if (myrank == 0) printf("Error parsing dump_tasks (-y). \n");
if (myrank == 0) print_help_message();
exit(1);
return 1;
}
break;
case '?':
if (myrank == 0) print_help_message();
exit(1);
return 1;
break;
}
if (optind == argc - 1) {
@@ -195,6 +210,10 @@ int main(int argc, char *argv[]) {
/* Genesis 1.1: And then, there was time ! */
clocks_set_cpufreq(cpufreq);
if (myrank == 0 && dry_run)
message(
"Executing a dry run. No i/o or time integration will be performed.");
/* Report CPU frequency. */
if (myrank == 0) {
cpufreq = clocks_get_cpufreq();
@@ -261,6 +280,7 @@ int main(int argc, char *argv[]) {
/* Read particles and space information from (GADGET) ICs */
char ICfileName[200] = "";
parser_get_param_string(&params, "InitialConditions:file_name", ICfileName);
if (myrank == 0) message("Reading ICs from file '%s'", ICfileName);
struct part *parts = NULL;
struct gpart *gparts = NULL;
size_t Ngas = 0, Ngpart = 0;
@@ -270,13 +290,14 @@ int main(int argc, char *argv[]) {
#if defined(WITH_MPI)
#if defined(HAVE_PARALLEL_HDF5)
read_ic_parallel(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL, dry_run);
#else
read_ic_serial(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL, dry_run);
#endif
#else
read_ic_single(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic);
read_ic_single(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
dry_run);
#endif
if (myrank == 0) {
clocks_gettime(&toc);
@@ -310,7 +331,8 @@ int main(int argc, char *argv[]) {
/* Initialize the space with these data. */
if (myrank == 0) clocks_gettime(&tic);
struct space s;
space_init(&s, &params, dim, parts, gparts, Ngas, Ngpart, periodic, talking);
space_init(&s, &params, dim, parts, gparts, Ngas, Ngpart, periodic, talking,
dry_run);
if (talking) {
clocks_gettime(&toc);
message("space_init took %.3f %s.", clocks_diff(&tic, &toc),
@@ -331,14 +353,14 @@ int main(int argc, char *argv[]) {
}
/* Verify that each particle is in its proper cell. */
if (talking) {
if (talking && !dry_run) {
int icount = 0;
space_map_cells_pre(&s, 0, &map_cellcheck, &icount);
message("map_cellcheck picked up %i parts.", icount);
}
/* Verify the maximal depth of cells. */
if (talking) {
if (talking && !dry_run) {
int data[2] = {s.maxdepth, 0};
space_map_cells_pre(&s, 0, &map_maxdepth, data);
message("nr of cells at depth %i is %i.", data[0], data[1]);
@@ -362,14 +384,8 @@ int main(int argc, char *argv[]) {
fflush(stdout);
}
#ifdef WITH_MPI
/* Split the space. */
engine_split(&e, &initial_partition);
engine_redistribute(&e);
#endif
int with_outputs = 1;
if (with_outputs) {
if (with_outputs && !dry_run) {
/* Write the state of the system before starting time integration. */
if (myrank == 0) clocks_gettime(&tic);
#if defined(WITH_MPI)
@@ -406,6 +422,20 @@ int main(int argc, char *argv[]) {
fflush(stdout);
}
/* Time to say good-bye if this was not a serious run. */
if (dry_run) {
if (myrank == 0)
message(
"Time integration ready to start. Everything OK. End of dry-run.");
return 0;
}
#ifdef WITH_MPI
/* Split the space. */
engine_split(&e, &initial_partition);
engine_redistribute(&e);
#endif
/* Initialise the particles */
engine_init_particles(&e);
......
@@ -349,6 +349,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
* @param dry_run If 1, don't read the particles; only allocate the arrays.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
@@ -363,7 +364,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm,
MPI_Info info) {
MPI_Info info, int dry_run) {
hid_t h_file = 0, h_grp = 0;
/* GADGET has only cubic boxes (in cosmological mode) */
double boxSize[3] = {0.0, -1.0, -1.0};
@@ -469,13 +470,15 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
switch (ptype) {
case GAS:
hydro_read_particles(h_grp, N[ptype], N_total[ptype], offset[ptype],
*parts);
if (!dry_run)
hydro_read_particles(h_grp, N[ptype], N_total[ptype], offset[ptype],
*parts);
break;
case DM:
darkmatter_read_particles(h_grp, N[ptype], N_total[ptype],
offset[ptype], *gparts);
if (!dry_run)
darkmatter_read_particles(h_grp, N[ptype], N_total[ptype],
offset[ptype], *gparts);
break;
default:
@@ -487,10 +490,10 @@ void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
}
/* Prepare the DM particles */
prepare_dm_gparts(*gparts, Ndm);
if (!dry_run) prepare_dm_gparts(*gparts, Ndm);
/* Now duplicate the hydro particle into gparts */
duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm);
if (!dry_run) duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm);
/* message("Done Reading particles..."); */
......
@@ -34,7 +34,7 @@
void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm,
MPI_Info info);
MPI_Info info, int dry_run);
void write_output_parallel(struct engine* e, struct UnitSystem* us,
int mpi_rank, int mpi_size, MPI_Comm comm,
......
@@ -412,6 +412,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
* @param mpi_size The number of MPI ranks
* @param comm The MPI communicator
* @param info The MPI information object
* @param dry_run If 1, don't read the particles; only allocate the arrays.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
@@ -420,13 +421,11 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
* @warning Can not read snapshot distributed over more than 1 file !!!
* @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_serial(char* fileName, double dim[3], struct part** parts,
struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm,
MPI_Info info) {
MPI_Info info, int dry_run) {
hid_t h_file = 0, h_grp = 0;
/* GADGET has only cubic boxes (in cosmological mode) */
double boxSize[3] = {0.0, -1.0, -1.0};
@@ -516,10 +515,12 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
/* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / */
/* (1024.*1024.)); */
/* message("BoxSize = %lf", dim[0]); */
/* message("NumPart = [%zd, %zd] Total = %zd", *Ngas, Ndm, *Ngparts); */
/* For dry runs, only need to do this on rank 0 */
if (dry_run) mpi_size = 1;
/* Now loop over ranks and read the data */
for (int rank = 0; rank < mpi_size; ++rank) {
@@ -549,13 +550,15 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
switch (ptype) {
case GAS:
hydro_read_particles(h_grp, N[ptype], N_total[ptype], offset[ptype],
*parts);
if (!dry_run)
hydro_read_particles(h_grp, N[ptype], N_total[ptype],
offset[ptype], *parts);
break;
case DM:
darkmatter_read_particles(h_grp, N[ptype], N_total[ptype],
offset[ptype], *gparts);
if (!dry_run)
darkmatter_read_particles(h_grp, N[ptype], N_total[ptype],
offset[ptype], *gparts);
break;
default:
@@ -575,10 +578,10 @@ void read_ic_serial(char* fileName, double dim[3], struct part** parts,
}
/* Prepare the DM particles */
prepare_dm_gparts(*gparts, Ndm);
if (!dry_run) prepare_dm_gparts(*gparts, Ndm);
/* Now duplicate the hydro particle into gparts */
duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm);
if (!dry_run) duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm);
/* message("Done Reading particles..."); */
}
......
@@ -34,7 +34,7 @@
void read_ic_serial(char* fileName, double dim[3], struct part** parts,
struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm,
MPI_Info info);
MPI_Info info, int dry_run);
void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank,
int mpi_size, MPI_Comm comm, MPI_Info info);
......
@@ -319,6 +319,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
* @param Ngas (output) number of Gas particles read.
* @param Ngparts (output) The number of #gpart read.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
* @param dry_run If 1, don't read the particles; only allocate the arrays.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
@@ -327,12 +328,10 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
* @warning Can not read snapshot distributed over more than 1 file !!!
* @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_single(char* fileName, double dim[3], struct part** parts,
struct gpart** gparts, size_t* Ngas, size_t* Ngparts,
int* periodic) {
int* periodic, int dry_run) {
hid_t h_file = 0, h_grp = 0;
/* GADGET has only cubic boxes (in cosmological mode) */
double boxSize[3] = {0.0, -1.0, -1.0};
@@ -426,11 +425,11 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
switch (ptype) {
case GAS:
hydro_read_particles(h_grp, *Ngas, *Ngas, 0, *parts);
if (!dry_run) hydro_read_particles(h_grp, *Ngas, *Ngas, 0, *parts);
break;
case DM:
darkmatter_read_particles(h_grp, Ndm, Ndm, 0, *gparts);
if (!dry_run) darkmatter_read_particles(h_grp, Ndm, Ndm, 0, *gparts);
break;
default:
@@ -442,10 +441,10 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts,
}
/* Prepare the DM particles */
prepare_dm_gparts(*gparts, Ndm);
if (!dry_run) prepare_dm_gparts(*gparts, Ndm);
/* Now duplicate the hydro particle into gparts */
duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm);
if (!dry_run) duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm);
/* message("Done Reading particles..."); */
......
@@ -28,7 +28,7 @@
void read_ic_single(char* fileName, double dim[3], struct part** parts,
struct gpart** gparts, size_t* Ngas, size_t* Ndm,
int* periodic);
int* periodic, int dry_run);
void write_output_single(struct engine* e, struct UnitSystem* us);
......
@@ -1270,6 +1270,7 @@ struct cell *space_getcell(struct space *s) {
* @param Ngpart The number of Gravity particles in the space.
* @param periodic flag whether the domain is periodic or not.
* @param verbose Print messages to stdout or not
* @param dry_run If 1, only initialise the space structure; don't modify the particles or build the cells.
*
* Makes a grid of edge length > r_max and fills the particles
* into the respective cells. Cells containing more than #space_splitsize
@@ -1279,7 +1280,8 @@ struct cell *space_getcell(struct space *s) {
void space_init(struct space *s, const struct swift_params *params,
double dim[3], struct part *parts, struct gpart *gparts,
size_t Npart, size_t Ngpart, int periodic, int verbose) {
size_t Npart, size_t Ngpart, int periodic, int verbose,
int dry_run) {
/* Clean-up everything */
bzero(s, sizeof(struct space));
@@ -1307,7 +1309,7 @@ void space_init(struct space *s, const struct swift_params *params,
/* Apply h scaling */
const double scaling =
parser_get_param_double(params, "InitialConditions:h_scaling");
if (scaling != 1.0) {
if (scaling != 1.0 && !dry_run) {
message("Re-scaling smoothing lengths by a factor %e", scaling);
for (size_t k = 0; k < Npart; k++) parts[k].h *= scaling;
}
@@ -1317,7 +1319,7 @@ void space_init(struct space *s, const struct swift_params *params,
shift[0] = parser_get_param_double(params, "InitialConditions:shift_x");
shift[1] = parser_get_param_double(params, "InitialConditions:shift_y");
shift[2] = parser_get_param_double(params, "InitialConditions:shift_z");
if (shift[0] != 0 || shift[1] != 0 || shift[2] != 0) {
if ((shift[0] != 0 || shift[1] != 0 || shift[2] != 0) && !dry_run) {
message("Shifting particles by [%e %e %e]", shift[0], shift[1], shift[2]);
for (size_t k = 0; k < Npart; k++) {
parts[k].x[0] += shift[0];
@@ -1331,32 +1333,35 @@ void space_init(struct space *s, const struct swift_params *params,
}
}
/* Check that all the part positions are reasonable, wrap if periodic. */
if (periodic) {
for (int k = 0; k < Npart; k++)
for (int j = 0; j < 3; j++) {
while (parts[k].x[j] < 0) parts[k].x[j] += dim[j];
while (parts[k].x[j] >= dim[j]) parts[k].x[j] -= dim[j];
}
} else {
for (int k = 0; k < Npart; k++)
for (int j = 0; j < 3; j++)
if (parts[k].x[j] < 0 || parts[k].x[j] >= dim[j])
error("Not all particles are within the specified domain.");
}
if (!dry_run) {
/* Same for the gparts */
if (periodic) {
for (int k = 0; k < Ngpart; k++)
for (int j = 0; j < 3; j++) {
while (gparts[k].x[j] < 0) gparts[k].x[j] += dim[j];
while (gparts[k].x[j] >= dim[j]) gparts[k].x[j] -= dim[j];
}
} else {
for (int k = 0; k < Ngpart; k++)
for (int j = 0; j < 3; j++)
if (gparts[k].x[j] < 0 || gparts[k].x[j] >= dim[j])
error("Not all g-particles are within the specified domain.");
/* Check that all the part positions are reasonable, wrap if periodic. */
if (periodic) {
for (int k = 0; k < Npart; k++)
for (int j = 0; j < 3; j++) {
while (parts[k].x[j] < 0) parts[k].x[j] += dim[j];
while (parts[k].x[j] >= dim[j]) parts[k].x[j] -= dim[j];
}
} else {
for (int k = 0; k < Npart; k++)
for (int j = 0; j < 3; j++)
if (parts[k].x[j] < 0 || parts[k].x[j] >= dim[j])
error("Not all particles are within the specified domain.");
}
/* Same for the gparts */
if (periodic) {
for (int k = 0; k < Ngpart; k++)
for (int j = 0; j < 3; j++) {
while (gparts[k].x[j] < 0) gparts[k].x[j] += dim[j];
while (gparts[k].x[j] >= dim[j]) gparts[k].x[j] -= dim[j];
}
} else {
for (int k = 0; k < Ngpart; k++)
for (int j = 0; j < 3; j++)
if (gparts[k].x[j] < 0 || gparts[k].x[j] >= dim[j])
error("Not all g-particles are within the specified domain.");
}
}
/* Allocate the extra parts array. */
@@ -1369,7 +1374,7 @@ void space_init(struct space *s, const struct swift_params *params,
if (lock_init(&s->lock) != 0) error("Failed to create space spin-lock.");
/* Build the cells and the tasks. */
space_regrid(s, s->cell_min, verbose);
if (!dry_run) space_regrid(s, s->cell_min, verbose);
}
/**
......
@@ -134,7 +134,8 @@ int space_getsid(struct space *s, struct cell **ci, struct cell **cj,
double *shift);
void space_init(struct space *s, const struct swift_params *params,
double dim[3], struct part *parts, struct gpart *gparts,
size_t Npart, size_t Ngpart, int periodic, int verbose);
size_t Npart, size_t Ngpart, int periodic, int verbose,
int dry_run);
void space_map_cells_pre(struct space *s, int full,
void (*fun)(struct cell *c, void *data), void *data);
void space_map_parts(struct space *s,
......