Commit 17295369 authored by Matthieu Schaller

Merge branch 'master' into parallel_gpart_sort

parents ba06e161 137cd5fc
@@ -63,13 +63,15 @@ swift_fixdt_mpi_LDADD = ../src/.libs/libswiftsim_mpi.a $(HDF5_LDFLAGS) $(HDF5_L
# Scripts to generate ICs
EXTRA_DIST = UniformBox/makeIC.py \
PerturbedBox/makeIC.py \
UniformDMBox/makeIC.py \
SedovBlast/makeIC.py SedovBlast/makeIC_fcc.py SedovBlast/solution.py \
SodShock/makeIC.py SodShock/solution.py SodShock/glass_001.hdf5 SodShock/glass_002.hdf5 SodShock/rhox.py \
CosmoVolume/getIC.sh \
BigCosmoVolume/makeIC.py \
BigPerturbedBox/makeIC_fcc.py \
GreshoVortex/makeIC.py GreshoVortex/solution.py \
MultiTypes/makeIC.py
# Scripts to plot task graphs
......
###############################################################################
# This file is part of SWIFT.
# Copyright (c) 2013 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
# Matthieu Schaller (matthieu.schaller@durham.ac.uk)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import h5py
import sys
from numpy import *
# Generates a SWIFT IC file containing a cartesian distribution of gas
# particles (at density rhoGas) and of DM particles (at density rhoDM)
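# Usage: python makeIC.py <Lgas> <Ldm>
#   Lgas, Ldm: number of gas / DM particles along one axis (read from argv below)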
# Parameters
periodic = 1 # 1 for a periodic box
boxSize = 1.
Lgas = int(sys.argv[1]) # Number of particles along one axis
rhoGas = 2. # Density
P = 1. # Pressure
gamma = 5./3. # Gas adiabatic index
rhoDM = 1.
Ldm = int(sys.argv[2]) # Number of particles along one axis
fileName = "multiTypes.hdf5"
#---------------------------------------------------
numGas = Lgas**3
massGas = boxSize**3 * rhoGas / numGas
internalEnergy = P / ((gamma - 1.)*rhoGas)
numDM = Ldm**3
massDM = boxSize**3 * rhoDM / numDM
#--------------------------------------------------
# File
file = h5py.File(fileName, 'w')
# Header
grp = file.create_group("/Header")
grp.attrs["BoxSize"] = boxSize
grp.attrs["NumPart_Total"] = [numGas, numDM, 0, 0, 0, 0]
grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
grp.attrs["NumPart_ThisFile"] = [numGas, numDM, 0, 0, 0, 0]
grp.attrs["Time"] = 0.0
grp.attrs["NumFilesPerSnapshot"] = 1
grp.attrs["MassTable"] = [0.0, massDM, 0.0, 0.0, 0.0, 0.0]
grp.attrs["Flag_Entropy_ICs"] = 0
# Runtime parameters
grp = file.create_group("/RuntimePars")
grp.attrs["PeriodicBoundariesOn"] = periodic
# Gas Particle group
grp = file.create_group("/PartType0")
v = zeros((numGas, 3))
ds = grp.create_dataset('Velocities', (numGas, 3), 'f')
ds[()] = v
v = zeros(1)
m = full((numGas, 1), massGas)
ds = grp.create_dataset('Masses', (numGas,1), 'f')
ds[()] = m
m = zeros(1)
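# Smoothing length: 1.1255 times the mean inter-particle separation (the same
# resolution factor used by the other SWIFT example IC scripts).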
h = full((numGas, 1), 1.1255 * boxSize / Lgas)
ds = grp.create_dataset('SmoothingLength', (numGas,1), 'f')
ds[()] = h
h = zeros(1)
u = full((numGas, 1), internalEnergy)
ds = grp.create_dataset('InternalEnergy', (numGas,1), 'f')
ds[()] = u
u = zeros(1)
ids = linspace(0, numGas, numGas, endpoint=False).reshape((numGas,1))
ds = grp.create_dataset('ParticleIDs', (numGas, 1), 'L')
ds[()] = ids + 1
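# Recover lattice coordinates from the linear index:
# ids = x + Lgas * y + Lgas**2 * z, with x varying fastest.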
x = ids % Lgas
y = ((ids - x) / Lgas) % Lgas
z = (ids - x - Lgas * y) / Lgas**2
coords = zeros((numGas, 3))
coords[:,0] = z[:,0] * boxSize / Lgas + boxSize / (2*Lgas)
coords[:,1] = y[:,0] * boxSize / Lgas + boxSize / (2*Lgas)
coords[:,2] = x[:,0] * boxSize / Lgas + boxSize / (2*Lgas)
ds = grp.create_dataset('Coordinates', (numGas, 3), 'd')
ds[()] = coords
# DM Particle group
grp = file.create_group("/PartType1")
v = zeros((numDM, 3))
ds = grp.create_dataset('Velocities', (numDM, 3), 'f')
ds[()] = v
v = zeros(1)
m = full((numDM, 1), massDM)
ds = grp.create_dataset('Masses', (numDM,1), 'f')
ds[()] = m
m = zeros(1)
ids = linspace(0, numDM, numDM, endpoint=False).reshape((numDM,1))
ds = grp.create_dataset('ParticleIDs', (numDM, 1), 'L')
ds[()] = ids + Lgas**3 + 1
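# Same linear-index to lattice-coordinate mapping as for the gas above.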
x = ids % Ldm
y = ((ids - x) / Ldm) % Ldm
z = (ids - x - Ldm * y) / Ldm**2
coords = zeros((numDM, 3))
coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
ds = grp.create_dataset('Coordinates', (numDM, 3), 'd')
ds[()] = coords
file.close()
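A quick sanity check of the generated file is to read it back with h5py. The sketch below is illustrative only (the check_ic.py name is hypothetical, not part of the repository); the group and dataset names are exactly those written by the script above:

# check_ic.py -- read back multiTypes.hdf5 and print what was written
import h5py

with h5py.File("multiTypes.hdf5", "r") as f:
    header = f["/Header"].attrs
    print("BoxSize:", header["BoxSize"])
    print("NumPart_Total:", list(header["NumPart_Total"]))
    print("Gas coordinates:", f["/PartType0/Coordinates"].shape)
    print("DM coordinates:", f["/PartType1/Coordinates"].shape)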
/*******************************************************************************
* This file is part of SWIFT.
* Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
@@ -79,7 +80,9 @@ int main(int argc, char *argv[]) {
int nr_nodes = 1, myrank = 0;
FILE *file_thread;
int with_outputs = 1;
int with_gravity = 0;
int engine_policies = 0;
int verbose = 0, talking = 0;
unsigned long long cpufreq = 0;
#ifdef WITH_MPI
@@ -97,12 +100,15 @@ int main(int argc, char *argv[]) {
#endif
#endif
/* Choke on FP-exceptions. */
// feenableexcept( FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW );
/* Initialize CPU frequency, this also starts time. */
clocks_set_cpufreq(cpufreq);
#ifdef WITH_MPI
/* Start by initializing MPI. */
int res = 0, prov = 0;
if ((res = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &prov)) !=
MPI_SUCCESS)
error("Call to MPI_Init failed with error %i.", res);
@@ -128,9 +134,6 @@ int main(int argc, char *argv[]) {
&initial_partition.grid[1], &initial_partition.grid[0]);
#endif
/* Greeting message */
if (myrank == 0) greetings();
@@ -156,7 +159,7 @@ int main(int argc, char *argv[]) {
bzero(&s, sizeof(struct space));
/* Parse the options */
while ((c = getopt(argc, argv, "a:c:d:e:f:h:m:oP:q:R:s:t:v:w:y:z:")) != -1)
while ((c = getopt(argc, argv, "a:c:d:e:f:gh:m:oP:q:R:s:t:v:w:y:z:")) != -1)
switch (c) {
case 'a':
if (sscanf(optarg, "%lf", &scaling) != 1)
@@ -185,6 +188,9 @@ int main(int argc, char *argv[]) {
case 'f':
if (!strcpy(ICfileName, optarg)) error("Error parsing IC file name.");
break;
case 'g':
with_gravity = 1;
break;
case 'h':
if (sscanf(optarg, "%llu", &cpufreq) != 1)
error("Error parsing CPU frequency.");
@@ -356,11 +362,11 @@ int main(int argc, char *argv[]) {
if (myrank == 0) clocks_gettime(&tic);
#if defined(WITH_MPI)
#if defined(HAVE_PARALLEL_HDF5)
read_ic_parallel(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
read_ic_serial(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else
read_ic_single(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic);
@@ -376,6 +382,7 @@ int main(int argc, char *argv[]) {
#if defined(WITH_MPI)
long long N_long[2] = {Ngas, Ngpart};
MPI_Reduce(&N_long, &N_total, 2, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
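/* The gpart count includes the gas particles, so subtract the gas total to
   get the number of true DM particles (mirrors the non-MPI branch below). */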
N_total[1] -= N_total[0];
if (myrank == 0)
message("Read %lld gas particles and %lld DM particles from the ICs",
N_total[0], N_total[1]);
@@ -383,9 +390,34 @@ int main(int argc, char *argv[]) {
N_total[0] = Ngas;
N_total[1] = Ngpart - Ngas;
message("Read %lld gas particles and %lld DM particles from the ICs",
N_total[0], N_total[1]);
#endif
/* MATTHIEU: Temporary fix to preserve master */
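/* When gravity was not requested (-g), discard the gparts read from the ICs
   and clear the gas particles' links to them, so the hydro-only code path
   behaves as on master. */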
if (!with_gravity) {
free(gparts);
for (size_t k = 0; k < Ngas; ++k) parts[k].gpart = NULL;
Ngpart = 0;
#if defined(WITH_MPI)
N_long[0] = Ngas;
N_long[1] = Ngpart;
MPI_Reduce(&N_long, &N_total, 2, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
if (myrank == 0)
message(
"AFTER FIX: Read %lld gas particles and %lld DM particles from the "
"ICs",
N_total[0], N_total[1]);
#else
N_total[0] = Ngas;
N_total[1] = Ngpart;
message(
"AFTER FIX: Read %lld gas particles and %lld DM particles from the ICs",
N_total[0], N_total[1]);
#endif
}
/* MATTHIEU: End temporary fix */
/* Apply h scaling */
if (scaling != 1.0)
for (size_t k = 0; k < Ngas; k++) parts[k].h *= scaling;
@@ -448,12 +480,15 @@ int main(int argc, char *argv[]) {
message("nr of cells at depth %i is %i.", data[0], data[1]);
}
/* Construct the engine policy */
engine_policies = ENGINE_POLICY | engine_policy_steal | engine_policy_hydro;
if (with_gravity) engine_policies |= engine_policy_external_gravity;
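/* Policies form a bitmask: optional features are OR-ed into the default set
   before being handed to engine_init(). */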
/* Initialize the engine with this space. */
if (myrank == 0) clocks_gettime(&tic);
if (myrank == 0) message("nr_nodes is %i.", nr_nodes);
engine_init(&e, &s, dt_max, nr_threads, nr_queues, nr_nodes, myrank,
engine_policies, 0, time_end, dt_min, dt_max, talking);
if (myrank == 0 && verbose) {
clocks_gettime(&toc);
message("engine_init took %.3f %s.", clocks_diff(&tic, &toc),
......
@@ -46,9 +46,9 @@ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
# Include files for distribution, not installation.
nobase_noinst_HEADERS = approx_math.h atomic.h cycle.h error.h inline.h kernel.h vector.h \
runner_doiact.h runner_doiact_grav.h units.h intrinsics.h minmax.h \
gravity.h gravity_io.h \
gravity/Default/gravity.h gravity/Default/gravity_iact.h gravity/Default/gravity_io.h \
gravity/Default/gravity_debug.h gravity/Default/gravity_part.h \
hydro.h hydro_io.h \
hydro/Minimal/hydro.h hydro/Minimal/hydro_iact.h hydro/Minimal/hydro_io.h \
hydro/Minimal/hydro_debug.h hydro/Minimal/hydro_part.h \
......
@@ -45,6 +45,9 @@
#include "kernel.h"
#include "version.h"
const char* particle_type_names[NUM_PARTICLE_TYPES] = {
"Gas", "DM", "Boundary", "Dummy", "Star", "BH"};
/**
* @brief Converts a C data type to the HDF5 equivalent.
*
@@ -402,52 +405,68 @@ void createXMFfile() {
*snapshot
*
* @param xmfFile The file to write in.
* @param hdfFileName The name of the HDF5 file corresponding to this output.
* @param time The current simulation time.
*/
void writeXMFoutputheader(FILE* xmfFile, char* hdfFileName, float time) {
/* Write the header of this output's XMF description */
fprintf(xmfFile, "<!-- XMF description for file: %s -->\n", hdfFileName);
fprintf(xmfFile,
"<Grid GridType=\"Collection\" CollectionType=\"Spatial\">\n");
fprintf(xmfFile, "<Time Type=\"Single\" Value=\"%f\"/>\n", time);
fprintf(xmfFile, "<Grid Name=\"Gas\" GridType=\"Uniform\">\n");
fprintf(xmfFile,
"<Topology TopologyType=\"Polyvertex\" Dimensions=\"%lld\"/>\n",
Nparts);
fprintf(xmfFile, "<Geometry GeometryType=\"XYZ\">\n");
fprintf(xmfFile,
"<DataItem Dimensions=\"%lld 3\" NumberType=\"Double\" "
"Precision=\"8\" "
"Format=\"HDF\">%s:/PartType0/Coordinates</DataItem>\n",
Nparts, hdfFileName);
fprintf(xmfFile, "</Geometry>");
}
/**
* @brief Writes the end of the XMF file (closes all open markups)
*
* @param xmfFile The file to write in.
* @param output The number of this output.
* @param time The current simulation time.
*/
void writeXMFoutputfooter(FILE* xmfFile, int output, float time) {
/* Write end of the section of this time step */
fprintf(xmfFile, "\n</Grid>\n");
fprintf(xmfFile, "</Grid>\n");
fprintf(xmfFile, "\n</Grid>\n");
fprintf(xmfFile,
"\n</Grid> <!-- End of meta-data for output=%03i, time=%f -->\n",
output, time);
fprintf(xmfFile, "\n</Grid> <!-- timeSeries -->\n");
fprintf(xmfFile, "</Domain>\n");
fprintf(xmfFile, "</Xdmf>\n");
fclose(xmfFile);
}
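/**
 * @brief Writes the header of an XMF group: the grid and geometry of one
 * particle type in the current snapshot.
 *
 * @param xmfFile The file to write in.
 * @param hdfFileName The name of the HDF5 file corresponding to this output.
 * @param N The number of particles of this type.
 * @param ptype The #PARTICLE_TYPE of the group.
 */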
void writeXMFgroupheader(FILE* xmfFile, char* hdfFileName, size_t N,
enum PARTICLE_TYPE ptype) {
fprintf(xmfFile, "\n<Grid Name=\"%s\" GridType=\"Uniform\">\n",
particle_type_names[ptype]);
fprintf(xmfFile,
"<Topology TopologyType=\"Polyvertex\" Dimensions=\"%zi\"/>\n", N);
fprintf(xmfFile, "<Geometry GeometryType=\"XYZ\">\n");
fprintf(xmfFile,
"<DataItem Dimensions=\"%zi 3\" NumberType=\"Double\" "
"Precision=\"8\" "
"Format=\"HDF\">%s:/PartType%d/Coordinates</DataItem>\n",
N, hdfFileName, ptype);
fprintf(xmfFile,
"</Geometry>\n <!-- Done geometry for %s, start of particle fields "
"list -->\n",
particle_type_names[ptype]);
}
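/**
 * @brief Writes the footer of an XMF group (closes the grid of one particle
 * type).
 *
 * @param xmfFile The file to write in.
 * @param ptype The #PARTICLE_TYPE of the group.
 */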
void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype) {
fprintf(xmfFile, "</Grid> <!-- End of meta-data for parttype=%s -->\n",
particle_type_names[ptype]);
}
/**
* @brief Writes the lines corresponding to an array of the HDF5 output
*
* @param xmfFile The file in which to write
* @param fileName The name of the HDF5 file associated to this XMF descriptor.
* @param partTypeGroupName The name of the group containing the particles in
*the HDF5 file.
* @param name The name of the array in the HDF5 file.
* @param N The number of particles.
* @param dim The dimension of the quantity (1 for scalars, 3 for vectors).
@@ -455,21 +474,21 @@ void writeXMFfooter(FILE* xmfFile) {
*
* @todo Treat the types in a better way.
*/
void writeXMFline(FILE* xmfFile, char* fileName, char* partTypeGroupName,
char* name, size_t N, int dim, enum DATA_TYPE type) {
fprintf(xmfFile,
"<Attribute Name=\"%s\" AttributeType=\"%s\" Center=\"Node\">\n",
name, dim == 1 ? "Scalar" : "Vector");
if (dim == 1)
fprintf(xmfFile,
"<DataItem Dimensions=\"%lld\" NumberType=\"Double\" "
"Precision=\"%d\" Format=\"HDF\">%s:/PartType0/%s</DataItem>\n",
N, type == FLOAT ? 4 : 8, fileName, name);
"<DataItem Dimensions=\"%zi\" NumberType=\"Double\" "
"Precision=\"%d\" Format=\"HDF\">%s:%s/%s</DataItem>\n",
N, type == FLOAT ? 4 : 8, fileName, partTypeGroupName, name);
else
fprintf(xmfFile,
"<DataItem Dimensions=\"%lld %d\" NumberType=\"Double\" "
"Precision=\"%d\" Format=\"HDF\">%s:/PartType0/%s</DataItem>\n",
N, dim, type == FLOAT ? 4 : 8, fileName, name);
"<DataItem Dimensions=\"%zi %d\" NumberType=\"Double\" "
"Precision=\"%d\" Format=\"HDF\">%s:%s/%s</DataItem>\n",
N, dim, type == FLOAT ? 4 : 8, fileName, partTypeGroupName, name);
fprintf(xmfFile, "</Attribute>\n");
}
@@ -489,7 +508,8 @@ void prepare_dm_gparts(struct gpart* gparts, size_t Ndm) {
for (size_t i = 0; i < Ndm; ++i) {
/* 0 or negative ids are not allowed */
if (gparts[i].id <= 0)
error("0 or negative ID for DM particle %zd: ID=%lld", i, gparts[i].id);
gparts[i].id = -gparts[i].id;
}
......
@@ -70,6 +70,11 @@ enum PARTICLE_TYPE {
NUM_PARTICLE_TYPES
};
extern const char* particle_type_names[];
#define FILENAME_BUFFER_SIZE 150
#define PARTICLE_GROUP_BUFFER_SIZE 20
hid_t hdf5Type(enum DATA_TYPE type);
size_t sizeOfType(enum DATA_TYPE type);
@@ -92,10 +97,13 @@ void writeAttribute_s(hid_t grp, char* name, const char* str);
void createXMFfile();
FILE* prepareXMFfile();
void writeXMFoutputheader(FILE* xmfFile, char* hdfFileName, float time);
void writeXMFoutputfooter(FILE* xmfFile, int outputCount, float time);
void writeXMFgroupheader(FILE* xmfFile, char* hdfFileName, size_t N,
enum PARTICLE_TYPE ptype);
void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype);
void writeXMFline(FILE* xmfFile, char* fileName, char* partTypeGroupName,
char* name, size_t N, int dim, enum DATA_TYPE type);
void writeCodeDescription(hid_t h_file);
void writeSPHflavour(hid_t h_file);
......
@@ -87,14 +87,17 @@ struct link *engine_addlink(struct engine *e, struct link *l, struct task *t) {
}
/**
* @brief Generate the ghosts and all the O(Npart) tasks for a hierarchy of cells.
*
* Tasks are only created here. The dependencies will be added later on.
*
* @param e The #engine.
* @param c The #cell.
* @param super The super #cell.
*/
void engine_make_ghost_tasks(struct engine *e, struct cell *c,
struct cell *super) {
struct scheduler *s = &e->sched;
@@ -128,7 +131,8 @@ void engine_mkghosts(struct engine *e, struct cell *c, struct cell *super) {
/* Recurse. */
if (c->split)
for (int k = 0; k < 8; k++)
if (c->progeny[k] != NULL)
engine_make_ghost_tasks(e, c->progeny[k], super);
}
/**
@@ -256,13 +260,11 @@ void engine_redistribute(struct engine *e) {
}
/* Verify that all parts are in the right place. */
/* for ( int k = 0 ; k < nr_parts ; k++ ) {
int cid = cell_getid( cdim , parts_new[k].x[0]*ih[0], parts_new[k].x[1]*ih[1], parts_new[k].x[2]*ih[2] );
if ( cells[ cid ].nodeID != nodeID )
error( "Received particle (%i) that does not belong here (nodeID=%i)."
, k , cells[ cid ].nodeID );
} */
error( "Received particle (%i) that does not belong here (nodeID=%i).", k , cells[ cid ].nodeID );
} */
/* Set the new part data, free the old. */
free(parts);
@@ -509,7 +511,7 @@ void engine_exchange_cells(struct engine *e) {
/* Wait for each count to come in and start the recv. */
for (int k = 0; k < nr_proxies; k++) {
int pid = MPI_UNDEFINED;
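/* MPI_Waitany leaves the index at MPI_UNDEFINED if no active request
   completed, which the check below treats as an error. */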
if (MPI_Waitany(nr_proxies, reqs_in, &pid, &status) != MPI_SUCCESS ||
pid == MPI_UNDEFINED)
error("MPI_Waitany failed.");
@@ -529,7 +531,7 @@ void engine_exchange_cells(struct engine *e) {
/* Wait for each pcell array to come in from the proxies. */
for (int k = 0; k < nr_proxies; k++) {
int pid = MPI_UNDEFINED;
if (MPI_Waitany(nr_proxies, reqs_in, &pid, &status) != MPI_SUCCESS ||
pid == MPI_UNDEFINED)
error("MPI_Waitany failed.");
@@ -633,7 +635,7 @@ int engine_exchange_strays(struct engine *e, int offset, size_t *ind,
/* Wait for each count to come in and start the recv. */
for (int k = 0; k < e->nr_proxies; k++) {
int pid = MPI_UNDEFINED;
if (MPI_Waitany(e->nr_proxies, reqs_in, &pid, MPI_STATUS_IGNORE) !=
MPI_SUCCESS ||
pid == MPI_UNDEFINED)
@@ -689,7 +691,7 @@ int engine_exchange_strays(struct engine *e, int offset, size_t *ind,
parts from the proxies. */
size_t count = 0;
for (int k = 0; k < 2 * (nr_in + nr_out); k++) {
int err = 0, pid = MPI_UNDEFINED;
if ((err = MPI_Waitany(2 * e->nr_proxies, reqs_in, &pid,
MPI_STATUS_IGNORE)) != MPI_SUCCESS) {
char buff[MPI_MAX_ERROR_STRING];
@@ -735,43 +737,42 @@ int engine_exchange_strays(struct engine *e, int offset, size_t *ind,
}
/**
* @brief Constructs the top-level pair tasks for the first hydro loop over
* neighbours.
*
* Here we construct all the tasks for all possible neighbouring non-empty
* local cells in the hierarchy. No dependencies are being added thus far.
* Additional loops over neighbours can later be added by simply duplicating
* all the tasks created by this function.
*
* @param e The #engine.
*/
void engine_make_hydroloop_tasks(struct engine *e) {
struct space *s = e->s;
struct scheduler *sched = &e->sched;
const int nodeID = e->nodeID;
const int *cdim = s->cdim;
struct cell *cells = s->cells;
/* Run through the highest level of cells and add pairs. */
for (int i = 0; i < cdim[0]; i++) {
for (int j = 0; j < cdim[1]; j++) {
for (int k = 0; k < cdim[2]; k++) {
/* Get the cell */
const int cid = cell_getid(cdim, i, j, k);
struct cell *ci = &cells[cid];
/* Skip cells without hydro particles */
if (ci->count == 0) continue;
/* If the cell is local, build a self-interaction */