SWIFT / SWIFTsim / Commits

Commit 9435dfc5
Authored Aug 29, 2018 by Loic Hausammann

    Implement cell_unskip_stars_tasks + format
parent e9e69cd2
Changes: 23 files

examples/main.c

@@ -676,7 +676,8 @@ int main(int argc, char *argv[]) {
  /* Initialise the stars properties */
  if (with_stars)
    stars_props_init(&stars_properties, &prog_const, &us, params,
                     &hydro_properties);
  else
    bzero(&stars_properties, sizeof(struct stars_props));

@@ -894,8 +895,8 @@ int main(int argc, char *argv[]) {
  if (myrank == 0) clocks_gettime(&tic);

  engine_init(&e, &s, params, N_total[0], N_total[1], N_total[2],
              engine_policies, talking, &reparttype, &us, &prog_const, &cosmo,
              &hydro_properties, &gravity_properties, &stars_properties,
              &mesh, &potential, &cooling_func, &chemistry, &sourceterms);
  engine_config(0, &e, params, nr_nodes, myrank, nr_threads, with_aff,
                talking, restart_file);

src/cell.c

@@ -1994,7 +1994,6 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
void cell_activate_subcell_stars_tasks(struct cell *ci, struct cell *cj,
                                       struct scheduler *s) {}

void cell_activate_grav_mm_task(struct cell *ci, struct cell *cj,
                                struct scheduler *s) {

  /* Some constants */

@@ -2504,15 +2503,153 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
 * @brief Un-skips all the stars tasks associated with a given cell and checks
 * if the space needs to be rebuilt.
 *
- * WARNING: TODO: Need to be implemented
- *
 * @param c the #cell.
 * @param s the #scheduler.
 *
 * @return 1 If the space needs rebuilding. 0 otherwise.
 */
int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
-  return 0;
  struct engine *e = s->space->e;
  const int nodeID = e->nodeID;
  int rebuild = 0;

  /* Un-skip the density tasks involved with this cell. */
  for (struct link *l = c->stars_density; l != NULL; l = l->next) {
    struct task *t = l->t;
    struct cell *ci = t->ci;
    struct cell *cj = t->cj;
    const int ci_active = cell_is_active_stars(ci, e);
    const int cj_active = (cj != NULL) ? cell_is_active_stars(cj, e) : 0;

    /* Only activate tasks that involve a local active cell. */
    if ((ci_active && ci->nodeID == nodeID) ||
        (cj_active && cj->nodeID == nodeID)) {
      scheduler_activate(s, t);

      /* Activate drifts */
      if (t->type == task_type_self) {
        if (ci->nodeID == nodeID) cell_activate_drift_part(ci, s);
        if (ci->nodeID == nodeID) cell_activate_drift_gpart(ci, s);
      }

      /* Set the correct sorting flags and activate hydro drifts */
      else if (t->type == task_type_pair) {
        /* Store some values. */
        atomic_or(&ci->requires_sorts, 1 << t->flags);
        atomic_or(&cj->requires_sorts, 1 << t->flags);
        ci->dx_max_sort_old = ci->dx_max_sort;
        cj->dx_max_sort_old = cj->dx_max_sort;

        /* Activate the drift tasks. */
        if (ci->nodeID == nodeID) cell_activate_drift_part(ci, s);
        if (cj->nodeID == nodeID) cell_activate_drift_part(cj, s);

        /* Check the sorts and activate them if needed. */
        cell_activate_sorts(ci, t->flags, s);
        cell_activate_sorts(cj, t->flags, s);
      }

      /* Store current values of dx_max and h_max. */
      else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
        cell_activate_subcell_stars_tasks(t->ci, t->cj, s);
      }
    }

    /* Only interested in pair interactions as of here. */
    if (t->type == task_type_pair || t->type == task_type_sub_pair) {

      /* Check whether there was too much particle motion, i.e. the
         cell neighbour conditions were violated. */
      if (cell_need_rebuild_for_pair(ci, cj)) rebuild = 1;

#ifdef WITH_MPI
      error("MPI with stars not implemented");
/* /\* Activate the send/recv tasks. *\/ */
/* if (ci->nodeID != nodeID) { */
/* /\* If the local cell is active, receive data from the foreign cell.
* *\/ */
/* if (cj_active) { */
/* scheduler_activate(s, ci->recv_xv); */
/* if (ci_active) { */
/* scheduler_activate(s, ci->recv_rho); */
/* } */
/* } */
/* /\* If the foreign cell is active, we want its ti_end values. *\/ */
/* if (ci_active) scheduler_activate(s, ci->recv_ti); */
/* /\* Is the foreign cell active and will need stuff from us? *\/ */
/* if (ci_active) { */
/* scheduler_activate_send(s, cj->send_xv, ci->nodeID); */
/* /\* Drift the cell which will be sent; note that not all sent */
/* particles will be drifted, only those that are needed. *\/ */
/* cell_activate_drift_part(cj, s); */
/* /\* If the local cell is also active, more stuff will be needed.
* *\/ */
/* if (cj_active) { */
/* scheduler_activate_send(s, cj->send_rho, ci->nodeID); */
/* } */
/* } */
/* /\* If the local cell is active, send its ti_end values. *\/ */
/* if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
*/
/* } else if (cj->nodeID != nodeID) { */
/* /\* If the local cell is active, receive data from the foreign cell.
* *\/ */
/* if (ci_active) { */
/* scheduler_activate(s, cj->recv_xv); */
/* if (cj_active) { */
/* scheduler_activate(s, cj->recv_rho); */
/* } */
/* } */
/* /\* If the foreign cell is active, we want its ti_end values. *\/ */
/* if (cj_active) scheduler_activate(s, cj->recv_ti); */
/* /\* Is the foreign cell active and will need stuff from us? *\/ */
/* if (cj_active) { */
/* scheduler_activate_send(s, ci->send_xv, cj->nodeID); */
/* /\* Drift the cell which will be sent; note that not all sent */
/* particles will be drifted, only those that are needed. *\/ */
/* cell_activate_drift_part(ci, s); */
/* /\* If the local cell is also active, more stuff will be needed.
* *\/ */
/* if (ci_active) { */
/* scheduler_activate_send(s, ci->send_rho, cj->nodeID); */
/* } */
/* } */
/* /\* If the local cell is active, send its ti_end values. *\/ */
/* if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
*/
/* } */
#endif
    }
  }

  /* Unskip all the other task types. */
  if (c->nodeID == nodeID && cell_is_active_stars(c, e)) {

    if (c->stars_ghost_in != NULL) scheduler_activate(s, c->stars_ghost_in);
    if (c->stars_ghost_out != NULL) scheduler_activate(s, c->stars_ghost_out);
    if (c->stars_ghost != NULL) scheduler_activate(s, c->stars_ghost);
  }

  return rebuild;
}
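
The function above reports, rather than performs, the rebuild. A minimal sketch of how a caller might consume that return value (this caller is not part of the diff; cells, nr_cells and e are assumed names for the engine's top-level cell array and engine pointer):

  int rebuild_needed = 0;
  for (int k = 0; k < nr_cells; k++) {
    struct cell *c = &cells[k];
    /* Only local cells hold tasks we can un-skip. */
    if (c->nodeID != e->nodeID) continue;
    rebuild_needed |= cell_unskip_stars_tasks(c, &e->sched);
  }
  /* rebuild_needed != 0 would mean the space has to be rebuilt before stepping. */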

@@ -2902,8 +3039,7 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
      /* Drift... */
      drift_spart(sp, dt_drift, ti_old_gpart, ti_current);

      if (spart_is_active(sp, e))
        stars_init_spart(sp);

      /* Note: no need to compute dx_max as all spart have a gpart */
    }

src/cell.h

@@ -534,6 +534,7 @@ void cell_check_gpart_drift_point(struct cell *c, void *data);
void cell_check_multipole_drift_point(struct cell *c, void *data);
void cell_reset_task_counters(struct cell *c);
int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s);
int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s);
int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s);
void cell_set_super(struct cell *c, struct cell *super);
void cell_drift_part(struct cell *c, const struct engine *e, int force);

@@ -546,6 +547,8 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
                                       struct scheduler *s);
void cell_activate_subcell_grav_tasks(struct cell *ci, struct cell *cj,
                                      struct scheduler *s);
void cell_activate_subcell_stars_tasks(struct cell *ci, struct cell *cj,
                                       struct scheduler *s);
void cell_activate_subcell_external_grav_tasks(struct cell *ci,
                                               struct scheduler *s);
void cell_activate_drift_part(struct cell *c, struct scheduler *s);

src/common_io.c

@@ -768,9 +768,10 @@ void io_duplicate_hydro_sparts_mapper(void* restrict data, int Nstars,
 * @param Nstars The number of stars particles read in.
 * @param Ndm The number of DM and gas particles read in.
 */
void io_duplicate_stars_gparts(struct threadpool* tp,
                               struct spart* const sparts,
                               struct gpart* const gparts, size_t Nstars,
                               size_t Ndm) {

  struct duplication_data data;

  data.gparts = gparts;

src/common_io.h

@@ -104,9 +104,10 @@ void io_prepare_dm_gparts(struct threadpool* tp, struct gpart* const gparts,
void io_duplicate_hydro_gparts(struct threadpool* tp, struct part* const parts,
                               struct gpart* const gparts, size_t Ngas,
                               size_t Ndm);

void io_duplicate_stars_gparts(struct threadpool* tp,
                               struct spart* const sparts,
                               struct gpart* const gparts, size_t Nstars,
                               size_t Ndm);

void io_check_output_fields(const struct swift_params* params,
                            const long long N_total[3]);
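
For context, a hedged sketch of how the two duplication helpers declared above might be called after reading the initial conditions; the surrounding variables are assumptions, not taken from this diff, and the last arguments simply follow the @param descriptions (the stars helper expects the count of DM and gas gparts already in place):

  /* Mirror the gas particles into the gravity-particle array... */
  io_duplicate_hydro_gparts(&tp, parts, gparts, Ngas, Ndm);
  /* ...then mirror the star particles behind the DM and gas gparts. */
  io_duplicate_stars_gparts(&tp, sparts, gparts, Nstars, Ndm + Ngas);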

src/engine.c

@@ -82,8 +82,8 @@
#include "single_io.h"
#include "sort_part.h"
#include "sourceterms.h"
#include "stars_io.h"
#include "statistics.h"
#include "timers.h"
#include "tools.h"
#include "units.h"

@@ -152,23 +152,25 @@ void engine_addlink(struct engine *e, struct link **l, struct task *t) {
/**
 * @brief Recursively add non-implicit star ghost tasks to a cell hierarchy.
 */
void engine_add_stars_ghosts(struct engine *e, struct cell *c,
                             struct task *stars_ghost_in,
                             struct task *stars_ghost_out) {

  /* If we have reached the leaf OR have to few particles to play with*/
  if (!c->split || c->scount < engine_max_sparts_per_ghost) {

    /* Add the ghost task and its dependencies */
    struct scheduler *s = &e->sched;
    c->stars_ghost =
        scheduler_addtask(s, task_type_stars_ghost,
                          task_subtype_none, 0, 0, c, NULL);
    scheduler_addunlock(s, stars_ghost_in, c->stars_ghost);
    scheduler_addunlock(s, c->stars_ghost, stars_ghost_out);
  } else {
    /* Keep recursing */
    for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
        engine_add_stars_ghosts(e, c->progeny[k], stars_ghost_in, stars_ghost_out);
  }
}

@@ -443,9 +445,8 @@ void engine_make_hierarchical_tasks_stars(struct engine *e, struct cell *c) {
          scheduler_addtask(s, task_type_stars_ghost_out, task_subtype_none, 0,
                            /* implicit = */ 1, c, NULL);

      engine_add_stars_ghosts(e, c, c->stars_ghost_in, c->stars_ghost_out);
    }
  } else { /* We are above the super-cell so need to go deeper */

    /* Recurse. */
    if (c->split)

@@ -473,8 +474,7 @@ void engine_make_hierarchical_tasks_mapper(void *map_data, int num_elements,
    /* And the gravity stuff */
    if (is_with_self_gravity || is_with_external_gravity)
      engine_make_hierarchical_tasks_gravity(e, c);

    if (is_with_stars)
      engine_make_hierarchical_tasks_stars(e, c);
  }
}

@@ -2696,11 +2696,11 @@ void engine_make_starsloop_tasks_mapper(void *map_data, int num_elements,
    /* Skip cells without star particles */
    if (ci->scount == 0) continue;

    /* If the cells is local build a self-interaction */
    if (ci->nodeID == nodeID)
      scheduler_addtask(sched, task_type_self, task_subtype_stars_density, 0, 0,
                        ci, NULL);

    /* Now loop over all the neighbours of this cell */
    for (int ii = -1; ii < 2; ii++) {

@@ -2727,15 +2727,14 @@ void engine_make_starsloop_tasks_mapper(void *map_data, int num_elements,
          /* Construct the pair task */
          const int sid = sortlistID[(kk + 1) + 3 * ((jj + 1) + 3 * (ii + 1))];
          scheduler_addtask(sched, task_type_pair, task_subtype_stars_density,
                            sid, 0, ci, cj);
        }
      }
    }
  }
}
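
A small self-contained illustration (not from the repository) of the neighbour-index flattening used in the sortlistID lookup above, assuming the (ii, jj, kk) offsets each run over {-1, 0, 1}:

  #include <stdio.h>

  int main(void) {
    /* Direct neighbour one cell along the last axis: ii = 0, jj = 0, kk = 1. */
    const int ii = 0, jj = 0, kk = 1;
    const int flat = (kk + 1) + 3 * ((jj + 1) + 3 * (ii + 1));
    printf("flattened 3x3x3 neighbour index = %d\n", flat); /* prints 14 */
    return 0;
  }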

/**
 * @brief Counts the tasks associated with one cell and constructs the links
 *

@@ -2776,7 +2775,7 @@ void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
      engine_addlink(e, &ci->grav, t);
    }
    if (t->subtype == task_subtype_stars_density) {
      engine_addlink(e, &ci->stars_density, t);
    }

    /* Link pair tasks to cells. */

@@ -3038,7 +3037,6 @@ static inline void engine_make_hydro_loops_dependencies(struct scheduler *sched,
  scheduler_addunlock(sched, c->super_hydro->ghost_out, force);
}
#endif

/**
 * @brief Creates the dependency network for the stars tasks of a given cell.

@@ -3317,7 +3315,7 @@ void engine_make_extra_hydroloop_tasks_mapper(void *map_data, int num_elements,
 * all the dependencies for that cell.
 */
void engine_link_stars_tasks_mapper(void *map_data, int num_elements,
                                    void *extra_data) {

  struct engine *e = (struct engine *)extra_data;
  struct scheduler *sched = &e->sched;

@@ -3336,11 +3334,13 @@ void engine_link_stars_tasks_mapper(void *map_data, int num_elements,
      /* Now, build all the dependencies for the stars */
      engine_make_stars_loops_dependencies(sched, t, t->ci);

      scheduler_addunlock(sched, t->ci->stars_ghost_out, t->ci->super->end_force);
    }

    /* Otherwise, pair interaction? */
    else if (t->type == task_type_pair && t->subtype == task_subtype_stars_density) {

      /* Make all density tasks depend on the drift and the sorts. */
      if (t->ci->nodeID == engine_rank)

@@ -3419,8 +3419,8 @@ void engine_maketasks(struct engine *e) {
  struct cell *cells = s->cells_top;
  const int nr_cells = s->nr_cells;
  const ticks tic = getticks();

  /* Re-set the scheduler. */
  scheduler_reset(sched, engine_estimate_nr_tasks(e));

  ticks tic2 = getticks();

@@ -3566,16 +3566,14 @@ void engine_maketasks(struct engine *e) {
  if (e->policy & engine_policy_stars)
    threadpool_map(&e->threadpool, engine_link_stars_tasks_mapper, sched->tasks,
                   sched->nr_tasks, sizeof(struct task), 0, e);

  if (e->verbose)
    message("Linking stars tasks took %.3f %s (including reweight).",
            clocks_from_ticks(getticks() - tic2), clocks_getunit());

#ifdef WITH_MPI
  if (e->policy & engine_policy_stars)
    error("Cannot run stars with MPI");

  /* Add the communication tasks if MPI is being used. */
  if (e->policy & engine_policy_mpi) {
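
For reference, a minimal skeleton of the mapper signature that threadpool_map() expects, mirroring engine_link_stars_tasks_mapper above (illustration only; the body here is a placeholder, not SWIFT code):

  void example_tasks_mapper(void *map_data, int num_elements, void *extra_data) {
    struct engine *e = (struct engine *)extra_data;
    struct task *tasks = (struct task *)map_data;
    for (int i = 0; i < num_elements; i++) {
      struct task *t = &tasks[i];
      (void)e;
      (void)t; /* per-task work would go here */
    }
  }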

@@ -3695,7 +3693,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
      if (ci->nodeID != engine_rank) error("Non-local self task found");

      /* Activate the hydro drift */
      if (t->type == task_type_self && t->subtype == task_subtype_density) {
        if (cell_is_active_hydro(ci, e)) {
          scheduler_activate(s, t);

@@ -3737,10 +3735,10 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
      else if (t->type == task_type_self &&
               t->subtype == task_subtype_stars_density) {
        if (cell_is_active_stars(ci, e)) {
          scheduler_activate(s, t);
          cell_activate_drift_part(ci, s);
          cell_activate_drift_gpart(ci, s);
        }
      }

      /* Store current values of dx_max and h_max. */

@@ -3791,15 +3789,16 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
        if ((t->subtype == task_subtype_density ||
             t->subtype == task_subtype_gradient ||
             t->subtype == task_subtype_force ||
             t->subtype == task_subtype_stars_density) &&
            ((ci_active_hydro && ci->nodeID == engine_rank) ||
             (cj_active_hydro && cj->nodeID == engine_rank))) {

          scheduler_activate(s, t);

          /* Set the correct sorting flags */
          if (t->type == task_type_pair &&
              (t->subtype == task_subtype_density ||
               t->subtype == task_subtype_stars_density)) {

            /* Store some values. */
            atomic_or(&ci->requires_sorts, 1 << t->flags);
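
As an aside, a hedged illustration of the bitmask convention assumed by the requires_sorts updates above: each pair direction (the sid stored in t->flags) gets one bit, so activating a pair with direction 4 marks sort direction 4 as needed. The variables below are stand-ins, not the real cell fields:

  unsigned int requires_sorts = 0;   /* stand-in for ci->requires_sorts */
  const int sid = 4;                 /* hypothetical pair direction */
  requires_sorts |= 1u << sid;       /* same effect as atomic_or(&ci->requires_sorts,
                                        1 << t->flags), minus the atomicity */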

@@ -3820,7 +3819,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
        /* Store current values of dx_max and h_max. */
        else if (t->type == task_type_sub_pair &&
                 (t->subtype == task_subtype_density ||
                  t->subtype == task_subtype_stars_density)) {
          cell_activate_subcell_hydro_tasks(t->ci, t->cj, s);
        }
      }

@@ -3945,11 +3944,12 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
        if (cell_need_rebuild_for_pair(ci, cj)) *rebuild_space = 1;

#ifdef WITH_MPI
        error("MPI with stars not implemented");
        /* /\* Activate the send/recv tasks. *\/ */
        /* if (ci->nodeID != engine_rank) { */