SWIFT / SWIFTsim · Commits

Commit b6d45870
Authored Nov 14, 2017 by Peter W. Draper

    Formatting

Parent: 75130af8
Changes: 3 files
src/partition.c

...
@@ -384,8 +384,8 @@ static void pick_metis(struct space *s, int nregions, int *vertexw, int *edgew,
   *                      weights_v, NULL, weights_e);
   */
  if (METIS_PartGraphKway(&idx_ncells, &one, xadj, adjncy, weights_v, NULL,
                          weights_e, &idx_nregions, NULL, NULL, options,
                          &objval, regionid) != METIS_OK)
    error("Call to METIS_PartGraphKway failed.");

  /* Check that the regionids are ok. */
...
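For context, this call asks METIS to split the cell graph, given in CSR form (xadj/adjncy), into idx_nregions parts using the vertex weights weights_v and edge weights weights_e, writing each cell's region into regionid. A minimal standalone sketch of the same call-and-check pattern, applied to a hypothetical 4-vertex ring rather than SWIFT's cell graph:

/* Minimal sketch (not SWIFT code): the same METIS_PartGraphKway call and
 * error check, on a hypothetical 4-vertex ring graph in CSR form. */
#include <metis.h>
#include <stdio.h>

int main(void) {
  idx_t nvtxs = 4, ncon = 1, nparts = 2, objval;
  idx_t xadj[5] = {0, 2, 4, 6, 8};            /* CSR offsets */
  idx_t adjncy[8] = {1, 3, 0, 2, 1, 3, 0, 2}; /* ring 0-1-2-3-0 */
  idx_t part[4];
  /* NULL weights/options mean unit weights and METIS defaults. */
  if (METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy, NULL, NULL, NULL,
                          &nparts, NULL, NULL, NULL, &objval, part) !=
      METIS_OK) {
    fprintf(stderr, "Call to METIS_PartGraphKway failed.\n");
    return 1;
  }
  for (idx_t i = 0; i < nvtxs; i++)
    printf("vertex %d -> region %d\n", (int)i, (int)part[i]);
  return 0;
}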
@@ -519,8 +519,7 @@ static void repart_edge_metis(int partweights, int bothweights, int nodeID,
      struct task *t = &tasks[j];

      /* Skip un-interesting tasks. */
      if (t->cost == 0) continue;

      /* Get the task weight. */
      int w = t->cost * wscale;
...
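The loop above converts each task's cost into an integer graph weight via wscale, skipping zero-cost tasks that would contribute nothing to the partition. A rough sketch of how such a scale factor could be chosen; pick_wscale and the ceiling value are illustrative, not SWIFT's actual scheme:

/* Illustrative only: map the largest task cost onto an assumed integer
 * ceiling so that "int w = t->cost * wscale;" stays in a range METIS
 * weights can hold. */
static double pick_wscale(double max_cost, double max_weight) {
  return (max_cost > 0.0) ? max_weight / max_cost : 1.0;
}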
src/scheduler.c

...
@@ -1317,50 +1317,46 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
      if (t->subtype == task_subtype_tend) {
        t->buff = malloc(sizeof(struct pcell_step) * t->ci->pcell_size);
        cell_pack_end_step(t->ci, t->buff);
        if ((t->ci->pcell_size * sizeof(struct pcell_step)) >
            s->mpi_message_limit)
          err = MPI_Isend(t->buff,
                          t->ci->pcell_size * sizeof(struct pcell_step),
                          MPI_BYTE, t->cj->nodeID, t->flags, MPI_COMM_WORLD,
                          &t->req);
        else
          err = MPI_Issend(t->buff,
                           t->ci->pcell_size * sizeof(struct pcell_step),
                           MPI_BYTE, t->cj->nodeID, t->flags, MPI_COMM_WORLD,
                           &t->req);
      } else if (t->subtype == task_subtype_xv ||
                 t->subtype == task_subtype_rho ||
                 t->subtype == task_subtype_gradient) {
        if ((t->ci->count * sizeof(struct part)) > s->mpi_message_limit)
          err = MPI_Isend(t->ci->parts, t->ci->count, part_mpi_type,
                          t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
        else
          err = MPI_Issend(t->ci->parts, t->ci->count, part_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
        // message( "sending %i parts with tag=%i from %i to %i." ,
        //     t->ci->count , t->flags , s->nodeID , t->cj->nodeID );
        // fflush(stdout);
      } else if (t->subtype == task_subtype_gpart) {
        if ((t->ci->gcount * sizeof(struct gpart)) > s->mpi_message_limit)
          err = MPI_Isend(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
                          t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
        else
          err = MPI_Issend(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
      } else if (t->subtype == task_subtype_spart) {
        if ((t->ci->scount * sizeof(struct spart)) > s->mpi_message_limit)
          err = MPI_Isend(t->ci->sparts, t->ci->scount, spart_mpi_type,
                          t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
        else
          err = MPI_Issend(t->ci->sparts, t->ci->scount, spart_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
      } else if (t->subtype == task_subtype_multipole) {
        if ((t->ci->scount * sizeof(struct gravity_tensors)) >
            s->mpi_message_limit)
          err = MPI_Isend(t->ci->multipole, 1, multipole_mpi_type,
                          t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
        else
          err = MPI_Issend(t->ci->multipole, 1, multipole_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
...
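Every branch of the hunk above repeats one pattern: compare the payload size against s->mpi_message_limit, then post the send with MPI_Isend (standard mode) when it exceeds the limit, or MPI_Issend (synchronous mode, which avoids eager buffering) otherwise. A hedged sketch of that dispatch as a standalone helper; send_with_limit is an illustrative name, not a SWIFT function:

#include <mpi.h>
#include <stddef.h>

/* Sketch: choose between standard and synchronous non-blocking sends on
 * a size threshold, mirroring the pattern in scheduler_enqueue() above. */
static int send_with_limit(const void *buf, size_t bytes, int dest, int tag,
                           size_t limit, MPI_Request *req) {
  if (bytes > limit)
    return MPI_Isend(buf, (int)bytes, MPI_BYTE, dest, tag, MPI_COMM_WORLD,
                     req);
  else
    return MPI_Issend(buf, (int)bytes, MPI_BYTE, dest, tag, MPI_COMM_WORLD,
                      req);
}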
@@ -1368,7 +1364,7 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
        error("Unknown communication sub-type");
      }
      if (err != MPI_SUCCESS) {
        mpi_error(err, "Failed to emit isend for particle data.");
      }
      qid = 0;
#else
...
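mpi_error() here is SWIFT's own error-reporting helper, defined elsewhere in the tree. A self-contained approximation using only standard MPI calls might look like the following; report_mpi_error is a hypothetical stand-in, not the SWIFT macro:

#include <mpi.h>
#include <stdio.h>

/* Hypothetical stand-in for an mpi_error()-style helper: decode the MPI
 * error code, print the message, and abort the MPI job. */
static void report_mpi_error(int err, const char *msg) {
  char buf[MPI_MAX_ERROR_STRING];
  int len = 0;
  MPI_Error_string(err, buf, &len);
  fprintf(stderr, "%s (%.*s)\n", msg, len, buf);
  MPI_Abort(MPI_COMM_WORLD, err);
}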
src/scheduler.h

...
@@ -102,7 +102,7 @@ struct scheduler {
  /* The node we are working on. */
  int nodeID;

  /* Maximum size of task messages, in bytes, to send using non-buffered
   * MPI. */
  size_t mpi_message_limit;
...