SWIFT / swiftmpistepsim / Commits

Commit 9713ee72, authored 5 years ago by Peter W. Draper

    Let there be life, albeit synchronous and very slowwww

Parent: 38d2a38b
No related branches or tags found. Part of 2 merge requests: !11 (Draft: Fast one-sided MPI version) and !8 (Draft: RDMA version with wrapped infinity calls).
Showing 1 changed file: swiftmpirdmastepsim.c (+6 additions, −24 deletions)
@@ -82,9 +82,6 @@ static MPI_Comm subtypeMPI_comms[task_subtype_count];
 static MPI_Win mpi_window[task_subtype_count];
 static BLOCKTYPE *mpi_ptr[task_subtype_count];
 
-/* Size of a board for a rank. */
-static size_t board_size = 0;
-
 /* The local send queue. */
 static struct mpiuse_log_entry **volatile send_queue;
 static int volatile nr_send = 0;
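The declarations above give each task subtype its own RMA window (mpi_window[j]) backed by a local block array (mpi_ptr[j]); the removed board_size suggests one fixed-size "board" slot per rank inside each window. The allocation itself is not part of this commit; the following is a minimal sketch of how such windows could be set up with MPI_Win_allocate. The BLOCKTYPE, MESSAGE_SIZE and task_subtype_count values and the create_windows() helper are illustrative assumptions, not the project's code.

#include <mpi.h>
#include <stdint.h>

typedef int64_t BLOCKTYPE;    /* assumption: block unit used by the windows */
#define MESSAGE_SIZE 1024     /* assumption: blocks per rank slot */
#define task_subtype_count 2  /* assumption: illustrative count */

static MPI_Win mpi_window[task_subtype_count];
static BLOCKTYPE *mpi_ptr[task_subtype_count];

static void create_windows(MPI_Comm comms[], int nr_ranks) {
  for (int j = 0; j < task_subtype_count; j++) {
    MPI_Aint bytes = (MPI_Aint)nr_ranks * MESSAGE_SIZE * sizeof(BLOCKTYPE);
    /* Allocate the window memory and expose it on this subtype's comm. */
    MPI_Win_allocate(bytes, sizeof(BLOCKTYPE), MPI_INFO_NULL, comms[j],
                     &mpi_ptr[j], &mpi_window[j]);
    for (MPI_Aint k = 0; k < (MPI_Aint)nr_ranks * MESSAGE_SIZE; k++)
      mpi_ptr[j][k] = 0; /* start every rank slot in the idle state */
    /* Passive-target access epoch so any rank can put at any time. */
    MPI_Win_lock_all(0, mpi_window[j]);
  }
}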
@@ -153,7 +150,7 @@ static int datacheck_test(size_t size, void *data) {
  */
 static void *send_thread(void *arg) {
 
-  message("%d: send thread starts", *((int *)arg));
+  message("%d: send thread starts with %d messages", *((int *)arg), nr_send);
   ticks starttics = getticks();
 
   for (int k = 0; k < nr_send; k++) {
@@ -216,8 +213,6 @@ static void *send_thread(void *arg) {
       while (flag == 0) {
         MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
       }
-      //message("waiting for unlock on %d/%d (%d)", log->otherrank, log->subtype,
-      //          newval[0]);
     }
     message("sent and received... %d/%d/%d", k, nr_send,
             ((char *)log->data)[0]);
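The send loop above completes each operation by spinning on MPI_Test rather than blocking in MPI_Wait, one reason this first working version is "synchronous and very slowwww". A minimal sketch of that completion pattern; only the MPI calls are taken from the diff, the helper name is illustrative.

#include <mpi.h>

/* Poll until a nonblocking operation completes. MPI_Test returns
 * immediately and sets flag nonzero once the request has finished;
 * the repeated calls also drive the MPI progress engine. */
static void spin_until_done(MPI_Request *request) {
  int flag = 0;
  while (flag == 0) {
    MPI_Test(request, &flag, MPI_STATUS_IGNORE);
  }
}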
@@ -252,27 +247,16 @@ static void *recv_thread(void *arg) {
   while (todo_recv > 0) {
     for (int n = 0; n < nr_ranks; n++) {
       if (todo_recv <= 0) break;
       if (n == myrank) continue;
       for (int j = 0; j < task_subtype_count; j++) {
         if (todo_recv <= 0) break;
         MPI_Win_flush_all(mpi_window[j]);  // XXX emergency measure
         BLOCKTYPE lockval = mpi_ptr[j][n * MESSAGE_SIZE];
-        message("lockval check %d/%d at %zd: lockval %zd", n, j,
-                n * MESSAGE_SIZE, lockval);
-        for (BLOCKTYPE kk = n * MESSAGE_SIZE; kk < (n + 1) * MESSAGE_SIZE;
-             kk++) {
-          if (mpi_ptr[j][kk] != 0) {
-            message("non-zero %zd at %zd", mpi_ptr[j][kk], kk);
-          }
-        }
         if (lockval == UNLOCKED) {
-          message("unlock message %d/%d at %zd: lockval %zd, possibles: %d",
-                  n, j, n * MESSAGE_SIZE, lockval,
-                  todo_recv);
 
           /* We have a message waiting to be handled, find the log. */
           int found = 0;
           for (int k = 0; k < nr_recv; k++) {
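The receive thread polls rather than waits: it flushes each window so remote puts become visible, then reads the first block of every rank's slot as a lock word. A condensed sketch of that loop, reusing the declarations from the allocation sketch above; the UNLOCKED/LOCKED values, the handle_message() helper and the todo_recv bookkeeping are illustrative assumptions.

#define UNLOCKED 2  /* assumption: sender marks a slot ready with this */
#define LOCKED 0    /* assumption: receiver resets a slot to this */

static void poll_boards(int myrank, int nr_ranks, int volatile *todo_recv) {
  while (*todo_recv > 0) {
    for (int n = 0; n < nr_ranks; n++) {
      if (n == myrank) continue;
      for (int j = 0; j < task_subtype_count; j++) {
        /* Make outstanding remote updates to this window visible. */
        MPI_Win_flush_all(mpi_window[j]);
        if (mpi_ptr[j][n * MESSAGE_SIZE] == UNLOCKED) {
          /* handle_message(n, j);  match the slot to a pending log */
          mpi_ptr[j][n * MESSAGE_SIZE] = LOCKED; /* reclaim the slot */
          (*todo_recv)--;
        }
      }
    }
  }
}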
@@ -300,8 +284,6 @@ static void *recv_thread(void *arg) {
             mpi_ptr[j][n * MESSAGE_SIZE] = LOCKED;
             break;
-          } else {
-            message("%d miss: %d/%d/%d", k, log->otherrank, log->subtype, log->done);
-          }
+          }
         }
       }
       if (!found) {
@@ -318,9 +300,8 @@ static void *recv_thread(void *arg) {
     }
   }
 
-  if (verbose)
-    message("took %.3f %s.", clocks_from_ticks(getticks() - starttics),
-            clocks_getunit());
+  message("took %.3f %s.", clocks_from_ticks(getticks() - starttics),
+          clocks_getunit());
 
   /* Thread exits. */
   return NULL;
@@ -379,7 +360,8 @@ static size_t pick_logs() {
 
   qsort(recv_queue, nr_recv, sizeof(struct mpiuse_log_entry *), cmp_logs);
   qsort(send_queue, nr_send, sizeof(struct mpiuse_log_entry *), cmp_logs);
 
-  if (verbose) message("maxsize = %zd", maxsize);
+  if (verbose)
+    message("maxsize = %zd, nr_send = %d, nr_recv = %d", maxsize, nr_send, nr_recv);
   return maxsize;
 }
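pick_logs() orders both queues with qsort over arrays of log-entry pointers. The cmp_logs comparator is not visible in this hunk; purely as an illustration, a comparator of the right shape might sort by an assumed injection-time field (the 'tic' member is a guess, the real key and struct layout may differ).

#include <stdlib.h>

struct mpiuse_log_entry {
  long tic; /* assumed sort key; the simulator's struct has more fields */
};

/* qsort comparator over an array of struct mpiuse_log_entry pointers. */
static int cmp_logs(const void *p1, const void *p2) {
  const struct mpiuse_log_entry *l1 = *(struct mpiuse_log_entry *const *)p1;
  const struct mpiuse_log_entry *l2 = *(struct mpiuse_log_entry *const *)p2;
  return (l1->tic > l2->tic) - (l1->tic < l2->tic);
}

/* Usage, as in the hunk above:
 *   qsort(recv_queue, nr_recv, sizeof(struct mpiuse_log_entry *), cmp_logs);
 */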