SWIFT / SWIFTsim · Commits

Commit f5fc9ffc
authored May 25, 2017 by James Willis

Merge branch 'master' into doself2-vectorisation

parents 8a07c59a 0c4ceb01

Changes 27
examples/DiscPatch/HydroStatic/plot.py  (new file, mode 100644)
################################################################################
# This file is part of SWIFT.
# Copyright (c) 2017 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
##
# This script plots the Disc-Patch_*.hdf5 snapshots.
# It takes two (optional) parameters: the counter value of the first and last
# snapshot to plot (default: 0 81).
##
import numpy as np
import h5py
import matplotlib
matplotlib.use("Agg")
import pylab as pl
import glob
import sys

# Parameters
surface_density = 10.
scale_height = 100.
z_disc = 200.
utherm = 20.2615290634
gamma = 5. / 3.

start = 0
stop = 81
if len(sys.argv) > 1:
    start = int(sys.argv[1])
if len(sys.argv) > 2:
    stop = int(sys.argv[2])

# Get the analytic solution for the density
def get_analytic_density(x):
    return 0.5 * surface_density / scale_height / \
           np.cosh((x - z_disc) / scale_height)**2

# Get the analytic solution for the (isothermal) pressure
def get_analytic_pressure(x):
    return (gamma - 1.) * utherm * get_analytic_density(x)

# Get the data fields to plot from the snapshot file with the given name:
# snapshot time, z-coord, density, pressure, velocity norm
def get_data(name):
    file = h5py.File(name, "r")
    coords = np.array(file["/PartType0/Coordinates"])
    rho = np.array(file["/PartType0/Density"])
    u = np.array(file["/PartType0/InternalEnergy"])
    v = np.array(file["/PartType0/Velocities"])
    P = (gamma - 1.) * rho * u
    vtot = np.sqrt(v[:, 0]**2 + v[:, 1]**2 + v[:, 2]**2)
    return float(file["/Header"].attrs["Time"]), coords[:, 2], rho, P, vtot

# scan the folder for snapshot files and plot all of them (within the requested
# range)
for f in sorted(glob.glob("Disc-Patch_*.hdf5")):
    num = int(f[-8:-5])
    if num < start or num > stop:
        continue

    print "processing", f, "..."

    zrange = np.linspace(0., 400., 1000)
    time, z, rho, P, v = get_data(f)

    fig, ax = pl.subplots(3, 1, sharex = True)

    ax[0].plot(z, rho, "r.")
    ax[0].plot(zrange, get_analytic_density(zrange), "k-")
    ax[0].set_ylabel("density")

    ax[1].plot(z, v, "r.")
    ax[1].plot(zrange, np.zeros(len(zrange)), "k-")
    ax[1].set_ylabel("velocity norm")

    ax[2].plot(z, P, "r.")
    ax[2].plot(zrange, get_analytic_pressure(zrange), "k-")
    ax[2].set_xlim(0., 400.)
    ax[2].set_xlabel("z")
    ax[2].set_ylabel("pressure")

    pl.suptitle("t = {0:.2f}".format(time))
    pl.savefig("{name}.png".format(name = f[:-5]))
    pl.close()
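For reference, the profile evaluated by get_analytic_density and get_analytic_pressure above is the isothermal disc-patch solution; written out, with $\Sigma$ the surface density, $h$ the scale height, $z_d$ the disc position and $u$ the internal energy per unit mass,

$$\rho(z) = \frac{\Sigma}{2h}\,\mathrm{sech}^2\!\left(\frac{z - z_d}{h}\right), \qquad P(z) = (\gamma - 1)\,u\,\rho(z).$$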
examples/analyse_tasks.py  (new file, mode 100755)
#!/usr/bin/env python
"""
Usage:
analyse_tasks.py [options] input.dat
where input.dat is a thread info file for a step. Use the '-y interval' flag
of the swift command to create these.
The output is an analysis of the task timings, including the dead time per
thread and step, the total amount of time spent in each task type (for the
whole step and per thread), and the minimum and maximum times spent per task
type.
This file is part of SWIFT.
Copyright (c) 2017 Peter W. Draper (p.w.draper@durham.ac.uk)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import matplotlib
matplotlib.use("Agg")
import matplotlib.collections as collections
import matplotlib.ticker as plticker
import pylab as pl
import sys
import argparse

# Handle the command line.
parser = argparse.ArgumentParser(description="Analyse task dumps")
parser.add_argument("input", help="Thread data file (-y output)")
parser.add_argument("-v", "--verbose", dest="verbose",
                    help="Verbose output (default: False)",
                    default=False, action="store_true")
args = parser.parse_args()
infile = args.input

# Tasks and subtypes. Indexed as in tasks.h.
TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
             "init_grav", "ghost", "extra_ghost", "drift_part", "drift_gpart",
             "kick1", "kick2", "timestep", "send", "recv", "grav_top_level",
             "grav_long_range", "grav_mm", "grav_down", "cooling",
             "sourceterms", "count"]

SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
            "tend", "xv", "rho", "gpart", "multipole", "spart", "count"]

# Read input.
data = pl.loadtxt(infile)

maxthread = int(max(data[:, 0])) + 1
print "# Maximum thread id:", maxthread

# Recover the start and end time
full_step = data[0, :]
tic_step = int(full_step[4])
toc_step = int(full_step[5])
CPU_CLOCK = float(full_step[-1]) / 1000.0
data = data[1:, :]
if args.verbose:
    print "CPU frequency:", CPU_CLOCK * 1000.0

# Avoid start and end times of zero.
data = data[data[:, 4] != 0]
data = data[data[:, 5] != 0]

# Calculate the time range.
total_t = (toc_step - tic_step) / CPU_CLOCK
print "# Data range: ", total_t, "ms"

# Correct times to relative values.
start_t = float(tic_step)
data[:, 4] -= start_t
data[:, 5] -= start_t

tasks = {}
tasks[-1] = []
for i in range(maxthread):
    tasks[i] = []

# Gather into by thread data.
num_lines = pl.size(data) / 10
for line in range(num_lines):
    thread = int(data[line, 0])
    tic = int(data[line, 4]) / CPU_CLOCK
    toc = int(data[line, 5]) / CPU_CLOCK
    tasktype = int(data[line, 1])
    subtype = int(data[line, 2])
    tasks[thread].append([tic, toc, tasktype, subtype])

# Sort by tic and gather used thread ids.
threadids = []
for i in range(maxthread):
    if len(tasks[i]) > 0:
        tasks[i] = sorted(tasks[i], key=lambda task: task[0])
        threadids.append(i)

# Times per task.
print "# Task times:"
print "# {0:<16s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
      .format("type/subtype", "count", "minimum", "maximum",
              "sum", "mean", "percent")
alltasktimes = {}
for i in threadids:
    tasktimes = {}
    for task in tasks[i]:
        key = TASKTYPES[task[2]] + "/" + SUBTYPES[task[3]]
        dt = task[1] - task[0]
        if not key in tasktimes:
            tasktimes[key] = []
        tasktimes[key].append(dt)

        if not key in alltasktimes:
            alltasktimes[key] = []
        alltasktimes[key].append(dt)

    print "# Thread : ", i
    for key in sorted(tasktimes.keys()):
        taskmin = min(tasktimes[key])
        taskmax = max(tasktimes[key])
        tasksum = sum(tasktimes[key])
        print "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
              .format(key, len(tasktimes[key]), taskmin, taskmax, tasksum,
                      tasksum / len(tasktimes[key]),
                      tasksum / total_t * 100.0)
    print

print "# All threads : "
for key in sorted(alltasktimes.keys()):
    taskmin = min(alltasktimes[key])
    taskmax = max(alltasktimes[key])
    tasksum = sum(alltasktimes[key])
    print "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
          .format(key, len(alltasktimes[key]), taskmin, taskmax, tasksum,
                  tasksum / len(alltasktimes[key]),
                  tasksum / (len(threadids) * total_t) * 100.0)
print

# Dead times.
print "# Deadtimes:"
print "# no. : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
      .format("count", "minimum", "maximum", "sum", "mean", "percent")
alldeadtimes = []
for i in threadids:
    deadtimes = []
    last = 0
    for task in tasks[i]:
        dt = task[0] - last
        deadtimes.append(dt)
        last = task[1]
    dt = total_t - last
    deadtimes.append(dt)

    deadmin = min(deadtimes)
    deadmax = max(deadtimes)
    deadsum = sum(deadtimes)
    print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
          .format(i, len(deadtimes), deadmin, deadmax, deadsum,
                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
    alldeadtimes.extend(deadtimes)

deadmin = min(alldeadtimes)
deadmax = max(alldeadtimes)
deadsum = sum(alldeadtimes)
print "all : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
      .format(len(alldeadtimes), deadmin, deadmax, deadsum,
              deadsum / len(alldeadtimes),
              deadsum / (len(threadids) * total_t) * 100.0)
print

sys.exit(0)
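The analysis above relies on the column layout of the swift '-y' thread dump as read by the script: column 0 is the thread id, columns 1 and 2 index TASKTYPES and SUBTYPES, columns 4 and 5 are the task tic and toc in CPU ticks, the last column carries the CPU frequency, and the first row describes the whole step. A minimal sketch of the same tick-to-millisecond conversion on such a dump (the file name here is hypothetical):

import pylab as pl

data = pl.loadtxt("thread_info-step1.dat")  # hypothetical '-y' dump name
CPU_CLOCK = float(data[0, -1]) / 1000.0     # ticks per millisecond
tasks = data[1:, :]                         # drop the whole-step row
durations_ms = (tasks[:, 5] - tasks[:, 4]) / CPU_CLOCK
print "longest task:", durations_ms.max(), "ms"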
examples/analyse_tasks_MPI.py  (new file, mode 100755)
#!/usr/bin/env python
"""
Usage:
analyse_tasks_MPI.py [options] input.dat
where input.dat is a thread info file for an MPI step. Use the '-y interval'
flag of the swift command to create these.
The output is an analysis of the task timings, including the dead time per
thread and step, the total amount of time spent in each task type (for the
whole step and per thread), and the minimum and maximum times spent per task
type.
This file is part of SWIFT.
Copyright (c) 2017 Peter W. Draper (p.w.draper@durham.ac.uk)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import matplotlib
matplotlib.use("Agg")
import matplotlib.collections as collections
import matplotlib.ticker as plticker
import pylab as pl
import sys
import argparse

# Handle the command line.
parser = argparse.ArgumentParser(description="Analyse task dumps")
parser.add_argument("input", help="Thread data file (-y output)")
parser.add_argument("-v", "--verbose", dest="verbose",
                    help="Verbose output (default: False)",
                    default=False, action="store_true")
args = parser.parse_args()
infile = args.input

# Tasks and subtypes. Indexed as in tasks.h.
TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
             "init_grav", "ghost", "extra_ghost", "drift_part", "drift_gpart",
             "kick1", "kick2", "timestep", "send", "recv", "grav_top_level",
             "grav_long_range", "grav_mm", "grav_down", "cooling",
             "sourceterms", "count"]

SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
            "tend", "xv", "rho", "gpart", "multipole", "spart", "count"]

# Read input.
data = pl.loadtxt(infile)

# Get the CPU clock to convert ticks into milliseconds.
full_step = data[0, :]
CPU_CLOCK = float(full_step[-1]) / 1000.0
if args.verbose:
    print "# CPU frequency:", CPU_CLOCK * 1000.0

nranks = int(max(data[:, 0])) + 1
print "# Number of ranks:", nranks
maxthread = int(max(data[:, 1])) + 1
print "# Maximum thread id:", maxthread

# Avoid start and end times of zero.
sdata = data[data[:, 5] != 0]
sdata = data[data[:, 6] != 0]

# Now we process all the ranks.
for rank in range(nranks):
    print "# Rank", rank
    data = sdata[sdata[:, 0] == rank]

    # Recover the start and end time
    full_step = data[0, :]
    tic_step = int(full_step[5])
    toc_step = int(full_step[6])
    data = data[1:, :]

    # Avoid start and end times of zero.
    data = data[data[:, 5] != 0]
    data = data[data[:, 6] != 0]

    # Calculate the time range.
    total_t = (toc_step - tic_step) / CPU_CLOCK
    print "# Data range: ", total_t, "ms"

    # Correct times to relative values.
    start_t = float(tic_step)
    data[:, 5] -= start_t
    data[:, 6] -= start_t
    end_t = (toc_step - start_t) / CPU_CLOCK

    tasks = {}
    tasks[-1] = []
    for i in range(maxthread):
        tasks[i] = []

    # Gather into by thread data.
    num_lines = pl.size(data) / 12
    for line in range(num_lines):
        thread = int(data[line, 1])
        tic = int(data[line, 5]) / CPU_CLOCK
        toc = int(data[line, 6]) / CPU_CLOCK
        tasktype = int(data[line, 2])
        subtype = int(data[line, 3])
        tasks[thread].append([tic, toc, tasktype, subtype])

    # Sort by tic and gather used threads.
    threadids = []
    for i in range(maxthread):
        tasks[i] = sorted(tasks[i], key=lambda task: task[0])
        threadids.append(i)

    # Times per task.
    print "# Task times:"
    print "# {0:<16s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
          .format("type/subtype", "count", "minimum", "maximum",
                  "sum", "mean", "percent")
    alltasktimes = {}
    for i in threadids:
        tasktimes = {}
        for task in tasks[i]:
            key = TASKTYPES[task[2]] + "/" + SUBTYPES[task[3]]
            dt = task[1] - task[0]
            if not key in tasktimes:
                tasktimes[key] = []
            tasktimes[key].append(dt)

            if not key in alltasktimes:
                alltasktimes[key] = []
            alltasktimes[key].append(dt)

        print "# Thread : ", i
        for key in sorted(tasktimes.keys()):
            taskmin = min(tasktimes[key])
            taskmax = max(tasktimes[key])
            tasksum = sum(tasktimes[key])
            print "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
                  .format(key, len(tasktimes[key]), taskmin, taskmax, tasksum,
                          tasksum / len(tasktimes[key]),
                          tasksum / total_t * 100.0)
        print

    print "# All threads : "
    for key in sorted(alltasktimes.keys()):
        taskmin = min(alltasktimes[key])
        taskmax = max(alltasktimes[key])
        tasksum = sum(alltasktimes[key])
        print "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
              .format(key, len(alltasktimes[key]), taskmin, taskmax, tasksum,
                      tasksum / len(alltasktimes[key]),
                      tasksum / (len(threadids) * total_t) * 100.0)
    print

    # Dead times.
    print "# Deadtimes:"
    print "# no. : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
          .format("count", "minimum", "maximum", "sum", "mean", "percent")
    alldeadtimes = []
    for i in threadids:
        deadtimes = []
        last = 0
        for task in tasks[i]:
            dt = task[0] - last
            deadtimes.append(dt)
            last = task[1]
        dt = total_t - last
        deadtimes.append(dt)

        deadmin = min(deadtimes)
        deadmax = max(deadtimes)
        deadsum = sum(deadtimes)
        print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
              .format(i, len(deadtimes), deadmin, deadmax, deadsum,
                      deadsum / len(deadtimes), deadsum / total_t * 100.0)
        alldeadtimes.extend(deadtimes)

    deadmin = min(alldeadtimes)
    deadmax = max(alldeadtimes)
    deadsum = sum(alldeadtimes)
    print "all : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
          .format(len(alldeadtimes), deadmin, deadmax, deadsum,
                  deadsum / len(alldeadtimes),
                  deadsum / (len(threadids) * total_t) * 100.0)
    print

sys.exit(0)
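The MPI dump adds a rank column in front of the thread id (column 0 = rank, column 1 = thread, columns 5 and 6 = tic and toc), which is why the script above filters the data per rank before repeating the per-thread analysis. A minimal sketch of that per-rank split, again with a hypothetical file name:

import pylab as pl

data = pl.loadtxt("thread_info_MPI-step1.dat")  # hypothetical '-y' dump name
nranks = int(max(data[:, 0])) + 1
for rank in range(nranks):
    rdata = data[data[:, 0] == rank]            # rows belonging to this rank
    print "rank", rank, ":", len(rdata), "task entries"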
examples/parameter_example.yml
...

@@ -107,6 +107,12 @@ DiscPatchPotential:
  timestep_mult:  0.03  # Dimensionless pre-factor for the time-step condition
  growth_time:    5.    # (Optional) Time for the disc to grow to its final size (multiple of the dynamical time)

# Sine Wave potential
SineWavePotential:
  amplitude:      10.   # Amplitude of the sine wave (internal units)
  timestep_limit: 1.    # Time-step dimensionless pre-factor.
  growth_time:    0.    # (Optional) Time for the potential to grow to its final size.

# Parameters related to cooling function ----------------------------------------------
# Constant du/dt cooling function

...
examples/plot_tasks.py
...

@@ -89,9 +89,10 @@ pl.rcParams.update(PLOT_PARAMS)
 # Tasks and subtypes. Indexed as in tasks.h.
 TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
-             "init_grav", "ghost", "extra_ghost", "drift", "kick1", "kick2",
-             "timestep", "send", "recv", "grav_top_level", "grav_long_range",
-             "grav_mm", "grav_down", "cooling", "sourceterms", "count"]
+             "init_grav", "ghost", "extra_ghost", "drift_part", "drift_gpart",
+             "kick1", "kick2", "timestep", "send", "recv", "grav_top_level",
+             "grav_long_range", "grav_mm", "grav_down", "cooling",
+             "sourceterms", "count"]
 SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
             "tend", "xv", "rho", "gpart", "multipole", "spart", "count"]

...

@@ -105,14 +106,14 @@ FULLTYPES = ["self/force", "self/density", "self/grav", "sub_self/force",
 # A number of colours for the various types. Recycled when there are
 # more task types than colours...
 colours = ["cyan", "lightgray", "darkblue", "yellow", "tan", "dodgerblue",
-           "sienna", "aquamarine", "bisque", "blue", "green", "brown",
-           "purple", "mocassin", "olivedrab", "chartreuse", "darksage",
-           "darkgreen", "green", "mediumseagreen", "mediumaquamarine",
-           "darkslategrey", "mediumturquoise", "black", "cadetblue", "skyblue",
-           "red", "slategray", "gold", "slateblue", "blueviolet",
-           "mediumorchid", "firebrick", "magenta", "hotpink", "pink"]
+           "sienna", "aquamarine", "bisque", "blue", "green", "lightgreen",
+           "brown", "purple", "moccasin", "olivedrab", "chartreuse", "darksage",
+           "darkgreen", "green", "mediumseagreen", "mediumaquamarine",
+           "darkslategrey", "mediumturquoise", "black", "cadetblue", "skyblue",
+           "red", "slategray", "gold", "slateblue", "blueviolet", "mediumorchid",
+           "firebrick", "magenta", "hotpink", "pink", "orange", "lightgreen"]
 maxcolours = len(colours)

 # Set colours of task/subtype.

...

@@ -134,9 +135,9 @@ for task in SUBTYPES:
 # For fiddling with colours...
 if args.verbose:
     print "#Selected colours:"
-    for task in TASKCOLOURS.keys():
+    for task in sorted(TASKCOLOURS.keys()):
         print "# " + task + ": " + TASKCOLOURS[task]
-    for task in SUBCOLOURS.keys():
+    for task in sorted(SUBCOLOURS.keys()):
         print "# " + task + ": " + SUBCOLOURS[task]

 # Read input.

...

@@ -161,7 +162,7 @@ data = data[data[:,5] != 0]
 # Calculate the time range, if not given.
 delta_t = delta_t * CPU_CLOCK
 if delta_t == 0:
-    dt = max(data[:,5]) - min(data[:,4])
+    dt = toc_step - tic_step
     if dt > delta_t:
         delta_t = dt
 print "Data range: ", delta_t / CPU_CLOCK, "ms"

...
examples/plot_tasks_MPI.py
...

@@ -95,9 +95,10 @@ pl.rcParams.update(PLOT_PARAMS)
 # Tasks and subtypes. Indexed as in tasks.h.
 TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
-             "init_grav", "ghost", "extra_ghost", "drift", "kick1", "kick2",
-             "timestep", "send", "recv", "grav_top_level", "grav_long_range",
-             "grav_mm", "grav_down", "cooling", "sourceterms", "count"]
+             "init_grav", "ghost", "extra_ghost", "drift_part", "drift_gpart",
+             "kick1", "kick2", "timestep", "send", "recv", "grav_top_level",
+             "grav_long_range", "grav_mm", "grav_down", "cooling",
+             "sourceterms", "count"]
 SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
             "tend", "xv", "rho", "gpart", "multipole", "spart", "count"]

...

@@ -111,15 +112,14 @@ FULLTYPES = ["self/force", "self/density", "self/grav", "sub_self/force",
 # A number of colours for the various types. Recycled when there are
 # more task types than colours...
 colours = ["cyan", "lightgray", "darkblue", "yellow", "tan", "dodgerblue",
-           "sienna", "aquamarine", "bisque", "blue", "green", "brown",
-           "purple", "mocassin", "olivedrab", "chartreuse", "darksage",
-           "darkgreen", "green", "mediumseagreen", "mediumaquamarine",
-           "darkslategrey", "mediumturquoise", "black", "cadetblue", "skyblue",
-           "red", "slategray", "gold", "slateblue", "blueviolet",
-           "mediumorchid", "firebrick", "magenta", "hotpink", "pink",
-           "orange", "lightgreen"]
+           "sienna", "aquamarine", "bisque", "blue", "green", "lightgreen",
+           "brown", "purple", "moccasin", "olivedrab", "chartreuse", "darksage",
+           "darkgreen", "green", "mediumseagreen", "mediumaquamarine",
+           "darkslategrey", "mediumturquoise", "black", "cadetblue", "skyblue",
+           "red", "slategray", "gold", "slateblue", "blueviolet", "mediumorchid",
+           "firebrick", "magenta", "hotpink", "pink", "orange", "lightgreen"]
 maxcolours = len(colours)

 # Set colours of task/subtype.

...

@@ -141,9 +141,9 @@ for task in SUBTYPES:
 # For fiddling with colours...
 if args.verbose:
     print "#Selected colours:"
-    for task in TASKCOLOURS.keys():
+    for task in sorted(TASKCOLOURS.keys()):