diff --git a/tools/analyse_dump_cells.py b/tools/analyse_dump_cells.py
index 2adfaf319e9c0da33f86a6158da68e6620c47361..586a60a76afb7c1b8cb9644c8c9b9e9fb7f1fb0c 100755
--- a/tools/analyse_dump_cells.py
+++ b/tools/analyse_dump_cells.py
@@ -47,13 +47,13 @@ mpicol = 20
 
 #  Command-line arguments.
 if len(sys.argv) < 5:
-    print "usage: ", sys.argv[0], " nx ny nz cell1.dat cell2.dat ..."
+    print("usage: ", sys.argv[0], " nx ny nz cell1.dat cell2.dat ...")
     sys.exit(1)
 nx = int(sys.argv[1])
 ny = int(sys.argv[2])
 nz = int(sys.argv[3])
 
-print "# x y z onedge"
+print("# x y z onedge")
 allactives = []
 onedge = 0
 tcount = 0
@@ -116,13 +116,13 @@ for i in range(4, len(sys.argv)):
                         count = count + 1
         if count < 27:
             onedge = onedge + 1
-            print active[3][0], active[3][1], active[3][2], 1
+            print(active[3][0], active[3][1], active[3][2], 1)
         else:
-            print active[3][0], active[3][1], active[3][2], 0
+            print(active[3][0], active[3][1], active[3][2], 0)
 
     allactives.extend(actives)
 
-print "# top cells: ", tcount, " active: ", len(allactives), " on edge: ", onedge
+print("# top cells: ", tcount, " active: ", len(allactives), " on edge: ", onedge)
 
 sys.exit(0)
 
diff --git a/tools/analyse_runtime.py b/tools/analyse_runtime.py
index 0d99c514279daa533a64a1ba53ea20502891500a..199ca6ad26bb96980edee028254fd7e95618d22d 100755
--- a/tools/analyse_runtime.py
+++ b/tools/analyse_runtime.py
@@ -70,7 +70,7 @@ lastline = ''
 for i in range(num_files):
 
     filename = sys.argv[i + 1]
-    print "Analysing", filename
+    print("Analysing", filename)
 
     # Open stdout file
     file = open(filename, 'r')
@@ -98,9 +98,9 @@ times /= 1000.
 
 # Total time
 total_measured_time = np.sum(times)
-print "\nTotal measured time: %.3f s"%total_measured_time
+print("\nTotal measured time: %.3f s"%total_measured_time)
 
-print "Total time:", total_time, "s\n"
+print("Total time:", total_time, "s\n")
 
 # Ratios
 time_ratios = times / total_time
@@ -127,7 +127,7 @@ important_times = [0.]
 important_ratios = [0.,]
 important_labels = ['Others (all below %.1f\%%)'%(threshold*100), ]
 important_is_rebuild = [0] 
-print "Elements in 'Other' category (<%.1f%%):"%(threshold*100)
+print("Elements in 'Other' category (<%.1f%%):"%(threshold*100))
 for i in range(len(labels)):
     if time_ratios[i] > threshold:
         important_times.append(times[i])
@@ -137,9 +137,9 @@ for i in range(len(labels)):
     else:
         important_times[0] += times[i]
         important_ratios[0] += time_ratios[i]
-        print " - '%-30s': %.4f%%"%(labels[i], time_ratios[i] * 100)
+        print(" - '%-30s': %.4f%%"%(labels[i], time_ratios[i] * 100))
 
-print "\nUnaccounted for: %.4f%%\n"%(100 * (total_time - total_measured_time) / total_time)
+print("\nUnaccounted for: %.4f%%\n"%(100 * (total_time - total_measured_time) / total_time))
 
 important_ratios = np.array(important_ratios)
 important_is_rebuild = np.array(important_is_rebuild)
diff --git a/tools/check_ngbs.py b/tools/check_ngbs.py
index bcd06193c10481b57e7b07fe11e86f916b6d7957..eef7fdd417cdb10b00e385b6aac24b57bfa4ae98 100755
--- a/tools/check_ngbs.py
+++ b/tools/check_ngbs.py
@@ -1,323 +1,323 @@
-#!/usr/bin/env python
-
-import h5py as h
-import numpy as np
-import matplotlib
-matplotlib.use("Agg")
-from pylab import *
-import os.path
-
-kernel_gamma = 1.825742
-kernel_gamma2 = kernel_gamma * kernel_gamma
-kernel_gamma_dim = np.power(kernel_gamma,3)
-hydro_dimension_unit_sphere = 4. * np.pi / 3.
-kernel_norm = hydro_dimension_unit_sphere * kernel_gamma_dim
-error = False
-
-inputFile1 = ""
-inputFile2 = ""
-
-# Compare the values of two floats
-def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
-    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
-
-# Check list of density neighbours and check that they are correct.
-def check_density_neighbours(pids, ngb_ids_naive, ngb_ids_sort, mask, pos,
-        h_naive, h_sort, num_invalid, acc):
-
-    for k in range(0,num_invalid):
-
-        # Filter neighbour lists for valid particle ids
-        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
-        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
-
-        # Check neighbour lists for differences
-        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
-       
-        # Check for duplicate IDs
-        duplicate_check_naive = len(filter_neigh_naive) != len(set(filter_neigh_naive))
-        duplicate_check_sort = len(filter_neigh_sort) != len(set(filter_neigh_sort))
-
-        if duplicate_check_naive:
-            print "Duplicate neighbour ID found in: ", inputFile1
-            print filter_neigh_naive
-            return True
-        
-        if duplicate_check_sort:
-            print "Duplicate neighbour ID found in: ", inputFile2
-            print filter_neigh_sort
-            return True
-
-        pid = pids[mask][k]
-
-        # Loop over discrepancies and check if they are actually neighbours
-        for pjd in id_list:
-            pi_pos = pos[np.where(pids == pid)]
-            pj_pos = pos[np.where(pids == pjd)]
-            
-            hi = h_naive[np.where(pids == pid)]
-            
-            dx = pi_pos[0][0] - pj_pos[0][0]
-            dy = pi_pos[0][1] - pj_pos[0][1]
-            dz = pi_pos[0][2] - pj_pos[0][2]
-           
-            # Correct for BCs
-            dx = nearest(dx)
-            dy = nearest(dy)
-            dz = nearest(dz)
-
-            r2 = dx*dx + dy*dy + dz*dz
-            
-            hig2 = hi*hi*kernel_gamma2
-            
-            diff = abs(r2 - hig2)
-            
-            print "Particle {} is missing {}, hig2: {}, r2: {}, |r2 - hig2|: {}".format(pid,pjd,hig2, r2, diff)
-            
-            if diff < acc * hig2:
-                print "Missing interaction due to precision issue will be ignored."
-            else:
-                hi_2 = h_sort[np.where(pids == pid)]
-
-                # If a neigbour is missing and the particle has the same h throw
-                # an error.
-                if(isclose(hi,hi_2)):
-                    print "Missing interaction found but particle has the same smoothing length (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-                    return True
-                else:
-                    print "Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-
-    return False
-
-# Check list of force neighbours and check that they are correct.
-def check_force_neighbours(pids, ngb_ids_naive, ngb_ids_sort, mask, pos,
-        h_naive, h_sort, num_invalid, acc):
-
-    error_val = False
-
-    for k in range(0,num_invalid):
-
-        # Filter neighbour lists for valid particle ids
-        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
-        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
-
-        # Check neighbour lists for differences
-        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
-        
-        pid = pids[mask][k]
-
-        # Loop over discrepancies and check if they are actually neighbours
-        for pjd in id_list:
-            pi_pos = pos[np.where(pids == pid)]
-            pj_pos = pos[np.where(pids == pjd)]
-            
-            hi = h_naive[np.where(pids == pid)]
-            hj = h_naive[np.where(pids == pjd)]
-            
-            dx = pi_pos[0][0] - pj_pos[0][0]
-            dy = pi_pos[0][1] - pj_pos[0][1]
-            dz = pi_pos[0][2] - pj_pos[0][2]
- 
-            # Correct for BCs
-            dx = nearest(dx)
-            dy = nearest(dy)
-            dz = nearest(dz)
-           
-            r2 = dx*dx + dy*dy + dz*dz
-            
-            hig2 = hi*hi*kernel_gamma2
-            hjg2 = hj*hj*kernel_gamma2
-            
-            diff = abs(r2 - max(hig2, hjg2))
-            
-            print "Particle {} is missing {}, hig2: {}, hjg2: {}, r2: {}, |r2 - max(hig2,hjg2)|: {}".format(pid,pjd,hig2, hjg2, r2, diff)
-
-            if diff < acc * max(hig2,hjg2):
-                print "Missing interaction due to precision issue will be ignored."
-            else:
-                hi_2 = h_sort[np.where(pids == pid)]
-                if(isclose(hi,hi_2)):
-                    print "Missing interaction due to the same smoothing lengths will not be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-                    error_val = True
-                else:
-                    print "Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-
-    return error_val
-
-def nearest(dx):
-    if(dx > 0.5 * box_size):
-        return dx - box_size
-    elif(dx < -0.5 * box_size):
-        return dx + box_size
-    else: 
-        return dx
-
-# Parse command line arguments
-if len(sys.argv) < 3:
-    print "Error: pass input files as arguments"
-    sys.exit()
-else:
-    inputFile1 = sys.argv[1]
-    inputFile2 = sys.argv[2]
-    if os.path.exists(inputFile1) != 1:
-        print "\n{} does not exist!\n".format(inputFile1)
-        sys.exit()
-    if os.path.exists(inputFile2) != 1:
-        print "\n{} does not exist!\n".format(inputFile2)
-        sys.exit()
-
-# Open input files    
-file_naive = h.File(inputFile1, "r")
-file_sort = h.File(inputFile2, "r")
-
-box_size = file_naive["/Header"].attrs["BoxSize"][0]
-
-# Read input file fields
-ids_naive = file_naive["/PartType0/ParticleIDs"][:]
-ids_sort = file_sort["/PartType0/ParticleIDs"][:]
-
-h_naive = file_naive["/PartType0/SmoothingLength"][:]
-h_sort = file_sort["/PartType0/SmoothingLength"][:]
-
-pos_naive = file_naive["/PartType0/Coordinates"][:,:]
-#pos_sort = file_sort["/PartType0/Coordinates"][:,:]
-
-num_density_naive = file_naive["/PartType0/Num_ngb_density"][:]
-num_density_sort = file_sort["/PartType0/Num_ngb_density"][:]
-
-num_force_naive = file_naive["/PartType0/Num_ngb_force"][:]
-num_force_sort = file_sort["/PartType0/Num_ngb_force"][:]
-
-neighbour_ids_density_naive = file_naive["/PartType0/Ids_ngb_density"][:]
-neighbour_ids_density_sort = file_sort["/PartType0/Ids_ngb_density"][:]
-
-neighbour_ids_force_naive = file_naive["/PartType0/Ids_ngb_force"][:]
-neighbour_ids_force_sort = file_sort["/PartType0/Ids_ngb_force"][:]
-
-
-#wcount_naive = file_naive["/PartType0/Wcount"][:]
-#wcount_sort = file_sort["/PartType0/Wcount"][:]
-#
-#wcount_naive = wcount_naive * np.power(h_naive,3) * kernel_norm
-#wcount_sort = wcount_sort * np.power(h_sort,3) * kernel_norm
-
-# Cross check
-max_density_ngbs_naive = np.max(num_density_naive)
-max_density_ngbs_sort = np.max(num_density_sort)
-max_force_ngbs_naive = np.max(num_force_naive)
-max_force_ngbs_sort = np.max(num_force_sort)
-
-print "                   Min     Mean     Max "
-print "                   ---------------------"
-print "Ngbs density naiv: ", np.min(num_density_naive), np.mean(num_density_naive), max_density_ngbs_naive
-print "Ngbs density sort: ", np.min(num_density_sort), np.mean(num_density_sort), max_density_ngbs_sort
-print "Ngbs force naiv:   ", np.min(num_force_naive), np.mean(num_force_naive), max_force_ngbs_naive
-print "Ngbs force sort:   ", np.min(num_force_sort), np.mean(num_force_sort), max_force_ngbs_sort
-#print "Wcount naiv:   ", np.min(wcount_naive), np.mean(wcount_naive), np.max(wcount_naive)
-#print "Wcount sort:   ", np.min(wcount_sort), np.mean(wcount_sort), np.max(wcount_sort)
-
-# Sort
-index_naive = np.argsort(ids_naive)
-index_sort = np.argsort(ids_sort)
-
-num_density_naive = num_density_naive[index_naive]
-num_density_sort = num_density_sort[index_sort]
-num_force_naive = num_force_naive[index_naive]
-num_force_sort = num_force_sort[index_sort]
-ids_naive = ids_naive[index_naive]
-ids_sort = ids_sort[index_sort]
-neighbour_ids_density_naive = neighbour_ids_density_naive[index_naive]
-neighbour_ids_density_sort = neighbour_ids_density_sort[index_sort]
-neighbour_ids_force_naive = neighbour_ids_force_naive[index_naive]
-neighbour_ids_force_sort = neighbour_ids_force_sort[index_sort]
-#wcount_naive = wcount_naive[index_naive]
-#wcount_sort = wcount_sort[index_sort]
-h_naive = h_naive[index_naive]
-h_sort = h_sort[index_sort]
-pos_naive = pos_naive[index_naive]
-#pos_sort = pos_sort[index_sort]
-
-neighbour_length_naive = len(neighbour_ids_density_naive[0])
-neighbour_length_sort = len(neighbour_ids_density_sort[0])
-
-# Check that input files are logging the same number of neighbours
-if neighbour_length_naive != neighbour_length_sort:
-    print "Input files have logged different numbers of neighbour lengths!"
-    print "{} has logged: {} neighbours".format(inputFile1, neighbour_length_naive)
-    print "{} has logged: {} neighbours".format(inputFile2, neighbour_length_sort)
-    exit(1)
-
-if (max_density_ngbs_naive > neighbour_length_naive or max_force_ngbs_naive > neighbour_length_naive or
-    max_density_ngbs_sort > neighbour_length_sort or max_force_ngbs_sort > neighbour_length_sort):
-    print "The number of neighbours has exceeded the number of neighbours logged."
-    print "Modify NUM_OF_NEIGHBOURS in hydro_part.h to log more neighbours."
-    print "The highest neighbour count is: ", max(max_density_ngbs_naive,max_force_ngbs_naive, max_density_ngbs_sort,max_force_ngbs_sort)
-    exit(1)
-
-# First check
-print "\n                         Min    Max"
-print "                         ----------"
-print "Differences for density:  ", min(num_density_naive - num_density_sort), max(num_density_naive - num_density_sort)
-print "Differences for force:    ", min(num_force_naive - num_force_sort), max(num_force_naive - num_force_sort)
-
-# Get the IDs that are different
-mask_density = num_density_naive != num_density_sort
-mask_force = num_force_naive != num_force_sort
-num_invalid_density = np.sum(mask_density)
-num_invalid_force = np.sum(mask_force)
-
-print "\nNum non-zero density: ", num_invalid_density
-print "Num non-zero force:   ", num_invalid_force
-
-print "\nParticle IDs with incorrect densities"
-print "----------------------------------------"
-print ids_naive[mask_density]
-
-# Check density neighbour lists
-error += check_density_neighbours(ids_naive, neighbour_ids_density_naive,
-        neighbour_ids_density_sort, mask_density, pos_naive, h_naive, h_sort,
-        num_invalid_density, 2e-6)
-
-print "Num of density interactions", inputFile1
-print num_density_naive[mask_density]
-
-print "Num of density interactions", inputFile2
-print num_density_sort[mask_density]
-
-print "\nParticle IDs with incorrect forces"
-print "------------------------------------"
-print ids_naive[mask_force]
-
-# Check force neighbour lists
-error += check_force_neighbours(ids_naive, neighbour_ids_force_naive,
-        neighbour_ids_force_sort, mask_force, pos_naive, h_naive, h_sort,
-        num_invalid_force, 2e-6)
-
-print "Num of force interactions", inputFile1
-print num_force_naive[mask_force]
-
-#print "Smoothing lengths", inputFile1
-#print h_naive[mask_force]
-
-print "Num of force interactions", inputFile2
-print num_force_sort[mask_force]
-
-#print "Smoothing lengths", inputFile2
-#print h_sort[mask_force]
-
-# Statistics of h difference
-h_relative = (h_naive - h_sort) / h_naive
-print "h statistics: {} {} (Min, 1st Percentile)".format(np.min(h_relative), np.percentile(h_relative,1))
-print "h statistics: {} {} (Mean, Median)".format(np.mean(h_relative), np.median(h_relative))
-print "h statistics: {} {} (Max, 99th Percentile)".format(np.max(h_relative), np.percentile(h_relative, 99))
-
-if error:
-    print "\n------------------"
-    print "Differences found."
-    print "------------------"
-    exit(1)
-else:
-    print "\n---------------------"
-    print "No differences found."
-    print "---------------------"
-    exit(0)
+#!/usr/bin/env python
+
+import h5py as h
+import numpy as np
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+import os.path
+
+kernel_gamma = 1.825742
+kernel_gamma2 = kernel_gamma * kernel_gamma
+kernel_gamma_dim = np.power(kernel_gamma,3)
+hydro_dimension_unit_sphere = 4. * np.pi / 3.
+kernel_norm = hydro_dimension_unit_sphere * kernel_gamma_dim
+error = False
+
+inputFile1 = ""
+inputFile2 = ""
+
+# Compare the values of two floats
+def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
+    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+
+# Check list of density neighbours and check that they are correct.
+def check_density_neighbours(pids, ngb_ids_naive, ngb_ids_sort, mask, pos,
+        h_naive, h_sort, num_invalid, acc):
+
+    for k in range(0,num_invalid):
+
+        # Filter neighbour lists for valid particle ids
+        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
+        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
+
+        # Check neighbour lists for differences
+        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
+       
+        # Check for duplicate IDs
+        duplicate_check_naive = len(filter_neigh_naive) != len(set(filter_neigh_naive))
+        duplicate_check_sort = len(filter_neigh_sort) != len(set(filter_neigh_sort))
+
+        if duplicate_check_naive:
+            print("Duplicate neighbour ID found in: ", inputFile1)
+            print(filter_neigh_naive)
+            return True
+        
+        if duplicate_check_sort:
+            print("Duplicate neighbour ID found in: ", inputFile2)
+            print(filter_neigh_sort)
+            return True
+
+        pid = pids[mask][k]
+
+        # Loop over discrepancies and check if they are actually neighbours
+        for pjd in id_list:
+            pi_pos = pos[np.where(pids == pid)]
+            pj_pos = pos[np.where(pids == pjd)]
+            
+            hi = h_naive[np.where(pids == pid)]
+            
+            dx = pi_pos[0][0] - pj_pos[0][0]
+            dy = pi_pos[0][1] - pj_pos[0][1]
+            dz = pi_pos[0][2] - pj_pos[0][2]
+           
+            # Correct for BCs
+            dx = nearest(dx)
+            dy = nearest(dy)
+            dz = nearest(dz)
+
+            r2 = dx*dx + dy*dy + dz*dz
+            
+            hig2 = hi*hi*kernel_gamma2
+            
+            diff = abs(r2 - hig2)
+            
+            print("Particle {} is missing {}, hig2: {}, r2: {}, |r2 - hig2|: {}".format(pid,pjd,hig2, r2, diff))
+            
+            if diff < acc * hig2:
+                print("Missing interaction due to precision issue will be ignored.")
+            else:
+                hi_2 = h_sort[np.where(pids == pid)]
+
+                # If a neigbour is missing and the particle has the same h throw
+                # an error.
+                if(isclose(hi,hi_2)):
+                    print("Missing interaction found but particle has the same smoothing length (hi_1: %e, hi_2: %e)."%(hi, hi_2))
+                    return True
+                else:
+                    print("Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2))
+
+    return False
+
+# Check list of force neighbours and check that they are correct.
+def check_force_neighbours(pids, ngb_ids_naive, ngb_ids_sort, mask, pos,
+        h_naive, h_sort, num_invalid, acc):
+
+    error_val = False
+
+    for k in range(0,num_invalid):
+
+        # Filter neighbour lists for valid particle ids
+        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
+        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
+
+        # Check neighbour lists for differences
+        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
+        
+        pid = pids[mask][k]
+
+        # Loop over discrepancies and check if they are actually neighbours
+        for pjd in id_list:
+            pi_pos = pos[np.where(pids == pid)]
+            pj_pos = pos[np.where(pids == pjd)]
+            
+            hi = h_naive[np.where(pids == pid)]
+            hj = h_naive[np.where(pids == pjd)]
+            
+            dx = pi_pos[0][0] - pj_pos[0][0]
+            dy = pi_pos[0][1] - pj_pos[0][1]
+            dz = pi_pos[0][2] - pj_pos[0][2]
+ 
+            # Correct for BCs
+            dx = nearest(dx)
+            dy = nearest(dy)
+            dz = nearest(dz)
+           
+            r2 = dx*dx + dy*dy + dz*dz
+            
+            hig2 = hi*hi*kernel_gamma2
+            hjg2 = hj*hj*kernel_gamma2
+            
+            diff = abs(r2 - max(hig2, hjg2))
+            
+            print("Particle {} is missing {}, hig2: {}, hjg2: {}, r2: {}, |r2 - max(hig2,hjg2)|: {}".format(pid,pjd,hig2, hjg2, r2, diff))
+
+            if diff < acc * max(hig2,hjg2):
+                print("Missing interaction due to precision issue will be ignored.")
+            else:
+                hi_2 = h_sort[np.where(pids == pid)]
+                if(isclose(hi,hi_2)):
+                    print("Missing interaction due to the same smoothing lengths will not be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2))
+                    error_val = True
+                else:
+                    print("Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2))
+
+    return error_val
+
+def nearest(dx):
+    if(dx > 0.5 * box_size):
+        return dx - box_size
+    elif(dx < -0.5 * box_size):
+        return dx + box_size
+    else: 
+        return dx
+
+# Parse command line arguments
+if len(sys.argv) < 3:
+    print("Error: pass input files as arguments")
+    sys.exit()
+else:
+    inputFile1 = sys.argv[1]
+    inputFile2 = sys.argv[2]
+    if os.path.exists(inputFile1) != 1:
+        print("\n{} does not exist!\n".format(inputFile1))
+        sys.exit()
+    if os.path.exists(inputFile2) != 1:
+        print("\n{} does not exist!\n".format(inputFile2))
+        sys.exit()
+
+# Open input files    
+file_naive = h.File(inputFile1, "r")
+file_sort = h.File(inputFile2, "r")
+
+box_size = file_naive["/Header"].attrs["BoxSize"][0]
+
+# Read input file fields
+ids_naive = file_naive["/PartType0/ParticleIDs"][:]
+ids_sort = file_sort["/PartType0/ParticleIDs"][:]
+
+h_naive = file_naive["/PartType0/SmoothingLength"][:]
+h_sort = file_sort["/PartType0/SmoothingLength"][:]
+
+pos_naive = file_naive["/PartType0/Coordinates"][:,:]
+#pos_sort = file_sort["/PartType0/Coordinates"][:,:]
+
+num_density_naive = file_naive["/PartType0/Num_ngb_density"][:]
+num_density_sort = file_sort["/PartType0/Num_ngb_density"][:]
+
+num_force_naive = file_naive["/PartType0/Num_ngb_force"][:]
+num_force_sort = file_sort["/PartType0/Num_ngb_force"][:]
+
+neighbour_ids_density_naive = file_naive["/PartType0/Ids_ngb_density"][:]
+neighbour_ids_density_sort = file_sort["/PartType0/Ids_ngb_density"][:]
+
+neighbour_ids_force_naive = file_naive["/PartType0/Ids_ngb_force"][:]
+neighbour_ids_force_sort = file_sort["/PartType0/Ids_ngb_force"][:]
+
+
+#wcount_naive = file_naive["/PartType0/Wcount"][:]
+#wcount_sort = file_sort["/PartType0/Wcount"][:]
+#
+#wcount_naive = wcount_naive * np.power(h_naive,3) * kernel_norm
+#wcount_sort = wcount_sort * np.power(h_sort,3) * kernel_norm
+
+# Cross check
+max_density_ngbs_naive = np.max(num_density_naive)
+max_density_ngbs_sort = np.max(num_density_sort)
+max_force_ngbs_naive = np.max(num_force_naive)
+max_force_ngbs_sort = np.max(num_force_sort)
+
+print("                   Min     Mean     Max ")
+print("                   ---------------------")
+print("Ngbs density naiv: ", np.min(num_density_naive), np.mean(num_density_naive), max_density_ngbs_naive)
+print("Ngbs density sort: ", np.min(num_density_sort), np.mean(num_density_sort), max_density_ngbs_sort)
+print("Ngbs force naiv:   ", np.min(num_force_naive), np.mean(num_force_naive), max_force_ngbs_naive)
+print("Ngbs force sort:   ", np.min(num_force_sort), np.mean(num_force_sort), max_force_ngbs_sort)
+#print "Wcount naiv:   ", np.min(wcount_naive), np.mean(wcount_naive), np.max(wcount_naive)
+#print "Wcount sort:   ", np.min(wcount_sort), np.mean(wcount_sort), np.max(wcount_sort)
+
+# Sort
+index_naive = np.argsort(ids_naive)
+index_sort = np.argsort(ids_sort)
+
+num_density_naive = num_density_naive[index_naive]
+num_density_sort = num_density_sort[index_sort]
+num_force_naive = num_force_naive[index_naive]
+num_force_sort = num_force_sort[index_sort]
+ids_naive = ids_naive[index_naive]
+ids_sort = ids_sort[index_sort]
+neighbour_ids_density_naive = neighbour_ids_density_naive[index_naive]
+neighbour_ids_density_sort = neighbour_ids_density_sort[index_sort]
+neighbour_ids_force_naive = neighbour_ids_force_naive[index_naive]
+neighbour_ids_force_sort = neighbour_ids_force_sort[index_sort]
+#wcount_naive = wcount_naive[index_naive]
+#wcount_sort = wcount_sort[index_sort]
+h_naive = h_naive[index_naive]
+h_sort = h_sort[index_sort]
+pos_naive = pos_naive[index_naive]
+#pos_sort = pos_sort[index_sort]
+
+neighbour_length_naive = len(neighbour_ids_density_naive[0])
+neighbour_length_sort = len(neighbour_ids_density_sort[0])
+
+# Check that input files are logging the same number of neighbours
+if neighbour_length_naive != neighbour_length_sort:
+    print("Input files have logged different numbers of neighbour lengths!")
+    print("{} has logged: {} neighbours".format(inputFile1, neighbour_length_naive))
+    print("{} has logged: {} neighbours".format(inputFile2, neighbour_length_sort))
+    exit(1)
+
+if (max_density_ngbs_naive > neighbour_length_naive or max_force_ngbs_naive > neighbour_length_naive or
+    max_density_ngbs_sort > neighbour_length_sort or max_force_ngbs_sort > neighbour_length_sort):
+    print("The number of neighbours has exceeded the number of neighbours logged.")
+    print("Modify NUM_OF_NEIGHBOURS in hydro_part.h to log more neighbours.")
+    print("The highest neighbour count is: ", max(max_density_ngbs_naive,max_force_ngbs_naive, max_density_ngbs_sort,max_force_ngbs_sort))
+    exit(1)
+
+# First check
+print("\n                         Min    Max")
+print("                         ----------")
+print("Differences for density:  ", min(num_density_naive - num_density_sort), max(num_density_naive - num_density_sort))
+print("Differences for force:    ", min(num_force_naive - num_force_sort), max(num_force_naive - num_force_sort))
+
+# Get the IDs that are different
+mask_density = num_density_naive != num_density_sort
+mask_force = num_force_naive != num_force_sort
+num_invalid_density = np.sum(mask_density)
+num_invalid_force = np.sum(mask_force)
+
+print("\nNum non-zero density: ", num_invalid_density)
+print("Num non-zero force:   ", num_invalid_force)
+
+print("\nParticle IDs with incorrect densities")
+print("----------------------------------------")
+print(ids_naive[mask_density])
+
+# Check density neighbour lists
+error += check_density_neighbours(ids_naive, neighbour_ids_density_naive,
+        neighbour_ids_density_sort, mask_density, pos_naive, h_naive, h_sort,
+        num_invalid_density, 2e-6)
+
+print("Num of density interactions", inputFile1)
+print(num_density_naive[mask_density])
+
+print("Num of density interactions", inputFile2)
+print(num_density_sort[mask_density])
+
+print("\nParticle IDs with incorrect forces")
+print("------------------------------------")
+print(ids_naive[mask_force])
+
+# Check force neighbour lists
+error += check_force_neighbours(ids_naive, neighbour_ids_force_naive,
+        neighbour_ids_force_sort, mask_force, pos_naive, h_naive, h_sort,
+        num_invalid_force, 2e-6)
+
+print("Num of force interactions", inputFile1)
+print(num_force_naive[mask_force])
+
+#print "Smoothing lengths", inputFile1
+#print h_naive[mask_force]
+
+print("Num of force interactions", inputFile2)
+print(num_force_sort[mask_force])
+
+#print "Smoothing lengths", inputFile2
+#print h_sort[mask_force]
+
+# Statistics of h difference
+h_relative = (h_naive - h_sort) / h_naive
+print("h statistics: {} {} (Min, 1st Percentile)".format(np.min(h_relative), np.percentile(h_relative,1)))
+print("h statistics: {} {} (Mean, Median)".format(np.mean(h_relative), np.median(h_relative)))
+print("h statistics: {} {} (Max, 99th Percentile)".format(np.max(h_relative), np.percentile(h_relative, 99)))
+
+if error:
+    print("\n------------------")
+    print("Differences found.")
+    print("------------------")
+    exit(1)
+else:
+    print("\n---------------------")
+    print("No differences found.")
+    print("---------------------")
+    exit(0)
diff --git a/tools/plot_gravity_checks.py b/tools/plot_gravity_checks.py
index 23866ac2a6952ff918dbc80533269c0d2e9bcbc5..59643bb58213f1c05f202e47353728f4bc892a1d 100755
--- a/tools/plot_gravity_checks.py
+++ b/tools/plot_gravity_checks.py
@@ -73,7 +73,7 @@ exact_a = exact_a[sort_index, :]
 exact_pot = exact_pot[sort_index]
 exact_a_norm = np.sqrt(exact_a[:,0]**2 + exact_a[:,1]**2 + exact_a[:,2]**2)
 
-print "Number of particles tested:", np.size(exact_ids)
+print("Number of particles tested:", np.size(exact_ids))
     
 # Start the plot
 plt.figure()
@@ -103,23 +103,23 @@ if len(gadget2_file_list) != 0:
 
     # Cross-checks
     if not np.array_equal(exact_ids, gadget2_ids):
-        print "Comparing different IDs !"
+        print("Comparing different IDs !")
 
     if np.max(np.abs(exact_pos - gadget2_pos)/np.abs(gadget2_pos)) > 1e-6:
-        print "Comparing different positions ! max difference:"
+        print("Comparing different positions ! max difference:")
         index = np.argmax(exact_pos[:,0]**2 + exact_pos[:,1]**2 + exact_pos[:,2]**2 - gadget2_pos[:,0]**2 - gadget2_pos[:,1]**2 - gadget2_pos[:,2]**2)
-        print "Gadget2 (id=%d):"%gadget2_ids[index], gadget2_pos[index,:], "exact (id=%d):"%exact_ids[index], exact_pos[index,:], "\n"
+        print("Gadget2 (id=%d):"%gadget2_ids[index], gadget2_pos[index,:], "exact (id=%d):"%exact_ids[index], exact_pos[index,:], "\n")
 
     diff = np.abs(exact_a_norm - gadget2_exact_a_norm) / np.abs(gadget2_exact_a_norm)
     max_diff = np.max(diff)
     if max_diff > 2e-6:
-        print "Comparing different exact accelerations !"
-        print "Median=", np.median(diff), "Mean=", np.mean(diff), "99%=", np.percentile(diff, 99)
-        print "max difference ( relative diff =", max_diff, "):"
+        print("Comparing different exact accelerations !")
+        print("Median=", np.median(diff), "Mean=", np.mean(diff), "99%=", np.percentile(diff, 99))
+        print("max difference ( relative diff =", max_diff, "):")
         #index = np.argmax(exact_a[:,0]**2 + exact_a[:,1]**2 + exact_a[:,2]**2 - gadget2_a_exact[:,0]**2 - gadget2_a_exact[:,1]**2 - gadget2_a_exact[:,2]**2)
         index = np.argmax(diff)
-        print "a_exact --- Gadget2:", gadget2_a_exact[index,:], "exact:", exact_a[index,:]
-        print "pos ---     Gadget2: (id=%d):"%gadget2_ids[index], gadget2_pos[index,:], "exact (id=%d):"%gadget2_ids[index], gadget2_pos[index,:],"\n"
+        print("a_exact --- Gadget2:", gadget2_a_exact[index,:], "exact:", exact_a[index,:])
+        print("pos ---     Gadget2: (id=%d):"%gadget2_ids[index], gadget2_pos[index,:], "exact (id=%d):"%gadget2_ids[index], gadget2_pos[index,:],"\n")
 
     
     # Compute the error norm
@@ -154,12 +154,12 @@ if len(gadget2_file_list) != 0:
     max_y = np.max(error_y)
     max_z = np.max(error_z)
 
-    print "Gadget-2 ---- "
-    print "Norm: median= %f 99%%= %f max= %f"%(norm_median, norm_per99, norm_max)
-    print "X   : median= %f 99%%= %f max= %f"%(median_x, per99_x, max_x)
-    print "Y   : median= %f 99%%= %f max= %f"%(median_y, per99_y, max_y)
-    print "Z   : median= %f 99%%= %f max= %f"%(median_z, per99_z, max_z)
-    print ""
+    print("Gadget-2 ---- ")
+    print("Norm: median= %f 99%%= %f max= %f"%(norm_median, norm_per99, norm_max))
+    print("X   : median= %f 99%%= %f max= %f"%(median_x, per99_x, max_x))
+    print("Y   : median= %f 99%%= %f max= %f"%(median_y, per99_y, max_y))
+    print("Z   : median= %f 99%%= %f max= %f"%(median_z, per99_z, max_z))
+    print("")
 
     plt.subplot(231)    
     plt.text(min_error * 1.5, 1.55, "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(norm_median, norm_per99), ha="left", va="top", alpha=0.8)
@@ -194,20 +194,20 @@ for i in range(num_order):
 
     # Cross-checks
     if not np.array_equal(exact_ids, ids):
-        print "Comparing different IDs !"
+        print("Comparing different IDs !")
 
     if np.max(np.abs(exact_pos - pos)/np.abs(pos)) > 1e-6:
-        print "Comparing different positions ! max difference:"
+        print("Comparing different positions ! max difference:")
         index = np.argmax(exact_pos[:,0]**2 + exact_pos[:,1]**2 + exact_pos[:,2]**2 - pos[:,0]**2 - pos[:,1]**2 - pos[:,2]**2)
-        print "SWIFT (id=%d):"%ids[index], pos[index,:], "exact (id=%d):"%exact_ids[index], exact_pos[index,:], "\n"
+        print("SWIFT (id=%d):"%ids[index], pos[index,:], "exact (id=%d):"%exact_ids[index], exact_pos[index,:], "\n")
     
     # Compute the error norm
     diff = exact_a - a_grav
     diff_pot = exact_pot - pot
 
     # Correct for different normalization of potential
-    print "Difference in normalization of potential:", np.mean(diff_pot),
-    print "std_dev=", np.std(diff_pot), "99-percentile:", np.percentile(diff_pot, 99)-np.median(diff_pot), "1-percentile:", np.median(diff_pot) - np.percentile(diff_pot, 1)
+    print("Difference in normalization of potential:", np.mean(diff_pot), end=' ')
+    print("std_dev=", np.std(diff_pot), "99-percentile:", np.percentile(diff_pot, 99)-np.median(diff_pot), "1-percentile:", np.median(diff_pot) - np.percentile(diff_pot, 1))
 
     exact_pot -= np.mean(diff_pot)
     diff_pot = exact_pot - pot
@@ -245,13 +245,13 @@ for i in range(num_order):
     max_z = np.max(error_z)
     max_pot = np.max(error_pot)
 
-    print "Order %d ---- "%order[i]
-    print "Norm: median= %f 99%%= %f max= %f"%(norm_median, norm_per99, norm_max)
-    print "X   : median= %f 99%%= %f max= %f"%(median_x, per99_x, max_x)
-    print "Y   : median= %f 99%%= %f max= %f"%(median_y, per99_y, max_y)
-    print "Z   : median= %f 99%%= %f max= %f"%(median_z, per99_z, max_z)
-    print "Pot : median= %f 99%%= %f max= %f"%(median_pot, per99_pot, max_pot)
-    print ""
+    print("Order %d ---- "%order[i])
+    print("Norm: median= %f 99%%= %f max= %f"%(norm_median, norm_per99, norm_max))
+    print("X   : median= %f 99%%= %f max= %f"%(median_x, per99_x, max_x))
+    print("Y   : median= %f 99%%= %f max= %f"%(median_y, per99_y, max_y))
+    print("Z   : median= %f 99%%= %f max= %f"%(median_z, per99_z, max_z))
+    print("Pot : median= %f 99%%= %f max= %f"%(median_pot, per99_pot, max_pot))
+    print("")
     
     plt.subplot(231)    
     plt.semilogx(bins, error_x_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
diff --git a/tools/plot_scaling_results.py b/tools/plot_scaling_results.py
index e39f0d2d0c00eecf7680b2f090bd2c0aa29ed8bb..c5e8b2e00a4b092148cb7a1c5edb7015bcb0c440 100755
--- a/tools/plot_scaling_results.py
+++ b/tools/plot_scaling_results.py
@@ -58,7 +58,7 @@ inputFileNames = []
 
 # Work out how many data series there are
 if len(sys.argv) == 1:
-  print "Please specify an input file in the arguments."
+  print("Please specify an input file in the arguments.")
   sys.exit()
 else:
   for fileName in sys.argv[1:]:
@@ -177,23 +177,23 @@ def parse_files():
 def print_results(totalTime,parallelEff,version):
  
   for i in range(0,numOfSeries):
-    print " "
-    print "------------------------------------"
-    print version[i]
-    print "------------------------------------"
-    print "Wall clock time for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
+    print(" ")
+    print("------------------------------------")
+    print(version[i])
+    print("------------------------------------")
+    print("Wall clock time for: {} time steps".format(numTimesteps))
+    print("------------------------------------")
     
     for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(totalTime[i][j])
+      print(str(threadList[i][j]) + " threads: {}".format(totalTime[i][j]))
     
-    print " "
-    print "------------------------------------"
-    print "Parallel Efficiency for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
+    print(" ")
+    print("------------------------------------")
+    print("Parallel Efficiency for: {} time steps".format(numTimesteps))
+    print("------------------------------------")
     
     for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j])
+      print(str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j]))
 
   return
 
diff --git a/tools/plot_scaling_results_breakdown.py b/tools/plot_scaling_results_breakdown.py
index 6a87e42bcd393d543187e768e31a15bc56f1ae6a..8bdb79dd07f601709e9037a468354d323b6a6163 100755
--- a/tools/plot_scaling_results_breakdown.py
+++ b/tools/plot_scaling_results_breakdown.py
@@ -58,7 +58,7 @@ inputFileNames = []
 
 # Work out how many data series there are
 if len(sys.argv) == 1:
-  print "Please specify an input file in the arguments."
+  print("Please specify an input file in the arguments.")
   sys.exit()
 else:
   for fileName in sys.argv[1:]:
@@ -178,23 +178,23 @@ def parse_files():
 def print_results(totalTime,parallelEff,version):
  
   for i in range(0,numOfSeries):
-    print " "
-    print "------------------------------------"
-    print version[i]
-    print "------------------------------------"
-    print "Wall clock time for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
+    print(" ")
+    print("------------------------------------")
+    print(version[i])
+    print("------------------------------------")
+    print("Wall clock time for: {} time steps".format(numTimesteps))
+    print("------------------------------------")
     
     for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(totalTime[i][j])
+      print(str(threadList[i][j]) + " threads: {}".format(totalTime[i][j]))
     
-    print " "
-    print "------------------------------------"
-    print "Parallel Efficiency for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
+    print(" ")
+    print("------------------------------------")
+    print("Parallel Efficiency for: {} time steps".format(numTimesteps))
+    print("------------------------------------")
     
     for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j])
+      print(str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j]))
 
   return
 
diff --git a/tools/task_plots/analyse_tasks.py b/tools/task_plots/analyse_tasks.py
index 48a00a63ea5da5cb80ebd6f10187cc613c1a5ed5..7d5028b73bdfba40d8a34b360432e41feb08c4d6 100755
--- a/tools/task_plots/analyse_tasks.py
+++ b/tools/task_plots/analyse_tasks.py
@@ -73,10 +73,10 @@ full_step = data[0,:]
 #  Do we have an MPI file?
 full_step = data[0,:]
 if full_step.size == 13:
-    print "# MPI mode"
+    print("# MPI mode")
     mpimode = True
     nranks = int(max(data[:,0])) + 1
-    print "# Number of ranks:", nranks
+    print("# Number of ranks:", nranks)
     rankcol = 0
     threadscol = 1
     taskcol = 2
@@ -87,7 +87,7 @@ if full_step.size == 13:
     g_updates = int(full_step[8])
     s_updates = int(full_step[9])
 else:
-    print "# non MPI mode"
+    print("# non MPI mode")
     nranks = 1
     mpimode = False
     rankcol = -1
@@ -103,24 +103,24 @@ else:
 #  Get the CPU clock to convert ticks into milliseconds.
 CPU_CLOCK = float(full_step[-1]) / 1000.0
 if args.verbose:
-    print "# CPU frequency:", CPU_CLOCK * 1000.0
-print "#   updates:", updates
-print "# g_updates:", g_updates
-print "# s_updates:", s_updates
+    print("# CPU frequency:", CPU_CLOCK * 1000.0)
+print("#   updates:", updates)
+print("# g_updates:", g_updates)
+print("# s_updates:", s_updates)
 
 if mpimode:
     if args.rank == "all":
-        ranks = range(nranks)
+        ranks = list(range(nranks))
     else:
         ranks = [int(args.rank)]
         if ranks[0] >= nranks:
-            print "Error: maximum rank is " + str(nranks - 1)
+            print("Error: maximum rank is " + str(nranks - 1))
             sys.exit(1)
 else:
     ranks = [1]
 
 maxthread = int(max(data[:,threadscol])) + 1
-print "# Maximum thread id:", maxthread
+print("# Maximum thread id:", maxthread)
 
 #  Avoid start and end times of zero.
 sdata = data[data[:,ticcol] != 0]
@@ -129,7 +129,7 @@ sdata = data[data[:,toccol] != 0]
 #  Now we process the required ranks.
 for rank in ranks:
     if mpimode:
-        print "# Rank", rank
+        print("# Rank", rank)
         data = sdata[sdata[:,rankcol] == rank]
         full_step = data[0,:]
     else:
@@ -146,8 +146,8 @@ for rank in ranks:
 
     #  Calculate the time range.
     total_t = (toc_step - tic_step)/ CPU_CLOCK
-    print "# Data range: ", total_t, "ms"
-    print
+    print("# Data range: ", total_t, "ms")
+    print()
 
     #  Correct times to relative values.
     start_t = float(tic_step)
@@ -179,11 +179,11 @@ for rank in ranks:
         threadids.append(i)
 
     #  Times per task.
-    print "# Task times:"
-    print "# -----------"
-    print "# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
+    print("# Task times:")
+    print("# -----------")
+    print("# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
           .format("type/subtype", "count","minimum", "maximum",
-                  "sum", "mean", "percent")
+                  "sum", "mean", "percent"))
 
     alltasktimes = {}
     sidtimes = {}
@@ -206,32 +206,32 @@ for rank in ranks:
                     sidtimes[my_sid] = []
                 sidtimes[my_sid].append(dt)
 
-        print "# Thread : ", i
+        print("# Thread : ", i)
         for key in sorted(tasktimes.keys()):
             taskmin = min(tasktimes[key])
             taskmax = max(tasktimes[key])
             tasksum = sum(tasktimes[key])
-            print "{0:19s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+            print("{0:19s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
                   .format(key, len(tasktimes[key]), taskmin, taskmax, tasksum,
-                          tasksum / len(tasktimes[key]), tasksum / total_t * 100.0)
-        print
+                          tasksum / len(tasktimes[key]), tasksum / total_t * 100.0))
+        print()
 
-    print "# All threads : "
+    print("# All threads : ")
     for key in sorted(alltasktimes.keys()):
         taskmin = min(alltasktimes[key])
         taskmax = max(alltasktimes[key])
         tasksum = sum(alltasktimes[key])
-        print "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+        print("{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
               .format(key, len(alltasktimes[key]), taskmin, taskmax, tasksum,
                       tasksum / len(alltasktimes[key]),
-                      tasksum / (len(threadids) * total_t) * 100.0)
-    print
+                      tasksum / (len(threadids) * total_t) * 100.0))
+    print()
 
     # For pairs, show stuff sorted by SID
-    print "# By SID (all threads): "
-    print "# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
+    print("# By SID (all threads): ")
+    print("# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
         .format("Pair/Sub-pair SID", "count","minimum", "maximum",
-                "sum", "mean", "percent")
+                "sum", "mean", "percent"))
 
     for sid in range(0,13):
         if sid in sidtimes:
@@ -246,22 +246,22 @@ for rank in ranks:
             sidsum = 0.
             sidcount = 0
             sidmean = 0.
-        print "{0:3d} {1:15s}: {2:7d} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.4f} {7:9.2f}"\
+        print("{0:3d} {1:15s}: {2:7d} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.4f} {7:9.2f}"\
             .format(sid, SIDS[sid], sidcount, sidmin, sidmax, sidsum,
-                    sidmean, sidsum / (len(threadids) * total_t) * 100.0)
-    print
+                    sidmean, sidsum / (len(threadids) * total_t) * 100.0))
+    print()
 
     #  Dead times.
-    print "# Times not in tasks (deadtimes)"
-    print "# ------------------------------"
-    print "# Time before first task:"
-    print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
+    print("# Times not in tasks (deadtimes)")
+    print("# ------------------------------")
+    print("# Time before first task:")
+    print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
     predeadtimes = []
     for i in threadids:
         if len(tasks[i]) > 0:
             predeadtime = tasks[i][0][0]
-            print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-                  .format(i, predeadtime, predeadtime / total_t * 100.0)
+            print("thread {0:2d}: {1:9.4f} {2:9.4f}"\
+                  .format(i, predeadtime, predeadtime / total_t * 100.0))
             predeadtimes.append(predeadtime)
         else:
             predeadtimes.append(0.0)
@@ -269,22 +269,22 @@ for rank in ranks:
     predeadmin = min(predeadtimes)
     predeadmax = max(predeadtimes)
     predeadsum = sum(predeadtimes)
-    print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+    print("#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+          .format("count", "minimum", "maximum", "sum", "mean", "percent"))
+    print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
           .format(len(predeadtimes), predeadmin, predeadmax, predeadsum,
                   predeadsum / len(predeadtimes),
-                  predeadsum / (len(threadids) * total_t ) * 100.0)
-    print
+                  predeadsum / (len(threadids) * total_t ) * 100.0))
+    print()
 
-    print "# Time after last task:"
-    print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
+    print("# Time after last task:")
+    print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
     postdeadtimes = []
     for i in threadids:
         if len(tasks[i]) > 0:
             postdeadtime = total_t - tasks[i][-1][1]
-            print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-                  .format(i, postdeadtime, postdeadtime / total_t * 100.0)
+            print("thread {0:2d}: {1:9.4f} {2:9.4f}"\
+                  .format(i, postdeadtime, postdeadtime / total_t * 100.0))
             postdeadtimes.append(postdeadtime)
         else:
             postdeadtimes.append(0.0)
@@ -292,18 +292,18 @@ for rank in ranks:
     postdeadmin = min(postdeadtimes)
     postdeadmax = max(postdeadtimes)
     postdeadsum = sum(postdeadtimes)
-    print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+    print("#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+          .format("count", "minimum", "maximum", "sum", "mean", "percent"))
+    print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
           .format(len(postdeadtimes), postdeadmin, postdeadmax, postdeadsum,
                   postdeadsum / len(postdeadtimes),
-                  postdeadsum / (len(threadids) * total_t ) * 100.0)
-    print
+                  postdeadsum / (len(threadids) * total_t ) * 100.0))
+    print()
 
     #  Time in engine, i.e. from first to last tasks.
-    print "# Time between tasks (engine deadtime):"
-    print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
+    print("# Time between tasks (engine deadtime):")
+    print("# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+          .format("count", "minimum", "maximum", "sum", "mean", "percent"))
     enginedeadtimes = []
     for i in threadids:
         deadtimes = []
@@ -326,24 +326,24 @@ for rank in ranks:
         deadmin = min(deadtimes)
         deadmax = max(deadtimes)
         deadsum = sum(deadtimes)
-        print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+        print("thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
               .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                      deadsum / len(deadtimes), deadsum / total_t * 100.0)
+                      deadsum / len(deadtimes), deadsum / total_t * 100.0))
         enginedeadtimes.extend(deadtimes)
 
     deadmin = min(enginedeadtimes)
     deadmax = max(enginedeadtimes)
     deadsum = sum(enginedeadtimes)
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+    print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
           .format(len(enginedeadtimes), deadmin, deadmax, deadsum,
                   deadsum / len(enginedeadtimes),
-                  deadsum / (len(threadids) * total_t ) * 100.0)
-    print
+                  deadsum / (len(threadids) * total_t ) * 100.0))
+    print()
 
     #  All times in step.
-    print "# All deadtimes:"
-    print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
+    print("# All deadtimes:")
+    print("# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+          .format("count", "minimum", "maximum", "sum", "mean", "percent"))
     alldeadtimes = []
     for i in threadids:
         deadtimes = []
@@ -358,18 +358,18 @@ for rank in ranks:
         deadmin = min(deadtimes)
         deadmax = max(deadtimes)
         deadsum = sum(deadtimes)
-        print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+        print("thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
               .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
+                  deadsum / len(deadtimes), deadsum / total_t * 100.0))
         alldeadtimes.extend(deadtimes)
 
     deadmin = min(alldeadtimes)
     deadmax = max(alldeadtimes)
     deadsum = sum(alldeadtimes)
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+    print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
           .format(len(alldeadtimes), deadmin, deadmax, deadsum,
                   deadsum / len(alldeadtimes),
-                  deadsum / (len(threadids) * total_t ) * 100.0)
-    print
+                  deadsum / (len(threadids) * total_t ) * 100.0))
+    print()
 
 sys.exit(0)
diff --git a/tools/task_plots/analyse_threadpool_tasks.py b/tools/task_plots/analyse_threadpool_tasks.py
index 609af363b4110e010d6714bef6862d40e5acb278..351af07eb1af4d3e8d92ebf02fc3094e416cccd1 100755
--- a/tools/task_plots/analyse_threadpool_tasks.py
+++ b/tools/task_plots/analyse_threadpool_tasks.py
@@ -49,14 +49,14 @@ infile = args.input
 
 #  Read header. First two lines.
 with open(infile) as infid:
-    head = [next(infid) for x in xrange(2)]
+    head = [next(infid) for x in range(2)]
 header = head[1][2:].strip()
 header = eval(header)
 nthread = int(header['num_threads']) + 1
 CPU_CLOCK = float(header['cpufreq']) / 1000.0
-print "Number of threads: ", nthread - 1
+print("Number of threads: ", nthread - 1)
 if args.verbose:
-    print "CPU frequency:", CPU_CLOCK * 1000.0
+    print("CPU frequency:", CPU_CLOCK * 1000.0)
 
 #  Read input.
 data = pl.genfromtxt(infile, dtype=None, delimiter=" ")
@@ -89,8 +89,8 @@ toc_step = max(tocs)
 
 #  Calculate the time range.
 total_t = (toc_step - tic_step)/ CPU_CLOCK
-print "# Data range: ", total_t, "ms"
-print
+print("# Data range: ", total_t, "ms")
+print()
 
 #  Correct times to relative millisecs.
 start_t = float(tic_step)
@@ -117,11 +117,11 @@ for i in range(nthread):
         threadids.append(i)
 
 #  Times per task.
-print "# Task times:"
-print "# -----------"
-print "# {0:<31s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
+print("# Task times:")
+print("# -----------")
+print("# {0:<31s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
       .format("type/subtype", "count","minimum", "maximum",
-              "sum", "mean", "percent")
+              "sum", "mean", "percent"))
 alltasktimes = {}
 sidtimes = {}
 for i in threadids:
@@ -137,74 +137,74 @@ for i in threadids:
             alltasktimes[key] = []
         alltasktimes[key].append(dt)
 
-    print "# Thread : ", i
+    print("# Thread : ", i)
     for key in sorted(tasktimes.keys()):
         taskmin = min(tasktimes[key])
         taskmax = max(tasktimes[key])
         tasksum = sum(tasktimes[key])
-        print "{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+        print("{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
               .format(key, len(tasktimes[key]), taskmin, taskmax, tasksum,
-                      tasksum / len(tasktimes[key]), tasksum / total_t * 100.0)
-    print
+                      tasksum / len(tasktimes[key]), tasksum / total_t * 100.0))
+    print()
 
-print "# All threads : "
+print("# All threads : ")
 for key in sorted(alltasktimes.keys()):
     taskmin = min(alltasktimes[key])
     taskmax = max(alltasktimes[key])
     tasksum = sum(alltasktimes[key])
-    print "{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+    print("{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
           .format(key, len(alltasktimes[key]), taskmin, taskmax, tasksum,
                   tasksum / len(alltasktimes[key]),
-                  tasksum / (len(threadids) * total_t) * 100.0)
-print
+                  tasksum / (len(threadids) * total_t) * 100.0))
+print()
 
 #  Dead times.
-print "# Times not in tasks (deadtimes)"
-print "# ------------------------------"
-print "# Time before first task:"
-print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
+print("# Times not in tasks (deadtimes)")
+print("# ------------------------------")
+print("# Time before first task:")
+print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
 predeadtimes = []
 for i in threadids:
     predeadtime = tasks[i][0][0]
-    print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-          .format(i, predeadtime, predeadtime / total_t * 100.0)
+    print("thread {0:2d}: {1:9.4f} {2:9.4f}"\
+          .format(i, predeadtime, predeadtime / total_t * 100.0))
     predeadtimes.append(predeadtime)
 
 predeadmin = min(predeadtimes)
 predeadmax = max(predeadtimes)
 predeadsum = sum(predeadtimes)
-print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+print("#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+      .format("count", "minimum", "maximum", "sum", "mean", "percent"))
+print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
       .format(len(predeadtimes), predeadmin, predeadmax, predeadsum,
               predeadsum / len(predeadtimes),
-              predeadsum / (len(threadids) * total_t ) * 100.0)
-print
+              predeadsum / (len(threadids) * total_t ) * 100.0))
+print()
 
-print "# Time after last task:"
-print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
+print("# Time after last task:")
+print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
 postdeadtimes = []
 for i in threadids:
     postdeadtime = total_t - tasks[i][-1][1]
-    print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-          .format(i, postdeadtime, postdeadtime / total_t * 100.0)
+    print("thread {0:2d}: {1:9.4f} {2:9.4f}"\
+          .format(i, postdeadtime, postdeadtime / total_t * 100.0))
     postdeadtimes.append(postdeadtime)
 
 postdeadmin = min(postdeadtimes)
 postdeadmax = max(postdeadtimes)
 postdeadsum = sum(postdeadtimes)
-print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+print("#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+      .format("count", "minimum", "maximum", "sum", "mean", "percent"))
+print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
       .format(len(postdeadtimes), postdeadmin, postdeadmax, postdeadsum,
               postdeadsum / len(postdeadtimes),
-              postdeadsum / (len(threadids) * total_t ) * 100.0)
-print
+              postdeadsum / (len(threadids) * total_t ) * 100.0))
+print()
 
 #  Time in threadpool, i.e. from first to last tasks.
-print "# Time between tasks (threadpool deadtime):"
-print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
+print("# Time between tasks (threadpool deadtime):")
+print("# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+      .format("count", "minimum", "maximum", "sum", "mean", "percent"))
 threadpooldeadtimes = []
 for i in threadids:
     deadtimes = []
@@ -224,24 +224,24 @@ for i in threadids:
     deadmin = min(deadtimes)
     deadmax = max(deadtimes)
     deadsum = sum(deadtimes)
-    print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+    print("thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
           .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
+                  deadsum / len(deadtimes), deadsum / total_t * 100.0))
     threadpooldeadtimes.extend(deadtimes)
 
 deadmin = min(threadpooldeadtimes)
 deadmax = max(threadpooldeadtimes)
 deadsum = sum(threadpooldeadtimes)
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
       .format(len(threadpooldeadtimes), deadmin, deadmax, deadsum,
               deadsum / len(threadpooldeadtimes),
-              deadsum / (len(threadids) * total_t ) * 100.0)
-print
+              deadsum / (len(threadids) * total_t ) * 100.0))
+print()
 
 #  All times in step.
-print "# All deadtimes:"
-print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
+print("# All deadtimes:")
+print("# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
+      .format("count", "minimum", "maximum", "sum", "mean", "percent"))
 alldeadtimes = []
 for i in threadids:
     deadtimes = []
@@ -256,18 +256,18 @@ for i in threadids:
     deadmin = min(deadtimes)
     deadmax = max(deadtimes)
     deadsum = sum(deadtimes)
-    print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
+    print("thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
           .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
+                  deadsum / len(deadtimes), deadsum / total_t * 100.0))
     alldeadtimes.extend(deadtimes)
 
 deadmin = min(alldeadtimes)
 deadmax = max(alldeadtimes)
 deadsum = sum(alldeadtimes)
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
+print("all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
       .format(len(alldeadtimes), deadmin, deadmax, deadsum,
               deadsum / len(alldeadtimes),
-              deadsum / (len(threadids) * total_t ) * 100.0)
-print
+              deadsum / (len(threadids) * total_t ) * 100.0))
+print()
 
 sys.exit(0)
diff --git a/tools/task_plots/plot_tasks.py b/tools/task_plots/plot_tasks.py
index 76128ac66af21cd5f6d9a7ab0546ed813bde4536..29a6470ed1b624727b013bb0ed70e194e06a42f7 100755
--- a/tools/task_plots/plot_tasks.py
+++ b/tools/task_plots/plot_tasks.py
@@ -159,9 +159,9 @@ for task in SUBTYPES:
 if args.verbose:
     print("#Selected colours:")
     for task in sorted(TASKCOLOURS.keys()):
-        print("# " + task + ": " + TASKCOLOURS[task])
+        print("# " + task + ": " + TASKCOLOURS[task])
     for task in sorted(SUBCOLOURS.keys()):
-        print("# " + task + ": " + SUBCOLOURS[task])
+        print("# " + task + ": " + SUBCOLOURS[task])
 
 #  Read input.
 data = pl.loadtxt( infile )
@@ -172,8 +172,8 @@ if full_step.size == 13:
     print("# MPI mode")
     mpimode = True
     if ranks == None:
-        ranks = range(int(max(data[:,0])) + 1)
-    print("# Number of ranks:", len(ranks))
+        ranks = list(range(int(max(data[:,0])) + 1))
+    print("# Number of ranks:", len(ranks))
     rankcol = 0
     threadscol = 1
     taskcol = 2
@@ -194,10 +194,10 @@ else:
 #  Get CPU_CLOCK to convert ticks into milliseconds.
 CPU_CLOCK = float(full_step[-1]) / 1000.0
 if args.verbose:
-    print("# CPU frequency:", CPU_CLOCK * 1000.0)
+    print("# CPU frequency:", CPU_CLOCK * 1000.0)
 
 nthread = int(max(data[:,threadscol])) + 1
-print("# Number of threads:", nthread)
+print("# Number of threads:", nthread)
 
 # Avoid start and end times of zero.
 sdata = data[data[:,ticcol] != 0]
@@ -224,24 +224,24 @@ if delta_t == 0:
         dt = toc_step - tic_step
         if dt > delta_t:
             delta_t = dt
-    print("# Data range: ", delta_t / CPU_CLOCK, "ms")
+    print("# Data range: ", delta_t / CPU_CLOCK, "ms")
 
 # Once more doing the real gather and plots this time.
 for rank in ranks:
-    print("# Processing rank: ", rank)
+    print("# Processing rank: ", rank)
     if mpimode:
         data = sdata[sdata[:,rankcol] == rank]
         full_step = data[0,:]
     tic_step = int(full_step[ticcol])
     toc_step = int(full_step[toccol])
-    print("# Min tic = ", tic_step)
+    print("# Min tic = ", tic_step)
     data = data[1:,:]
     typesseen = []
     nethread = 0
 
     #  Dummy image for ranks that have no tasks.
     if data.size == 0:
-        print("# Rank ", rank, " has no tasks")
+        print("# Rank ", rank, " has no tasks")
         fig = pl.figure()
         ax = fig.add_subplot(1,1,1)
         ax.set_xlim(-delta_t * 0.01 / CPU_CLOCK, delta_t * 1.01 / CPU_CLOCK)
@@ -355,7 +355,7 @@ for rank in ranks:
         ax.set_ylabel("Thread ID" )
     else:
         ax.set_ylabel("Thread ID * " + str(expand) )
-    ax.set_yticks(pl.array(range(nethread)), True)
+    ax.set_yticks(pl.array(list(range(nethread))), True)
 
     loc = plticker.MultipleLocator(base=expand)
     ax.yaxis.set_major_locator(loc)
@@ -367,6 +367,6 @@ for rank in ranks:
     else:
         outpng = outbase + ".png"
     pl.savefig(outpng, bbox_inches="tight")
-    print("Graphics done, output written to", outpng)
+    print("Graphics done, output written to", outpng)
 
 sys.exit(0)
diff --git a/tools/task_plots/plot_threadpool.py b/tools/task_plots/plot_threadpool.py
index e7553cb0a7375fb38196ae6f6d5413ae34dcc119..be96bb2f58a19f0c69d0c7158a9944032fdf8e5a 100755
--- a/tools/task_plots/plot_threadpool.py
+++ b/tools/task_plots/plot_threadpool.py
@@ -105,14 +105,14 @@ maxcolours = len(colours)
 
 #  Read header. First two lines.
 with open(infile) as infid:
-    head = [next(infid) for x in xrange(2)]
+    head = [next(infid) for x in range(2)]
 header = head[1][2:].strip()
 header = eval(header)
 nthread = int(header['num_threads']) + 1
 CPU_CLOCK = float(header['cpufreq']) / 1000.0
-print "Number of threads: ", nthread
+print("Number of threads: ", nthread)
 if args.verbose:
-    print "CPU frequency:", CPU_CLOCK * 1000.0
+    print("CPU frequency:", CPU_CLOCK * 1000.0)
 
 #  Read input.
 data = pl.genfromtxt(infile, dtype=None, delimiter=" ")
@@ -143,7 +143,7 @@ chunks = pl.array(chunks)
 mintic_step = min(tics)
 tic_step = mintic_step
 toc_step = max(tocs)
-print "# Min tic = ", mintic_step
+print("# Min tic = ", mintic_step)
 if mintic > 0:
     tic_step = mintic
 
@@ -153,7 +153,7 @@ if delta_t == 0:
     dt = toc_step - tic_step
     if dt > delta_t:
         delta_t = dt
-    print "Data range: ", delta_t / CPU_CLOCK, "ms"
+    print("Data range: ", delta_t / CPU_CLOCK, "ms")
 
 #  Once more doing the real gather and plots this time.
 start_t = float(tic_step)
@@ -163,7 +163,7 @@ end_t = (toc_step - start_t) / CPU_CLOCK
 
 #  Get all "task" names and assign colours.
 TASKTYPES = pl.unique(funcs)
-print TASKTYPES
+print(TASKTYPES)
 
 #  Set colours of task/subtype.
 TASKCOLOURS = {}
@@ -174,11 +174,11 @@ for task in TASKTYPES:
 
 #  For fiddling with colours...
 if args.verbose:
-    print "#Selected colours:"
+    print("#Selected colours:")
     for task in sorted(TASKCOLOURS.keys()):
-        print "# " + task + ": " + TASKCOLOURS[task]
+        print("# " + task + ": " + TASKCOLOURS[task])
     for task in sorted(SUBCOLOURS.keys()):
-        print "# " + task + ": " + SUBCOLOURS[task]
+        print("# " + task + ": " + SUBCOLOURS[task])
 
 tasks = {}
 tasks[-1] = []
@@ -265,7 +265,7 @@ if expand == 1:
     ax.set_ylabel("Thread ID", labelpad=0 )
 else:
     ax.set_ylabel("Thread ID * " + str(expand), labelpad=0 )
-ax.set_yticks(pl.array(range(nthread)), True)
+ax.set_yticks(pl.array(list(range(nthread))), True)
 
 loc = plticker.MultipleLocator(base=expand)
 ax.yaxis.set_major_locator(loc)
@@ -273,6 +273,6 @@ ax.grid(True, which='major', axis="y", linestyle="-")
 
 pl.show()
 pl.savefig(outpng)
-print "Graphics done, output written to", outpng
+print("Graphics done, output written to", outpng)
 
 sys.exit(0)