diff --git a/src/lock.h b/src/lock.h
index 90e9f90602c120ddd10f4cdefb9b08cedbf45e0f..84bfa7bec77a5f23a69a7288c460a94a6857e20a 100644
--- a/src/lock.h
+++ b/src/lock.h
@@ -34,6 +34,7 @@
 #define lock_trylock(l) (pthread_spin_trylock(l) != 0)
 #define lock_unlock(l) (pthread_spin_unlock(l) != 0)
 #define lock_unlock_blind(l) pthread_spin_unlock(l)
+
 #elif defined(PTHREAD_LOCK)
 #include <pthread.h>
 #define lock_type pthread_mutex_t
@@ -43,6 +44,7 @@
 #define lock_trylock(l) (pthread_mutex_trylock(l) != 0)
 #define lock_unlock(l) (pthread_mutex_unlock(l) != 0)
 #define lock_unlock_blind(l) pthread_mutex_unlock(l)
+
 #else
 #define lock_type volatile int
 #define lock_init(l) (*(l) = 0)
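
For context on the #else branch above: it completes the same macro set with a plain volatile int and atomic operations instead of a pthread primitive. Below is a minimal, self-contained sketch of such a CAS-based fallback using GCC's __sync builtins; the names and exact definitions are illustrative, not the file's actual ones.

/* Hypothetical CAS-based fallback mirroring the volatile-int branch. */
typedef volatile int fallback_lock_t;

#define fallback_init(l) (*(l) = 0)

/* Spin until we swap 0 -> 1; returns 0 on success, like the pthread paths. */
static inline int fallback_lock(fallback_lock_t *l) {
  while (__sync_val_compare_and_swap(l, 0, 1) != 0)
    ;
  return 0;
}

/* Non-blocking attempt: non-zero means the lock was already held. */
static inline int fallback_trylock(fallback_lock_t *l) {
  return __sync_val_compare_and_swap(l, 0, 1);
}

/* Release by swapping 1 -> 0; non-zero signals an unlock of a free lock. */
static inline int fallback_unlock(fallback_lock_t *l) {
  return __sync_val_compare_and_swap(l, 1, 0) != 1;
}
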
diff --git a/src/scheduler.c b/src/scheduler.c
index f59ee894d5079e0d6b6f6b3c5eb077d73c1d463b..5fc2f6b4b13b054c21e6cdd8c4f5905165d86a60 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -54,7 +54,7 @@
  * @param tb The #task that will be unlocked.
  */
 
-void scheduler_addunlock(struct scheduler *s, struct task *ta,
-                         struct task *tb) {
+void scheduler_addunlock_old(struct scheduler *s, struct task *ta,
+                             struct task *tb) {
 
   /* Lock the scheduler since re-allocating the unlocks is not
@@ -87,6 +87,52 @@ void scheduler_addunlock(struct scheduler *s, struct task *ta,
   if (lock_unlock(&s->lock) != 0) error("Unable to unlock scheduler.");
 }
 
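+/**
+ * @brief Add an unlock_task to the given task, without taking the lock.
+ *
+ * @param s The #scheduler.
+ * @param ta The unlocking #task.
+ * @param tb The #task that will be unlocked.
+ */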
+void scheduler_addunlock(struct scheduler *s, struct task *ta,
+                         struct task *tb) {
+  /* Get an index at which to store this unlock. */
+  const int ind = atomic_inc(&s->nr_unlocks);
+
+  /* Does the buffer need to be grown? */
+  if (ind == s->size_unlocks) {
+    /* Allocate the doubled buffers. */
+    struct task **unlocks_new;
+    int *unlock_ind_new;
+    const int size_unlocks_new = s->size_unlocks * 2;
+    if ((unlocks_new = (struct task **)malloc(
+             sizeof(struct task *) * size_unlocks_new)) == NULL ||
+        (unlock_ind_new = (int *)malloc(sizeof(int) * size_unlocks_new)) ==
+            NULL)
+      error("Failed to re-allocate unlocks.");
+
+    /* Copy the old buffers over and swap the pointers. */
+    memcpy(unlocks_new, s->unlocks, sizeof(struct task *) * ind);
+    memcpy(unlock_ind_new, s->unlock_ind, sizeof(int) * ind);
+    free(s->unlocks);
+    free(s->unlock_ind);
+    s->unlocks = unlocks_new;
+    s->unlock_ind = unlock_ind_new;
+
+    /* Publish the new size last, releasing any writers spinning below. */
+    s->size_unlocks = size_unlocks_new;
+  }
+
+  /* Wait for there to actually be space at my index; relies on
+     size_unlocks being volatile so that the load is re-issued. */
+  while (ind > s->size_unlocks)
+    ;
+
+  /* Write the unlock to the scheduler. */
+  s->unlocks[ind] = tb;
+  s->unlock_ind[ind] = ta - s->tasks;
+}
+
 /**
  * @brief Split tasks that may be too large.
  *
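
The new scheduler_addunlock() above replaces the lock-protected append of scheduler_addunlock_old() with a reserve-then-write pattern: an atomic increment hands each caller a unique slot, so concurrent callers never contend on a lock. Below is a minimal, self-contained sketch of that core pattern using C11 <stdatomic.h> and a fixed-capacity buffer; the growing step is the extra machinery the patch adds on top, and all names here are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CAPACITY 1024

static int buffer[CAPACITY];
static atomic_int next_index;

/* Reserve a unique slot with an atomic increment, then fill it in.
   No lock is needed: no two callers ever receive the same index. */
static int append(int value) {
  const int ind = atomic_fetch_add(&next_index, 1);
  if (ind >= CAPACITY) return -1; /* full; the patch grows the buffer here */
  buffer[ind] = value;
  return ind;
}

static void *worker(void *arg) {
  const int base = (int)(intptr_t)arg * 100;
  for (int k = 0; k < 100; k++) append(base + k);
  return NULL;
}

int main(void) {
  pthread_t threads[4];
  for (int i = 0; i < 4; i++)
    pthread_create(&threads[i], NULL, worker, (void *)(intptr_t)i);
  for (int i = 0; i < 4; i++) pthread_join(threads[i], NULL);
  printf("reserved %d slots\n", atomic_load(&next_index));
  return 0;
}

One caveat worth flagging for review: in the patch, the thread that grows the buffer memcpy()s the old arrays while other writers may still be filling slots below ind, and the volatile qualifiers added in scheduler.h only force the spin-wait to re-read size_unlocks; they do not order those in-flight writes.
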
@@ -748,6 +794,18 @@ void scheduler_set_unlocks(struct scheduler *s) {
     t->unlock_tasks = &s->unlocks[offsets[k]];
     for (int j = offsets[k]; j < offsets[k + 1]; j++) s->unlock_ind[j] = k;
   }
+
+  /* Expensive debugging check, hence left commented out: verify that there
+     are no duplicate unlocks. */
+  /* for (int k = 0; k < s->nr_tasks; k++) {
+    struct task *t = &s->tasks[k];
+    for (int i = 0; i < t->nr_unlock_tasks; i++) {
+      for (int j = i + 1; j < t->nr_unlock_tasks; j++) {
+        if (t->unlock_tasks[i] == t->unlock_tasks[j])
+          error("duplicate unlock!");
+      }
+    }
+  } */
 
   /* Clean up. */
   free(counts);
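
The hunk above relies on the counts and offsets arrays built earlier in scheduler_set_unlocks(): a counting sort that groups the flat unlocks array by owning task, so each task's unlock_tasks can point at one contiguous slice. A short, self-contained sketch of that counts -> prefix-sum -> scatter shape, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Group entries by owner with a counting sort. */
static void group_by_owner(const int *owner, int nr_entries, int nr_tasks) {
  /* Count the entries belonging to each task. */
  int *counts = (int *)calloc(nr_tasks, sizeof(int));
  for (int k = 0; k < nr_entries; k++) counts[owner[k]]++;

  /* Exclusive prefix sum: offsets[k] is the first slot for task k. */
  int *offsets = (int *)malloc(sizeof(int) * (nr_tasks + 1));
  offsets[0] = 0;
  for (int k = 0; k < nr_tasks; k++) offsets[k + 1] = offsets[k] + counts[k];

  /* Scatter: place each entry in its task's slice, bumping a cursor. */
  int *sorted = (int *)malloc(sizeof(int) * nr_entries);
  int *cursor = (int *)calloc(nr_tasks, sizeof(int));
  for (int k = 0; k < nr_entries; k++)
    sorted[offsets[owner[k]] + cursor[owner[k]]++] = k;

  /* Task k's entries now occupy sorted[offsets[k] .. offsets[k+1]). */
  for (int k = 0; k < nr_tasks; k++)
    printf("task %d: %d entries\n", k, offsets[k + 1] - offsets[k]);

  free(counts);
  free(offsets);
  free(sorted);
  free(cursor);
}

int main(void) {
  const int owner[] = {2, 0, 2, 1, 0, 2};
  group_by_owner(owner, 6, 3);
  return 0;
}
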
diff --git a/src/scheduler.h b/src/scheduler.h
index ee352f6ae51be14a227063bbb0e30c981b9cc364..4352ec0447327ecdb88288efb4cff75be5e76517 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -85,9 +85,9 @@ struct scheduler {
   int *tasks_ind;
 
   /* The task unlocks. */
-  struct task **unlocks;
-  int *unlock_ind;
-  int nr_unlocks, size_unlocks;
+  struct task **volatile unlocks;
+  int *volatile unlock_ind;
+  volatile int nr_unlocks, size_unlocks;
 
   /* Lock for this scheduler. */
   lock_type lock;
@@ -98,7 +98,7 @@ struct scheduler {
 
   /* The space associated with this scheduler. */
   struct space *space;
-  
+
   /* Threadpool to use internally for mundane parallel work. */
   struct threadpool *threadpool;
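
A note on the scheduler.h hunk above: the placement of volatile is deliberate and easy to misread. The declarations below (illustrative, not from the file) show where the qualifier binds; it is the pointer variables and counters themselves that must be re-read by the spinning writers in scheduler_addunlock(), not the task data.

struct task **volatile unlocks;  /* the pointer variable is volatile: every
                                    read of s->unlocks is re-issued */
struct task *volatile *elems;    /* would instead qualify the stored task
                                    pointers, i.e. the array elements */
volatile struct task **objs;     /* would instead qualify the task
                                    structures being pointed at */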