Commit cbdf2faf authored by Pedro Gonnet

make scheduler_addunlock a bit more competitive.

parent e03bb736
Merge request !176: Tasks cleanup
@@ -34,6 +34,7 @@
 #define lock_trylock(l) (pthread_spin_lock(l) != 0)
 #define lock_unlock(l) (pthread_spin_unlock(l) != 0)
+#define lock_unlock_blind(l) pthread_spin_unlock(l)
 #elif defined(PTHREAD_LOCK)
 #include <pthread.h>
 #define lock_type pthread_mutex_t
@@ -43,6 +44,7 @@
 #define lock_trylock(l) (pthread_mutex_trylock(l) != 0)
 #define lock_unlock(l) (pthread_mutex_unlock(l) != 0)
+#define lock_unlock_blind(l) pthread_mutex_unlock(l)
 #else
 #define lock_type volatile int
 #define lock_init(l) (*(l) = 0)
@@ -54,7 +54,7 @@
  * @param tb The #task that will be unlocked.
  */
-void scheduler_addunlock(struct scheduler *s, struct task *ta,
-                         struct task *tb) {
+void scheduler_addunlock_old(struct scheduler *s, struct task *ta,
+                             struct task *tb) {

   /* Lock the scheduler since re-allocating the unlocks is not
@@ -87,6 +87,37 @@ void scheduler_addunlock(struct scheduler *s, struct task *ta,
   if (lock_unlock(&s->lock) != 0) error("Unable to unlock scheduler.");
 }

+void scheduler_addunlock(struct scheduler *s, struct task *ta,
+                         struct task *tb) {
+
+  /* Get an index at which to store this unlock. */
+  const int ind = atomic_inc(&s->nr_unlocks);
+
+  /* Does the buffer need to be grown? */
+  if (ind == s->size_unlocks) {
+    struct task **unlocks_new;
+    int *unlock_ind_new;
+    const int size_unlocks_new = s->size_unlocks * 2;
+    if ((unlocks_new = (struct task **)malloc(
+             sizeof(struct task *) * size_unlocks_new)) == NULL ||
+        (unlock_ind_new = (int *)malloc(sizeof(int) * size_unlocks_new)) ==
+            NULL)
+      error("Failed to re-allocate unlocks.");
+    memcpy(unlocks_new, s->unlocks, sizeof(struct task *) * ind);
+    memcpy(unlock_ind_new, s->unlock_ind, sizeof(int) * ind);
+    free(s->unlocks);
+    free(s->unlock_ind);
+    s->unlocks = unlocks_new;
+    s->unlock_ind = unlock_ind_new;
+    s->size_unlocks = size_unlocks_new;
+  }
+
+  /* Wait for there to actually be space at my index. */
+  while (ind > s->size_unlocks);
+
+  /* Write the unlock to the scheduler. */
+  s->unlocks[ind] = tb;
+  s->unlock_ind[ind] = ta - s->tasks;
+}
+
 /**
  * @brief Split tasks that may be too large.
  *
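For context: the new scheduler_addunlock above drops the scheduler lock entirely. Each caller reserves a unique slot with an atomic fetch-and-increment on nr_unlocks; only the thread whose reserved index equals the current capacity reallocates the buffers, while threads that drew an index past the old capacity spin until size_unlocks is updated. Below is a minimal standalone sketch of that reserve-then-write pattern, not SWIFT code: it assumes the GCC/Clang __sync_fetch_and_add builtin in place of SWIFT's atomic_inc wrapper, and uses a fixed-capacity int buffer so the growth path is omitted.

#include <stdio.h>
#include <stdlib.h>

/* Shared append-only buffer (sketch; fixed capacity, no growth path). */
static int *buffer;
static volatile int nr_entries = 0;
static const int capacity = 1024;

/* Append a value without taking a lock: the atomic increment hands each
 * caller a unique index, so concurrent writers never touch the same slot. */
void append(int value) {
  /* Reserve a slot; __sync_fetch_and_add returns the pre-increment value. */
  const int ind = __sync_fetch_and_add(&nr_entries, 1);
  if (ind >= capacity) {
    fprintf(stderr, "buffer full\n");
    exit(EXIT_FAILURE);
  }
  /* The slot is exclusively ours, so a plain store is safe. */
  buffer[ind] = value;
}

int main(void) {
  buffer = malloc(sizeof(int) * capacity);
  for (int i = 0; i < 10; i++) append(i * i);
  for (int i = 0; i < nr_entries; i++) printf("%d\n", buffer[i]);
  free(buffer);
  return 0;
}

The part the commit adds on top of this sketch is the concurrent growth: the one thread that draws ind == size_unlocks doubles the arrays and swaps them in, and those swapped pointers are published through the volatile-qualified fields shown in the scheduler.h hunk below.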
@@ -748,6 +779,17 @@ void scheduler_set_unlocks(struct scheduler *s) {
     t->unlock_tasks = &s->unlocks[offsets[k]];
     for (int j = offsets[k]; j < offsets[k + 1]; j++) s->unlock_ind[j] = k;
   }

+  /* Verify that there are no duplicate unlocks. */
+  /* for (int k = 0; k < s->nr_tasks; k++) {
+    struct task *t = &s->tasks[k];
+    for (int i = 0; i < t->nr_unlock_tasks; i++) {
+      for (int j = i + 1; j < t->nr_unlock_tasks; j++) {
+        if (t->unlock_tasks[i] == t->unlock_tasks[j])
+          error("duplicate unlock!");
+      }
+    }
+  } */
+
   /* Clean up. */
   free(counts);
@@ -85,9 +85,9 @@ struct scheduler {
   int *tasks_ind;

   /* The task unlocks. */
-  struct task **unlocks;
-  int *unlock_ind;
-  int nr_unlocks, size_unlocks;
+  struct task **volatile unlocks;
+  int *volatile unlock_ind;
+  volatile int nr_unlocks, size_unlocks;

   /* Lock for this scheduler. */
   lock_type lock;
@@ -98,7 +98,7 @@ struct scheduler {

   /* The space associated with this scheduler. */
   struct space *space;

   /* Threadpool to use internally for mundane parallel work. */
   struct threadpool *threadpool;
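A note on the volatile placement in these declarations: written after the **, the qualifier binds to the pointer object itself rather than to the tasks it points at, so every access re-loads the pointer that a concurrent scheduler_addunlock may have just replaced during a resize; nr_unlocks and size_unlocks are likewise re-read in the spin loop. A two-line reminder of how the qualifier binds (illustrative only; the second declaration is not from the patch):

struct task **volatile unlocks;      /* volatile pointer: the pointer value is re-read on every access */
volatile struct task **not_in_patch; /* pointer to volatile data: the pointer itself may be cached */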