Commit 426b27e1 authored by Pedro Gonnet

New strategy: sched_done may now return a ready and locked task.


Former-commit-id: f7971b5a2018685fdfe2ddd57c5cd82ce04e51b3
parent 2f811cd4
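
In outline: rather than always re-queueing a finished task's dependents, scheduler_done may now hand one of them, already locked and ready, straight back to the calling runner. A minimal sketch of the resulting runner loop, assuming the sched and r variables from runner_main in the diff below; the task-execution switch is elided:

    /* Sketch of the new runner loop: only fall back to the queues when
       scheduler_done() did not chain a next task. */
    struct task *t = NULL;
    while ( 1 ) {

        /* If there's no chained task, try to get a new one. */
        if ( t == NULL ) {
            t = scheduler_gettask( sched , r->qid );
            if ( t == NULL )
                break;
            }

        /* ... execute the task (switch on t->type) ... */

        /* Resolve dependencies; may return a ready, locked successor. */
        t = scheduler_done( sched , t );

        }

This keeps a runner working on tasks chained through cells it already owns, without a round-trip through the queues.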
@@ -676,7 +676,7 @@ void engine_step ( struct engine *e ) {

     /* Check if all the kick1 threads have executed. */
     for ( k = 0 ; k < e->sched.nr_tasks ; k++ )
         if ( e->sched.tasks[k].type == task_type_kick1 &&
-             e->sched.tasks[k].tic == 0 )
+             e->sched.tasks[k].toc == 0 )
             error( "Not all kick1 tasks completed." );

     // for(k=0; k<10; ++k)
......
@@ -87,16 +87,6 @@ void queue_insert ( struct queue *q , struct task *t ) {
             }
         else
             break;
-    /* for ( int k = q->count - 1 ; k > 0 && q->tasks[ q->tid[k-1] ].weight < q->tasks[ q->tid[k] ].weight ; k-- ) {
-        int temp = q->tid[k];
-        q->tid[k] = q->tid[k-1];
-        q->tid[k-1] = temp;
-        } */
-
-    /* Verify queue consistency. */
-    /* for ( int k = 1 ; k < q->count ; k++ )
-        if ( q->tasks[ q->tid[(k-1)/2] ].weight < q->tasks[ q->tid[k] ].weight )
-            error( "Queue not heaped." ); */

     /* Unlock the queue. */
     if ( lock_unlock( &q->lock ) != 0 )
@@ -130,8 +120,8 @@ void queue_init ( struct queue *q , struct task *tasks ) {
         error( "Failed to init queue lock." );

     }

/**
 * @brief Get a task free of dependencies and conflicts.
 *
@@ -142,10 +132,9 @@ void queue_init ( struct queue *q , struct task *tasks ) {
struct task *queue_gettask ( struct queue *q , int qid , int blocking ) {

-    int k, temp, qcount, *qtid, type;
+    int k, temp, qcount, *qtid;
     lock_type *qlock = &q->lock;
     struct task *qtasks, *res = NULL;
-    struct cell *ci, *cj;

     /* If there are no tasks, leave immediately. */
     if ( q->count == 0 )
@@ -168,35 +157,15 @@ struct task *queue_gettask ( struct queue *q , int qid , int blocking ) {

         /* Put a finger on the task. */
         res = &qtasks[ qtid[k] ];
-        ci = res->ci;
-        cj = res->cj;
-        type = res->type;

         /* Is this task blocked? */
         if ( res->wait )
-            continue;
+            error( "Enqueued waiting task." );

-        /* Try to lock ci. */
-        if ( type == task_type_self ||
-             type == task_type_sort ||
-             (type == task_type_sub && cj == NULL) ) {
-            if ( cell_locktree( ci ) != 0 )
-                continue;
-            }
-        else if ( type == task_type_pair || (type == task_type_sub && cj != NULL) ) {
-            if ( ci->hold || cj->hold || ci->wait || cj->wait )
-                continue;
-            if ( cell_locktree( ci ) != 0 )
-                continue;
-            if ( cell_locktree( cj ) != 0 ) {
-                cell_unlocktree( ci );
-                continue;
-                }
-            }
+        /* Try to lock the task and exit if successful. */
+        if ( task_lock( res ) )
+            break;

-        /* If we made it this far, we're safe. */
-        break;

         } /* loop over the task IDs. */

     /* Did we get a task? */
@@ -205,11 +174,6 @@ struct task *queue_gettask ( struct queue *q , int qid , int blocking ) {

         /* Another one bites the dust. */
         qcount = q->count -= 1;

-        /* Own the cells involved. */
-        /* ci->super->owner = qid;
-        if ( cj != NULL )
-            cj->super->owner = qid; */

         /* Swap this task with the last task and re-heap. */
         if ( k < qcount ) {
             qtid[ k ] = qtid[ qcount ];
@@ -233,18 +197,6 @@ struct task *queue_gettask ( struct queue *q , int qid , int blocking ) {
                     break;
                 }
             }

-        /* qtid[ k ] = qtid[ qcount ];
-        while ( k < qcount-1 && qtasks[ qtid[k+1] ].weight > qtasks[ qtid[k] ].weight ) {
-            temp = qtid[k];
-            qtid[k] = qtid[k+1];
-            qtid[k+1] = temp;
-            k += 1;
-            } */
-
-        /* Verify queue consistency. */
-        /* for ( k = 1 ; k < q->count ; k++ )
-            if ( q->tasks[ q->tid[(k-1)/2] ].weight < q->tasks[ q->tid[k] ].weight )
-                error( "Queue not heaped." ); */

         }
     else
......
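
Context for the queue changes above: q->tid[] is kept as a binary max-heap on task weight (the deleted debug block checked that each entry's parent at (k-1)/2 outweighs it), and extraction swaps the taken entry with the last one before re-heaping. A hedged sketch of that sift-down, reusing the queue's field names; queue_siftdown itself is a hypothetical helper, not a function in this commit:

    /* Restore the max-heap property below entry k after a swap
       (assumes q->tid[] indexes q->tasks[], keyed on .weight). */
    void queue_siftdown ( struct queue *q , int k ) {
        int *qtid = q->tid;
        struct task *qtasks = q->tasks;
        while ( 1 ) {
            int i = 2*k + 1;                       /* left child */
            if ( i >= q->count )
                break;
            /* Pick the heavier of the two children. */
            if ( i+1 < q->count &&
                 qtasks[ qtid[i+1] ].weight > qtasks[ qtid[i] ].weight )
                i += 1;
            /* Done once the parent outweighs it. */
            if ( qtasks[ qtid[k] ].weight >= qtasks[ qtid[i] ].weight )
                break;
            int temp = qtid[k];
            qtid[k] = qtid[i];
            qtid[i] = temp;
            k = i;
            }
        }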
@@ -776,7 +776,7 @@ void *runner_main ( void *data ) {

     struct runner *r = (struct runner *)data;
     struct engine *e = r->e;
     struct scheduler *sched = &e->sched;
-    struct task *t;
+    struct task *t = NULL;
     struct cell *ci, *cj;

     /* Main loop. */
@@ -788,15 +788,20 @@ void *runner_main ( void *data ) {

         /* Loop while there are tasks... */
         while ( 1 ) {

-            /* Get a task, how and from where depends on the policy. */
-            TIMER_TIC
-            t = scheduler_gettask( sched , r->qid );
-            TIMER_TOC(timer_gettask);
-
-            /* Did I get anything? */
-            if ( t == NULL )
-                break;
+            /* If there's no old task, try to get a new one. */
+            if ( t == NULL ) {
+
+                /* Get the task. */
+                TIMER_TIC
+                t = scheduler_gettask( sched , r->qid );
+                TIMER_TOC(timer_gettask);
+
+                /* Did I get anything? */
+                if ( t == NULL )
+                    break;
+
+                }

             /* Get the cells. */
             ci = t->ci;
             cj = t->cj;
@@ -855,8 +860,8 @@ void *runner_main ( void *data ) {
                     error( "Unknown task type." );
                 }

-            /* We're done with this task. */
-            scheduler_done( sched , t );
+            /* We're done with this task, see if we get a next one. */
+            t = scheduler_done( sched , t );

             } /* main loop. */
......
@@ -587,16 +587,16 @@ void scheduler_reweight ( struct scheduler *s ) {
                     t->weight += __builtin_popcount( t->flags ) * t->ci->count * ( sizeof(int)*8 - __builtin_clz( t->ci->count ) );
                 break;
             case task_type_self:
-                t->weight += 2 * t->ci->count * t->ci->count;
+                t->weight += 10 * t->ci->count * t->ci->count;
                 break;
             case task_type_pair:
-                t->weight += 2 * sid_scale[ t->flags ] * t->ci->count * t->cj->count;
+                t->weight += 10 * t->ci->count * t->cj->count * sid_scale[ t->flags ];
                 break;
             case task_type_sub:
                 if ( t->cj != NULL )
-                    t->weight += 2 * sid_scale[ t->flags ] * t->ci->count * t->cj->count;
+                    t->weight += 10 * t->ci->count * t->cj->count * sid_scale[ t->flags ];
                 else
-                    t->weight += 2 * t->ci->count * t->ci->count;
+                    t->weight += 10 * t->ci->count * t->ci->count;
                 break;
             case task_type_ghost:
                 if ( t->ci == t->ci->super )
@@ -669,8 +669,13 @@ void scheduler_enqueue ( struct scheduler *s , struct task *t ) {
         return;

     /* If this is an implicit task, just pretend it's done. */
-    if ( t->implicit )
-        scheduler_done( s , t );
+    if ( t->implicit ) {
+        for ( int j = 0 ; j < t->nr_unlock_tasks ; j++ ) {
+            struct task *t2 = t->unlock_tasks[j];
+            if ( atomic_dec( &t2->wait ) == 1 && !t2->skip )
+                scheduler_enqueue( s , t2 );
+            }
+        }

     /* Otherwise, look for a suitable queue. */
     else {
@@ -697,9 +702,6 @@ void scheduler_enqueue ( struct scheduler *s , struct task *t ) {

         /* If no previous owner, find the shortest queue. */
         if ( qid < 0 )
             qid = rand() % s->nr_queues;
-        /* for ( qid = 0 , int k = 1 ; k < s->nr_queues ; k++ )
-            if ( s->queues[k].count < s->queues[qid].count )
-                qid = k; */

         /* Increase the waiting counter. */
         atomic_inc( &s->waiting );
@@ -719,7 +721,7 @@
 * @param t The finished #task.
 */

-void scheduler_done ( struct scheduler *s , struct task *t ) {
+void scheduler_done_old ( struct scheduler *s , struct task *t ) {

    int k, res;
    struct task *t2;
@@ -761,6 +763,63 @@ void scheduler_done ( struct scheduler *s , struct task *t ) {

    }

+/**
+ * @brief Take care of a task's dependencies.
+ *
+ * @param s The #scheduler.
+ * @param t The finished #task.
+ *
+ * @return A pointer to the next task, if a suitable one has
+ *         been identified.
+ */
+struct task *scheduler_done ( struct scheduler *s , struct task *t ) {
+
+    int k, res, oid = t->ci->super->owner;
+    struct task *t2, *next = NULL;
+
+    /* Release whatever locks this task held. */
+    if ( !t->implicit )
+        task_unlock( t );
+
+    /* Loop through the dependencies and add them to a queue if
+       they are ready. */
+    for ( k = 0 ; k < t->nr_unlock_tasks ; k++ ) {
+        t2 = t->unlock_tasks[k];
+        if ( ( res = atomic_dec( &t2->wait ) ) < 1 )
+            error( "Negative wait!" );
+        if ( res == 1 && !t2->skip ) {
+            if ( !t2->implicit &&
+                 t2->ci->super->owner == oid &&
+                 ( next == NULL || t2->weight > next->weight ) &&
+                 task_lock( t2 ) ) {
+                if ( next != NULL ) {
+                    task_unlock( next );
+                    scheduler_enqueue( s , next );
+                    }
+                next = t2;
+                }
+            else
+                scheduler_enqueue( s , t2 );
+            }
+        }
+
+    /* Task definitely done. */
+    if ( !t->implicit ) {
+        t->toc = getticks();
+        pthread_mutex_lock( &s->sleep_mutex );
+        if ( next == NULL )
+            atomic_dec( &s->waiting );
+        pthread_cond_broadcast( &s->sleep_cond );
+        pthread_mutex_unlock( &s->sleep_mutex );
+        }
+
+    /* Return the next best task. */
+    return next;
+
+    }
+
/**
 * @brief Get a task, preferably from the given queue.
 *
......
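
A note on the wait counters used by the new scheduler_done: the res == 1 test only makes sense if atomic_dec returns the counter's value before decrementing, i.e. a dependent becomes ready exactly when its wait count drops from 1 to 0. With GCC-style builtins (an assumption; the real macro lives in atomic.h, which this commit does not touch) that would be:

    /* Assumed semantics of atomic_dec: fetch, then decrement. */
    #define atomic_dec( v ) __sync_fetch_and_sub( v , 1 )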
@@ -78,5 +78,5 @@ struct task *scheduler_addtask ( struct scheduler *s , int type , int subtype ,
void scheduler_splittasks ( struct scheduler *s );
void scheduler_map_mkghosts ( struct cell *c , void *data );
void scheduler_map_mkkick1 ( struct cell *c , void *data );
-void scheduler_done ( struct scheduler *s , struct task *t );
+struct task *scheduler_done ( struct scheduler *s , struct task *t );
@@ -35,6 +35,7 @@
#include "cycle.h"
#include "atomic.h"
#include "lock.h"
+#include "cell.h"
#include "task.h"
#include "error.h"
@@ -42,6 +43,68 @@
const char *taskID_names[task_type_count] = { "none" , "sort" , "self" , "pair" , "sub" , "ghost" , "kick1" , "kick2" };

+/**
+ * @brief Unlock the cell held by this task.
+ *
+ * @param t The #task.
+ */
+void task_unlock ( struct task *t ) {
+
+    /* Act based on task type. */
+    switch ( t->type ) {
+        case task_type_self:
+        case task_type_sort:
+            cell_unlocktree( t->ci );
+            break;
+        case task_type_pair:
+        case task_type_sub:
+            cell_unlocktree( t->ci );
+            if ( t->cj != NULL )
+                cell_unlocktree( t->cj );
+            break;
+        }
+    }
+
+/**
+ * @brief Try to lock the cells associated with this task.
+ *
+ * @param t The #task.
+ */
+int task_lock ( struct task *t ) {
+
+    int type = t->type;
+    struct cell *ci = t->ci, *cj = t->cj;
+
+    /* Unary lock? */
+    if ( type == task_type_self ||
+         type == task_type_sort ||
+         (type == task_type_sub && cj == NULL) ) {
+        if ( cell_locktree( ci ) != 0 )
+            return 0;
+        }
+
+    /* Otherwise, binary lock. */
+    else if ( type == task_type_pair || (type == task_type_sub && cj != NULL) ) {
+        if ( ci->hold || cj->hold || ci->wait || cj->wait )
+            return 0;
+        if ( cell_locktree( ci ) != 0 )
+            return 0;
+        if ( cell_locktree( cj ) != 0 ) {
+            cell_unlocktree( ci );
+            return 0;
+            }
+        }
+
+    /* If we made it this far, we've got a lock. */
+    return 1;
+    }
+
/**
 * @brief Remove all unlocks to tasks that are of the given type.
 *
......
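
The new task_lock/task_unlock pair factors the cell try-locking out of queue_gettask: a caller never blocks on busy cells, it simply fails and moves on, which is what lets both queue_gettask and scheduler_done attempt a task speculatively. A hedged usage sketch; run_if_free is hypothetical, not part of this commit:

    #include "task.h"   /* struct task , task_lock() , task_unlock() */

    /* Try to run a task if its cell trees can be locked right now. */
    int run_if_free ( struct task *t ) {
        if ( !task_lock( t ) )
            return 0;            /* cells busy; caller tries another task */
        /* ... execute the task on t->ci (and t->cj, if any) ... */
        task_unlock( t );
        return 1;
        }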
@@ -72,3 +72,5 @@ void task_rmunlock( struct task *ta , struct task *tb );
void task_rmunlock_blind( struct task *ta , struct task *tb );
void task_cleanunlock ( struct task *t , int type );
void task_addunlock( struct task *ta , struct task *tb );
+void task_unlock ( struct task *t );
+int task_lock ( struct task *t );