diff --git a/src/qsched.c b/src/qsched.c
index c3615f23ec7d8d7998bcfd5b5c1e811d99abe842..0692040e8eec9e8a28164ad74e47a24c0380bc8c 100644
--- a/src/qsched.c
+++ b/src/qsched.c
@@ -2513,34 +2513,39 @@ for(i = 0; i < count; i++)
             /* We have the current parents. */
             if(num_current == 0)
             {
+                /* If resource is non-local we need to recv it. */
-                if(s->res[getindex(t->locks[j],s)].node != s->rank){
+                if(res->node != s->rank){
-                    if(first_recv[getindex(t->locks[j],s)] == -1){
+                    if(first_recv[lock_index] == -1){
                         /* Find out the task data*/
-                        task_data[0] = s->res[getindex(t->locks[j],s)].node;
+                        task_data[0] = res->node;
                         task_data[1] = s->rank;
-                        task_data[2] = (int)(s->res[getindex(t->locks[j],s)].ID >> 32);
-                        task_data[3] = (int)(s->res[getindex(t->locks[j],s)].ID & 0xFFFFFFFF);
+                        task_data[2] = (int)(res->ID >> 32);
+                        task_data[3] = (int)(res->ID & 0xFFFFFFFF);
                         task_data[4] = sends_added;
                         /* Create the send task. */
-                        send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->locks[j],s)].size );
+                        send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , res->size );
                         /* The send task needs to lock the resource. */
-                        tsched_addlock( &ts, send_task_id, s->res[getindex(t->locks[j], s)].ID);
+                        tsched_addlock( &ts, send_task_id, res->ID);
                         /* Create the recv task*/
-                        recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->locks[j],s)].size );
+                        recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , res->size );
                         /* The recv task unlocks this task locally. */
                         tsched_addunlock( &ts, recv_task_id, t->id);
                         /* The recv task needs to lock the resource. */
-                        tsched_addlock( &ts, recv_task_id, s->res[getindex(t->locks[j], s)].ID);
+                        tsched_addlock( &ts, recv_task_id, res->ID);
+                        /* Store the recv task as the last task for this resource. */
+                        data_task[lock_index] = recv_task_id;
+                        send_task[lock_index] = send_task_id;
+                        first_recv[lock_index] = recv_task_id;
                         sends_added+=1;
                     }else{
-                        tsched_addunlock( &ts, first_recv[getindex(t->locks[j], s)], t->id);
+                        tsched_addunlock( &ts, first_recv[lock_index], t->id);
                     }
                 }
-                if(data_pos[getindex(t->locks[j], s)] <= i)
-                    data_pos[getindex(t->locks[j], s)] = i;
+                if(data_pos[lock_index] <= i)
+                    data_pos[lock_index] = i;
                 continue;
             }/* If no current parents.*/
@@ -2549,19 +2554,19 @@ for(i = 0; i < count; i++)
             if(num_skipped > 0)
             {
                 /* If data is out of date */
-                if(data_pos[getindex(t->locks[j], s)] < last_index)
+                if(data_pos[lock_index] < last_index)
                 {
                     /* Find out the task data*/
-                    task_data[0] = s->res[getindex(t->locks[j],s)].node;
+                    task_data[0] = res->node;
                     task_data[1] = s->rank;
-                    task_data[2] = (int)(s->res[getindex(t->locks[j],s)].ID >> 32);
-                    task_data[3] = (int)(s->res[getindex(t->locks[j],s)].ID & 0xFFFFFFFF);
+                    task_data[2] = (int)(res->ID >> 32);
+                    task_data[3] = (int)(res->ID & 0xFFFFFFFF);
                     task_data[4] = sends_added;
                     /* Create the send task. */
-                    send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->locks[j],s)].size );
+                    send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , res->size );
                     /* The send task needs to lock the resource. */
-                    tsched_addlock( &ts, send_task_id, s->res[getindex(t->locks[j], s)].ID);
+                    tsched_addlock( &ts, send_task_id, res->ID);
                     /* Each parent task is an "unlocker" of the send task. */
                    for(k = 0; k < num_current; k++)
@@ -2570,11 +2575,11 @@ for(i = 0; i < count; i++)
                    }
                    /* Create the recv task*/
-                    recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->locks[j],s)].size );
+                    recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , res->size );
                    /* The recv task unlocks this task locally. */
                    tsched_addunlock( &ts, recv_task_id, t->id);
                    /* The recv task needs to lock the resource. */
-                    tsched_addlock( &ts, recv_task_id, s->res[getindex(t->locks[j], s)].ID);
+                    tsched_addlock( &ts, recv_task_id, res->ID);
                    /* Each parent task executed locally is an unlocker of the recv task. */
                    for(k = 0; k < num_current; k++)
@@ -2586,14 +2591,14 @@ for(i = 0; i < count; i++)
                    }
                    /* Store the recv task as the last task for this resource. */
-                    data_task[getindex(t->locks[j], s)] = recv_task_id;
-                    send_task[getindex(t->locks[j], s)] = send_task_id;
+                    data_task[lock_index] = recv_task_id;
+                    send_task[lock_index] = send_task_id;
                    /* Update data_pos to the latest parent task in the top order. */
                    /* We know we have the data correct as of the latest parent task in the topological order. */
-                    if(data_pos[getindex(t->locks[j], s)] < last_index)
-                        data_pos[getindex(t->locks[j], s)] = last_index;
+                    if(data_pos[lock_index] < last_index)
+                        data_pos[lock_index] = last_index;
                    sends_added+=1;
@@ -2637,14 +2642,14 @@ for(i = 0; i < count; i++)
                    /* This task should be unlocked by the recv task. */
                    /* The data is here already, we just need this task to wait on the recv task.*/
-                    tsched_addunlock(&ts, data_task[getindex(t->locks[j], s)], t->id);
+                    tsched_addunlock(&ts, data_task[lock_index], t->id);
                }/* else data is not out of date.*/
            }else{
                /* We know we have the data correct as of the latest parent task in the topological order. */
-                if(data_pos[getindex(t->locks[j], s)] < last_index)
-                    data_pos[getindex(t->locks[j], s)] = last_index;
+                if(data_pos[lock_index] < last_index)
+                    data_pos[lock_index] = last_index;
            }/* If we skipped parents. */
        }/* j over locks. */
@@ -2653,7 +2658,10 @@ for(i = 0; i < count; i++)
        /* Loop over the used resources */
        for(j = 0; j < t->nr_uses; j++)
        {
-            if(s->res[getindex(t->uses[j],s)].node == s->rank)
+
+            int use_index = getindex(t->uses[j],s);
+            struct res *res = &s->res[use_index];
+            if(res->node == s->rank)
                continue;
            /* Find the parents that unlock this resource. */
            num_current = 0;
@@ -2668,8 +2676,8 @@ for(i = 0; i < count; i++)
                    if(t2->locks[l] == t->uses[j])
                    {
                        current_parents[num_current++] = parents[k];
-                        if(k > last_index)
-                            last_index = k;
+                        if(parents[k] > last_index)
+                            last_index = parents[k];
                        /* If we skipped the parent task, increment the number we skipped. */
                        if(t2->flags & task_flag_skip)
                            num_skipped++;
@@ -2683,8 +2691,8 @@ for(i = 0; i < count; i++)
                    {
                        found = 1;
                        current_parents[num_current++] = parents[k];
-                        if(k > last_index)
-                            last_index = k;
+                        if(parents[k] > last_index)
+                            last_index = parents[k];
                        /* If we skipped the parent task, increment the number we skipped. */
                        if(t2->flags & task_flag_skip)
                            num_skipped++;
@@ -2702,8 +2710,8 @@ for(i = 0; i < count; i++)
                    {
                        found = 1;
                        current_parents[num_current++] = parents[k];
-                        if(k > last_index)
-                            last_index = k;
+                        if(parents[k] > last_index)
+                            last_index = parents[k];
                        /* If we skipped the parent task, increment the number we skipped. */
                        if(t2->flags & task_flag_skip)
                            num_skipped++;
@@ -2718,28 +2726,32 @@ for(i = 0; i < count; i++)
            if(num_current == 0)
            {
                /* If resource is non-local we need to recv it. */
-                if(s->res[getindex(t->uses[j],s)].node != s->rank){
-                if(s->res[getindex(t->uses[j],s)].node != s->rank){
+                if(res->node != s->rank){
+                    if(first_recv[use_index] == -1){
                        /* Find out the task data*/
-                        task_data[0] = s->res[getindex(t->uses[j],s)].node;
+                        task_data[0] = res->node;
                        task_data[1] = s->rank;
-                        task_data[2] = (int)(s->res[getindex(t->uses[j],s)].ID >> 32);
-                        task_data[3] = (int)(s->res[getindex(t->uses[j],s)].ID & 0xFFFFFFFF);
+                        task_data[2] = (int)(res->ID >> 32);
+                        task_data[3] = (int)(res->ID & 0xFFFFFFFF);
                        task_data[4] = sends_added;
                        /* Create the send task. */
-                        send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->uses[j],s)].size );
+                        send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , res->size );
                        /* The send task needs to lock the resource. */
-                        tsched_addlock( &ts, send_task_id, s->res[getindex(t->uses[j], s)].ID);
+                        tsched_addlock( &ts, send_task_id, res->ID);
                        /* Create the recv task*/
-                        recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->uses[j],s)].size );
+                        recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , res->size );
                        /* The recv task unlocks this task locally. */
                        tsched_addunlock( &ts, recv_task_id, t->id);
                        /* The recv task needs to lock the resource. */
-                        tsched_addlock( &ts, recv_task_id, s->res[getindex(t->uses[j], s)].ID);
+                        tsched_addlock( &ts, recv_task_id, res->ID);
+                        /* Store the recv task as the last task for this resource. */
+                        data_task[use_index] = recv_task_id;
+                        send_task[use_index] = send_task_id;
+                        first_recv[use_index] = recv_task_id;
                        sends_added+=1;
                    }else{
-                        tsched_addunlock( &ts, first_recv[getindex(t->uses[j], s)], t->id);
+                        tsched_addunlock( &ts, first_recv[use_index], t->id);
                    }
                }
                continue;
@@ -2749,19 +2761,19 @@ for(i = 0; i < count; i++)
            if(num_skipped > 0)
            {
                /* If data is out of date */
-                if(data_pos[getindex(t->uses[j], s)] < last_index)
+                if(data_pos[use_index] < last_index)
                {
                    /* Find out the task data*/
-                    task_data[0] = s->res[getindex(t->uses[j],s)].node;
+                    task_data[0] = res->node;
                    task_data[1] = s->rank;
-                    task_data[2] = (int)(s->res[getindex(t->uses[j],s)].ID >> 32);
-                    task_data[3] = (int)(s->res[getindex(t->uses[j],s)].ID & 0xFFFFFFFF);
+                    task_data[2] = (int)(res->ID >> 32);
+                    task_data[3] = (int)(res->ID & 0xFFFFFFFF);
                    task_data[4] = sends_added;
                    /* Create the send task. */
-                    send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->uses[j],s)].size );
+                    send_task_id = tsched_addtask(&ts, task_type_send , 0 , task_data , 5 * sizeof(int) , res->size );
                    /* The send task needs to lock the resource. */
-                    tsched_addlock( &ts, send_task_id, s->res[getindex(t->uses[j], s)].ID);
+                    tsched_addlock( &ts, send_task_id, res->ID);
                    /* Each parent task is an "unlocker" of the send task. */
                    for(k = 0; k < num_current; k++)
@@ -2770,12 +2782,12 @@ for(i = 0; i < count; i++)
                    }
                    /* Create the recv task*/
-                    recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , s->res[getindex(t->uses[j],s)].size );
+                    recv_task_id = tsched_addtask(&ts, task_type_recv , 0 , task_data , 5 * sizeof(int) , res->size );
                    /* The recv task unlocks this task locally. */
                    tsched_addunlock( &ts, recv_task_id, t->id);
                    /* The recv task needs to lock the resource. */
-                    tsched_addlock( &ts, recv_task_id, s->res[getindex(t->uses[j], s)].ID);
+                    tsched_addlock( &ts, recv_task_id, res->ID);
                    /* Each parent task executed locally is an unlocker of the recv task. */
                    for(k = 0; k < num_current; k++)
@@ -2787,14 +2799,14 @@ for(i = 0; i < count; i++)
                    }
                    /* Store the recv task as the last task for this resource. */
-                    data_task[getindex(t->uses[j], s)] = recv_task_id;
-                    send_task[getindex(t->uses[j], s)] = send_task_id;
+                    data_task[use_index] = recv_task_id;
+                    send_task[use_index] = send_task_id;
                    /* Update data_pos to the latest parent task in the top order. */
                    /* We know we have the data correct as of the latest parent task in the topological order. */
-                    if(data_pos[getindex(t->uses[j], s)] < last_index)
-                        data_pos[getindex(t->uses[j], s)] = last_index;
+                    if(data_pos[use_index] < last_index)
+                        data_pos[use_index] = last_index;
                    sends_added+=1;
@@ -2816,7 +2828,7 @@ for(i = 0; i < count; i++)
                        }
                        if(!found)
                        {
-                            tsched_addunlocker(&ts, tid[current_parents[k]], data_task[getindex(t->uses[j], s)] );
+                            tsched_addunlocker(&ts, tid[current_parents[k]], data_task[use_index] );
                        }
                    }
                    /* All the parents of this task are completed before the send task can occur also!*/
@@ -2827,7 +2839,7 @@ for(i = 0; i < count; i++)
/*                    message("send_task = %lli", send_task[getindex(t->uses[j], s)]);
                    message("send_task & 0xFFFFFFFFF = %lli", send_task[getindex(t->uses[j], s)] & 0xFFFFFFFFF);
                    message("j=%i, i = %i, k = %i", j, i, k);*/
-                    struct task *temp = &ts.tasks[(send_task[getindex(t->uses[j], s)] & 0xFFFFFFFFF) - ts.id_count];
+                    struct task *temp = &ts.tasks[(send_task[use_index] & 0xFFFFFFFFF) - ts.id_count];
//                    for(l = 0; l < temp->nr_unlocks; l++)
                    for(l = 0; l < ts.count_unlockers; l++)
                    {
@@ -2852,14 +2864,14 @@ for(i = 0; i < count; i++)
                    /* This task should be unlocked by the recv task. */
                    /* The data is here already, we just need this task to wait on the recv task.*/
-                    tsched_addunlock(&ts, data_task[getindex(t->uses[j], s)], t->id);
+                    tsched_addunlock(&ts, data_task[use_index], t->id);
                }/* else data is not out of date.*/
            }else{
                /* We know we have the data correct as of the latest parent task in the topological order. */
-                if(data_pos[getindex(t->uses[j], s)] < last_index)
-                    data_pos[getindex(t->uses[j], s)] = last_index;
+                if(data_pos[use_index] < last_index)
+                    data_pos[use_index] = last_index;
            }/* If we skipped parents. */
        }/* j over used. */
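
The patch makes two kinds of change. First, the repeated s->res[getindex(t->locks[j],s)] and s->res[getindex(t->uses[j],s)] expressions are hoisted into a cached lock_index/use_index plus a struct res pointer, so each field access no longer re-runs the lookup. Second, the parent scans now track last_index in terms of parents[k], the parent's position in the topological order, instead of the loop counter k, which is what data_pos is actually compared against. The stand-alone C sketch below illustrates the hoisting pattern; struct res, struct sched, and getindex() here are simplified stand-ins invented for the example and do not match the real definitions in qsched.c.

#include <stdio.h>

/* Simplified stand-ins for the qsched types (illustrative only). */
struct res {
    int node;           /* rank that owns the resource */
    long long ID;       /* 64-bit resource handle */
    int size;           /* payload size in bytes */
};

struct sched {
    struct res *res;    /* resource table */
    int rank;           /* this node's rank */
};

/* Hypothetical lookup standing in for qsched's getindex(). */
static int getindex(long long handle, struct sched *s) {
    (void)s;
    return (int)(handle & 0xFF);   /* toy mapping, example only */
}

int main(void) {
    struct res table[256] = {0};
    table[3].node = 1;
    table[3].ID = ((long long)7 << 32) | 42;
    struct sched s = { table, 0 };
    long long lock_handle = 3;

    /* Before: every field access repeats the lookup. */
    int node_before = s.res[getindex(lock_handle, &s)].node;

    /* After: do the lookup once, then reuse the index and pointer,
     * mirroring the lock_index / use_index change in the diff. */
    int lock_index = getindex(lock_handle, &s);
    struct res *res = &s.res[lock_index];

    printf("node=%d (was %d), ID hi=%d, ID lo=%d, size=%d\n",
           res->node, node_before,
           (int)(res->ID >> 32), (int)(res->ID & 0xFFFFFFFF), res->size);
    return 0;
}

Besides avoiding redundant lookups, caching the pointer keeps the send/recv task construction readable: the task_data packing, tsched_addtask(), and tsched_addlock() calls all visibly operate on the same resource.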