Compare Revisions: Rev 38 → Rev 39

/shark/tags/rel_0_3/kernel/conditio.c
@@ -20,11 +20,11 @@
 
 /**
  ------------
- CVS : $Id: conditio.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
+ CVS : $Id: conditio.c,v 1.3 2003-01-07 17:07:49 pj Exp $
 
 File: $File$
- Revision: $Revision: 1.1.1.1 $
- Last update: $Date: 2002-03-29 14:12:51 $
+ Revision: $Revision: 1.3 $
+ Last update: $Date: 2003-01-07 17:07:49 $
 ------------
 
 This file contains the condition variables handling functions.
@@ -59,6 +59,7 @@
 #include <kernel/var.h>
 #include <kernel/func.h>
 #include <errno.h>
+#include <kernel/iqueue.h>
 
 /*---------------------------------------------------------------------*/
 /* Condition variables                                                 */
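
The common thread of this revision is already visible in the new include: the raw q_* PID-queue calls, which used NIL as the empty sentinel, are replaced by the IQUEUE abstraction. The following prototypes are a sketch reconstructed from the call sites in this diff, not the authoritative declarations (those live in <kernel/iqueue.h>):

/* Assumed iqueue interface, inferred from the call sites below. */
void iq_init(IQUEUE *q, IQUEUE *share, int flags); /* replaces "q = NIL" initialization */
int  iq_isempty(IQUEUE *q);                        /* replaces "q == NIL" emptiness tests */
PID  iq_getfirst(IQUEUE *q);                       /* pops the first waiting task */
void iq_extract(PID p, IQUEUE *q);                 /* removes a specific task */
void iq_priority_insert(PID p, IQUEUE *q);         /* priority-ordered enqueue */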
@@ -76,13 +77,13 @@
   /* if the task is waiting on a condition variable, we have to extract it
      from the waiters queue, then set the KILL_REQUEST flag, and reinsert
      the task into the ready queue so it can reacquire the mutex and die */
-  q_extract(i,&proc_table[i].cond_waiting->waiters);
-  if (proc_table[i].cond_waiting->waiters == NIL)
+  iq_extract(i,&proc_table[i].cond_waiting->waiters);
+  if (iq_isempty(&proc_table[i].cond_waiting->waiters))
     proc_table[i].cond_waiting->used_for_waiting = NULL;
   proc_table[i].cond_waiting = NULL;
 
   l = proc_table[i].task_level;
-  level_table[l]->task_insert(l,i);
+  level_table[l]->public_unblock(l,i);
   /* then, the kill_request flag is set, and when the task is rescheduled
      it autokills itself... */
 
@@ -102,7 +103,8 @@
     register_cancellation_point(condition_cancellation_point, NULL);
   }
 
-  cond->waiters = NIL;
+  iq_init (&cond->waiters, &freedesc, 0);
+
   cond->used_for_waiting = NULL;
 
   return 0;
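
Caller-visible behavior is unchanged: cond_init() still returns 0, and cond_destroy() still refuses to destroy a condition that has blocked tasks. A minimal usage sketch, assuming the usual SHARK calling convention ("c" is an illustrative name):

/* Hypothetical usage; error handling is illustrative. */
static cond_t c;

void setup(void)
{
  cond_init(&c);                 /* the waiters IQUEUE is set up via iq_init() */
}

void teardown(void)
{
  if (cond_destroy(&c) == EBUSY) {
    /* tasks are still blocked on c: its waiters IQUEUE is not empty */
  }
}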
@@ -110,7 +112,7 @@
 
 int cond_destroy(cond_t *cond)
 {
-  if (cond->waiters != NIL)
+  if (!iq_isempty(&cond->waiters))
     return (EBUSY);
 
   return 0;
@@ -123,11 +125,11 @@
 
   proc_table[exec_shadow].context = kern_context_save();
 
-  if (cond->waiters != NIL) {
-    p = q_getfirst(&cond->waiters);
+  if (!iq_isempty(&cond->waiters)) {
+    p = iq_getfirst(&cond->waiters);
 
     l = proc_table[p].task_level;
-    level_table[l]->task_insert(l,p);
+    level_table[l]->public_unblock(l,p);
 
     scheduler();
   }
@@ -143,13 +145,13 @@
 
   proc_table[exec_shadow].context = kern_context_save();
 
-  if (cond->waiters != NIL) {
+  if (!iq_isempty(&cond->waiters)) {
     do {
-      p = q_getfirst(&cond->waiters);
+      p = iq_getfirst(&cond->waiters);
       l = proc_table[p].task_level;
-      level_table[l]->task_insert(l,p);
-    } while(cond->waiters != NIL);
+      level_table[l]->public_unblock(l,p);
+    } while(!iq_isempty(&cond->waiters));
 
     scheduler();
   }
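
Because the viewer interleaves old and new lines, the resulting control flow is easier to read reassembled. Taken verbatim from the + sides of the two hunks above, cond_signal() wakes one waiter and cond_broadcast() drains the queue:

/* cond_signal: wake the first waiter, if any. */
if (!iq_isempty(&cond->waiters)) {
  p = iq_getfirst(&cond->waiters);

  l = proc_table[p].task_level;
  level_table[l]->public_unblock(l,p);

  scheduler();
}

/* cond_broadcast: unblock every waiter before rescheduling. */
if (!iq_isempty(&cond->waiters)) {
  do {
    p = iq_getfirst(&cond->waiters);
    l = proc_table[p].task_level;
    level_table[l]->public_unblock(l,p);
  } while (!iq_isempty(&cond->waiters));

  scheduler();
}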
@@ -160,8 +162,6 @@
 int cond_wait(cond_t *cond, mutex_t *mutex)
 {
   LEVEL l;
-  struct timespec ty;
-  TIME tx;
 
   /* Why did I use task_nopreempt()?... because we have to unlock the mutex,
      and we can't call mutex_unlock after kern_context_save (the unlock
@@ -198,23 +198,14 @@
   /* now, we really block the task... */
   proc_table[exec_shadow].context = kern_context_save();
 
-  /* SAME AS SCHEDULER... manage the capacity event and the load_info */
-  ll_gettime(TIME_EXACT, &schedule_time);
-  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
-  tx = TIMESPEC2USEC(&ty);
-  proc_table[exec_shadow].avail_time -= tx;
-  jet_update_slice(tx);
-  if (cap_timer != NIL) {
-    event_delete(cap_timer);
-    cap_timer = NIL;
-  }
+  kern_epilogue_macro();
 
   l = proc_table[exec_shadow].task_level;
-  level_table[l]->task_extract(l,exec_shadow);
+  level_table[l]->public_block(l,exec_shadow);
 
   /* we insert the task in the condition queue */
   proc_table[exec_shadow].status = WAIT_COND;
-  q_insert(exec_shadow,&cond->waiters);
+  iq_priority_insert(exec_shadow,&cond->waiters);
 
   /* then, we set into the processor descriptor the condition on which
      the task is blocked... (if the task is killed while it is waiting
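
The ten deleted lines were the generic end-of-primitive bookkeeping: update the capacity accounting and the JET slice, then cancel any pending capacity timer. The new kern_epilogue_macro() presumably factors exactly this sequence out so every blocking primitive shares one copy, which is also why the ty and tx locals disappear from cond_wait() and cond_timedwait(). A sketch of a plausible expansion, assuming the macro mirrors the code it replaces:

/* Hypothetical expansion of kern_epilogue_macro(), assuming it simply
   factors out the block deleted above (names as in the old code). */
#define kern_epilogue_macro()                              \
do {                                                       \
  struct timespec ty;                                      \
  TIME tx;                                                 \
  /* manage the capacity event and the load_info */        \
  ll_gettime(TIME_EXACT, &schedule_time);                  \
  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);         \
  tx = TIMESPEC2USEC(&ty);                                 \
  proc_table[exec_shadow].avail_time -= tx;                \
  jet_update_slice(tx);                                    \
  if (cap_timer != NIL) {                                  \
    event_delete(cap_timer);                               \
    cap_timer = NIL;                                       \
  }                                                        \
} while (0)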
@@ -242,7 +233,7 @@
   if (proc_table[exec_shadow].cond_waiting != NULL) {
     proc_table[exec_shadow].cond_waiting = NULL;
 
-    if (cond->waiters == NIL) cond->used_for_waiting = NULL;
+    if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
   }
   task_preempt();
 
@@ -268,8 +259,8 @@
   PID p = (PID)arg;
   LEVEL l;
 
-  q_extract(p,&proc_table[p].cond_waiting->waiters);
-  if (proc_table[p].cond_waiting->waiters == NIL)
+  iq_extract(p,&proc_table[p].cond_waiting->waiters);
+  if (iq_isempty(&proc_table[p].cond_waiting->waiters))
     proc_table[p].cond_waiting->used_for_waiting = NULL;
   proc_table[p].cond_waiting = NULL;
 
@@ -276,7 +267,7 @@
   proc_table[p].delay_timer = -1;
 
   l = proc_table[p].task_level;
-  level_table[l]->task_insert(l,p);
+  level_table[l]->public_unblock(l,p);
 
   event_need_reschedule();
 }
@@ -286,8 +277,6 @@
 {
   LEVEL l;
   int returnvalue = 0;
-  struct timespec ty;
-  TIME tx;
 
   /* Why did I use task_nopreempt()?... because we have to unlock the mutex,
      and we can't call mutex_unlock after kern_context_save (the unlock
@@ -324,23 +313,14 @@
   /* now, we really block the task... */
   proc_table[exec_shadow].context = kern_context_save();
 
-  /* SAME AS SCHEDULER... manage the capacity event and the load_info */
-  ll_gettime(TIME_EXACT, &schedule_time);
-  SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
-  tx = TIMESPEC2USEC(&ty);
-  proc_table[exec_shadow].avail_time -= tx;
-  jet_update_slice(tx);
-  if (cap_timer != NIL) {
-    event_delete(cap_timer);
-    cap_timer = NIL;
-  }
+  kern_epilogue_macro();
 
   l = proc_table[exec_shadow].task_level;
-  level_table[l]->task_extract(l,exec_shadow);
+  level_table[l]->public_block(l,exec_shadow);
 
   /* we insert the task in the condition queue */
   proc_table[exec_shadow].status = WAIT_COND;
-  q_insert(exec_shadow,&cond->waiters);
+  iq_priority_insert(exec_shadow,&cond->waiters);
 
   /* then, we set into the processor descriptor the condition on which
      the task is blocked... (if the task is killed while it is waiting
@@ -359,7 +339,7 @@
   ll_context_to(proc_table[exec_shadow].context);
 
   if (proc_table[exec_shadow].delay_timer != -1)
-    event_delete(proc_table[exec_shadow].delay_timer);
+    kern_event_delete(proc_table[exec_shadow].delay_timer);
 
   kern_sti();
 
@@ -379,7 +359,7 @@
   if (proc_table[exec_shadow].cond_waiting != NULL) {
     proc_table[exec_shadow].cond_waiting = NULL;
 
-    if (cond->waiters == NIL) cond->used_for_waiting = NULL;
+    if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
   }
   else
     /* cond_waiting == NULL if the task is killed or the timer has fired */
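
Taken together, cond_wait() keeps its POSIX shape across this revision: it atomically releases the mutex, blocks the caller on the waiters queue, and reacquires the mutex on wakeup. A hypothetical caller-side sketch; "lock", "not_empty", and "count" are illustrative names, the mutex calls assume SHARK's POSIX-like mutex API, and initialization is elided:

/* Hypothetical consumer built on these primitives. */
static mutex_t lock;
static cond_t  not_empty;
static int     count;

void consume_one(void)
{
  mutex_lock(&lock);
  while (count == 0)              /* re-test the predicate: a wakeup is   */
    cond_wait(&not_empty, &lock); /* not bound to a specific state change */
  count--;
  mutex_unlock(&lock);
}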