Subversion Repositories: shark

Compare Revisions

Rev 29 → Rev 28

/shark/trunk/kernel/iqueue.c
File deleted
/shark/trunk/kernel/modules/edf.c
20,11 → 20,11
 
/**
------------
CVS : $Id: edf.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: edf.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
67,6 → 67,7
 
/*+ Status used in the level +*/
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define EDF_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
89,7 → 90,7
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
 
IQUEUE ready; /*+ the ready queue +*/
QUEUE ready; /*+ the ready queue +*/
 
int flags; /*+ the init flags... +*/
 
105,6 → 106,7
 
switch (status) {
case EDF_READY : return "EDF_Ready";
case EDF_DELAY : return "EDF_Delay";
case EDF_WCET_VIOLATED: return "EDF_Wcet_Violated";
case EDF_WAIT : return "EDF_Sporadic_Wait";
case EDF_IDLE : return "EDF_Idle";
117,7 → 119,6
{
PID p = (PID) par;
EDF_level_des *lev;
struct timespec *temp;
 
edf_printf("$");
 
127,7 → 128,7
case EDF_ZOMBIE:
/* we finally put the task descriptor back in the free queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
136,16 → 137,15
/* tracer stuff */
trc_logevent(TRC_INTACTIVATION,&p);
/* similar to EDF_task_activate */
temp = iq_query_timespec(p,&lev->ready);
TIMESPEC_ASSIGN(&proc_table[p].request_time,
temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
&proc_table[p].timespec_priority);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
q_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
EDF_timer_deadline,
(void *)p);
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000);
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
event_need_reschedule();
printk("el%d|",p);
172,6 → 172,23
kern_raise(XDEADLINE_MISS,p);
}
 
/*+ this function is called when a task finishes its delay +*/
static void EDF_timer_delay(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
 
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = EDF_READY;
q_timespec_insert(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
event_need_reschedule();
}
 
 
static int EDF_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) {
204,7 → 221,7
static void EDF_level_status(LEVEL l)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->ready);
PID p = lev->ready;
 
kern_printf("Wcet Check : %s\n",
onoff(lev->flags & EDF_ENABLE_WCET_CHECK));
222,10 → 239,10
proc_table[p].name,
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
EDF_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->ready);
p = proc_table[p].next;
}
 
for (p=0; p<MAX_PROC; p++)
236,8 → 253,8
proc_table[p].name,
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
EDF_status_to_a(proc_table[p].status));
}
 
256,7 → 273,7
kern_printf(") ");
}
*/
return iq_query_first(&lev->ready);
return (PID)lev->ready;
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
348,6 → 365,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void EDF_task_dispatch(LEVEL l, PID p, int nostop)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
357,7 → 382,17
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
q_extract(p, &lev->ready);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void EDF_task_epilogue(LEVEL l, PID p)
374,7 → 409,7
}
else {
/* the task has been preempted. it returns into the ready queue... */
iq_timespec_insert(p,&lev->ready);
q_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
}
382,7 → 417,6
static void EDF_task_activate(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
struct timespec *temp;
 
if (proc_table[p].status == EDF_WAIT) {
kern_raise(XACTIVATION,p);
399,19 → 433,19
/* see also EDF_timer_deadline */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
 
temp = iq_query_timespec(p, &lev->ready);
TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], temp);
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority,
&proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
 
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
q_timespec_insert(p,&lev->ready);
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(temp,
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
EDF_timer_deadline,
(void *)p);
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000);
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000);
}
 
static void EDF_task_insert(LEVEL l, PID p)
423,7 → 457,7
 
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
q_timespec_insert(p,&lev->ready);
}
 
static void EDF_task_extract(LEVEL l, PID p)
484,7 → 518,22
correctly the task state to sleep... */
}
 
static void EDF_task_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* equal to EDF_task_endcycle */
proc_table[p].status = EDF_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
EDF_timer_delay,
(void *)p);
}
 
/* Guest Functions
These functions manage a JOB_TASK_MODEL, which is used to put
a guest task in the EDF ready queue. */
497,7 → 546,7
/* if the EDF_guest_create is called, then the pclass must be a
valid pclass. */
 
*iq_query_timespec(p, &lev->ready) = job->deadline;
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline);
lev->deadline_timer[p] = -1;
 
528,7 → 577,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
q_extract(p, &lev->ready);
}
 
static void EDF_guest_epilogue(LEVEL l, PID p)
536,7 → 585,7
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* the task has been preempted. it returns into the ready queue... */
iq_timespec_insert(p,&lev->ready);
q_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
 
545,12 → 594,12
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
iq_timespec_insert(p,&lev->ready);
q_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
 
/* Set the deadline timer */
if (!(lev->flag[p] & EDF_FLAG_NORAISEEXC))
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
EDF_timer_guest_deadline,
(void *)p);
 
561,7 → 610,7
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
iq_timespec_insert(p,&lev->ready);
q_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
 
586,9 → 635,13
//kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]);
if (proc_table[p].status == EDF_READY)
{
iq_extract(p, &lev->ready);
q_extract(p, &lev->ready);
//kern_printf("(g_end rdy extr)");
}
else if (proc_table[p].status == EDF_DELAY) {
event_delete(proc_table[p].delay_timer);
proc_table[p].delay_timer = NIL; /* paranoia */
}
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
602,8 → 655,25
static void EDF_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void EDF_guest_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* equal to EDF_task_endcycle */
proc_table[p].status = EDF_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
EDF_timer_delay,
(void *)p);
}
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
655,6 → 725,7
lev->l.task_endcycle = EDF_task_endcycle;
lev->l.task_end = EDF_task_end;
lev->l.task_sleep = EDF_task_sleep;
lev->l.task_delay = EDF_task_delay;
 
lev->l.guest_create = EDF_guest_create;
lev->l.guest_detach = EDF_guest_detach;
666,6 → 737,7
lev->l.guest_endcycle = EDF_guest_endcycle;
lev->l.guest_end = EDF_guest_end;
lev->l.guest_sleep = EDF_guest_sleep;
lev->l.guest_delay = EDF_guest_delay;
 
/* fill the EDF descriptor part */
for(i=0; i<MAX_PROC; i++) {
674,7 → 746,7
lev->flag[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
lev->ready = NIL;
lev->flags = flags & 0x07;
lev->U = 0;
}
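
The *_task_delay handlers shown in this diff all follow the same pattern: read the current time with ll_gettime(TIME_EXACT, ...), add the requested number of microseconds with ADDUSEC2TIMESPEC, and hand the resulting wakeup time to kern_event_post() together with a *_timer_delay callback that re-inserts the task into the ready queue and calls event_need_reschedule(). The fragment below is only a self-contained sketch of that wakeup-time arithmetic; add_usec and the sample values are illustrative stand-ins, not the kernel's ADDUSEC2TIMESPEC macro.

/* Sketch: compute "now + usdelay" with nanosecond carry, as the delay
   handlers above do before posting the wakeup event. Since the added
   nanoseconds are always below one second, a single carry is enough. */
#include <stdio.h>
#include <time.h>

static void add_usec(struct timespec *t, unsigned long usec)
{
    t->tv_nsec += (long)(usec % 1000000UL) * 1000L;
    t->tv_sec  += (time_t)(usec / 1000000UL);
    if (t->tv_nsec >= 1000000000L) {   /* carry one second if needed */
        t->tv_nsec -= 1000000000L;
        t->tv_sec  += 1;
    }
}

int main(void)
{
    struct timespec wakeuptime = { .tv_sec = 10, .tv_nsec = 999000000L }; /* pretend "now" */
    add_usec(&wakeuptime, 2500);                                          /* 2.5 ms delay  */
    printf("wake at %ld.%09ld\n", (long)wakeuptime.tv_sec, (long)wakeuptime.tv_nsec);
    return 0;
}

Compiled on its own, this prints "wake at 11.001500000", i.e. the nanosecond overflow has been carried into the seconds field, which is the same normalization the delay handlers rely on.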
/shark/trunk/kernel/modules/posix.c
20,11 → 20,11
 
/**
------------
CVS : $Id: posix.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: posix.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
This file contains the scheduling module compatible with POSIX
66,6 → 66,7
 
/*+ Status used in the level +*/
#define POSIX_READY MODULE_STATUS_BASE
#define POSIX_DELAY MODULE_STATUS_BASE+1
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
72,10 → 73,8
level_des l; /*+ the standard level descriptor +*/
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
int priority[MAX_PROC]; /*+ priority of each task +*/
 
IQUEUE *ready; /*+ the ready queue array +*/
QQUEUE *ready; /*+ the ready queue array +*/
 
int slice; /*+ the level's time slice +*/
 
95,10 → 94,30
 
switch (status) {
case POSIX_READY: return "POSIX_Ready";
case POSIX_DELAY: return "POSIX_Delay";
default : return "POSIX_Unknown";
}
}
 
/*+ this function is called when a task finishes its delay +*/
static void POSIX_timer_delay(void *par)
{
PID p = (PID) par;
POSIX_level_des *lev;
 
lev = (POSIX_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = POSIX_READY;
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int POSIX_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
122,9 → 141,9
for (p=0; p<MAX_PROC; p++)
if (proc_table[p].task_level == l && proc_table[p].status != POSIX_READY
&& proc_table[p].status != FREE )
kern_printf("Pid: %d\t Name: %20s Prio: %3d Status: %s\n",
kern_printf("Pid: %d\t Name: %20s Prio: %3ld Status: %s\n",
p,proc_table[p].name,
lev->priority[p],
proc_table[p].priority,
POSIX_status_to_a(proc_table[p].status));
 
}
145,7 → 164,7
prio = lev->maxpriority;
 
for (;;) {
p = iq_query_first(&lev->ready[prio]);
p = qq_queryfirst(&lev->ready[prio]);
if (p == NIL) {
if (prio) {
prio--;
158,8 → 177,8
if ((proc_table[p].control & CONTROL_CAP) &&
(proc_table[p].avail_time <= 0)) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_extract(p,&lev->ready[prio]);
iq_insertlast(p,&lev->ready[prio]);
qq_extract(p,&lev->ready[prio]);
qq_insertlast(p,&lev->ready[prio]);
}
else
return p;
189,7 → 208,7
proc_table[exec_shadow].task_level == l) {
/* We inherit the scheduling properties if the scheduling level
*is* the same */
lev->priority[p] = lev->priority[exec_shadow];
proc_table[p].priority = proc_table[exec_shadow].priority;
proc_table[p].avail_time = proc_table[exec_shadow].avail_time;
proc_table[p].wcet = proc_table[exec_shadow].wcet;
200,7 → 219,7
lev->nact[p] = (lev->nact[exec_shadow] == -1) ? -1 : 0;
}
else {
lev->priority[p] = nrt->weight;
proc_table[p].priority = nrt->weight;
if (nrt->slice) {
proc_table[p].avail_time = nrt->slice;
235,6 → 254,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void POSIX_task_dispatch(LEVEL l, PID p, int nostop)
{
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
242,7 → 269,18
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready[lev->priority[p]]);
qq_extract(p, &lev->ready[proc_table[p].priority]);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void POSIX_task_epilogue(LEVEL l, PID p)
251,7 → 289,7
 
if (lev->yielding) {
lev->yielding = 0;
iq_insertlast(p,&lev->ready[lev->priority[p]]);
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
}
/* check if the slice is finished and insert the task in the correct
queue position */
258,10 → 296,10
else if (proc_table[p].control & CONTROL_CAP &&
proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_insertlast(p,&lev->ready[lev->priority[p]]);
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
}
else
iq_insertfirst(p,&lev->ready[lev->priority[p]]);
qq_insertfirst(p,&lev->ready[proc_table[p].priority]);
 
proc_table[p].status = POSIX_READY;
}
282,7 → 320,7
 
/* Insert task in the correct position */
proc_table[p].status = POSIX_READY;
iq_insertlast(p,&lev->ready[lev->priority[p]]);
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
}
 
static void POSIX_task_insert(LEVEL l, PID p)
294,7 → 332,7
 
/* Insert task in the correct position */
proc_table[p].status = POSIX_READY;
iq_insertlast(p,&lev->ready[lev->priority[p]]);
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
}
 
static void POSIX_task_extract(LEVEL l, PID p)
317,7 → 355,7
/* continue!!!! */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
lev->nact[p]--;
iq_insertfirst(p,&lev->ready[lev->priority[p]]);
qq_insertfirst(p,&lev->ready[proc_table[p].priority]);
proc_table[p].status = POSIX_READY;
}
else
332,7 → 370,7
 
/* then, we insert the task in the free queue */
proc_table[p].status = FREE;
iq_priority_insert(p,&freedesc);
q_insert(p,&freedesc);
}
 
static void POSIX_task_sleep(LEVEL l, PID p)
342,8 → 380,23
proc_table[p].status = SLEEP;
}
 
static void POSIX_task_delay(LEVEL l, PID p, TIME usdelay)
{
// POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to POSIX_task_endcycle */
proc_table[p].status = POSIX_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
POSIX_timer_delay,
(void *)p);
}
 
 
static int POSIX_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
374,7 → 427,12
static void POSIX_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
460,6 → 518,7
lev->l.task_endcycle = POSIX_task_endcycle;
lev->l.task_end = POSIX_task_end;
lev->l.task_sleep = POSIX_task_sleep;
lev->l.task_delay = POSIX_task_delay;
 
lev->l.guest_create = POSIX_guest_create;
lev->l.guest_detach = POSIX_guest_detach;
471,6 → 530,7
lev->l.guest_endcycle = POSIX_guest_endcycle;
lev->l.guest_end = POSIX_guest_end;
lev->l.guest_sleep = POSIX_guest_sleep;
lev->l.guest_delay = POSIX_guest_delay;
 
/* fill the POSIX descriptor part */
for (i = 0; i < MAX_PROC; i++)
478,10 → 538,10
 
lev->maxpriority = prioritylevels -1;
 
lev->ready = (IQUEUE *)kern_alloc(sizeof(IQUEUE) * prioritylevels);
lev->ready = (QQUEUE *)kern_alloc(sizeof(QQUEUE) * prioritylevels);
 
for (x = 0; x < prioritylevels; x++)
iq_init(&lev->ready[x], &freedesc, 0);
qq_init(&lev->ready[x]);
 
if (slice < POSIX_MINIMUM_SLICE) slice = POSIX_MINIMUM_SLICE;
if (slice > POSIX_MAXIMUM_SLICE) slice = POSIX_MAXIMUM_SLICE;
554,7 → 614,7
else
*policy = NRT_FIFO_POLICY;
 
*priority = ((POSIX_level_des *)(level_table[l]))->priority[p];
*priority = proc_table[p].priority;
 
return 0;
}
584,14 → 644,14
else
return EINVAL;
 
if (lev->priority[p] != priority) {
if (proc_table[p].priority != priority) {
if (proc_table[p].status == POSIX_READY) {
iq_extract(p,&lev->ready[lev->priority[p]]);
lev->priority[p] = priority;
iq_insertlast(p,&lev->ready[priority]);
qq_extract(p,&lev->ready[proc_table[p].priority]);
proc_table[p].priority = priority;
qq_insertlast(p,&lev->ready[priority]);
}
else
lev->priority[p] = priority;
proc_table[p].priority = priority;
}
 
return 0;
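
In the posix.c hunks above, the scheduler keeps one ready queue per priority level and scans from lev->maxpriority downwards, taking the head of the highest non-empty queue; a task whose capacity (avail_time) has run out gets its slice replenished and is moved to the back of its queue before the scan continues. The code below is a minimal stand-alone sketch of that scan over plain arrays; ready, count, avail, rotate and pick_next are illustrative names, not the kernel's QQUEUE API, and the fixed replenishment of 5 stands in for the task's wcet.

/* Sketch: highest-priority-first scan with round-robin rotation on an
   exhausted slice, mirroring POSIX_scheduler above. Plain arrays stand
   in for the per-priority queues; entries are task ids, -1 means none. */
#include <stdio.h>

#define NIL      (-1)
#define NPRIO      4
#define QCAP       8

static int ready[NPRIO][QCAP];          /* ready[prio][0] is the queue head */
static int count[NPRIO];                /* tasks currently queued per level */
static int avail[16];                   /* remaining slice per task id      */

static void rotate(int prio)            /* move the head to the tail        */
{
    int head = ready[prio][0];
    for (int i = 1; i < count[prio]; i++)
        ready[prio][i - 1] = ready[prio][i];
    ready[prio][count[prio] - 1] = head;
}

static int pick_next(void)
{
    for (int prio = NPRIO - 1; prio >= 0; prio--) {
        while (count[prio] > 0) {
            int p = ready[prio][0];
            if (avail[p] > 0)
                return p;               /* slice left: run it               */
            avail[p] += 5;              /* replenish (wcet stand-in)        */
            rotate(prio);               /* and queue it again at the end    */
        }
    }
    return NIL;
}

int main(void)
{
    ready[2][0] = 7; ready[2][1] = 3; count[2] = 2;
    avail[7] = 0; avail[3] = 4;
    printf("next: %d\n", pick_next());  /* prints 3: task 7 is rotated back */
    return 0;
}

Running the sketch prints "next: 3": task 7 sits at the head of the priority-2 queue but its slice is spent, so it is replenished and rotated behind task 3, exactly the behaviour of the CONTROL_CAP branch in the scheduler above.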
/shark/trunk/kernel/modules/hartport.c
20,11 → 20,11
 
/**
------------
CVS : $Id: hartport.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: hartport.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
This file contains the Hartik 3.3.1 Port functions
110,8 → 110,8
struct hash_port htable[MAX_HASH_ENTRY];
struct port_ker port_des[MAX_PORT];
struct port_com port_int[MAX_PORT_INT];
int freeportdes;
int freeportint;
QUEUE freeportdes;
QUEUE freeportint;
 
static int port_installed = 0;
 
/shark/trunk/kernel/modules/cbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: cbs.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: cbs.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
This file contains the aperiodic server CBS (Constant Bandwidth Server)
76,6 → 76,7
/*+ Status used in the level +*/
#define CBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/
#define CBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/
#define CBS_DELAY APER_STATUS_BASE+2 /*+ waiting the delay end +*/
 
/*+ task flags +*/
#define CBS_SAVE_ARRIVALS 1
187,6 → 188,7
switch (status) {
case CBS_IDLE : return "CBS_Idle";
case CBS_ZOMBIE : return "CBS_Zombie";
case CBS_DELAY : return "CBS_Delay";
default : return "CBS_Unknown";
}
}
251,6 → 253,20
 
}
 
/*+ this function is called when a task finishes its delay +*/
static void CBS_timer_delay(void *par)
{
PID p = (PID) par;
CBS_level_des *lev;
 
lev = (CBS_level_des *)level_table[proc_table[p].task_level];
 
CBS_activation(lev,p,&proc_table[p].timespec_priority);
 
event_need_reschedule();
}
 
 
/*+ this function is called when a killed or ended task reach the
period end +*/
static void CBS_timer_zombie(void *par)
262,7 → 278,7
 
/* we finally put the task descriptor back in the free queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
 
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
435,11 → 451,29
return 0;
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void CBS_task_dispatch(LEVEL l, PID p, int nostop)
{
CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
level_table[ lev->scheduling_level ]->
guest_dispatch(lev->scheduling_level,p,nostop);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void CBS_task_epilogue(LEVEL l, PID p)
605,6 → 639,32
lev->nact[p] = 0;
}
 
static void CBS_task_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
 
/* check if the wcet is finished... */
CBS_avail_time_check(lev, p);
 
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
 
proc_table[p].status = CBS_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
 
/* the timespec_priority field is used to store the time at witch the delay
timer raises */
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
CBS_timer_delay,
(void *)p);
}
 
 
static int CBS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
635,7 → 695,12
static void CBS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
687,6 → 752,7
lev->l.task_endcycle = CBS_task_endcycle;
lev->l.task_end = CBS_task_end;
lev->l.task_sleep = CBS_task_sleep;
lev->l.task_delay = CBS_task_delay;
 
lev->l.guest_create = CBS_guest_create;
lev->l.guest_detach = CBS_guest_detach;
698,6 → 764,7
lev->l.guest_endcycle = CBS_guest_endcycle;
lev->l.guest_end = CBS_guest_end;
lev->l.guest_sleep = CBS_guest_sleep;
lev->l.guest_delay = CBS_guest_delay;
 
/* fill the CBS descriptor part */
for (i=0; i<MAX_PROC; i++) {
/shark/trunk/kernel/modules/rrsoft.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rrsoft.c,v 1.3 2002-11-11 08:32:07 pj Exp $
CVS : $Id: rrsoft.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin)
63,6 → 63,7
 
/*+ Status used in the level +*/
#define RRSOFT_READY MODULE_STATUS_BASE
#define RRSOFT_DELAY MODULE_STATUS_BASE+1
#define RRSOFT_IDLE MODULE_STATUS_BASE+2
 
/*+ the level redefinition for the Round Robin level +*/
71,7 → 72,7
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
 
IQUEUE ready; /*+ the ready queue +*/
QQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
99,6 → 100,7
 
switch (status) {
case RRSOFT_READY: return "RRSOFT_Ready";
case RRSOFT_DELAY: return "RRSOFT_Delay";
case RRSOFT_IDLE : return "RRSOFT_Idle";
default : return "RRSOFT_Unknown";
}
119,7 → 121,7
/* the task has finished the current activation and must be
reactivated */
proc_table[p].status = RRSOFT_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
 
event_need_reschedule();
}
138,6 → 140,25
}
 
 
/*+ this function is called when a task finishes its delay +*/
static void RRSOFT_timer_delay(void *par)
{
PID p = (PID) par;
RRSOFT_level_des *lev;
 
lev = (RRSOFT_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RRSOFT_READY;
qq_insertlast(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int RRSOFT_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
160,7 → 181,7
static void RRSOFT_level_status(LEVEL l)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->ready);
PID p = qq_queryfirst(&lev->ready);
 
kern_printf("Slice: %d \n", lev->slice);
 
167,7 → 188,7
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RRSOFT_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->ready);
p = proc_table[p].next;
}
 
for (p=0; p<MAX_PROC; p++)
190,7 → 211,7
PID p;
 
for (;;) {
p = iq_query_first(&lev->ready);
p = qq_queryfirst(&lev->ready);
if (p == -1)
return p;
//{kern_printf("(s%d)",p); return p;}
198,8 → 219,8
// kern_printf("(p=%d l=%d avail=%d wcet =%d)\n",p,l,proc_table[p].avail_time, proc_table[p].wcet);
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_extract(p,&lev->ready);
iq_insertlast(p,&lev->ready);
qq_extract(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
else
//{kern_printf("(s%d)",p); return p;}
301,6 → 322,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RRSOFT_task_dispatch(LEVEL l, PID p, int nostop)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
309,7 → 338,18
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
qq_extract(p, &lev->ready);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void RRSOFT_task_epilogue(LEVEL l, PID p)
320,11 → 360,11
qqueue position */
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
else
/* curr is >0, so the running task has to run for another curr usec */
iq_insertfirst(p,&lev->ready);
qq_insertfirst(p,&lev->ready);
 
proc_table[p].status = RRSOFT_READY;
}
345,7 → 385,7
 
/* Insert task in the correct position */
proc_table[p].status = RRSOFT_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
 
/* Set the reactivation timer */
if (lev->periodic[p])
368,7 → 408,7
 
/* Insert task in the correct position */
proc_table[p].status = RRSOFT_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
 
static void RRSOFT_task_extract(LEVEL l, PID p)
392,7 → 432,7
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
lev->nact[p]--;
// qq_insertlast(p,&lev->ready);
iq_insertfirst(p,&lev->ready);
qq_insertfirst(p,&lev->ready);
proc_table[p].status = RRSOFT_READY;
}
else
413,7 → 453,7
 
/* then, we insert the task in the free queue */
proc_table[p].status = FREE;
iq_insertlast(p,&freedesc);
q_insert(p,&freedesc);
}
 
static void RRSOFT_task_sleep(LEVEL l, PID p)
431,6 → 471,23
proc_table[p].status = SLEEP;
}
 
static void RRSOFT_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to RRSOFT_task_endcycle */
proc_table[p].status = RRSOFT_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RRSOFT_timer_delay,
(void *)p);
}
 
 
static int RRSOFT_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
461,8 → 518,12
static void RRSOFT_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
542,6 → 603,7
lev->l.task_endcycle = RRSOFT_task_endcycle;
lev->l.task_end = RRSOFT_task_end;
lev->l.task_sleep = RRSOFT_task_sleep;
lev->l.task_delay = RRSOFT_task_delay;
 
lev->l.guest_create = RRSOFT_guest_create;
lev->l.guest_detach = RRSOFT_guest_detach;
553,6 → 615,7
lev->l.guest_endcycle = RRSOFT_guest_endcycle;
lev->l.guest_end = RRSOFT_guest_end;
lev->l.guest_sleep = RRSOFT_guest_sleep;
lev->l.guest_delay = RRSOFT_guest_delay;
 
/* fill the RRSOFT descriptor part */
for (i = 0; i < MAX_PROC; i++) {
563,7 → 626,7
lev->period[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
qq_init(&lev->ready);
 
if (slice < RRSOFT_MINIMUM_SLICE) slice = RRSOFT_MINIMUM_SLICE;
if (slice > RRSOFT_MAXIMUM_SLICE) slice = RRSOFT_MAXIMUM_SLICE;
/shark/trunk/kernel/modules/ss.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ss.c,v 1.3 2002-11-11 08:32:07 pj Exp $
CVS : $Id: ss.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the aperiodic Sporadic Server (SS).
155,7 → 155,7
bandwidth_t U; /*+ the used bandwidth by the server +*/
 
IQUEUE wait; /*+ the wait queue of the SS +*/
QQUEUE wait; /*+ the wait queue of the SS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
465,8 → 465,8
}
 
if (lev->availCs > 0 && lev->activated == NIL) {
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
if (qq_queryfirst(&lev->wait) != NIL) {
lev->activated = qq_getfirst(&lev->wait);
/* if server is active, replenish time already set */
if (lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
536,7 → 536,7
void SS_level_status(LEVEL l)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->wait);
PID p = qq_queryfirst(&lev->wait);
 
kern_printf("On-line guarantee : %s\n",
(lev->flags & SS_ENABLE_GUARANTEE_EDF ||
554,8 → 554,8
kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
lev->nact[lev->activated],
SS_status_to_a(proc_table[lev->activated].status));
 
564,7 → 564,7
p,
proc_table[p].name,
SS_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->wait);
p = proc_table[p].next;
}
}
 
593,7 → 593,7
if (lev->flags & SS_BACKGROUND_BLOCK)
return NIL;
else
return iq_query_first(&lev->wait);
return qq_queryfirst(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
696,7 → 696,7
to exe before calling task_dispatch.
We have to check lev->activated != p instead */
if (lev->activated != p) {
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
#ifdef DEBUG
kern_printf("extr task:%d ",p);
#endif
779,7 → 779,7
if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
 
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
lev->activated = NIL;
}
793,7 → 793,7
guest_epilogue(lev->scheduling_level,p);
}
else { /* goes into wait queue */
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
}
833,7 → 833,7
SS_activation(lev);
}
else {
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
}
860,7 → 860,7
 
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody executes with the SS before... */
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
 
912,11 → 912,11
if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
 
if (lev->nact[p] > 0) {
lev->nact[p]--;
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
else {
923,7 → 923,7
proc_table[p].status = SLEEP;
}
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
962,9 → 962,9
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
 
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
1004,11 → 1004,11
if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
 
proc_table[p].status = SLEEP;
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
1020,7 → 1020,41
}
}
 
static void SS_task_delay(LEVEL l, PID p, TIME usdelay)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty;
int tx;
 
#ifdef DEBUG
kern_printf("SS_tdelay ");
#endif
 
/* update the server capacity */
if (BACKGROUND_ON)
lev->flags &= ~SS_BACKGROUND;
else {
 
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
lev->replenish_amount += tx;
#ifdef DEBUG
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
 
/* Here set replenish amount because delay may be too long and
replenish time could arrive */
SS_set_ra(l);
}
 
/* I hope no delay when owning a mutex... */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
/*-------------------------------------------------------------------*/
 
/*** Guest functions ***/
1058,7 → 1092,10
static void SS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
/*-------------------------------------------------------------------*/
 
/*** Registration functions ***/
1116,6 → 1153,7
lev->l.task_endcycle = SS_task_endcycle;
lev->l.task_end = SS_task_end;
lev->l.task_sleep = SS_task_sleep;
lev->l.task_delay = SS_task_delay;
 
lev->l.guest_create = SS_guest_create;
lev->l.guest_detach = SS_guest_detach;
1127,6 → 1165,7
lev->l.guest_endcycle = SS_guest_endcycle;
lev->l.guest_end = SS_guest_end;
lev->l.guest_sleep = SS_guest_sleep;
lev->l.guest_delay = SS_guest_delay;
 
/* fill the SS descriptor part */
 
1138,7 → 1177,7
 
lev->period = per;
 
iq_init(&lev->wait, &freedesc, 0);
qq_init(&lev->wait);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / per) * Cs;
/shark/trunk/kernel/modules/tbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: tbs.c,v 1.3 2002-11-11 08:32:07 pj Exp $
CVS : $Id: tbs.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
84,7 → 84,7
struct timespec lastdline; /*+ the last deadline assigned to
a TBS task +*/
 
IQUEUE wait; /*+ the wait queue of the TBS +*/
QQUEUE wait; /*+ the wait queue of the TBS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
206,7 → 206,7
static void TBS_level_status(LEVEL l)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->wait);
PID p = qq_queryfirst(&lev->wait);
 
kern_printf("Wcet Check : %s\n",
onoff(lev->flags & TBS_ENABLE_WCET_CHECK));
221,8 → 221,8
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%9ld nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
iq_query_timespec(lev->activated, &lev->wait)->tv_sec,
iq_query_timespec(lev->activated, &lev->wait)->tv_nsec,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
lev->nact[lev->activated],
TBS_status_to_a(proc_table[lev->activated].status));
 
231,7 → 231,7
p,
proc_table[p].name,
TBS_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->wait);
p = proc_table[p].next;
}
}
 
288,6 → 288,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void TBS_task_dispatch(LEVEL l, PID p, int nostop)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
297,6 → 305,16
 
level_table[ lev->scheduling_level ]->
guest_dispatch(lev->scheduling_level,p,nostop);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void TBS_task_epilogue(LEVEL l, PID p)
327,7 → 345,7
lev->lastdline.tv_sec, lev->lastdline.tv_nsec);
#endif
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
}
354,7 → 372,7
}
else {
proc_table[p].status = TBS_WAIT;
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
}
}
else if (lev->flag[p] & TBS_SAVE_ARRIVALS)
399,12 → 417,12
// lev->nact[p] can be >0 only if the SAVE_ARRIVALS bit is set
lev->nact[p]--;
proc_table[p].status = TBS_WAIT;
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
}
else
proc_table[p].status = SLEEP;
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
 
420,9 → 438,9
TBS_bandwidth_reclaiming(lev,p);
 
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
}
447,12 → 465,21
 
lev->nact[p] = 0;
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
 
}
 
static void TBS_task_delay(LEVEL l, PID p, TIME usdelay)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
 
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
static int TBS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
483,9 → 510,12
static void TBS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
537,6 → 567,7
lev->l.task_endcycle = TBS_task_endcycle;
lev->l.task_end = TBS_task_end;
lev->l.task_sleep = TBS_task_sleep;
lev->l.task_delay = TBS_task_delay;
 
lev->l.guest_create = TBS_guest_create;
lev->l.guest_detach = TBS_guest_detach;
548,6 → 579,7
lev->l.guest_endcycle = TBS_guest_endcycle;
lev->l.guest_end = TBS_guest_end;
lev->l.guest_sleep = TBS_guest_sleep;
lev->l.guest_delay = TBS_guest_delay;
 
/* fill the TBS descriptor part */
 
558,7 → 590,7
 
NULL_TIMESPEC(&lev->lastdline);
 
iq_init(&lev->wait, &freedesc, 0);
qq_init(&lev->wait);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / den) * num;
/shark/trunk/kernel/modules/rr2.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr2.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: rr2.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the scheduling module RR2 (Round Robin) version 2
63,6 → 63,7
 
/*+ Status used in the level +*/
#define RR2_READY MODULE_STATUS_BASE
#define RR2_DELAY MODULE_STATUS_BASE+1
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
70,7 → 71,7
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
 
IQUEUE ready; /*+ the ready queue +*/
QQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
86,10 → 87,30
 
switch (status) {
case RR2_READY: return "RR2_Ready";
case RR2_DELAY: return "RR2_Delay";
default : return "RR2_Unknown";
}
}
 
/*+ this function is called when a task finishes its delay +*/
static void RR2_timer_delay(void *par)
{
PID p = (PID) par;
RR2_level_des *lev;
 
lev = (RR2_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RR2_READY;
qq_insertlast(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int RR2_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
106,7 → 127,7
static void RR2_level_status(LEVEL l)
{
RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->ready);
PID p = qq_queryfirst(&lev->ready);
 
kern_printf("Slice: %d \n", lev->slice);
 
113,7 → 134,7
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RR2_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->ready);
p = proc_table[p].next;
}
 
for (p=0; p<MAX_PROC; p++)
136,14 → 157,14
PID p;
 
for (;;) {
p = iq_query_first(&lev->ready);
p = qq_queryfirst(&lev->ready);
if (p == -1)
return p;
 
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_extract(p,&lev->ready);
iq_insertlast(p,&lev->ready);
qq_extract(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
else
return p;
201,6 → 222,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RR2_task_dispatch(LEVEL l, PID p, int nostop)
{
RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
208,7 → 237,18
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
qq_extract(p, &lev->ready);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void RR2_task_epilogue(LEVEL l, PID p)
219,11 → 259,11
qqueue position */
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
else
/* curr is >0, so the running task has to run for another curr usec */
iq_insertfirst(p,&lev->ready);
qq_insertfirst(p,&lev->ready);
 
proc_table[p].status = RR2_READY;
}
244,7 → 284,7
 
/* Insert task in the correct position */
proc_table[p].status = RR2_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
 
static void RR2_task_insert(LEVEL l, PID p)
256,7 → 296,7
 
/* Insert task in the correct position */
proc_table[p].status = RR2_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
 
static void RR2_task_extract(LEVEL l, PID p)
279,7 → 319,7
/* continue!!!! */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
lev->nact[p]--;
iq_insertfirst(p,&lev->ready);
qq_insertfirst(p,&lev->ready);
proc_table[p].status = RR2_READY;
}
else
294,7 → 334,7
 
/* then, we insert the task in the free queue */
proc_table[p].status = FREE;
iq_insertlast(p,&freedesc);
q_insert(p,&freedesc);
}
 
static void RR2_task_sleep(LEVEL l, PID p)
304,6 → 344,23
proc_table[p].status = SLEEP;
}
 
static void RR2_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to RR2_task_endcycle */
proc_table[p].status = RR2_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RR2_timer_delay,
(void *)p);
}
 
 
static int RR2_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
334,9 → 391,12
static void RR2_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
415,6 → 475,7
lev->l.task_endcycle = RR2_task_endcycle;
lev->l.task_end = RR2_task_end;
lev->l.task_sleep = RR2_task_sleep;
lev->l.task_delay = RR2_task_delay;
 
lev->l.guest_create = RR2_guest_create;
lev->l.guest_detach = RR2_guest_detach;
426,12 → 487,13
lev->l.guest_endcycle = RR2_guest_endcycle;
lev->l.guest_end = RR2_guest_end;
lev->l.guest_sleep = RR2_guest_sleep;
lev->l.guest_delay = RR2_guest_delay;
 
/* fill the RR2 descriptor part */
for (i = 0; i < MAX_PROC; i++)
lev->nact[i] = -1;
 
iq_init(&lev->ready, &freedesc, 0);
qq_init(&lev->ready);
 
if (slice < RR2_MINIMUM_SLICE) slice = RR2_MINIMUM_SLICE;
if (slice > RR2_MAXIMUM_SLICE) slice = RR2_MAXIMUM_SLICE;
/shark/trunk/kernel/modules/ds.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ds.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: ds.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
This file contains the aperiodic server DS (Deferrable Server)
83,7 → 83,7
int Cs; /*+ server capacity +*/
int availCs; /*+ server avail time +*/
 
IQUEUE wait; /*+ the wait queue of the DS +*/
QQUEUE wait; /*+ the wait queue of the DS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
128,8 → 128,8
was not any other task to be put in the ready queue
... we are now activating the next task */
if (lev->availCs > 0 && lev->activated == NIL) {
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
if (qq_queryfirst(&lev->wait) != NIL) {
lev->activated = qq_getfirst(&lev->wait);
DS_activation(lev);
event_need_reschedule();
}
178,7 → 178,7
static void DS_level_status(LEVEL l)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->wait);
PID p = qq_queryfirst(&lev->wait);
 
kern_printf("On-line guarantee : %s\n",
onoff(lev->flags & DS_ENABLE_GUARANTEE_EDF ||
190,8 → 190,8
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
lev->nact[lev->activated],
DS_status_to_a(proc_table[lev->activated].status));
 
200,7 → 200,7
p,
proc_table[p].name,
DS_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->wait);
p = proc_table[p].next;
}
}
 
221,7 → 221,7
if (lev->flags & DS_BACKGROUND_BLOCK)
return NIL;
else
return iq_query_first(&lev->wait);
return qq_queryfirst(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
275,6 → 275,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void DS_task_dispatch(LEVEL l, PID p, int nostop)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
286,7 → 294,7
to exe before calling task_dispatch. we have to check
lev->activated != p instead */
if (lev->activated != p) {
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
//kern_printf("#%d#",p);
}
else {
303,6 → 311,16
}
 
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void DS_task_epilogue(LEVEL l, PID p)
338,7 → 356,7
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = DS_WAIT;
lev->activated = NIL;
}
349,7 → 367,7
level_table[ lev->scheduling_level ]->
guest_epilogue(lev->scheduling_level,p);
} else { //kern_printf("Û2");
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
}
370,7 → 388,7
DS_activation(lev);
}
else {
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
}
390,7 → 408,7
 
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody executes with the DS before... */
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
 
427,18 → 445,18
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
 
if (lev->nact[p] > 0)
{
lev->nact[p]--;
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
else
proc_table[p].status = SLEEP;
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
DS_activation(lev);
}
463,9 → 481,9
guest_end(lev->scheduling_level,p);
 
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
DS_activation(lev);
}
491,16 → 509,36
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
 
proc_table[p].status = SLEEP;
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated != NIL)
DS_activation(lev);
}
static void DS_task_delay(LEVEL l, PID p, TIME usdelay)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;
 
/* update the server capacity */
if (lev->flags & DS_BACKGROUND)
lev->flags &= ~DS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}
 
/* I hope no delay when owning a mutex... */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
static int DS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
531,9 → 569,12
static void DS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
 
606,6 → 647,7
lev->l.task_endcycle = DS_task_endcycle;
lev->l.task_end = DS_task_end;
lev->l.task_sleep = DS_task_sleep;
lev->l.task_delay = DS_task_delay;
 
lev->l.guest_create = DS_guest_create;
lev->l.guest_detach = DS_guest_detach;
617,6 → 659,7
lev->l.guest_endcycle = DS_guest_endcycle;
lev->l.guest_end = DS_guest_end;
lev->l.guest_sleep = DS_guest_sleep;
lev->l.guest_delay = DS_guest_delay;
 
/* fill the DS descriptor part */
 
628,7 → 671,7
 
lev->period = per;
 
iq_init(&lev->wait, &freedesc, 0);
qq_init(&lev->wait);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / per) * Cs;
/shark/trunk/kernel/modules/rm.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rm.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: rm.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the scheduling module RM (Rate Monotonic)
71,6 → 71,7
 
/*+ Status used in the level +*/
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define RM_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
93,7 → 94,7
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
 
IQUEUE ready; /*+ the ready queue +*/
QUEUE ready; /*+ the ready queue +*/
 
int flags; /*+ the init flags... +*/
 
109,6 → 110,7
 
switch (status) {
case RM_READY : return "RM_Ready";
case RM_DELAY : return "RM_Delay";
case RM_WCET_VIOLATED: return "RM_Wcet_Violated";
case RM_WAIT : return "RM_Sporadic_Wait";
case RM_IDLE : return "RM_Idle";
121,8 → 123,8
{
PID p = (PID) par;
RM_level_des *lev;
struct timespec *temp;
 
 
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
switch (proc_table[p].status) {
129,7 → 131,7
case RM_ZOMBIE:
/* we finally put the task descriptor back in the free queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
138,12 → 140,12
/* tracer stuff */
trc_logevent(TRC_INTACTIVATION,&p);
/* similar to RM_task_activate */
temp = iq_query_timespec(p, &lev->ready);
TIMESPEC_ASSIGN(&proc_table[p].request_time, temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
TIMESPEC_ASSIGN(&proc_table[p].request_time,
&proc_table[p].timespec_priority);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
q_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
RM_timer_deadline,
(void *)p);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
171,6 → 173,23
kern_raise(XDEADLINE_MISS,p);
}
 
/*+ this function is called when a task finishes its delay +*/
static void RM_timer_delay(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
 
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RM_READY;
q_insert(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
event_need_reschedule();
}
 
 
static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) {
203,7 → 222,7
static void RM_level_status(LEVEL l)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->ready);
PID p = lev->ready;
 
kern_printf("Wcet Check : %s\n",
onoff(lev->flags & RM_ENABLE_WCET_CHECK));
221,10 → 240,10
proc_table[p].name,
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
RM_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->ready);
p = proc_table[p].next;
}
 
for (p=0; p<MAX_PROC; p++)
235,8 → 254,8
proc_table[p].name,
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
RM_status_to_a(proc_table[p].status));
}
 
255,7 → 274,7
kern_printf(") ");
}
*/
return iq_query_first(&lev->ready);
return (PID)lev->ready;
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
286,7 → 305,7
 
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m;
 
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit;
proc_table[p].priority = lev->period[p] = h->mit;
 
if (h->periodicity == APERIODIC)
lev->flag[p] = RM_FLAG_SPORADIC;
347,6 → 366,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RM_task_dispatch(LEVEL l, PID p, int nostop)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
356,7 → 383,17
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
q_extract(p, &lev->ready);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void RM_task_epilogue(LEVEL l, PID p)
373,7 → 410,7
}
else {
/* the task has been preempted. it returns into the ready queue... */
iq_priority_insert(p,&lev->ready);
q_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
}
381,7 → 418,6
static void RM_task_activate(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
struct timespec *temp;
 
if (proc_table[p].status == RM_WAIT) {
kern_raise(XACTIVATION,p);
398,16 → 434,16
/* see also RM_timer_deadline */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
 
temp = iq_query_timespec(p, &lev->ready);
TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], temp);
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority,
&proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
q_insert(p,&lev->ready);
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(temp,
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
RM_timer_deadline,
(void *)p);
}
421,7 → 457,7
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
q_insert(p,&lev->ready);
}
 
static void RM_task_extract(LEVEL l, PID p)
480,7 → 516,22
correctly the task state to sleep... */
}
 
static void RM_task_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* equal to RM_task_endcycle */
proc_table[p].status = RM_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RM_timer_delay,
(void *)p);
}
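/*
  Illustrative summary of the task_delay mechanism shared by the scheduling
  modules: the task is parked in the module-specific *_DELAY state, a
  one-shot kernel event is posted at "now + usdelay" via kern_event_post(),
  and the matching *_timer_delay handler later reinserts the task into the
  ready queue and calls event_need_reschedule().
*/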
 
/* Guest Functions
These functions manages a JOB_TASK_MODEL, that is used to put
a guest task in the RM ready queue. */
493,9 → 544,9
/* if the RM_guest_create is called, then the pclass must be a
valid pclass. */
 
 
*iq_query_timespec(p,&lev->ready) = job->deadline;
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline);
lev->deadline_timer[p] = -1;
 
if (job->noraiseexc)
503,7 → 554,7
else
lev->flag[p] = 0;
 
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;
proc_table[p].priority = lev->period[p] = job->period;
 
/* there is no bandwidth guarantee at this level, it is performed
by the level that inserts guest tasks... */
525,7 → 576,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
q_extract(p, &lev->ready);
}
 
static void RM_guest_epilogue(LEVEL l, PID p)
533,7 → 584,7
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* the task has been preempted. it returns into the ready queue... */
iq_priority_insert(p,&lev->ready);
q_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
 
542,14 → 593,15
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
iq_priority_insert(p,&lev->ready);
q_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
 
/* Set the deadline timer */
if (!(lev->flag[p] & RM_FLAG_NORAISEEXC))
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
RM_timer_guest_deadline,
(void *)p);
 
}
 
static void RM_guest_insert(LEVEL l, PID p)
557,7 → 609,7
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
iq_priority_insert(p,&lev->ready);
q_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
 
582,9 → 634,13
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
if (proc_table[p].status == RM_READY)
{
iq_extract(p, &lev->ready);
q_extract(p, &lev->ready);
//kern_printf("(g_end rdy extr)");
}
else if (proc_table[p].status == RM_DELAY) {
event_delete(proc_table[p].delay_timer);
proc_table[p].delay_timer = NIL; /* paranoia */
}
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
598,9 → 654,25
static void RM_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RM_guest_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* equal to RM_task_endcycle */
proc_table[p].status = RM_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RM_timer_delay,
(void *)p);
}
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
650,6 → 722,7
lev->l.task_endcycle = RM_task_endcycle;
lev->l.task_end = RM_task_end;
lev->l.task_sleep = RM_task_sleep;
lev->l.task_delay = RM_task_delay;
 
lev->l.guest_create = RM_guest_create;
lev->l.guest_detach = RM_guest_detach;
661,6 → 734,7
lev->l.guest_endcycle = RM_guest_endcycle;
lev->l.guest_end = RM_guest_end;
lev->l.guest_sleep = RM_guest_sleep;
lev->l.guest_delay = RM_guest_delay;
 
/* fill the RM descriptor part */
for(i=0; i<MAX_PROC; i++) {
669,7 → 743,7
lev->flag[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
lev->ready = NIL;
lev->flags = flags & 0x07;
lev->U = 0;
}
/shark/trunk/kernel/modules/ps.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ps.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: ps.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the aperiodic server PS (Polling Server)
122,7 → 122,7
int Cs; /*+ server capacity +*/
int availCs; /*+ server avail time +*/
 
IQUEUE wait; /*+ the wait queue of the PS +*/
QQUEUE wait; /*+ the wait queue of the PS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
167,8 → 167,8
was not any other task to be put in the ready queue
... we are now activating the next task */
if (lev->availCs > 0 && lev->activated == NIL) {
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
if (qq_queryfirst(&lev->wait) != NIL) {
lev->activated = qq_getfirst(&lev->wait);
PS_activation(lev);
event_need_reschedule();
}
219,7 → 219,7
static void PS_level_status(LEVEL l)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->wait);
PID p = qq_queryfirst(&lev->wait);
 
kern_printf("On-line guarantee : %s\n",
onoff(lev->flags & PS_ENABLE_GUARANTEE_EDF ||
231,8 → 231,8
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
lev->nact[lev->activated],
PS_status_to_a(proc_table[lev->activated].status));
 
241,7 → 241,7
p,
proc_table[p].name,
PS_status_to_a(proc_table[p].status));
p = iq_query_next(p, &lev->wait);
p = proc_table[p].next;
}
}
 
262,7 → 262,7
if (lev->flags & PS_BACKGROUND_BLOCK)
return NIL;
else
return iq_query_first(&lev->wait);
return qq_queryfirst(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
316,6 → 316,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void PS_task_dispatch(LEVEL l, PID p, int nostop)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
327,7 → 335,7
to exe before calling task_dispatch. we have to check
lev->activated != p instead */
if (lev->activated != p) {
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
//kern_printf("#%d#",p);
}
else {
344,6 → 352,16
}
 
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void PS_task_epilogue(LEVEL l, PID p)
379,7 → 397,7
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
lev->activated = NIL;
}
390,7 → 408,7
level_table[ lev->scheduling_level ]->
guest_epilogue(lev->scheduling_level,p);
} else { //kern_printf("Û2");
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
}
411,7 → 429,7
PS_activation(lev);
}
else {
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
}
431,7 → 449,7
 
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody executes with the PS before... */
iq_insertfirst(p, &lev->wait);
qq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
 
468,18 → 486,18
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
 
if (lev->nact[p] > 0)
{
lev->nact[p]--;
iq_insertlast(p, &lev->wait);
qq_insertlast(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
else
proc_table[p].status = SLEEP;
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
506,9 → 524,9
guest_end(lev->scheduling_level,p);
 
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
q_insertfirst(p,&freedesc);
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
536,18 → 554,38
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
iq_extract(p, &lev->wait);
qq_extract(p, &lev->wait);
 
proc_table[p].status = SLEEP;
 
lev->activated = iq_getfirst(&lev->wait);
lev->activated = qq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
PS_activation(lev);
}
static void PS_task_delay(LEVEL l, PID p, TIME usdelay)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;
 
/* update the server capacity */
if (lev->flags & PS_BACKGROUND)
lev->flags &= ~PS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}
 
/* I hope no delay when owning a mutex... */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
static int PS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
578,9 → 616,12
static void PS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
 
653,6 → 694,7
lev->l.task_endcycle = PS_task_endcycle;
lev->l.task_end = PS_task_end;
lev->l.task_sleep = PS_task_sleep;
lev->l.task_delay = PS_task_delay;
 
lev->l.guest_create = PS_guest_create;
lev->l.guest_detach = PS_guest_detach;
664,6 → 706,7
lev->l.guest_endcycle = PS_guest_endcycle;
lev->l.guest_end = PS_guest_end;
lev->l.guest_sleep = PS_guest_sleep;
lev->l.guest_delay = PS_guest_delay;
 
/* fill the PS descriptor part */
 
675,7 → 718,7
 
lev->period = per;
 
iq_init(&lev->wait, &freedesc, 0);
qq_init(&lev->wait);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / per) * Cs;
/shark/trunk/kernel/modules/rr.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: rr.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
This file contains the scheduling module RR (Round Robin)
63,12 → 63,13
 
/*+ Status used in the level +*/
#define RR_READY MODULE_STATUS_BASE
#define RR_DELAY MODULE_STATUS_BASE+1
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
IQUEUE ready; /*+ the ready queue +*/
QQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
84,10 → 85,30
 
switch (status) {
case RR_READY: return "RR_Ready";
case RR_DELAY: return "RR_Delay";
default : return "RR_Unknown";
}
}
 
/*+ this function is called when a task finishes its delay +*/
static void RR_timer_delay(void *par)
{
PID p = (PID) par;
RR_level_des *lev;
 
lev = (RR_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RR_READY;
qq_insertlast(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
104,7 → 125,7
static void RR_level_status(LEVEL l)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
PID p = iq_query_first(&lev->ready);
PID p = qq_queryfirst(&lev->ready);
 
kern_printf("Slice: %d \n", lev->slice);
 
111,7 → 132,7
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RR_status_to_a(proc_table[p].status));
p = iq_query_next(p,&lev->ready);
p = proc_table[p].next;
}
 
for (p=0; p<MAX_PROC; p++)
134,14 → 155,14
PID p;
 
for (;;) {
p = iq_query_first(&lev->ready);
p = qq_queryfirst(&lev->ready);
if (p == -1)
return p;
 
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_extract(p,&lev->ready);
iq_insertlast(p,&lev->ready);
qq_extract(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
else
return p;
194,6 → 215,14
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RR_task_dispatch(LEVEL l, PID p, int nostop)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
201,7 → 230,20
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
qq_extract(p, &lev->ready);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
// if (nostop) kern_printf("Û");
// kern_printf("(RR d %d)",nostop);
}
 
static void RR_task_epilogue(LEVEL l, PID p)
212,11 → 254,11
qqueue position */
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
else
/* curr is >0, so the running task have to run for another curr usec */
iq_insertfirst(p,&lev->ready);
qq_insertfirst(p,&lev->ready);
 
proc_table[p].status = RR_READY;
}
234,7 → 276,7
 
/* Insert task in the correct position */
proc_table[p].status = RR_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
 
static void RR_task_insert(LEVEL l, PID p)
246,7 → 288,7
 
/* Insert task in the correct position */
proc_table[p].status = RR_READY;
iq_insertlast(p,&lev->ready);
qq_insertlast(p,&lev->ready);
}
 
static void RR_task_extract(LEVEL l, PID p)
276,7 → 318,7
 
/* we insert the task in the free queue */
proc_table[p].status = FREE;
iq_insertlast(p,&freedesc);
q_insert(p,&freedesc);
}
 
static void RR_task_sleep(LEVEL l, PID p)
284,6 → 326,23
proc_table[p].status = SLEEP;
}
 
static void RR_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RR_level_des *lev = (RR_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to RR_task_endcycle */
proc_table[p].status = RR_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RR_timer_delay,
(void *)p);
}
 
 
static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
314,9 → 373,12
static void RR_guest_sleep(LEVEL l, PID p)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
394,6 → 456,7
lev->l.task_endcycle = RR_task_endcycle;
lev->l.task_end = RR_task_end;
lev->l.task_sleep = RR_task_sleep;
lev->l.task_delay = RR_task_delay;
 
lev->l.guest_create = RR_guest_create;
lev->l.guest_detach = RR_guest_detach;
405,9 → 468,10
lev->l.guest_endcycle = RR_guest_endcycle;
lev->l.guest_end = RR_guest_end;
lev->l.guest_sleep = RR_guest_sleep;
lev->l.guest_delay = RR_guest_delay;
 
/* fill the RR descriptor part */
iq_init(&lev->ready, &freedesc, 0);
qq_init(&lev->ready);
 
if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE;
if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE;
/shark/trunk/kernel/modules/dummy.c
20,11 → 20,11
 
/**
------------
CVS : $Id: dummy.c,v 1.3 2002-11-11 08:32:06 pj Exp $
CVS : $Id: dummy.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
This file contains the Dummy scheduling module
171,6 → 171,9
static void dummy_task_sleep(LEVEL l, PID p)
{ kern_printf("Dummy6"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_delay(LEVEL l, PID p, TIME tickdelay)
{ kern_printf("Dummy7"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static int dummy_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_printf("Dummy8"); kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
201,7 → 204,12
static void dummy_guest_sleep(LEVEL l, PID p)
{ kern_printf("Dummyg"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_printf("Dummyh"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
 
 
 
/*+ Dummy task must be present & cannot be killed; +*/
static TASK dummy()
{
290,6 → 298,7
lev->l.task_endcycle = dummy_task_endcycle;
lev->l.task_end = dummy_task_end;
lev->l.task_sleep = dummy_task_sleep;
lev->l.task_delay = dummy_task_delay;
 
lev->l.guest_create = dummy_guest_create;
lev->l.guest_detach = dummy_guest_detach;
301,6 → 310,7
lev->l.guest_endcycle = dummy_guest_endcycle;
lev->l.guest_end = dummy_guest_end;
lev->l.guest_sleep = dummy_guest_sleep;
lev->l.guest_delay = dummy_guest_delay;
 
/* the dummy process will be created at init_time.
see also dummy_level_accept_model,dummy_create */
/shark/trunk/kernel/modules/nopm.c
20,11 → 20,11
 
/**
------------
CVS : $Id: nopm.c,v 1.2 2002-11-11 08:32:06 pj Exp $
CVS : $Id: nopm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
See modules/nopm.h.
73,7 → 73,7
mutex_t structure */
typedef struct {
PID owner;
IQUEUE blocked;
QQUEUE blocked;
int counter;
} NOPM_mutex_t;
 
108,12 → 108,12
kern_printf("----------------------\n");
for(i=0;i<index;i++) {
ptr=table[i]->opt;
if (!iq_isempty(&ptr->blocked)) {
if (ptr->blocked.first!=NIL) {
kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]);
j=iq_query_first(&ptr->blocked);
j=ptr->blocked.first;
while (j!=NIL) {
kern_printf("%i ",(int)j);
j=iq_query_next(j, &ptr->blocked);
j=proc_table[j].next;
}
kern_printf("\n");
} else {
181,7 → 181,7
return (ENOMEM);
 
p->owner = NIL;
iq_init(&p->blocked, &freedesc, 0);
qq_init(&p->blocked);
p->counter=0;
m->mutexlevel = l;
254,7 → 254,7
 
/* we insert the task in the semaphore queue */
proc_table[exec_shadow].status = NOPM_WAIT;
iq_insertlast(exec_shadow,&p->blocked);
qq_insertlast(exec_shadow,&p->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
327,7 → 327,7
 
/* the mutex is mine, pop the first task to extract */
for (;;) {
e = iq_getfirst(&p->blocked);
e = qq_getfirst(&p->blocked);
if (e == NIL) {
p->owner = NIL;
break;
/shark/trunk/kernel/modules/sem.c
20,11 → 20,11
 
/**
------------
CVS : $Id: sem.c,v 1.2 2002-11-11 08:32:07 pj Exp $
CVS : $Id: sem.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:07 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
This file contains the Hartik 3.3.1 Semaphore functions
79,7 → 79,7
char *name; /* a name, for named semaphores */
int index; /* an index for sem_open, containing the sem number */
int count; /* the semaphore counter */
IQUEUE blocked; /* the blocked processes queue */
QQUEUE blocked; /* the blocked processes queue */
int next; /* the semaphore queue */
BYTE used; /* 1 if the semaphore is used */
} sem_table[SEM_NSEMS_MAX];
91,7 → 91,7
int sem; /* the semaphore on which the process is blocked */
} sp_table[MAX_PROC];
 
static int free_sem; /* Queue of free sem */
static QUEUE free_sem; /* Queue of free sem */
 
 
 
112,7 → 112,7
task_testcancel */
 
/* extract the process from the semaphore queue... */
iq_extract(i,&sem_table[ sp_table[i].sem ].blocked);
qq_extract(i,&sem_table[ sp_table[i].sem ].blocked);
 
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);
134,7 → 134,7
sem_table[i].name = NULL;
sem_table[i].index = i;
sem_table[i].count = 0;
iq_init(&sem_table[i].blocked, &freedesc, 0);
qq_init(&sem_table[i].blocked);
sem_table[i].next = i+1;
sem_table[i].used = 0;
}
160,7 → 160,7
free_sem = sem_table[*sem].next;
sem_table[*sem].name = NULL;
sem_table[*sem].count = value;
iq_init(&sem_table[*sem].blocked, &freedesc, 0);
qq_init(&sem_table[*sem].blocked);
sem_table[*sem].used = 1;
}
else {
254,7 → 254,7
sem_table[sem].name = kern_alloc(strlen((char *)name)+1);
strcpy(sem_table[sem].name, (char *)name);
sem_table[sem].count = j;
iq_init(&sem_table[sem].blocked, &freedesc, 0);
qq_init(&sem_table[sem].blocked);
sem_table[sem].used = 1;
kern_sti();
return &sem_table[sem].index;
378,7 → 378,7
sp_table[exec_shadow].sem = *s;
 
/* ...and put it in sem queue */
iq_insertlast(exec_shadow,&s1->blocked);
qq_insertlast(exec_shadow,&s1->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
504,7 → 504,7
sp_table[exec_shadow].sem = *s;
/* ...and put it in sem queue */
iq_insertlast(exec_shadow,&s1->blocked);
qq_insertlast(exec_shadow,&s1->blocked);
/* and finally we reschedule */
exec = exec_shadow = -1;
554,7 → 554,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
iq_extract(p,&s1->blocked);
qq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
579,7 → 579,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
iq_extract(p,&s1->blocked);
qq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
627,7 → 627,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
iq_extract(p,&s1->blocked);
qq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
657,7 → 657,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
iq_extract(p,&s1->blocked);
qq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
695,16 → 695,16
 
kern_cli();
 
if (iq_isempty(&sem_table[*sem].blocked))
if (sem_table[*sem].blocked.first == NIL)
/* the sem is free */
*sval = sem_table[*sem].count;
else {
/* the sem is busy */
*sval = 0;
p = iq_query_first(&sem_table[*sem].blocked);
p = sem_table[*sem].blocked.first;
do {
(*sval)--;
p = iq_query_next(p, &sem_table[*sem].blocked);
p = proc_table[p].next;
} while (p != NIL);
}
 
/shark/trunk/kernel/modules/nop.c
20,11 → 20,11
 
/**
------------
CVS : $Id: nop.c,v 1.2 2002-11-11 08:32:06 pj Exp $
CVS : $Id: nop.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:06 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
Binary Semaphores. see nop.h for more details...
73,7 → 73,7
mutex_t structure */
typedef struct {
PID owner;
IQUEUE blocked;
QQUEUE blocked;
} NOP_mutex_t;
 
 
124,7 → 124,7
return (ENOMEM);
 
p->owner = NIL;
iq_init(&p->blocked, &freedesc, 0);
qq_init(&p->blocked);
 
m->mutexlevel = l;
m->opt = (void *)p;
192,7 → 192,7
 
/* we insert the task in the semaphore queue */
proc_table[exec_shadow].status = NOP_WAIT;
iq_insertlast(exec_shadow,&p->blocked);
qq_insertlast(exec_shadow,&p->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
253,7 → 253,7
proc_table[exec_shadow].context = kern_context_save();
 
/* the mutex is mine, pop the first task to extract */
p->owner = iq_getfirst(&p->blocked);
p->owner = qq_getfirst(&p->blocked);
if (p->owner != NIL) {
l = proc_table[p->owner].task_level;
level_table[l]->task_insert(l,p->owner);
/shark/trunk/kernel/int_sem.c
18,11 → 18,11
 
/**
------------
CVS : $Id: int_sem.c,v 1.2 2002-11-11 08:34:08 pj Exp $
CVS : $Id: int_sem.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Internal semaphores.
68,7 → 68,7
void internal_sem_init(internal_sem_t *s, int value)
{
s->count = value;
iq_init(&s->blocked,&freedesc,0);
qq_init(&s->blocked);
}
 
void internal_sem_wait(internal_sem_t *s)
106,7 → 106,7
 
/* we insert the task in the semaphore queue */
proc_table[exec_shadow].status = INTERNAL_SEM_WAIT;
iq_insertlast(exec_shadow,&s->blocked);
qq_insertlast(exec_shadow,&s->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
148,7 → 148,7
register PID p;
register LEVEL l;
 
p = iq_getfirst(&s->blocked);
p = qq_getfirst(&s->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
 
/shark/trunk/kernel/mqueue.c
18,11 → 18,11
 
/**
------------
CVS : $Id: mqueue.c,v 1.2 2002-11-11 08:34:09 pj Exp $
CVS : $Id: mqueue.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:09 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
POSIX message queues
90,8 → 90,8
correct bit is set */
 
/* the blocked processes queues */
IQUEUE blocked_send;
IQUEUE blocked_rcv;
QQUEUE blocked_send;
QQUEUE blocked_rcv;
 
int next; /* the mq queue */
} mq_table[MQ_OPEN_MAX];
105,7 → 105,7
if the task is not blocked...) */
} mqproc_table[MAX_PROC];
 
static int free_mq; /* Queue of free message queue descriptors */
static QUEUE free_mq; /* Queue of free message queue descriptors */
 
mqd_t mq_open(const char *name, int oflag, ...)
{
168,8 → 168,8
mq_table[mq].maxmsg = MQ_DEFAULT_MAXMSG;
mq_table[mq].msgsize = MQ_DEFAULT_MSGSIZE;
}
iq_init(&mq_table[mq].blocked_send, &freedesc, 0);
iq_init(&mq_table[mq].blocked_rcv, &freedesc, 0);
qq_init(&mq_table[mq].blocked_send);
qq_init(&mq_table[mq].blocked_rcv);
 
mq_table[mq].count = 0;
mq_table[mq].start = -1;
320,7 → 320,7
/* the task that has to be killed is waiting on a mq_send */
 
/* we have to extract the task from the blocked queue... */
iq_extract(i,&mq_table[mqproc_table[i].mqdes].blocked_send);
qq_extract(i,&mq_table[mqproc_table[i].mqdes].blocked_send);
 
/* and the task has to be reinserted into the ready queues, so it
will fall into task_testcancel */
334,7 → 334,7
/* the task that has to be killed is waiting on a mq_receive */
 
/* we have to extract the task from the blocked queue... */
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv);
qq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv);
 
/* and the task has to be reinserted into the ready queues, so it
will fall into task_testcancel */
357,7 → 357,7
mqproc_table[exec_shadow].intsig = 1;
 
/* we have to extract the task from the blocked queue... */
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_send);
qq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_send);
 
/* and the task has to be reinserted into the ready queues, so it
will fall into task_testcancel */
373,7 → 373,7
mqproc_table[exec_shadow].intsig = 1;
 
/* we have to extract the task from the blocked queue... */
iq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv);
qq_extract(i, &mq_table[mqproc_table[i].mqdes].blocked_rcv);
 
/* and the task has to be reinserted into the ready queues, so it
will fall into task_testcancel */
463,7 → 463,7
 
/* we insert the task in the message queue */
proc_table[exec_shadow].status = WAIT_MQSEND;
iq_priority_insert(exec_shadow,&mq_table[mqdes].blocked_send);
qq_insert(exec_shadow,&mq_table[mqdes].blocked_send);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
504,7 → 504,7
/* the mq was empty */
PID p;
 
p = iq_getfirst(&mq_table[mqdes].blocked_rcv);
p = qq_getfirst(&mq_table[mqdes].blocked_rcv);
 
if ( p != NIL) {
/* The first blocked task has to be woken up */
629,7 → 629,7
 
/* we insert the task into the message queue */
proc_table[exec_shadow].status = WAIT_MQRECEIVE;
iq_priority_insert(exec_shadow,&mq_table[mqdes].blocked_rcv);
qq_insert(exec_shadow,&mq_table[mqdes].blocked_rcv);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
671,7 → 671,7
returnvalue = mq_table[mqdes].mq_info[ msg ].msglen;
 
/* if the mq was full, there may be a task into blocked-send queue */
p = iq_getfirst(&mq_table[mqdes].blocked_send);
p = qq_getfirst(&mq_table[mqdes].blocked_send);
 
if ( p != NIL) {
/* The first blocked task on send has to be woken up */
/shark/trunk/kernel/qqueue.c
0,0 → 1,252
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: qqueue.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
This file contains the system queue management functions.
The functions use the general process descriptor table entries, in
particular the next & prev fields, to maintain the queue order; the
insertion is based on the priority field, added to the generic process
descriptor to simplify the implementation of many schedulers (such as
EDF, RM, DM, etc.)

There are 2 queue types:
QUEUE -> a simple queue with only a head.
QQUEUE -> a queue that manages both the head and the tail of the queue.

... and 2 queue function sets, beginning with q_ if related to the type
QUEUE, and qq_ if related to the other type.

The queue insertion is made by the following functions:
q_insert, qq_insert -> insertion based on the priority field.
q_timespec_insert, qq_timespec_insert -> same as above, but using
the timespec_priority field.
q_insertfirst, qq_insertfirst -> insertion in the first position of the queue.
 
**/
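/*
  Minimal usage sketch (illustrative only, not part of this module): a
  scheduling level typically declares one QQUEUE as its ready queue and
  drives it with the functions defined below.  The example_* names and the
  PID parameters are assumptions, the tasks' priority fields are supposed
  to be already set, and the q_/qq_ prototypes from kernel/func.h are
  assumed to be visible.
*/
static QQUEUE example_ready;

void example_qqueue_usage(PID a, PID b)
{
  PID head;

  qq_init(&example_ready);               /* head and tail set to NIL       */
  qq_insertlast(a, &example_ready);      /* append a at the tail           */
  qq_insert(b, &example_ready);          /* insert b by its priority field */
  head = qq_getfirst(&example_ready);    /* extract the head, NIL if empty */
  (void)head;
}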
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/var.h>
 
/*
*
* QQUEUE Functions
*
*/
 
/*+ This function initializes a QQUEUE +*/
void qq_init(QQUEUE *que)
{
que->first = NIL;
que->last = NIL;
}
 
 
/*+
This function inserts the task with PID i in the queue que.
The insertion is made respecting the priority field
(the first item in the queue has the lowest priority value).
+*/
void qq_insert(PID i, QQUEUE *que)
{
DWORD prio;
PID p,q;
 
p = NIL;
q = que->first;
prio = proc_table[i].priority;
 
while ((q != NIL) && (prio >= proc_table[q].priority)) {
p = q;
q = proc_table[q].next;
}
 
if (p != NIL)
proc_table[p].next = i;
else
que->first = i;
 
if (q != NIL)
proc_table[q].prev = i;
else
que->last = i;
 
proc_table[i].next = q;
proc_table[i].prev = p;
}
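/*
  Illustrative note: because the scan above uses
  "prio >= proc_table[q].priority", a new task is placed after every task
  that already has the same priority value, so equal-priority tasks keep
  FIFO order.  Assuming priorities 10, 10 and 5 for PIDs 1, 2 and 3:

      qq_insert(1, &que);      que: 1
      qq_insert(2, &que);      que: 1, 2      (same value, stays behind 1)
      qq_insert(3, &que);      que: 3, 1, 2   (lower value goes first)
*/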
 
/*+
This function inserts the task with PID i in the queue que.
The insertion is made respecting the timespec_priority field
(the first item in the queue has the earliest time value).
+*/
void qq_timespec_insert(PID i, QQUEUE *que)
{
struct timespec prio;
PID p,q;
 
p = NIL;
q = que->first;
TIMESPEC_ASSIGN(&prio, &proc_table[i].timespec_priority);
 
 
while ((q != NIL) &&
!TIMESPEC_A_LT_B(&prio, &proc_table[q].timespec_priority)) {
p = q;
q = proc_table[q].next;
}
 
if (p != NIL)
proc_table[p].next = i;
else
que->first = i;
 
if (q != NIL)
proc_table[q].prev = i;
else
que->last = i;
 
proc_table[i].next = q;
proc_table[i].prev = p;
}
 
/*+
This function extracts the task i from the queue que.
It does not check whether the task i is really in the queue que,
so be careful!
+*/
void qq_extract(PID i, QQUEUE *que)
{
PID p,q;
 
p = proc_table[i].prev;
q = proc_table[i].next;
 
if (p != NIL)
proc_table[p].next = proc_table[i].next;
else
que->first = q;
 
if (q != NIL)
proc_table[q].prev = proc_table[i].prev;
else
que->last = p;
 
}
 
/*+
This function extracts the first task from the queue que and returns it,
or returns NIL if the queue is empty.
+*/
PID qq_getfirst(QQUEUE *q)
{
PID p = q->first;
 
if (p == NIL) return(NIL);
q->first = proc_table[q->first].next;
 
if (q->first != NIL)
proc_table[q->first].prev = NIL;
else
q->last = NIL;
 
return(p);
}
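/*
  Illustrative sketch of the peek-then-pop idiom built on qq_queryfirst()
  (defined below) and qq_getfirst(), as used by the polling server in
  kernel/modules/ps.c; the names "wait" and "activated" are assumptions:

      if (qq_queryfirst(&wait) != NIL)       peek: is any task waiting?
        activated = qq_getfirst(&wait);      pop the head task to serve
*/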
 
/*+
This function inserts the task with PID p in the first position
of the queue que, regardless of the priority field.
+*/
 
void qq_insertfirst(PID p, QQUEUE *que)
{
if (que->first != NIL) {
proc_table[p].next = que->first;
proc_table[que->first].prev = p;
}
else {
que->last = p;
proc_table[p].next = NIL;
}
proc_table[p].prev = NIL;
que->first = p;
}
 
/*+
This function inserts the task with PID p in the last position
of the queue que, regardless of the priority field.
+*/
 
void qq_insertlast(PID p, QQUEUE *que)
{
if (que->last != NIL) {
proc_table[p].prev = que->last;
proc_table[que->last].next = p;
}
else {
que->first = p;
proc_table[p].prev = NIL;
}
proc_table[p].next = NIL;
que->last = p;
}
 
/*+
This function returns the first task in the queue que, without removing it
+*/
PID qq_queryfirst(QQUEUE *q)
{
return q->first;
}
 
/*+
This function returns the last task in the queue que, without removing it
+*/
PID qq_querylast(QQUEUE *q)
{
return q->last;
}
 
/shark/trunk/kernel/conditio.c
20,11 → 20,11
 
/**
------------
CVS : $Id: conditio.c,v 1.2 2002-11-11 08:34:08 pj Exp $
CVS : $Id: conditio.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
This file contains the condition variables handling functions.
59,7 → 59,6
#include <kernel/var.h>
#include <kernel/func.h>
#include <errno.h>
#include <kernel/iqueue.h>
 
/*---------------------------------------------------------------------*/
/* Condition variables */
77,8 → 76,8
/* if the task is waiting on a condition variable, we have to extract it
from the waiters queue, then set the KILL_REQUEST flag, and reinsert
the task into the ready queue so it can reacquire the mutex and die */
iq_extract(i,&proc_table[i].cond_waiting->waiters);
if (iq_isempty(&proc_table[i].cond_waiting->waiters))
q_extract(i,&proc_table[i].cond_waiting->waiters);
if (proc_table[i].cond_waiting->waiters == NIL)
proc_table[i].cond_waiting->used_for_waiting = NULL;
proc_table[i].cond_waiting = NULL;
 
103,8 → 102,7
register_cancellation_point(condition_cancellation_point, NULL);
}
 
iq_init (&cond->waiters, &freedesc, 0);
 
cond->waiters = NIL;
cond->used_for_waiting = NULL;
 
return 0;
112,7 → 110,7
 
int cond_destroy(cond_t *cond)
{
if (!iq_isempty(&cond->waiters))
if (cond->waiters != NIL)
return (EBUSY);
 
return 0;
125,8 → 123,8
 
proc_table[exec_shadow].context = kern_context_save();
 
if (!iq_isempty(&cond->waiters)) {
p = iq_getfirst(&cond->waiters);
if (cond->waiters != NIL) {
p = q_getfirst(&cond->waiters);
 
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
145,13 → 143,13
 
proc_table[exec_shadow].context = kern_context_save();
 
if (!iq_isempty(&cond->waiters)) {
if (cond->waiters != NIL) {
do {
p = iq_getfirst(&cond->waiters);
p = q_getfirst(&cond->waiters);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
} while(!iq_isempty(&cond->waiters));
} while(cond->waiters != NIL);
 
scheduler();
}
216,7 → 214,7
 
/* we insert the task in the condition queue */
proc_table[exec_shadow].status = WAIT_COND;
iq_priority_insert(exec_shadow,&cond->waiters);
q_insert(exec_shadow,&cond->waiters);
 
/* then, we store in the process descriptor the condition on which
the task is blocked... (if the task is killed while it is waiting
244,7 → 242,7
if (proc_table[exec_shadow].cond_waiting != NULL) {
proc_table[exec_shadow].cond_waiting = NULL;
 
if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
if (cond->waiters == NIL) cond->used_for_waiting = NULL;
}
task_preempt();
 
270,8 → 268,8
PID p = (PID)arg;
LEVEL l;
 
iq_extract(p,&proc_table[p].cond_waiting->waiters);
if (iq_isempty(&proc_table[p].cond_waiting->waiters))
q_extract(p,&proc_table[p].cond_waiting->waiters);
if (proc_table[p].cond_waiting->waiters == NIL)
proc_table[p].cond_waiting->used_for_waiting = NULL;
proc_table[p].cond_waiting = NULL;
 
342,7 → 340,7
 
/* we insert the task in the condition queue */
proc_table[exec_shadow].status = WAIT_COND;
iq_priority_insert(exec_shadow,&cond->waiters);
q_insert(exec_shadow,&cond->waiters);
 
/* then, we store in the process descriptor the condition on which
the task is blocked... (if the task is killed while it is waiting
381,7 → 379,7
if (proc_table[exec_shadow].cond_waiting != NULL) {
proc_table[exec_shadow].cond_waiting = NULL;
 
if (iq_isempty(&cond->waiters)) cond->used_for_waiting = NULL;
if (cond->waiters == NIL) cond->used_for_waiting = NULL;
}
else
/* cond_waiting == NULL if the task is killed or the timer has fired */
/shark/trunk/kernel/exchtxt.c
18,11 → 18,11
 
/**
------------
CVS : $Id: exchtxt.c,v 1.2 2002-11-11 08:34:08 pj Exp $
CVS : $Id: exchtxt.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
**/
 
75,7 → 75,7
kern_cli();
ll_gettime(TIME_EXACT, &t),
kern_printf("\nHartik Exception raised!!!"
"\nTime (s:ns) :%ld:%ld"
"\nTime (s:ns) :%d:%d"
"\nException number:%d"
"\nPID :%d\n",
t.tv_sec, t.tv_nsec, info->si_value.sival_int,
/shark/trunk/kernel/join.c
18,11 → 18,11
 
/**
------------
CVS : $Id: join.c,v 1.2 2002-11-11 08:34:08 pj Exp $
CVS : $Id: join.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
task join and related primitives
188,7 → 188,7
queue */
proc_table[p].control &= ~WAIT_FOR_JOIN;
if (proc_table[p].control & DESCRIPTOR_DISCARDED)
iq_insertfirst(p, &freedesc);
q_insertfirst(p, &freedesc);
 
if (value)
*value = proc_table[p].return_value;
/shark/trunk/kernel/kern.c
18,11 → 18,11
 
/**
------------
CVS : $Id: kern.c,v 1.2 2002-11-11 08:34:08 pj Exp $
CVS : $Id: kern.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
This file contains:
96,7 → 96,7
PID exec; /*+ Task advised by the scheduler +*/
PID exec_shadow; /*+ Currently executing task +*/
 
IQUEUE freedesc; /*+ Free descriptor handled as a queue +*/
QUEUE freedesc; /*+ Free descriptor handled as a queue +*/
 
DWORD sys_tick; /*+ System tick (in usec) +*/
struct timespec schedule_time;
405,8 → 405,8
proc_table[i].shadow = i;
proc_table[i].cleanup_stack= NULL;
proc_table[i].errnumber = 0;
//proc_table[i].priority = 0;
//NULL_TIMESPEC(&proc_table[i].timespec_priority);
proc_table[i].priority = 0;
NULL_TIMESPEC(&proc_table[i].timespec_priority);
proc_table[i].delay_timer = -1;
proc_table[i].wcet = -1;
 
424,17 → 424,12
for (j=0; j<PTHREAD_KEYS_MAX; j++)
proc_table[i].keys[j] = NULL;
}
for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1;
proc_table[MAX_PROC-1].next = NIL;
for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1;
proc_table[0].prev = NIL;
freedesc = 0;
 
/* set up the free descriptor queue */
// for (i = 0; i < MAX_PROC-1; i++) proc_table[i].next = i+1;
// proc_table[MAX_PROC-1].next = NIL;
// for (i = MAX_PROC-1; i > 0; i--) proc_table[i].prev = i-1;
// proc_table[0].prev = NIL;
// freedesc = 0;
iq_init(&freedesc, NULL, 0);
for (i = 0; i < MAX_PROC; i++)
iq_insertlast(i,&freedesc);
 
/* Set up the varius stuff */
global_errnumber = 0;
task_counter = 0;
/shark/trunk/kernel/grpcreat.c
18,11 → 18,11
 
/**
------------
CVS : $Id: grpcreat.c,v 1.2 2002-11-11 08:34:08 pj Exp $
CVS : $Id: grpcreat.c,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:08 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
This file contains:
116,7 → 116,7
 
/* Get a free descriptor */
for (;;) {
i = iq_getfirst(&freedesc);
i = q_getfirst(&freedesc);
 
/* If no one is available abort the system */
if (i == NIL) {
145,7 → 145,7
proc_table[i].sigpending = 0; /* No pending signal for new tasks*/
proc_table[i].shadow = i;
proc_table[i].cleanup_stack = NULL;
// proc_table[i].next = proc_table[i].prev = NIL;
proc_table[i].next = proc_table[i].prev = NIL;
proc_table[i].errnumber = 0; /* meaningless value */
 
/* Fill jet info */
182,7 → 182,7
if (l == sched_levels) {
/* no level can accept the task_model, exit!!! */
proc_table[i].status = FREE;
iq_insertfirst(i,&freedesc);
q_insertfirst(i,&freedesc);
errno = ENO_AVAIL_SCHEDLEVEL;
return -1;
}
194,7 → 194,7
if (level_table[l]->task_create(l,i,m) < 0) {
/* an error occurred in the task_create */
proc_table[i].status = FREE;
iq_insertfirst(i,&freedesc);
q_insertfirst(i,&freedesc);
errno = ETASK_CREATE;
return -1;
}
344,7 → 344,7
proc_table[i].status = FREE;
iq_insertfirst(i,&freedesc);
q_insertfirst(i,&freedesc);
}
 
 
/shark/trunk/kernel/signal.c
18,11 → 18,11
 
/**
------------
CVS : $Id: signal.c,v 1.2 2002-11-11 08:34:09 pj Exp $
CVS : $Id: signal.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:34:09 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
This file contains:
108,7 → 108,7
* A queue of all threads waiting in sigwait.
* It is not static because it is used in task_kill...
*/
static IQUEUE sigwaiters;
static QUEUE sigwaiters;
 
 
/*+ An array of queues of pending signals posted with sigqueue(). +*/
331,7 → 331,7
LEVEL l;
 
/* Reactivate the task... */
iq_extract(p, &sigwaiters);
q_extract(p, &sigwaiters);
 
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
469,9 → 469,9
* in sigwait will have blocked the signals being waited for).
*/
 
for (task = iq_query_first(&sigwaiters);
for (task = sigwaiters;
task != NIL;
task = iq_query_next(task, &sigwaiters)) {
task = proc_table[task].next) {
if (sigismember(&proc_table[task].sigwaiting, signo)) {
LEVEL l;
 
479,7 → 479,7
sigaddset(&proc_table[task].sigpending, signo);
 
/* Reactivate the task... */
iq_extract(task, &sigwaiters);
q_extract(task, &sigwaiters);
l = proc_table[task].task_level;
level_table[l]->task_insert(l,task);
 
614,9 → 614,9
* the FIFO order, and how to prevent lost signals in the case that
* a thread calls sigwait before the woken thread runs and gets it.
*/
for (task = iq_query_first(&sigwaiters);
for (task = sigwaiters;
task != NIL;
task = iq_query_next(task, &sigwaiters)) {
task = proc_table[task].next) {
if (sigismember(&proc_table[task].sigwaiting, signo)) {
LEVEL l;
 
624,7 → 624,7
sigaddset(&proc_table[task].sigpending, signo);
 
/* Reactivate the task... */
iq_extract(task, &sigwaiters);
q_extract(task, &sigwaiters);
 
l = proc_table[task].task_level;
level_table[l]->task_insert(l,task);
697,7 → 697,7
proc_table[p].control |= SIGTIMEOUT_EXPIRED;
 
/* insert the task into the ready queue and extract it from the waiters */
iq_extract(p, &sigwaiters);
q_extract(p, &sigwaiters);
 
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
754,7 → 754,7
* Grab the first queue entry.
*/
sos = sigqueued[thissig];
sigqueued[thissig] = sig_queue[sos].next;
sigqueued[thissig] = sig_queue[sigqueued[thissig]].next;
 
/*
* If that was the last one, reset the process procsigpending.
834,7 → 834,7
* find a thread in sigwait, but it will not be able to wake it up
* until the waitlock is released in the switch code.
*/
iq_insertfirst(exec_shadow, &sigwaiters);
q_insertfirst(exec_shadow, &sigwaiters);
proc_table[exec_shadow].status = WAIT_SIG;
 
if (timeout) {
890,8 → 890,7
/*
* Grab the first queue entry.
*/
sos = sigqueued[thissig];
sigqueued[thissig] = sig_queue[sos].next;
sos = q_getfirst(&sigqueued[thissig]);
 
/*
* If that was the last one, reset the process procsigpending.
1069,7 → 1068,7
l = proc_table[exec_shadow].task_level;
level_table[l]->task_extract(l,exec_shadow);
 
iq_insertfirst(exec_shadow, &sigwaiters);
q_insertfirst(exec_shadow, &sigwaiters);
proc_table[exec_shadow].status = WAIT_SIGSUSPEND;
 
/* and finally we reschedule */
1501,7 → 1500,7
proc_table[i].delay_timer = -1;
}
 
iq_extract(i, &sigwaiters);
q_extract(i, &sigwaiters);
 
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);
1545,7 → 1544,7
 
procsigpending = 0;
 
iq_init(&sigwaiters, &freedesc, 0);
sigwaiters = NIL;
alarm_timer = -1;
 
/* Interrupt handling init */
/shark/trunk/kernel/queue.c
0,0 → 1,191
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: queue.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
------------
 
This file contains the system queue management functions.
The functions use the general process descriptor table entries, in
particular the next & prev fields, to maintain the queue order; the
insertion is based on the priority field, added to the generic process
descriptor to simplify the implementation of many schedulers (such as
EDF, RM, DM, etc.)

There are 2 queue types:
QUEUE -> a simple queue with only a head.
QQUEUE -> a queue that manages both the head and the tail of the queue.

... and 2 queue function sets, beginning with q_ if related to the type
QUEUE, and qq_ if related to the other type.

The queue insertion is made by the following functions:
q_insert, qq_insert -> insertion based on the priority field.
q_timespec_insert, qq_timespec_insert -> same as above, but using
the timespec_priority field.
q_insertfirst, qq_insertfirst -> insertion in the first position of the queue.
 
**/
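/*
  Illustrative sketch (not part of this file): a QUEUE is just the PID of
  its head, so the kernel keeps the free descriptor list "freedesc" in one.
  The allocate/release pair below mirrors its use in kernel/grpcreat.c;
  the example_* names are assumptions and error handling is omitted.
*/
PID example_alloc_descriptor(void)
{
  return q_getfirst(&freedesc);       /* take a free descriptor, NIL if none */
}

void example_release_descriptor(PID i)
{
  proc_table[i].status = FREE;        /* as done when task creation fails    */
  q_insertfirst(i, &freedesc);
}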
 
/*
* Copyright (C) 2000 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#include <kernel/const.h>
#include <sys/types.h>
#include <kernel/var.h>
 
/*
*
* QUEUE Functions
*
*/
 
/*+
This function inserts the task with PID i in the queue que.
The insertion is made respecting the priority field
(the first item in the queue has the lowest priority value).
+*/
 
void q_insert(PID i, QUEUE *que)
{
DWORD prio;
PID p,q;
 
p = NIL;
q = *que;
prio = proc_table[i].priority;
 
while ((q != NIL) && (prio >= proc_table[q].priority)) {
p = q;
q = proc_table[q].next;
}
 
if (p != NIL)
proc_table[p].next = i;
else
*que = i;
 
if (q != NIL) proc_table[q].prev = i;
 
proc_table[i].next = q;
proc_table[i].prev = p;
}
 
/*+
This function inserts the task with PID i in the queue que.
The insertion is made respecting the timespec_priority field
(the first item in the queue has the earliest time value).
+*/
 
void q_timespec_insert(PID i, QUEUE *que)
{
struct timespec prio;
PID p,q;
 
p = NIL;
q = *que;
 
TIMESPEC_ASSIGN(&prio, &proc_table[i].timespec_priority);
 
 
while ((q != NIL) &&
!TIMESPEC_A_LT_B(&prio, &proc_table[q].timespec_priority)) {
p = q;
q = proc_table[q].next;
}
 
if (p != NIL)
proc_table[p].next = i;
else
*que = i;
 
if (q != NIL) proc_table[q].prev = i;
 
proc_table[i].next = q;
proc_table[i].prev = p;
}
 
 
/*+
This function extracts the task i from the queue que.
It does not check whether the task i is really in the queue que,
so be careful!
+*/
void q_extract(PID i, QUEUE *que)
{
PID p,q;
 
p = proc_table[i].prev;
q = proc_table[i].next;
 
if (p == NIL) *que = q;
else proc_table[p].next = proc_table[i].next;
 
if (q != NIL) proc_table[q].prev = proc_table[i].prev;
}
 
/*+
This function extracts the first task from the queue que and returns it,
or returns NIL if the queue is empty.
+*/
PID q_getfirst(QUEUE *que)
{
QUEUE q = *que;
if (*que == NIL) return(NIL);
*que = proc_table[q].next;
if (*que != NIL) proc_table[*que].prev = NIL;
return(q);
}
 
/*+
This function inserts the task with PID i in the first position
of the queue que, regardless of the priority field.
+*/
 
void q_insertfirst(PID i, QUEUE *que)
{
if (*que != NIL) proc_table[*que].prev = i;
proc_table[i].next = *que;
proc_table[i].prev = NIL;
*que = i;
}
 
 
 
/shark/trunk/kernel/makefile
16,6 → 16,7
blkact.o \
cancel.o \
conditio.o \
delay.o \
endcycle.o \
event.o \
exchtxt.o \
36,7 → 37,8
printk.o \
perror.o \
pthread.o \
iqueue.o \
qqueue.o \
queue.o \
signal.o \
sleep.o \
status.o \
48,6 → 50,7
include $(BASE)/config/lib.mk
 
install all clean cleanall depend::
make -C init $@
make -C modules $@
make -C mem $@
 
/shark/trunk/include/kernel/iqueue.h
File deleted
/shark/trunk/include/kernel/func.h
21,11 → 21,11
 
/**
------------
CVS : $Id: func.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: func.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Kernel functions:
89,6 → 89,15
/* if a source uses printk() it should include log.h, not func.h */
#include <kernel/log.h>
 
#if 0
#ifdef __DEBUG_ON__
#define printk(fmt,args...) \
VM_printf(fmt,##args)
#else
#define printk(fmt,args...)
#endif
#endif
 
/*---------------------------------------------------------------------*/
/* Kernel global functions: initialization & termination... */
/*---------------------------------------------------------------------*/
136,7 → 145,7
int set_exchandler_text();
 
/*---------------------------------------------------------------------*/
/* Kernel global functions: scheduler, */
/* Kernel global functions: scheduler, queues */
/*---------------------------------------------------------------------*/
 
/*+ This is the generic scheduler.
148,6 → 157,26
the end of an event list +*/
void event_need_reschedule();
 
/* Simple QUEUE management functions */
void q_insert (PID p, QUEUE *q);
void q_timespec_insert (PID p, QUEUE *q);
void q_extract (PID p, QUEUE *q);
PID q_getfirst ( QUEUE *q);
void q_insertfirst (PID p, QUEUE *q);
 
 
/* QQUEUE management functions */
void qq_init ( QQUEUE *q);
void qq_insert (PID p, QQUEUE *q);
void qq_timespec_insert (PID p, QQUEUE *q);
void qq_extract (PID p, QQUEUE *q);
PID qq_getfirst ( QQUEUE *q);
void qq_insertfirst (PID p, QQUEUE *q);
void qq_insertlast (PID p, QQUEUE *q);
PID qq_queryfirst ( QQUEUE *q);
PID qq_querylast ( QQUEUE *q);
 
 
void task_makefree(void *ret);
void check_killed_async(void);
 
192,6 → 221,15
return ll_context_from();
}
 
 
 
#ifdef __TEST1__
extern int useds;
extern int testactive;
extern struct timespec s_send[];
#endif
 
 
/*+ this functions are called every time a context is changed +*/
void kern_after_dispatch(void);
 
205,6 → 243,10
{
ll_context_to(c);
kern_after_dispatch();
 
#ifdef __TEST1__
if (testactive) ll_gettime(TIME_EXACT,&s_send[useds-1] );
#endif
sti();
}
 
497,6 → 539,9
Pending activations are discarded +*/
void task_sleep(void);
 
/*+ This function suspends the calling task for at least the given delay time +*/
void task_delay(DWORD delay);
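/*+ Hedged usage example: the argument is assumed to be in microseconds,
    matching the usdelay parameter of the task_delay handlers in the
    scheduling modules, e.g. task_delay(250000) suspends the caller for
    at least ~250 ms +*/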
 
/*+ These primitives refer to a group id which is supplied
by the application, not by the kernel +*/
int group_activate(WORD g);
/shark/trunk/include/kernel/int_sem.h
21,11 → 21,11
 
/**
------------
CVS : $Id: int_sem.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: int_sem.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Internal semaphores.
63,13 → 63,12
#define __INT_SEM_H__
 
#include <kernel/types.h>
#include <kernel/iqueue.h>
 
/* this is the structure normally pointed by the opt field in the
mutex_t structure */
typedef struct {
int count;
IQUEUE blocked;
QQUEUE blocked;
} internal_sem_t;
 
 
/shark/trunk/include/kernel/kern.h
21,11 → 21,11
 
/**
------------
CVS : $Id: kern.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: kern.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Main kernel include file.
67,8 → 67,6
//#include <kernel/err.h>
//#include <kernel/exc.h>
#include <kernel/var.h>
#include <kernel/iqueue.h>
#include <kernel/func.h>
 
 
 
/shark/trunk/include/kernel/descr.h
21,11 → 21,11
 
/**
------------
CVS : $Id: descr.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: descr.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Kernel main data structures
70,7 → 70,6
#include <ll/ll.h>
#include <kernel/model.h>
#include <kernel/types.h>
#include <kernel/iqueue.h>
#include <limits.h>
 
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
141,8 → 140,8
struct _task_handler_rec *cleanup_stack;
/*+ The cleanup stack +*/
 
QUEUE next,prev; /*+ Next/Prev Index in the queue +*/
 
 
int errnumber;
 
/* Job Execution Time fields */
175,7 → 174,8
* the generic kernel, with exclusion of delay_timer that is used
* also in cond_timedwait
*/
 
DWORD priority; /*+ A priority field +*/
struct timespec timespec_priority; /*+ Another priority field +*/
int delay_timer; /*+ A field useful to store the delay timer +*/
 
int wcet; /*+ the worst case execution time +*/
219,6 → 219,8
0 if the level can manage the model,
-1 if not +*/
 
// void (*level_init)(); /*+ initialization of the level module +*/
// void (*level_end)(); /*+ level termination (at system end... +*/
void (*level_status)(LEVEL l);/*+ print level statistics... +*/
 
PID (*level_scheduler)(LEVEL l);
284,6 → 286,9
task in the EXE state. +*/
 
 
void (*task_delay)(LEVEL l, PID p,DWORD tickdelay);
 
 
/* guest CALLS:
these functions are called from an Aperiodic Server Level for the tasks
that are inserted in the local queues */
319,6 → 324,7
/*+ the task is killed +*/
 
void (*guest_sleep)(LEVEL l, PID p);
void (*guest_delay)(LEVEL l, PID p, TIME tickdelay);
 
} level_des;
 
432,7 → 438,7
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
 
typedef struct condition_struct {
IQUEUE waiters; /*+ queue for tasks waiting on the condition +*/
QUEUE waiters; /*+ queue for tasks waiting on the condition +*/
mutex_t *used_for_waiting;
} cond_t;
 
/shark/trunk/include/kernel/types.h
21,11 → 21,11
 
/**
------------
CVS : $Id: types.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: types.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
**/
49,9 → 49,27
*
*/
 
/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
HARTIK SYSTEM TYPES
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
 
#ifndef __KERNEL_TYPES_H__
#define __KERNEL_TYPES_H__
 
 
/*+ Used to manage task queues +*/
typedef int QUEUE;
 
/*+ Used to manage task queues with tail +*/
typedef struct {
int first; /*+ first element of a task queue, NIL if empty +*/
int last; /*+ last element of a task queue, NIL if empty +*/
} QQUEUE;
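/*+ Note (illustrative): a QUEUE is empty when it holds NIL, e.g.
        QUEUE free_list = NIL;
    while a QQUEUE is empty when both first and last are NIL, which is
    exactly what qq_init() in kernel/qqueue.c sets up +*/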
 
 
/*+ Used to manage mutex queues +*/
//typedef int MQUEUE;
 
#define TASK void *
 
/*+ ... a task index +*/
/shark/trunk/include/kernel/config.h
21,11 → 21,11
 
/**
------------
CVS : $Id: config.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: config.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Kernel configuration macros:
57,6 → 57,28
#ifndef __KERNEL_CONFIG_H__
#define __KERNEL_CONFIG_H__
 
 
/*+ Define this if you use the CABs... +*/
#define __CAB__
 
/*+ Define this if you use the ports... +*/
#define __PORT__
 
/*+ Define this if you use the tracer... +*/
#define __TRACE__
//#undef __TRACE__
 
/*+ Define this if you want the printk messages... +*/
#define __DEBUG_ON__
#undef __DEBUG_ON__
 
 
/*+ checks the Memory at the kern_mem_init... +*/
#undef __MEM_DEBUG__
 
/*+ defined if we are compiling test1.c with init1.c +*/
//#define __TEST1__
 
/*+ defined if we are compiling testG.c +*/
//#define TESTG
 
/shark/trunk/include/kernel/var.h
21,11 → 21,11
 
/**
------------
CVS : $Id: var.h,v 1.2 2002-11-11 08:36:01 pj Exp $
CVS : $Id: var.h,v 1.1.1.1 2002-03-29 14:12:51 pj Exp $
 
File: $File$
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:36:01 $
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:51 $
------------
 
Kernel global variables
70,7 → 70,7
extern PID exec; /*+ task suggested by the scheduler +*/
extern PID exec_shadow; /*+ task really executed +*/
 
extern IQUEUE freedesc; /*+ Free descriptor handled as a queue +*/
extern QUEUE freedesc; /*+ Free descriptor handled as a queue +*/
 
extern TIME sys_tick; /*+ System tick (in usec) +*/
extern struct timespec schedule_time;