Subversion Repositories shark

Compare Revisions

Rev 29 → Rev 35

/shark/tags/rel_0_2/kernel/modules/edf2.c
File deleted
/shark/tags/rel_0_2/kernel/modules/old/trace.c
File deleted
/shark/tags/rel_0_2/kernel/modules/edf.c
20,11 → 20,11
 
/**
------------
CVS : $Id: edf.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: edf.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
67,7 → 67,6
 
/*+ Status used in the level +*/
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define EDF_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait for the deadline +*/
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait for the deadline +*/
90,7 → 89,7
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
 
QUEUE ready; /*+ the ready queue +*/
IQUEUE ready; /*+ the ready queue +*/
 
int flags; /*+ the init flags... +*/
 
106,7 → 105,6
 
switch (status) {
case EDF_READY : return "EDF_Ready";
case EDF_DELAY : return "EDF_Delay";
case EDF_WCET_VIOLATED: return "EDF_Wcet_Violated";
case EDF_WAIT : return "EDF_Sporadic_Wait";
case EDF_IDLE : return "EDF_Idle";
119,6 → 117,7
{
PID p = (PID) par;
EDF_level_des *lev;
struct timespec *temp;
 
edf_printf("$");
 
128,7 → 127,7
case EDF_ZOMBIE:
/* we finally put the task in the free queue */
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
137,15 → 136,16
/* tracer stuff */
trc_logevent(TRC_INTACTIVATION,&p);
/* similar to EDF_task_activate */
temp = iq_query_timespec(p,&lev->ready);
TIMESPEC_ASSIGN(&proc_table[p].request_time,
&proc_table[p].timespec_priority);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
TIMESPEC_ASSIGN(&proc_table[p].request_time, temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = EDF_READY;
q_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
iq_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000);
edf_printf("(dline p%d ev%d %d.%d)",(int)p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
event_need_reschedule();
printk("el%d|",p);
172,23 → 172,6
kern_raise(XDEADLINE_MISS,p);
}
 
/*+ this function is called when a task finishes its delay +*/
static void EDF_timer_delay(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
 
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = EDF_READY;
q_timespec_insert(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
event_need_reschedule();
}
 
 
static int EDF_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) {
221,7 → 204,7
static void EDF_level_status(LEVEL l)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
PID p = lev->ready;
PID p = iq_query_first(&lev->ready);
 
kern_printf("Wcet Check : %s\n",
onoff(lev->flags & EDF_ENABLE_WCET_CHECK));
239,10 → 222,10
proc_table[p].name,
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
EDF_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->ready);
}
 
for (p=0; p<MAX_PROC; p++)
253,8 → 236,8
proc_table[p].name,
lev->flag[p] & EDF_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
EDF_status_to_a(proc_table[p].status));
}
 
273,7 → 256,7
kern_printf(") ");
}
*/
return (PID)lev->ready;
return iq_query_first(&lev->ready);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
365,14 → 348,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void EDF_task_dispatch(LEVEL l, PID p, int nostop)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
382,17 → 357,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
q_extract(p, &lev->ready);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
iq_extract(p, &lev->ready);
}
 
static void EDF_task_epilogue(LEVEL l, PID p)
409,7 → 374,7
}
else {
/* the task has been preempted. It returns to the ready queue... */
q_timespec_insert(p,&lev->ready);
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
}
417,6 → 382,7
static void EDF_task_activate(LEVEL l, PID p)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
struct timespec *temp;
 
if (proc_table[p].status == EDF_WAIT) {
kern_raise(XACTIVATION,p);
433,19 → 399,19
/* see also EDF_timer_deadline */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
 
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority,
&proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
temp = iq_query_timespec(p, &lev->ready);
TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], temp);
 
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
q_timespec_insert(p,&lev->ready);
iq_timespec_insert(p,&lev->ready);
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)proc_table[p].timespec_priority.tv_sec,(int)proc_table[p].timespec_priority.tv_nsec/1000);
edf_printf("(dline p%d ev%d %d.%d)",p,(int)lev->deadline_timer[p],(int)temp->tv_sec,(int)temp->tv_nsec/1000);
}
 
static void EDF_task_insert(LEVEL l, PID p)
457,7 → 423,7
 
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
q_timespec_insert(p,&lev->ready);
iq_timespec_insert(p,&lev->ready);
}
 
static void EDF_task_extract(LEVEL l, PID p)
518,22 → 484,7
correctly the task state to sleep... */
}
 
static void EDF_task_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* equal to EDF_task_endcycle */
proc_table[p].status = EDF_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
EDF_timer_delay,
(void *)p);
}
 
/* Guest Functions
These functions manage a JOB_TASK_MODEL, which is used to put
a guest task in the EDF ready queue. */
546,7 → 497,7
/* if the EDF_guest_create is called, then the pclass must be a
valid pclass. */
 
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline);
*iq_query_timespec(p, &lev->ready) = job->deadline;
lev->deadline_timer[p] = -1;
 
577,7 → 528,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
q_extract(p, &lev->ready);
iq_extract(p, &lev->ready);
}
 
static void EDF_guest_epilogue(LEVEL l, PID p)
585,7 → 536,7
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* the task has been preempted. It returns to the ready queue... */
q_timespec_insert(p,&lev->ready);
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
 
594,12 → 545,12
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
q_timespec_insert(p,&lev->ready);
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
 
/* Set the deadline timer */
if (!(lev->flag[p] & EDF_FLAG_NORAISEEXC))
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
EDF_timer_guest_deadline,
(void *)p);
 
610,7 → 561,7
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
q_timespec_insert(p,&lev->ready);
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
 
626,7 → 577,7
}
 
static void EDF_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void EDF_guest_end(LEVEL l, PID p)
{
635,13 → 586,9
//kern_printf("EDF_guest_end: dline timer %d\n",lev->deadline_timer[p]);
if (proc_table[p].status == EDF_READY)
{
q_extract(p, &lev->ready);
iq_extract(p, &lev->ready);
//kern_printf("(g_end rdy extr)");
}
else if (proc_table[p].status == EDF_DELAY) {
event_delete(proc_table[p].delay_timer);
proc_table[p].delay_timer = NIL; /* paranoia */
}
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
653,27 → 600,10
}
 
static void EDF_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void EDF_guest_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* equal to EDF_task_endcycle */
proc_table[p].status = EDF_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
EDF_timer_delay,
(void *)p);
}
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
725,7 → 655,6
lev->l.task_endcycle = EDF_task_endcycle;
lev->l.task_end = EDF_task_end;
lev->l.task_sleep = EDF_task_sleep;
lev->l.task_delay = EDF_task_delay;
 
lev->l.guest_create = EDF_guest_create;
lev->l.guest_detach = EDF_guest_detach;
737,7 → 666,6
lev->l.guest_endcycle = EDF_guest_endcycle;
lev->l.guest_end = EDF_guest_end;
lev->l.guest_sleep = EDF_guest_sleep;
lev->l.guest_delay = EDF_guest_delay;
 
/* fill the EDF descriptor part */
for(i=0; i<MAX_PROC; i++) {
746,7 → 674,7
lev->flag[i] = 0;
}
 
lev->ready = NIL;
iq_init(&lev->ready, &freedesc, 0);
lev->flags = flags & 0x07;
lev->U = 0;
}
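
The edf.c hunks above all apply one refactor: the ready list moves from the old q_* QUEUE primitives (links threaded through proc_table, deadline kept in proc_table[p].timespec_priority) to the IQUEUE interface, which apparently stores the per-task timespec key inside the queue object and hands out a pointer to it via iq_query_timespec(). The stand-alone sketch below models that pattern with an ordered-by-deadline insert; every type and function in it is an illustrative stand-in, not the real SHaRK API.

/* Minimal stand-in model of the IQUEUE pattern used above: the queue
   object owns both the links and the per-index timespec key, so the
   scheduler no longer reaches into proc_table for timespec_priority.
   Types and names here are illustrative, not the real SHaRK API. */
#include <stdio.h>

#define MAX_PROC 8
#define NIL      (-1)

struct timespec_ { long tv_sec, tv_nsec; };

typedef struct {
    int first;
    int next[MAX_PROC];
    struct timespec_ key[MAX_PROC];   /* per-index deadline */
} iqueue_t;

static void iq_init(iqueue_t *q) {
    q->first = NIL;
    for (int i = 0; i < MAX_PROC; i++) q->next[i] = NIL;
}

static struct timespec_ *iq_query_timespec(int p, iqueue_t *q) {
    return &q->key[p];                /* caller fills the deadline in place */
}

static int ts_lt(const struct timespec_ *a, const struct timespec_ *b) {
    return a->tv_sec < b->tv_sec ||
          (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
}

/* EDF-style ordered insert on the stored timespec key */
static void iq_timespec_insert(int p, iqueue_t *q) {
    int *link = &q->first;
    while (*link != NIL && ts_lt(&q->key[*link], &q->key[p]))
        link = &q->next[*link];
    q->next[p] = *link;
    *link = p;
}

int main(void) {
    iqueue_t ready;
    iq_init(&ready);
    *iq_query_timespec(2, &ready) = (struct timespec_){5, 0};
    *iq_query_timespec(0, &ready) = (struct timespec_){3, 500};
    iq_timespec_insert(2, &ready);
    iq_timespec_insert(0, &ready);
    for (int p = ready.first; p != NIL; p = ready.next[p])
        printf("task %d dl=%ld.%09ld\n", p,
               ready.key[p].tv_sec, ready.key[p].tv_nsec);
    return 0;
}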
/shark/tags/rel_0_2/kernel/modules/posix.c
20,11 → 20,11
 
/**
------------
CVS : $Id: posix.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: posix.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module compatible with POSIX
66,7 → 66,6
 
/*+ Status used in the level +*/
#define POSIX_READY MODULE_STATUS_BASE
#define POSIX_DELAY MODULE_STATUS_BASE+1
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
73,8 → 72,10
level_des l; /*+ the standard level descriptor +*/
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
int priority[MAX_PROC]; /*+ priority of each task +*/
 
QQUEUE *ready; /*+ the ready queue array +*/
IQUEUE *ready; /*+ the ready queue array +*/
 
int slice; /*+ the level's time slice +*/
 
94,30 → 95,10
 
switch (status) {
case POSIX_READY: return "POSIX_Ready";
case POSIX_DELAY: return "POSIX_Delay";
default : return "POSIX_Unknown";
}
}
 
/*+ this function is called when a task finishes its delay +*/
static void POSIX_timer_delay(void *par)
{
PID p = (PID) par;
POSIX_level_des *lev;
 
lev = (POSIX_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = POSIX_READY;
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int POSIX_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
141,9 → 122,9
for (p=0; p<MAX_PROC; p++)
if (proc_table[p].task_level == l && proc_table[p].status != POSIX_READY
&& proc_table[p].status != FREE )
kern_printf("Pid: %d\t Name: %20s Prio: %3ld Status: %s\n",
kern_printf("Pid: %d\t Name: %20s Prio: %3d Status: %s\n",
p,proc_table[p].name,
proc_table[p].priority,
lev->priority[p],
POSIX_status_to_a(proc_table[p].status));
 
}
164,7 → 145,7
prio = lev->maxpriority;
 
for (;;) {
p = qq_queryfirst(&lev->ready[prio]);
p = iq_query_first(&lev->ready[prio]);
if (p == NIL) {
if (prio) {
prio--;
177,8 → 158,8
if ((proc_table[p].control & CONTROL_CAP) &&
(proc_table[p].avail_time <= 0)) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_extract(p,&lev->ready[prio]);
qq_insertlast(p,&lev->ready[prio]);
iq_extract(p,&lev->ready[prio]);
iq_insertlast(p,&lev->ready[prio]);
}
else
return p;
208,7 → 189,7
proc_table[exec_shadow].task_level == l) {
/* We inherit the scheduling properties if the scheduling level
*is* the same */
proc_table[p].priority = proc_table[exec_shadow].priority;
lev->priority[p] = lev->priority[exec_shadow];
proc_table[p].avail_time = proc_table[exec_shadow].avail_time;
proc_table[p].wcet = proc_table[exec_shadow].wcet;
219,7 → 200,7
lev->nact[p] = (lev->nact[exec_shadow] == -1) ? -1 : 0;
}
else {
proc_table[p].priority = nrt->weight;
lev->priority[p] = nrt->weight;
if (nrt->slice) {
proc_table[p].avail_time = nrt->slice;
254,14 → 235,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void POSIX_task_dispatch(LEVEL l, PID p, int nostop)
{
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
269,18 → 242,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
qq_extract(p, &lev->ready[proc_table[p].priority]);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
iq_extract(p, &lev->ready[lev->priority[p]]);
}
 
static void POSIX_task_epilogue(LEVEL l, PID p)
289,7 → 251,7
 
if (lev->yielding) {
lev->yielding = 0;
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
iq_insertlast(p,&lev->ready[lev->priority[p]]);
}
/* check if the slice is finished and insert the task in the correct
queue position */
296,10 → 258,10
else if (proc_table[p].control & CONTROL_CAP &&
proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
iq_insertlast(p,&lev->ready[lev->priority[p]]);
}
else
qq_insertfirst(p,&lev->ready[proc_table[p].priority]);
iq_insertfirst(p,&lev->ready[lev->priority[p]]);
 
proc_table[p].status = POSIX_READY;
}
320,7 → 282,7
 
/* Insert task in the correct position */
proc_table[p].status = POSIX_READY;
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
iq_insertlast(p,&lev->ready[lev->priority[p]]);
}
 
static void POSIX_task_insert(LEVEL l, PID p)
332,7 → 294,7
 
/* Insert task in the correct position */
proc_table[p].status = POSIX_READY;
qq_insertlast(p,&lev->ready[proc_table[p].priority]);
iq_insertlast(p,&lev->ready[lev->priority[p]]);
}
 
static void POSIX_task_extract(LEVEL l, PID p)
355,7 → 317,7
/* continue!!!! */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
lev->nact[p]--;
qq_insertfirst(p,&lev->ready[proc_table[p].priority]);
iq_insertfirst(p,&lev->ready[lev->priority[p]]);
proc_table[p].status = POSIX_READY;
}
else
370,7 → 332,7
 
/* then, we insert the task in the free queue */
proc_table[p].status = FREE;
q_insert(p,&freedesc);
iq_priority_insert(p,&freedesc);
}
 
static void POSIX_task_sleep(LEVEL l, PID p)
380,59 → 342,39
proc_table[p].status = SLEEP;
}
 
static void POSIX_task_delay(LEVEL l, PID p, TIME usdelay)
{
// POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to POSIX_task_endcycle */
proc_table[p].status = POSIX_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
POSIX_timer_delay,
(void *)p);
}
 
 
static int POSIX_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void POSIX_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void POSIX_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
518,7 → 460,6
lev->l.task_endcycle = POSIX_task_endcycle;
lev->l.task_end = POSIX_task_end;
lev->l.task_sleep = POSIX_task_sleep;
lev->l.task_delay = POSIX_task_delay;
 
lev->l.guest_create = POSIX_guest_create;
lev->l.guest_detach = POSIX_guest_detach;
530,7 → 471,6
lev->l.guest_endcycle = POSIX_guest_endcycle;
lev->l.guest_end = POSIX_guest_end;
lev->l.guest_sleep = POSIX_guest_sleep;
lev->l.guest_delay = POSIX_guest_delay;
 
/* fill the POSIX descriptor part */
for (i = 0; i < MAX_PROC; i++)
538,10 → 478,10
 
lev->maxpriority = prioritylevels -1;
 
lev->ready = (QQUEUE *)kern_alloc(sizeof(QQUEUE) * prioritylevels);
lev->ready = (IQUEUE *)kern_alloc(sizeof(IQUEUE) * prioritylevels);
 
for (x = 0; x < prioritylevels; x++)
qq_init(&lev->ready[x]);
iq_init(&lev->ready[x], &freedesc, 0);
 
if (slice < POSIX_MINIMUM_SLICE) slice = POSIX_MINIMUM_SLICE;
if (slice > POSIX_MAXIMUM_SLICE) slice = POSIX_MAXIMUM_SLICE;
614,7 → 554,7
else
*policy = NRT_FIFO_POLICY;
 
*priority = proc_table[p].priority;
*priority = ((POSIX_level_des *)(level_table[l]))->priority[p];
 
return 0;
}
644,14 → 584,14
else
return EINVAL;
 
if (proc_table[p].priority != priority) {
if (lev->priority[p] != priority) {
if (proc_table[p].status == POSIX_READY) {
qq_extract(p,&lev->ready[proc_table[p].priority]);
proc_table[p].priority = priority;
qq_insertlast(p,&lev->ready[priority]);
iq_extract(p,&lev->ready[lev->priority[p]]);
lev->priority[p] = priority;
iq_insertlast(p,&lev->ready[priority]);
}
else
proc_table[p].priority = priority;
lev->priority[p] = priority;
}
 
return 0;
/shark/tags/rel_0_2/kernel/modules/hartport.c
20,11 → 20,11
 
/**
------------
CVS : $Id: hartport.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: hartport.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the Hartik 3.3.1 Port functions
110,8 → 110,8
struct hash_port htable[MAX_HASH_ENTRY];
struct port_ker port_des[MAX_PORT];
struct port_com port_int[MAX_PORT_INT];
QUEUE freeportdes;
QUEUE freeportint;
int freeportdes;
int freeportint;
 
static int port_installed = 0;
 
548,7 → 548,7
return -1;
}
if (!pd->valid) {
errno = EPORT_UNVALID_DESCR;
errno = EPORT_INVALID_DESCR;
return -1;
}
 
596,7 → 596,7
return -1;
}
if (!pd->valid) {
errno = EPORT_UNVALID_DESCR;
errno = EPORT_INVALID_DESCR;
return -1;
}
#endif
/shark/tags/rel_0_2/kernel/modules/rr2.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr2.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: rr2.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module RR2 (Round Robin) version 2
63,7 → 63,6
 
/*+ Status used in the level +*/
#define RR2_READY MODULE_STATUS_BASE
#define RR2_DELAY MODULE_STATUS_BASE+1
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
71,7 → 70,7
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
 
QQUEUE ready; /*+ the ready queue +*/
IQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
87,30 → 86,10
 
switch (status) {
case RR2_READY: return "RR2_Ready";
case RR2_DELAY: return "RR2_Delay";
default : return "RR2_Unknown";
}
}
 
/*+ this function is called when a task finishes its delay +*/
static void RR2_timer_delay(void *par)
{
PID p = (PID) par;
RR2_level_des *lev;
 
lev = (RR2_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RR2_READY;
qq_insertlast(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int RR2_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
127,7 → 106,7
static void RR2_level_status(LEVEL l)
{
RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->ready);
PID p = iq_query_first(&lev->ready);
 
kern_printf("Slice: %d \n", lev->slice);
 
134,7 → 113,7
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RR2_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->ready);
}
 
for (p=0; p<MAX_PROC; p++)
157,14 → 136,14
PID p;
 
for (;;) {
p = qq_queryfirst(&lev->ready);
p = iq_query_first(&lev->ready);
if (p == -1)
return p;
 
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_extract(p,&lev->ready);
qq_insertlast(p,&lev->ready);
iq_extract(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
else
return p;
222,14 → 201,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RR2_task_dispatch(LEVEL l, PID p, int nostop)
{
RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
237,18 → 208,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
qq_extract(p, &lev->ready);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
iq_extract(p, &lev->ready);
}
 
static void RR2_task_epilogue(LEVEL l, PID p)
259,11 → 219,11
queue position */
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
else
/* curr is >0, so the running task has to run for another curr usec */
qq_insertfirst(p,&lev->ready);
iq_insertfirst(p,&lev->ready);
 
proc_table[p].status = RR2_READY;
}
284,7 → 244,7
 
/* Insert task in the correct position */
proc_table[p].status = RR2_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
 
static void RR2_task_insert(LEVEL l, PID p)
296,7 → 256,7
 
/* Insert task in the correct position */
proc_table[p].status = RR2_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
 
static void RR2_task_extract(LEVEL l, PID p)
319,7 → 279,7
/* continue!!!! */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
lev->nact[p]--;
qq_insertfirst(p,&lev->ready);
iq_insertfirst(p,&lev->ready);
proc_table[p].status = RR2_READY;
}
else
334,7 → 294,7
 
/* then, we insert the task in the free queue */
proc_table[p].status = FREE;
q_insert(p,&freedesc);
iq_insertlast(p,&freedesc);
}
 
static void RR2_task_sleep(LEVEL l, PID p)
344,59 → 304,39
proc_table[p].status = SLEEP;
}
 
static void RR2_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to RR2_task_endcycle */
proc_table[p].status = RR2_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RR2_timer_delay,
(void *)p);
}
 
 
static int RR2_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void RR2_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR2_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
475,7 → 415,6
lev->l.task_endcycle = RR2_task_endcycle;
lev->l.task_end = RR2_task_end;
lev->l.task_sleep = RR2_task_sleep;
lev->l.task_delay = RR2_task_delay;
 
lev->l.guest_create = RR2_guest_create;
lev->l.guest_detach = RR2_guest_detach;
487,13 → 426,12
lev->l.guest_endcycle = RR2_guest_endcycle;
lev->l.guest_end = RR2_guest_end;
lev->l.guest_sleep = RR2_guest_sleep;
lev->l.guest_delay = RR2_guest_delay;
 
/* fill the RR2 descriptor part */
for (i = 0; i < MAX_PROC; i++)
lev->nact[i] = -1;
 
qq_init(&lev->ready);
iq_init(&lev->ready, &freedesc, 0);
 
if (slice < RR2_MINIMUM_SLICE) slice = RR2_MINIMUM_SLICE;
if (slice > RR2_MAXIMUM_SLICE) slice = RR2_MAXIMUM_SLICE;
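
The RR2_scheduler hunk above keeps the classic round-robin rotation: when the head task's slice is exhausted, its budget is replenished by wcet and the task is rotated to the tail before selection retries. A self-contained toy model of that loop follows; the names mirror the diff, but the queue is a stand-in.

/* Toy model of the RR2 select loop: if the head task has used up its
   slice, replenish avail_time by wcet and rotate it to the tail, then
   look again. Illustrative only, not the kernel scheduler. */
#include <stdio.h>

#define NIL   (-1)
#define NTASK 3

static int next[NTASK], first = NIL, last = NIL;
static int avail_time[NTASK], wcet[NTASK];

static void insertlast(int p) {
    next[p] = NIL;
    if (last == NIL) first = p; else next[last] = p;
    last = p;
}

static int extract_first(void) {
    int p = first;
    if (p != NIL) { first = next[p]; if (first == NIL) last = NIL; }
    return p;
}

static int scheduler(void) {
    for (;;) {
        int p = first;
        if (p == NIL) return NIL;
        if (avail_time[p] <= 0) {     /* slice exhausted: rotate */
            avail_time[p] += wcet[p];
            extract_first();
            insertlast(p);
        } else
            return p;
    }
}

int main(void) {
    for (int p = 0; p < NTASK; p++) { wcet[p] = 1000; avail_time[p] = 0; insertlast(p); }
    avail_time[2] = 500;              /* task 2 still has budget */
    printf("selected task %d\n", scheduler());
    return 0;
}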
/shark/tags/rel_0_2/kernel/modules/ds.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ds.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: ds.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the aperiodic server DS (Deferrable Server)
83,7 → 83,7
int Cs; /*+ server capacity +*/
int availCs; /*+ server avail time +*/
 
QQUEUE wait; /*+ the wait queue of the DS +*/
IQUEUE wait; /*+ the wait queue of the DS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
128,8 → 128,8
was not any other task to be put in the ready queue
... we are now activating the next task */
if (lev->availCs > 0 && lev->activated == NIL) {
if (qq_queryfirst(&lev->wait) != NIL) {
lev->activated = qq_getfirst(&lev->wait);
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
DS_activation(lev);
event_need_reschedule();
}
178,7 → 178,7
static void DS_level_status(LEVEL l)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->wait);
PID p = iq_query_first(&lev->wait);
 
kern_printf("On-line guarantee : %s\n",
onoff(lev->flags & DS_ENABLE_GUARANTEE_EDF ||
190,8 → 190,8
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
lev->nact[lev->activated],
DS_status_to_a(proc_table[lev->activated].status));
 
200,7 → 200,7
p,
proc_table[p].name,
DS_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->wait);
}
}
 
221,7 → 221,7
if (lev->flags & DS_BACKGROUND_BLOCK)
return NIL;
else
return qq_queryfirst(&lev->wait);
return iq_query_first(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
275,14 → 275,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void DS_task_dispatch(LEVEL l, PID p, int nostop)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
294,7 → 286,7
to exe before calling task_dispatch. we have to check
lev->activated != p instead */
if (lev->activated != p) {
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
//kern_printf("#%d#",p);
}
else {
311,16 → 303,6
}
 
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void DS_task_epilogue(LEVEL l, PID p)
356,7 → 338,7
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = DS_WAIT;
lev->activated = NIL;
}
367,7 → 349,7
level_table[ lev->scheduling_level ]->
guest_epilogue(lev->scheduling_level,p);
} else { //kern_printf("Û2");
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
}
388,7 → 370,7
DS_activation(lev);
}
else {
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
}
408,7 → 390,7
 
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody executes with the DS before... */
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
 
445,18 → 427,18
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
 
if (lev->nact[p] > 0)
{
lev->nact[p]--;
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
proc_table[p].status = DS_WAIT;
}
else
proc_table[p].status = SLEEP;
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
DS_activation(lev);
}
481,9 → 463,9
guest_end(lev->scheduling_level,p);
 
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
DS_activation(lev);
}
509,72 → 491,49
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
 
proc_table[p].status = SLEEP;
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
DS_activation(lev);
}
static void DS_task_delay(LEVEL l, PID p, TIME usdelay)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;
 
/* update the server capacity */
if (lev->flags & DS_BACKGROUND)
lev->flags &= ~DS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}
 
/* I hope no delay when owning a mutex... */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
static int DS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void DS_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void DS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
 
647,7 → 606,6
lev->l.task_endcycle = DS_task_endcycle;
lev->l.task_end = DS_task_end;
lev->l.task_sleep = DS_task_sleep;
lev->l.task_delay = DS_task_delay;
 
lev->l.guest_create = DS_guest_create;
lev->l.guest_detach = DS_guest_detach;
659,7 → 617,6
lev->l.guest_endcycle = DS_guest_endcycle;
lev->l.guest_end = DS_guest_end;
lev->l.guest_sleep = DS_guest_sleep;
lev->l.guest_delay = DS_guest_delay;
 
/* fill the DS descriptor part */
 
671,7 → 628,7
 
lev->period = per;
 
qq_init(&lev->wait);
iq_init(&lev->wait, &freedesc, 0);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / per) * Cs;
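
In ds.c the Deferrable Server's wait list becomes an IQUEUE, but the activation logic is unchanged: whenever some capacity (availCs) is left and no task is currently activated, the first waiting task is popped with iq_getfirst() and handed to the master scheduling level. A toy sketch of that check, with illustrative stand-ins for the queue and the hand-off:

/* Minimal sketch of the DS bookkeeping around the hunks above. The
   queue and the activation are stand-ins; the real code inserts the
   task into the master level via guest_* calls. */
#include <stdio.h>

#define NIL (-1)

static int waitq[4], wn = 0;          /* toy stand-in for the IQUEUE wait list */

static int getfirst(void) {
    if (wn == 0) return NIL;
    int p = waitq[0];
    for (int i = 1; i < wn; i++) waitq[i-1] = waitq[i];
    wn--;
    return p;
}

static int availCs;                   /* remaining server capacity, usec */
static int activated = NIL;           /* as in lev->activated */

/* mirrors the "if (lev->availCs > 0 && lev->activated == NIL)" pattern */
static void try_activation(void) {
    if (availCs > 0 && activated == NIL) {
        int p = getfirst();
        if (p != NIL) {
            activated = p;            /* real code activates p in the master level here */
            printf("DS activates task %d (availCs=%d)\n", p, availCs);
        }
    }
}

int main(void) {
    availCs = 2000;
    waitq[wn++] = 7;
    try_activation();
    return 0;
}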
/shark/tags/rel_0_2/kernel/modules/cbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: cbs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: cbs.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the aperiodic server CBS (Constant Bandwidth Server)
76,7 → 76,6
/*+ Status used in the level +*/
#define CBS_IDLE APER_STATUS_BASE /*+ waiting the activation +*/
#define CBS_ZOMBIE APER_STATUS_BASE+1 /*+ waiting the period end +*/
#define CBS_DELAY APER_STATUS_BASE+2 /*+ waiting the delay end +*/
 
/*+ task flags +*/
#define CBS_SAVE_ARRIVALS 1
188,7 → 187,6
switch (status) {
case CBS_IDLE : return "CBS_Idle";
case CBS_ZOMBIE : return "CBS_Zombie";
case CBS_DELAY : return "CBS_Delay";
default : return "CBS_Unknown";
}
}
253,20 → 251,6
 
}
 
/*+ this function is called when a task finishes its delay +*/
static void CBS_timer_delay(void *par)
{
PID p = (PID) par;
CBS_level_des *lev;
 
lev = (CBS_level_des *)level_table[proc_table[p].task_level];
 
CBS_activation(lev,p,&proc_table[p].timespec_priority);
 
event_need_reschedule();
}
 
 
/*+ this function is called when a killed or ended task reach the
period end +*/
static void CBS_timer_zombie(void *par)
278,7 → 262,7
 
/* we finally put the task in the free queue */
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
 
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
451,29 → 435,11
return 0;
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void CBS_task_dispatch(LEVEL l, PID p, int nostop)
{
CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
level_table[ lev->scheduling_level ]->
guest_dispatch(lev->scheduling_level,p,nostop);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void CBS_task_epilogue(LEVEL l, PID p)
639,68 → 605,37
lev->nact[p] = 0;
}
 
static void CBS_task_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
 
/* check if the wcet is finished... */
CBS_avail_time_check(lev, p);
 
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
 
proc_table[p].status = CBS_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
 
/* the timespec_priority field is used to store the time at which the delay
timer fires */
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
CBS_timer_delay,
(void *)p);
}
 
 
static int CBS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void CBS_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void CBS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
752,7 → 687,6
lev->l.task_endcycle = CBS_task_endcycle;
lev->l.task_end = CBS_task_end;
lev->l.task_sleep = CBS_task_sleep;
lev->l.task_delay = CBS_task_delay;
 
lev->l.guest_create = CBS_guest_create;
lev->l.guest_detach = CBS_guest_detach;
764,7 → 698,6
lev->l.guest_endcycle = CBS_guest_endcycle;
lev->l.guest_end = CBS_guest_end;
lev->l.guest_sleep = CBS_guest_sleep;
lev->l.guest_delay = CBS_guest_delay;
 
/* fill the CBS descriptor part */
for (i=0; i<MAX_PROC; i++) {
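
A note on the bandwidth bookkeeping that recurs in these modules (lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet): U accumulates each task's utilization wcet/period scaled to MAX_BANDWIDTH in integer arithmetic. A worked example, assuming MAX_BANDWIDTH is the full-scale 32-bit constant (an assumption; check the kernel headers):

/* Fixed-point bandwidth bookkeeping as in the zombie handler above:
   (MAX_BANDWIDTH / period) * wcet is the utilization wcet/period
   scaled to full scale. Illustrative constants only. */
#include <stdio.h>
#include <stdint.h>

#define MAX_BANDWIDTH 0xFFFFFFFFu     /* assumed full-scale constant */

int main(void) {
    uint32_t period = 10000, wcet = 2500;              /* microseconds */
    uint32_t U = (MAX_BANDWIDTH / period) * wcet;      /* integer division first */
    printf("utilization ~= %.3f of full scale\n", (double)U / MAX_BANDWIDTH);
    return 0;
}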
/shark/tags/rel_0_2/kernel/modules/nopm.c
20,11 → 20,11
 
/**
------------
CVS : $Id: nopm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: nopm.c,v 1.2 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
See modules/nopm.h.
73,7 → 73,7
mutex_t structure */
typedef struct {
PID owner;
QQUEUE blocked;
IQUEUE blocked;
int counter;
} NOPM_mutex_t;
 
108,12 → 108,12
kern_printf("----------------------\n");
for(i=0;i<index;i++) {
ptr=table[i]->opt;
if (ptr->blocked.first!=NIL) {
if (!iq_isempty(&ptr->blocked)) {
kern_printf("%i blocks on 0x%p: ",ptr->owner,table[i]);
j=ptr->blocked.first;
j=iq_query_first(&ptr->blocked);
while (j!=NIL) {
kern_printf("%i ",(int)j);
j=proc_table[j].next;
j=iq_query_next(j, &ptr->blocked);
}
kern_printf("\n");
} else {
181,7 → 181,7
return (ENOMEM);
 
p->owner = NIL;
qq_init(&p->blocked);
iq_init(&p->blocked, &freedesc, 0);
p->counter=0;
m->mutexlevel = l;
254,7 → 254,7
 
/* we insert the task in the semaphore queue */
proc_table[exec_shadow].status = NOPM_WAIT;
qq_insertlast(exec_shadow,&p->blocked);
iq_insertlast(exec_shadow,&p->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
327,7 → 327,7
 
/* the mutex is mine, pop the first task to extract */
for (;;) {
e = qq_getfirst(&p->blocked);
e = iq_getfirst(&p->blocked);
if (e == NIL) {
p->owner = NIL;
break;
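
nopm.c switches the mutex's blocked list to an IQUEUE but keeps the protocol: a lock attempt on an owned mutex appends the caller to the blocked queue (iq_insertlast), and unlock pops the first blocked task (iq_getfirst) to pass ownership. Below is a toy single-threaded model of that recursive-mutex protocol, without shadows or rescheduling; it is a sketch, not the kernel primitive.

/* Toy model of the NOPM pattern above: blocked waiters queue up, and
   unlock transfers ownership to the first of them. Illustrative only. */
#include <stdio.h>

#define NIL (-1)

typedef struct { int owner; int blocked[8]; int n; int counter; } nopm_mutex_t;

static void m_init(nopm_mutex_t *m) { m->owner = NIL; m->n = 0; m->counter = 0; }

static void m_lock(nopm_mutex_t *m, int pid) {
    if (m->owner == NIL) { m->owner = pid; m->counter = 1; }
    else if (m->owner == pid) m->counter++;            /* recursive lock */
    else m->blocked[m->n++] = pid;                     /* iq_insertlast(...) */
}

static void m_unlock(nopm_mutex_t *m) {
    if (--m->counter > 0) return;                      /* still held */
    if (m->n == 0) { m->owner = NIL; return; }
    m->owner = m->blocked[0];                          /* iq_getfirst(&p->blocked) */
    for (int i = 1; i < m->n; i++) m->blocked[i-1] = m->blocked[i];
    m->n--;
    m->counter = 1;
}

int main(void) {
    nopm_mutex_t m; m_init(&m);
    m_lock(&m, 1);
    m_lock(&m, 2);          /* pid 2 blocks */
    m_unlock(&m);           /* ownership passes to pid 2 */
    printf("owner=%d\n", m.owner);
    return 0;
}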
/shark/tags/rel_0_2/kernel/modules/rm.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rm.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: rm.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module RM (Rate Monotonic)
71,7 → 71,6
 
/*+ Status used in the level +*/
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define RM_DELAY MODULE_STATUS_BASE+1 /*+ - Delay status +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait for the deadline +*/
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait for the deadline +*/
94,7 → 93,7
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
 
QUEUE ready; /*+ the ready queue +*/
IQUEUE ready; /*+ the ready queue +*/
 
int flags; /*+ the init flags... +*/
 
110,7 → 109,6
 
switch (status) {
case RM_READY : return "RM_Ready";
case RM_DELAY : return "RM_Delay";
case RM_WCET_VIOLATED: return "RM_Wcet_Violated";
case RM_WAIT : return "RM_Sporadic_Wait";
case RM_IDLE : return "RM_Idle";
123,8 → 121,8
{
PID p = (PID) par;
RM_level_des *lev;
struct timespec *temp;
 
 
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
switch (proc_table[p].status) {
131,7 → 129,7
case RM_ZOMBIE:
/* we finally put the task in the free queue */
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
140,12 → 138,12
/* tracer stuff */
trc_logevent(TRC_INTACTIVATION,&p);
/* similar to RM_task_activate */
TIMESPEC_ASSIGN(&proc_table[p].request_time,
&proc_table[p].timespec_priority);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
temp = iq_query_timespec(p, &lev->ready);
TIMESPEC_ASSIGN(&proc_table[p].request_time, temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = RM_READY;
q_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
iq_priority_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
RM_timer_deadline,
(void *)p);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
173,23 → 171,6
kern_raise(XDEADLINE_MISS,p);
}
 
/*+ this function is called when a task finishes its delay +*/
static void RM_timer_delay(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
 
lev = (RM_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RM_READY;
q_insert(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
event_need_reschedule();
}
 
 
static int RM_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == HARD_PCLASS || m->pclass == (HARD_PCLASS | l)) {
222,7 → 203,7
static void RM_level_status(LEVEL l)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
PID p = lev->ready;
PID p = iq_query_first(&lev->ready);
 
kern_printf("Wcet Check : %s\n",
onoff(lev->flags & RM_ENABLE_WCET_CHECK));
240,10 → 221,10
proc_table[p].name,
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
RM_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->ready);
}
 
for (p=0; p<MAX_PROC; p++)
254,8 → 235,8
proc_table[p].name,
lev->flag[p] & RM_FLAG_SPORADIC ? "MinITime" : "Period ",
lev->period[p],
proc_table[p].timespec_priority.tv_sec,
proc_table[p].timespec_priority.tv_nsec/1000,
iq_query_timespec(p, &lev->ready)->tv_sec,
iq_query_timespec(p, &lev->ready)->tv_nsec/1000,
RM_status_to_a(proc_table[p].status));
}
 
274,7 → 255,7
kern_printf(") ");
}
*/
return (PID)lev->ready;
return iq_query_first(&lev->ready);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
305,7 → 286,7
 
HARD_TASK_MODEL *h = (HARD_TASK_MODEL *)m;
 
proc_table[p].priority = lev->period[p] = h->mit;
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit;
 
if (h->periodicity == APERIODIC)
lev->flag[p] = RM_FLAG_SPORADIC;
366,14 → 347,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RM_task_dispatch(LEVEL l, PID p, int nostop)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
383,17 → 356,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
q_extract(p, &lev->ready);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
iq_extract(p, &lev->ready);
}
 
static void RM_task_epilogue(LEVEL l, PID p)
410,7 → 373,7
}
else {
/* the task has been preempted. It returns to the ready queue... */
q_insert(p,&lev->ready);
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
}
418,6 → 381,7
static void RM_task_activate(LEVEL l, PID p)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
struct timespec *temp;
 
if (proc_table[p].status == RM_WAIT) {
kern_raise(XACTIVATION,p);
434,16 → 398,16
/* see also RM_timer_deadline */
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
 
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority,
&proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], &proc_table[p].timespec_priority);
temp = iq_query_timespec(p, &lev->ready);
TIMESPEC_ASSIGN(temp, &proc_table[p].request_time);
ADDUSEC2TIMESPEC(lev->period[p], temp);
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
q_insert(p,&lev->ready);
iq_priority_insert(p,&lev->ready);
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
lev->deadline_timer[p] = kern_event_post(temp,
RM_timer_deadline,
(void *)p);
}
457,7 → 421,7
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
q_insert(p,&lev->ready);
iq_priority_insert(p,&lev->ready);
}
 
static void RM_task_extract(LEVEL l, PID p)
516,22 → 480,7
correctly the task state to sleep... */
}
 
static void RM_task_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* equal to RM_task_endcycle */
proc_table[p].status = RM_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RM_timer_delay,
(void *)p);
}
 
/* Guest Functions
These functions manage a JOB_TASK_MODEL, which is used to put
a guest task in the RM ready queue. */
544,9 → 493,9
/* if the RM_guest_create is called, then the pclass must be a
valid pclass. */
 
 
*iq_query_timespec(p,&lev->ready) = job->deadline;
TIMESPEC_ASSIGN(&proc_table[p].timespec_priority, &job->deadline);
lev->deadline_timer[p] = -1;
 
if (job->noraiseexc)
554,7 → 503,7
else
lev->flag[p] = 0;
 
proc_table[p].priority = lev->period[p] = job->period;
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;
 
/* there is no bandwidth guarantee at this level, it is performed
by the level that inserts guest tasks... */
576,7 → 525,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
q_extract(p, &lev->ready);
iq_extract(p, &lev->ready);
}
 
static void RM_guest_epilogue(LEVEL l, PID p)
584,7 → 533,7
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* the task has been preempted. It returns to the ready queue... */
q_insert(p,&lev->ready);
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
 
593,15 → 542,14
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
q_insert(p,&lev->ready);
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
 
/* Set the deadline timer */
if (!(lev->flag[p] & RM_FLAG_NORAISEEXC))
lev->deadline_timer[p] = kern_event_post(&proc_table[p].timespec_priority,
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
RM_timer_guest_deadline,
(void *)p);
 
}
 
static void RM_guest_insert(LEVEL l, PID p)
609,7 → 557,7
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* Insert task in the correct position */
q_insert(p,&lev->ready);
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
 
625,7 → 573,7
}
 
static void RM_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RM_guest_end(LEVEL l, PID p)
{
634,13 → 582,9
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
if (proc_table[p].status == RM_READY)
{
q_extract(p, &lev->ready);
iq_extract(p, &lev->ready);
//kern_printf("(g_end rdy extr)");
}
else if (proc_table[p].status == RM_DELAY) {
event_delete(proc_table[p].delay_timer);
proc_table[p].delay_timer = NIL; /* paranoia */
}
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
652,27 → 596,11
}
 
static void RM_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RM_guest_delay(LEVEL l, PID p, TIME usdelay)
{
struct timespec wakeuptime;
// RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* equal to RM_task_endcycle */
proc_table[p].status = RM_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT, &wakeuptime);
ADDUSEC2TIMESPEC(usdelay, &wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RM_timer_delay,
(void *)p);
}
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
722,7 → 650,6
lev->l.task_endcycle = RM_task_endcycle;
lev->l.task_end = RM_task_end;
lev->l.task_sleep = RM_task_sleep;
lev->l.task_delay = RM_task_delay;
 
lev->l.guest_create = RM_guest_create;
lev->l.guest_detach = RM_guest_detach;
734,7 → 661,6
lev->l.guest_endcycle = RM_guest_endcycle;
lev->l.guest_end = RM_guest_end;
lev->l.guest_sleep = RM_guest_sleep;
lev->l.guest_delay = RM_guest_delay;
 
/* fill the RM descriptor part */
for(i=0; i<MAX_PROC; i++) {
743,7 → 669,7
lev->flag[i] = 0;
}
 
lev->ready = NIL;
iq_init(&lev->ready, &freedesc, 0);
lev->flags = flags & 0x07;
lev->U = 0;
}
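
rm.c differs from edf.c in its ordering key: instead of a timespec deadline, the queue stores an integer priority set to the task period (*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit), so iq_priority_insert() yields rate-monotonic order, shortest period first. A stand-alone sketch of that insert, with illustrative types rather than the SHaRK IQUEUE:

/* Stand-in sketch of the rate-monotonic ordering used above: the queue
   stores an integer priority per index, set to the task period, and
   iq_priority_insert() keeps the list sorted ascending (shortest
   period first). Not the real SHaRK API. */
#include <stdio.h>

#define MAX_PROC 8
#define NIL      (-1)

typedef struct {
    int first;
    int next[MAX_PROC];
    int priority[MAX_PROC];           /* RM key: the task period in usec */
} iqueue_t;

static void iq_init(iqueue_t *q) {
    q->first = NIL;
    for (int i = 0; i < MAX_PROC; i++) q->next[i] = NIL;
}

static int *iq_query_priority(int p, iqueue_t *q) { return &q->priority[p]; }

static void iq_priority_insert(int p, iqueue_t *q) {
    int *link = &q->first;
    while (*link != NIL && q->priority[*link] <= q->priority[p])
        link = &q->next[*link];
    q->next[p] = *link;
    *link = p;
}

int main(void) {
    iqueue_t ready; iq_init(&ready);
    int period[3] = {20000, 5000, 10000};
    for (int p = 0; p < 3; p++) {
        *iq_query_priority(p, &ready) = period[p];   /* as in RM_task_create */
        iq_priority_insert(p, &ready);
    }
    for (int p = ready.first; p != NIL; p = ready.next[p])
        printf("task %d period %d\n", p, ready.priority[p]);
    return 0;
}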
/shark/tags/rel_0_2/kernel/modules/rrsoft.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rrsoft.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: rrsoft.c,v 1.3 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin)
63,7 → 63,6
 
/*+ Status used in the level +*/
#define RRSOFT_READY MODULE_STATUS_BASE
#define RRSOFT_DELAY MODULE_STATUS_BASE+1
#define RRSOFT_IDLE MODULE_STATUS_BASE+2
 
/*+ the level redefinition for the Round Robin level +*/
72,7 → 71,7
 
int nact[MAX_PROC]; /*+ number of pending activations +*/
 
QQUEUE ready; /*+ the ready queue +*/
IQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
100,7 → 99,6
 
switch (status) {
case RRSOFT_READY: return "RRSOFT_Ready";
case RRSOFT_DELAY: return "RRSOFT_Delay";
case RRSOFT_IDLE : return "RRSOFT_Idle";
default : return "RRSOFT_Unknown";
}
121,7 → 119,7
/* the task has finished the current activation and must be
reactivated */
proc_table[p].status = RRSOFT_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
 
event_need_reschedule();
}
140,25 → 138,6
}
 
 
/*+ this function is called when a task finishes its delay +*/
static void RRSOFT_timer_delay(void *par)
{
PID p = (PID) par;
RRSOFT_level_des *lev;
 
lev = (RRSOFT_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RRSOFT_READY;
qq_insertlast(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int RRSOFT_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
181,7 → 160,7
static void RRSOFT_level_status(LEVEL l)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->ready);
PID p = iq_query_first(&lev->ready);
 
kern_printf("Slice: %d \n", lev->slice);
 
188,7 → 167,7
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RRSOFT_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->ready);
}
 
for (p=0; p<MAX_PROC; p++)
211,7 → 190,7
PID p;
 
for (;;) {
p = qq_queryfirst(&lev->ready);
p = iq_query_first(&lev->ready);
if (p == -1)
return p;
//{kern_printf("(s%d)",p); return p;}
219,8 → 198,8
// kern_printf("(p=%d l=%d avail=%d wcet =%d)\n",p,l,proc_table[p].avail_time, proc_table[p].wcet);
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_extract(p,&lev->ready);
qq_insertlast(p,&lev->ready);
iq_extract(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
else
//{kern_printf("(s%d)",p); return p;}
322,14 → 301,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RRSOFT_task_dispatch(LEVEL l, PID p, int nostop)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
338,18 → 309,7
/* the task state is set to EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
qq_extract(p, &lev->ready);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
iq_extract(p, &lev->ready);
}
 
static void RRSOFT_task_epilogue(LEVEL l, PID p)
360,11 → 320,11
queue position */
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
else
/* curr is >0, so the running task has to run for another curr usec */
qq_insertfirst(p,&lev->ready);
iq_insertfirst(p,&lev->ready);
 
proc_table[p].status = RRSOFT_READY;
}
385,7 → 345,7
 
/* Insert task in the correct position */
proc_table[p].status = RRSOFT_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
 
/* Set the reactivation timer */
if (lev->periodic[p])
408,7 → 368,7
 
/* Insert task in the correct position */
proc_table[p].status = RRSOFT_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
 
static void RRSOFT_task_extract(LEVEL l, PID p)
432,7 → 392,7
ll_gettime(TIME_EXACT, &proc_table[p].request_time);
lev->nact[p]--;
// qq_insertlast(p,&lev->ready);
qq_insertfirst(p,&lev->ready);
iq_insertfirst(p,&lev->ready);
proc_table[p].status = RRSOFT_READY;
}
else
453,7 → 413,7
 
/* then, we insert the task in the free queue */
proc_table[p].status = FREE;
q_insert(p,&freedesc);
iq_insertlast(p,&freedesc);
}
 
static void RRSOFT_task_sleep(LEVEL l, PID p)
471,59 → 431,38
proc_table[p].status = SLEEP;
}
 
static void RRSOFT_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to RRSOFT_task_endcycle */
proc_table[p].status = RRSOFT_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RRSOFT_timer_delay,
(void *)p);
}
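
The task_delay entry points removed in this revision (here and in RR_task_delay below) all shared one mechanism: read the current time, add the requested microseconds with ADDUSEC2TIMESPEC, and post a one-shot kernel event whose handler requeues the task. A user-space stand-in for just the timespec arithmetic; the macro's behaviour is assumed from its use here, not copied from the kernel headers:

#include <stdio.h>
#include <time.h>

/* user-space stand-in for ADDUSEC2TIMESPEC: add usec microseconds to a
   timespec, keeping tv_nsec normalized into [0, 1000000000) */
static void add_usec(long usec, struct timespec *t)
{
    t->tv_sec  += usec / 1000000L;
    t->tv_nsec += (usec % 1000000L) * 1000L;
    if (t->tv_nsec >= 1000000000L) {
        t->tv_nsec -= 1000000000L;
        t->tv_sec++;
    }
}

int main(void)
{
    struct timespec wakeup = { 10, 999000000L };  /* now = 10.999 s */
    add_usec(1500, &wakeup);                      /* usdelay = 1500 us */
    printf("%ld.%09ld\n", (long)wakeup.tv_sec, (long)wakeup.tv_nsec);
    /* prints 11.000500000: the time the one-shot event would fire */
    return 0;
}
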
 
 
static int RRSOFT_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void RRSOFT_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RRSOFT_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
603,7 → 542,6
lev->l.task_endcycle = RRSOFT_task_endcycle;
lev->l.task_end = RRSOFT_task_end;
lev->l.task_sleep = RRSOFT_task_sleep;
lev->l.task_delay = RRSOFT_task_delay;
 
lev->l.guest_create = RRSOFT_guest_create;
lev->l.guest_detach = RRSOFT_guest_detach;
615,7 → 553,6
lev->l.guest_endcycle = RRSOFT_guest_endcycle;
lev->l.guest_end = RRSOFT_guest_end;
lev->l.guest_sleep = RRSOFT_guest_sleep;
lev->l.guest_delay = RRSOFT_guest_delay;
 
/* fill the RRSOFT descriptor part */
for (i = 0; i < MAX_PROC; i++) {
626,7 → 563,7
lev->period[i] = 0;
}
 
qq_init(&lev->ready);
iq_init(&lev->ready, &freedesc, 0);
 
if (slice < RRSOFT_MINIMUM_SLICE) slice = RRSOFT_MINIMUM_SLICE;
if (slice > RRSOFT_MAXIMUM_SLICE) slice = RRSOFT_MAXIMUM_SLICE;
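
The change repeated in every module of this revision is the move from the QUEUE/QQUEUE primitives (q_insert, qq_insertlast, qq_extract, ...) to the IQUEUE interface (iq_init, iq_insertlast, iq_extract, iq_getfirst, iq_query_first, iq_query_next, iq_query_timespec, ...). The toy below only mirrors the call shapes visible in these hunks; the real IQUEUE also keeps a per-task timespec for deadline-ordered insertion (iq_timespec_insert), and the meaning given here to the &freedesc and flags arguments of iq_init is an assumption:

#include <stdio.h>

#define MAX_PROC 8
#define NIL (-1)
typedef int PID;

/* toy index queue: a FIFO of task indexes whose link array can be
   shared with another queue, mimicking iq_init(&q, &freedesc, 0) */
typedef struct TAGIQUEUE {
    PID first, last;
    PID *next;                /* link storage, possibly shared */
    PID links[MAX_PROC];
} IQUEUE;

void iq_init(IQUEUE *q, IQUEUE *share, int flags)
{
    q->first = q->last = NIL;
    q->next = share != NULL ? share->links : q->links;
    (void)flags;              /* flags ignored in this toy */
}

void iq_insertlast(PID p, IQUEUE *q)
{
    q->next[p] = NIL;
    if (q->last != NIL) q->next[q->last] = p; else q->first = p;
    q->last = p;
}

PID iq_query_first(IQUEUE *q)        { return q->first; }
PID iq_query_next(PID p, IQUEUE *q)  { return q->next[p]; }

PID iq_getfirst(IQUEUE *q)           /* pop the head, NIL if empty */
{
    PID p = q->first;
    if (p == NIL) return NIL;
    q->first = q->next[p];
    if (q->first == NIL) q->last = NIL;
    return p;
}

int main(void)
{
    IQUEUE ready;
    PID p;
    iq_init(&ready, NULL, 0);
    iq_insertlast(3, &ready);
    iq_insertlast(5, &ready);
    for (p = iq_query_first(&ready); p != NIL; p = iq_query_next(p, &ready))
        printf("pid %d\n", p);       /* prints 3 then 5 */
    return 0;
}
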
/shark/tags/rel_0_2/kernel/modules/ps.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ps.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: ps.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the aperiodic server PS (Polling Server)
122,7 → 122,7
int Cs; /*+ server capacity +*/
int availCs; /*+ server avail time +*/
 
QQUEUE wait; /*+ the wait queue of the PS +*/
IQUEUE wait; /*+ the wait queue of the PS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
167,8 → 167,8
was not any other task to be put in the ready queue
... we are now activating the next task */
if (lev->availCs > 0 && lev->activated == NIL) {
if (qq_queryfirst(&lev->wait) != NIL) {
lev->activated = qq_getfirst(&lev->wait);
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
PS_activation(lev);
event_need_reschedule();
}
219,7 → 219,7
static void PS_level_status(LEVEL l)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->wait);
PID p = iq_query_first(&lev->wait);
 
kern_printf("On-line guarantee : %s\n",
onoff(lev->flags & PS_ENABLE_GUARANTEE_EDF ||
231,8 → 231,8
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
lev->nact[lev->activated],
PS_status_to_a(proc_table[lev->activated].status));
 
241,7 → 241,7
p,
proc_table[p].name,
PS_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->wait);
}
}
 
262,7 → 262,7
if (lev->flags & PS_BACKGROUND_BLOCK)
return NIL;
else
return qq_queryfirst(&lev->wait);
return iq_query_first(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
316,14 → 316,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void PS_task_dispatch(LEVEL l, PID p, int nostop)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
335,7 → 327,7
to exe before calling task_dispatch. We have to check
lev->activated != p instead */
if (lev->activated != p) {
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
//kern_printf("#%d#",p);
}
else {
352,16 → 344,6
}
 
// kern_printf("(disp %d %d)",ty.tv_sec, ty.tv_nsec);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void PS_task_epilogue(LEVEL l, PID p)
397,7 → 379,7
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
lev->activated = NIL;
}
408,7 → 390,7
level_table[ lev->scheduling_level ]->
guest_epilogue(lev->scheduling_level,p);
} else { //kern_printf("Û2");
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
}
429,7 → 411,7
PS_activation(lev);
}
else {
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
}
449,7 → 431,7
 
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody has executed inside the PS before... */
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
 
486,18 → 468,18
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
 
if (lev->nact[p] > 0)
{
lev->nact[p]--;
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
proc_table[p].status = PS_WAIT;
}
else
proc_table[p].status = SLEEP;
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
524,9 → 506,9
guest_end(lev->scheduling_level,p);
 
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
554,74 → 536,51
level_table[ lev->scheduling_level ]->
guest_end(lev->scheduling_level,p);
else
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
 
proc_table[p].status = SLEEP;
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated == NIL)
lev->availCs = 0; /* see note (*) at the begin of the file */
else
PS_activation(lev);
}
static void PS_task_delay(LEVEL l, PID p, TIME usdelay)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
struct timespec ty;
TIME tx;
 
/* update the server capacity */
if (lev->flags & PS_BACKGROUND)
lev->flags &= ~PS_BACKGROUND;
else {
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
}
 
/* I hope no delay when owning a mutex... */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
static int PS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void PS_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void PS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
 
694,7 → 653,6
lev->l.task_endcycle = PS_task_endcycle;
lev->l.task_end = PS_task_end;
lev->l.task_sleep = PS_task_sleep;
lev->l.task_delay = PS_task_delay;
 
lev->l.guest_create = PS_guest_create;
lev->l.guest_detach = PS_guest_detach;
706,7 → 664,6
lev->l.guest_endcycle = PS_guest_endcycle;
lev->l.guest_end = PS_guest_end;
lev->l.guest_sleep = PS_guest_sleep;
lev->l.guest_delay = PS_guest_delay;
 
/* fill the PS descriptor part */
 
718,7 → 675,7
 
lev->period = per;
 
qq_init(&lev->wait);
iq_init(&lev->wait, &freedesc, 0);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / per) * Cs;
/shark/tags/rel_0_2/kernel/modules/rr.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: rr.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the scheduling module RR (Round Robin)
63,13 → 63,12
 
/*+ Status used in the level +*/
#define RR_READY MODULE_STATUS_BASE
#define RR_DELAY MODULE_STATUS_BASE+1
 
/*+ the level redefinition for the Round Robin level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
 
QQUEUE ready; /*+ the ready queue +*/
IQUEUE ready; /*+ the ready queue +*/
 
int slice; /*+ the level's time slice +*/
 
85,30 → 84,10
 
switch (status) {
case RR_READY: return "RR_Ready";
case RR_DELAY: return "RR_Delay";
default : return "RR_Unknown";
}
}
 
/*+ this function is called when a task finishes its delay +*/
static void RR_timer_delay(void *par)
{
PID p = (PID) par;
RR_level_des *lev;
 
lev = (RR_level_des *)level_table[proc_table[p].task_level];
 
proc_table[p].status = RR_READY;
qq_insertlast(p,&lev->ready);
 
proc_table[p].delay_timer = NIL; /* Paranoia */
 
// kern_printf(" DELAY TIMER %d ", p);
 
event_need_reschedule();
}
 
 
static int RR_level_accept_task_model(LEVEL l, TASK_MODEL *m)
{
if (m->pclass == NRT_PCLASS || m->pclass == (NRT_PCLASS | l))
125,7 → 104,7
static void RR_level_status(LEVEL l)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->ready);
PID p = iq_query_first(&lev->ready);
 
kern_printf("Slice: %d \n", lev->slice);
 
132,7 → 111,7
while (p != NIL) {
kern_printf("Pid: %d\t Name: %20s Status: %s\n",p,proc_table[p].name,
RR_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p,&lev->ready);
}
 
for (p=0; p<MAX_PROC; p++)
155,14 → 134,14
PID p;
 
for (;;) {
p = qq_queryfirst(&lev->ready);
p = iq_query_first(&lev->ready);
if (p == -1)
return p;
 
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_extract(p,&lev->ready);
qq_insertlast(p,&lev->ready);
iq_extract(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
else
return p;
215,14 → 194,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void RR_task_dispatch(LEVEL l, PID p, int nostop)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
230,20 → 201,7
/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
qq_extract(p, &lev->ready);
 
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds],&schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
// if (nostop) kern_printf("Û");
// kern_printf("(RR d %d)",nostop);
iq_extract(p, &lev->ready);
}
 
static void RR_task_epilogue(LEVEL l, PID p)
254,11 → 212,11
queue position */
if (proc_table[p].avail_time <= 0) {
proc_table[p].avail_time += proc_table[p].wcet;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
else
/* curr is >0, so the running task has to run for another curr usec */
qq_insertfirst(p,&lev->ready);
iq_insertfirst(p,&lev->ready);
 
proc_table[p].status = RR_READY;
}
276,7 → 234,7
 
/* Insert task in the correct position */
proc_table[p].status = RR_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
 
static void RR_task_insert(LEVEL l, PID p)
288,7 → 246,7
 
/* Insert task in the correct position */
proc_table[p].status = RR_READY;
qq_insertlast(p,&lev->ready);
iq_insertlast(p,&lev->ready);
}
 
static void RR_task_extract(LEVEL l, PID p)
318,7 → 276,7
 
/* we insert the task in the free queue */
proc_table[p].status = FREE;
q_insert(p,&freedesc);
iq_insertlast(p,&freedesc);
}
 
static void RR_task_sleep(LEVEL l, PID p)
326,59 → 284,39
proc_table[p].status = SLEEP;
}
 
static void RR_task_delay(LEVEL l, PID p, TIME usdelay)
{
// RR_level_des *lev = (RR_level_des *)(level_table[l]);
struct timespec wakeuptime;
 
/* equal to RR_task_endcycle */
proc_table[p].status = RR_DELAY;
 
/* we need to delete this event if we kill the task while it is sleeping */
ll_gettime(TIME_EXACT,&wakeuptime);
ADDUSEC2TIMESPEC(usdelay,&wakeuptime);
proc_table[p].delay_timer = kern_event_post(&wakeuptime,
RR_timer_delay,
(void *)p);
}
 
 
static int RR_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void RR_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void RR_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ This init function install the "main" task +*/
456,7 → 394,6
lev->l.task_endcycle = RR_task_endcycle;
lev->l.task_end = RR_task_end;
lev->l.task_sleep = RR_task_sleep;
lev->l.task_delay = RR_task_delay;
 
lev->l.guest_create = RR_guest_create;
lev->l.guest_detach = RR_guest_detach;
468,10 → 405,9
lev->l.guest_endcycle = RR_guest_endcycle;
lev->l.guest_end = RR_guest_end;
lev->l.guest_sleep = RR_guest_sleep;
lev->l.guest_delay = RR_guest_delay;
 
/* fill the RR descriptor part */
qq_init(&lev->ready);
iq_init(&lev->ready, &freedesc, 0);
 
if (slice < RR_MINIMUM_SLICE) slice = RR_MINIMUM_SLICE;
if (slice > RR_MAXIMUM_SLICE) slice = RR_MAXIMUM_SLICE;
/shark/tags/rel_0_2/kernel/modules/sem.c
20,11 → 20,11
 
/**
------------
CVS : $Id: sem.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: sem.c,v 1.2 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the Hartik 3.3.1 Semaphore functions
79,7 → 79,7
char *name; /* a name, for named semaphores */
int index; /* an index for sem_open, containing the sem number */
int count; /* the semaphore counter */
QQUEUE blocked; /* the blocked processes queue */
IQUEUE blocked; /* the blocked processes queue */
int next; /* the semaphore queue */
BYTE used; /* 1 if the semaphore is used */
} sem_table[SEM_NSEMS_MAX];
91,7 → 91,7
int sem; /* the semaphore on which the process is blocked */
} sp_table[MAX_PROC];
 
static QUEUE free_sem; /* Queue of free sem */
static int free_sem; /* Queue of free sem */
 
 
 
112,7 → 112,7
task_testcancel */
 
/* extract the process from the semaphore queue... */
qq_extract(i,&sem_table[ sp_table[i].sem ].blocked);
iq_extract(i,&sem_table[ sp_table[i].sem ].blocked);
 
l = proc_table[i].task_level;
level_table[l]->task_insert(l,i);
134,7 → 134,7
sem_table[i].name = NULL;
sem_table[i].index = i;
sem_table[i].count = 0;
qq_init(&sem_table[i].blocked);
iq_init(&sem_table[i].blocked, &freedesc, 0);
sem_table[i].next = i+1;
sem_table[i].used = 0;
}
160,7 → 160,7
free_sem = sem_table[*sem].next;
sem_table[*sem].name = NULL;
sem_table[*sem].count = value;
qq_init(&sem_table[*sem].blocked);
iq_init(&sem_table[*sem].blocked, &freedesc, 0);
sem_table[*sem].used = 1;
}
else {
254,7 → 254,7
sem_table[sem].name = kern_alloc(strlen((char *)name)+1);
strcpy(sem_table[sem].name, (char *)name);
sem_table[sem].count = j;
qq_init(&sem_table[sem].blocked);
iq_init(&sem_table[sem].blocked, &freedesc, 0);
sem_table[sem].used = 1;
kern_sti();
return &sem_table[sem].index;
378,7 → 378,7
sp_table[exec_shadow].sem = *s;
 
/* ...and put it in sem queue */
qq_insertlast(exec_shadow,&s1->blocked);
iq_insertlast(exec_shadow,&s1->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
504,7 → 504,7
sp_table[exec_shadow].sem = *s;
/* ...and put it in sem queue */
qq_insertlast(exec_shadow,&s1->blocked);
iq_insertlast(exec_shadow,&s1->blocked);
/* and finally we reschedule */
exec = exec_shadow = -1;
554,7 → 554,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
qq_extract(p,&s1->blocked);
iq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
579,7 → 579,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
qq_extract(p,&s1->blocked);
iq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
627,7 → 627,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
qq_extract(p,&s1->blocked);
iq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
657,7 → 657,7
s1->count -= sp_table[p].decsem;
/* Get task from blocked queue */
qq_extract(p,&s1->blocked);
iq_extract(p,&s1->blocked);
l = proc_table[p].task_level;
level_table[l]->task_insert(l,p);
695,16 → 695,16
 
kern_cli();
 
if (sem_table[*sem].blocked.first == NIL)
if (iq_isempty(&sem_table[*sem].blocked))
/* the sem is free */
*sval = sem_table[*sem].count;
else {
/* the sem is busy */
*sval = 0;
p = sem_table[*sem].blocked.first;
p = iq_query_first(&sem_table[*sem].blocked);
do {
(*sval)--;
p = proc_table[p].next;
p = iq_query_next(p, &sem_table[*sem].blocked);
} while (p != NIL);
}
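
The sem_getvalue hunk above encodes the usual POSIX convention: a non-negative value (the counter) when no task is blocked, otherwise minus the number of blocked tasks, obtained by walking the blocked IQUEUE once per waiter. A self-contained model of the walk, with hypothetical names and data:

#include <stdio.h>

#define NIL (-1)

/* model of the blocked queue: a NIL-terminated index list, 2 -> 1 -> 0 */
static int first = 2;
static int next_link[3] = { NIL, 0, 1 };

/* same convention as the hunk above: the counter if nobody is blocked,
   otherwise minus the number of blocked tasks */
static int getvalue_model(int count)
{
    int sval, p = first;
    if (p == NIL)
        return count;         /* the sem is free */
    sval = 0;
    do {                      /* the sem is busy: -1 per waiter */
        sval--;
        p = next_link[p];
    } while (p != NIL);
    return sval;
}

int main(void)
{
    printf("%d\n", getvalue_model(3));  /* prints -3 */
    return 0;
}
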
 
/shark/tags/rel_0_2/kernel/modules/ss.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ss.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: ss.c,v 1.3 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the aperiodic Sporadic Server (SS).
155,7 → 155,7
bandwidth_t U; /*+ the used bandwidth by the server +*/
 
QQUEUE wait; /*+ the wait queue of the SS +*/
IQUEUE wait; /*+ the wait queue of the SS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
314,7 → 314,7
kern_printf("SS: no more space to post replenishment\n");
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
SS_level_status(l);
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
325,7 → 325,7
else {
kern_printf("SS not active when posting R.A.\n");
SS_level_status(l);
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
401,7 → 401,7
kern_printf("SS: no more space to post replenishment\n");
kern_printf(" You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
SS_level_status(l);
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
457,7 → 457,7
/* replenish queue is empty */
kern_printf("Replenish Timer fires but no Replenish Amount defined\n");
SS_level_status(l);
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
465,8 → 465,8
}
 
if (lev->availCs > 0 && lev->activated == NIL) {
if (qq_queryfirst(&lev->wait) != NIL) {
lev->activated = qq_getfirst(&lev->wait);
if (iq_query_first(&lev->wait) != NIL) {
lev->activated = iq_getfirst(&lev->wait);
/* if server is active, replenish time already set */
if (lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
536,7 → 536,7
void SS_level_status(LEVEL l)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->wait);
PID p = iq_query_first(&lev->wait);
 
kern_printf("On-line guarantee : %s\n",
(lev->flags & SS_ENABLE_GUARANTEE_EDF ||
554,8 → 554,8
kern_printf("Activated: Pid: %d Name: %10s Dl: %ld.%ld Nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
iq_query_timespec(lev->activated,&lev->wait)->tv_sec,
iq_query_timespec(lev->activated,&lev->wait)->tv_nsec,
lev->nact[lev->activated],
SS_status_to_a(proc_table[lev->activated].status));
 
564,7 → 564,7
p,
proc_table[p].name,
SS_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->wait);
}
}
 
593,7 → 593,7
if (lev->flags & SS_BACKGROUND_BLOCK)
return NIL;
else
return qq_queryfirst(&lev->wait);
return iq_query_first(&lev->wait);
}
 
/* The on-line guarantee is enabled only if the appropriate flag is set... */
696,7 → 696,7
to exe before calling task_dispatch.
We have to check lev->activated != p instead */
if (lev->activated != p) {
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
#ifdef DEBUG
kern_printf("extr task:%d ",p);
#endif
766,7 → 766,7
kern_printf("SS: no more space to post replenishment\n");
kern_printf("You should recompile setting higher SS_MAX_REPLENISH into include/modules/ss.h\n");
SS_level_status(l);
kern_raise(XUNVALID_SS_REPLENISH,exec_shadow);
kern_raise(XINVALID_SS_REPLENISH,exec_shadow);
#ifdef DEBUG
sys_abort(-1);
exit(-1);
779,7 → 779,7
if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
 
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
lev->activated = NIL;
}
793,7 → 793,7
guest_epilogue(lev->scheduling_level,p);
}
else { /* goes into wait queue */
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
}
833,7 → 833,7
SS_activation(lev);
}
else {
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
}
860,7 → 860,7
 
/* when we reinsert the task into the system, the server capacity
is always 0 because nobody has executed inside the SS before... */
qq_insertfirst(p, &lev->wait);
iq_insertfirst(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
 
912,11 → 912,11
if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
else
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
 
if (lev->nact[p] > 0) {
lev->nact[p]--;
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
proc_table[p].status = SS_WAIT;
}
else {
923,7 → 923,7
proc_table[p].status = SLEEP;
}
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
962,9 → 962,9
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
 
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
1004,11 → 1004,11
if (lev->activated == p)
level_table[lev->scheduling_level]->guest_end(lev->scheduling_level,p);
else
qq_extract(p, &lev->wait);
iq_extract(p, &lev->wait);
 
proc_table[p].status = SLEEP;
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL) {
SS_activation(lev);
}
1020,41 → 1020,7
}
}
 
static void SS_task_delay(LEVEL l, PID p, TIME usdelay)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty;
int tx;
 
#ifdef DEBUG
kern_printf("SS_tdelay ");
#endif
 
/* update the server capacity */
if (BACKGROUND_ON)
lev->flags &= ~SS_BACKGROUND;
else {
 
SUBTIMESPEC(&schedule_time, &cap_lasttime, &ty);
tx = TIMESPEC2USEC(&ty);
lev->availCs -= tx;
lev->replenish_amount += tx;
#ifdef DEBUG
kern_printf("PID:%d RA=%d ",p,lev->replenish_amount);
#endif
 
/* Set the replenish amount here, because the delay may be long and
the replenish time could arrive in the meantime */
SS_set_ra(l);
}
 
/* I hope no delay when owning a mutex... */
if (lev->activated == p)
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
/*-------------------------------------------------------------------*/
 
/*** Guest functions ***/
1063,39 → 1029,36
/* SS doesn't handle guest tasks */
 
static int SS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void SS_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void SS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
/*-------------------------------------------------------------------*/
 
/*** Registration functions ***/
1153,7 → 1116,6
lev->l.task_endcycle = SS_task_endcycle;
lev->l.task_end = SS_task_end;
lev->l.task_sleep = SS_task_sleep;
lev->l.task_delay = SS_task_delay;
 
lev->l.guest_create = SS_guest_create;
lev->l.guest_detach = SS_guest_detach;
1165,7 → 1127,6
lev->l.guest_endcycle = SS_guest_endcycle;
lev->l.guest_end = SS_guest_end;
lev->l.guest_sleep = SS_guest_sleep;
lev->l.guest_delay = SS_guest_delay;
 
/* fill the SS descriptor part */
 
1177,7 → 1138,7
 
lev->period = per;
 
qq_init(&lev->wait);
iq_init(&lev->wait, &freedesc, 0);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / per) * Cs;
/shark/tags/rel_0_2/kernel/modules/tbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: tbs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: tbs.c,v 1.3 2002-11-11 08:32:07 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:07 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
84,7 → 84,7
struct timespec lastdline; /*+ the last deadline assigned to
a TBS task +*/
 
QQUEUE wait; /*+ the wait queue of the TBS +*/
IQUEUE wait; /*+ the wait queue of the TBS +*/
PID activated; /*+ the task inserted in another queue +*/
 
int flags; /*+ the init flags... +*/
206,7 → 206,7
static void TBS_level_status(LEVEL l)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
PID p = qq_queryfirst(&lev->wait);
PID p = iq_query_first(&lev->wait);
 
kern_printf("Wcet Check : %s\n",
onoff(lev->flags & TBS_ENABLE_WCET_CHECK));
221,8 → 221,8
kern_printf("Activated: Pid: %2d Name: %10s Dl: %ld.%9ld nact: %d Stat: %s\n",
lev->activated,
proc_table[lev->activated].name,
proc_table[lev->activated].timespec_priority.tv_sec,
proc_table[lev->activated].timespec_priority.tv_nsec,
iq_query_timespec(lev->activated, &lev->wait)->tv_sec,
iq_query_timespec(lev->activated, &lev->wait)->tv_nsec,
lev->nact[lev->activated],
TBS_status_to_a(proc_table[lev->activated].status));
 
231,7 → 231,7
p,
proc_table[p].name,
TBS_status_to_a(proc_table[p].status));
p = proc_table[p].next;
p = iq_query_next(p, &lev->wait);
}
}
 
288,14 → 288,6
return 0; /* if the task p is chosen, it is always eligible */
}
 
#ifdef __TEST1__
extern int testactive;
extern struct timespec s_stime[];
extern TIME s_curr[];
extern TIME s_PID[];
extern int useds;
#endif
 
static void TBS_task_dispatch(LEVEL l, PID p, int nostop)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
305,16 → 297,6
 
level_table[ lev->scheduling_level ]->
guest_dispatch(lev->scheduling_level,p,nostop);
 
#ifdef __TEST1__
if (testactive)
{
TIMESPEC_ASSIGN(&s_stime[useds], &schedule_time);
s_curr[useds] = proc_table[p].avail_time;
s_PID[useds] = p;
useds++;
}
#endif
}
 
static void TBS_task_epilogue(LEVEL l, PID p)
345,7 → 327,7
lev->lastdline.tv_sec, lev->lastdline.tv_nsec);
#endif
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
}
372,7 → 354,7
}
else {
proc_table[p].status = TBS_WAIT;
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
}
}
else if (lev->flag[p] & TBS_SAVE_ARRIVALS)
417,12 → 399,12
// lev->nact[p] can be >0 only if the SAVE_ARRIVALS bit is set
lev->nact[p]--;
proc_table[p].status = TBS_WAIT;
qq_insertlast(p, &lev->wait);
iq_insertlast(p, &lev->wait);
}
else
proc_table[p].status = SLEEP;
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
 
438,9 → 420,9
TBS_bandwidth_reclaiming(lev,p);
 
proc_table[p].status = FREE;
q_insertfirst(p,&freedesc);
iq_insertfirst(p,&freedesc);
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
}
465,57 → 447,45
 
lev->nact[p] = 0;
 
lev->activated = qq_getfirst(&lev->wait);
lev->activated = iq_getfirst(&lev->wait);
if (lev->activated != NIL)
TBS_activation(lev);
 
}
 
static void TBS_task_delay(LEVEL l, PID p, TIME usdelay)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
 
level_table[ lev->scheduling_level ]->
guest_delay(lev->scheduling_level,p,usdelay);
}
 
 
static int TBS_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void TBS_guest_detach(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_epilogue(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_activate(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_insert(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_extract(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_endcycle(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_end(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_sleep(LEVEL l, PID p)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void TBS_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/* Registration functions */
 
/*+ Registration function:
567,7 → 537,6
lev->l.task_endcycle = TBS_task_endcycle;
lev->l.task_end = TBS_task_end;
lev->l.task_sleep = TBS_task_sleep;
lev->l.task_delay = TBS_task_delay;
 
lev->l.guest_create = TBS_guest_create;
lev->l.guest_detach = TBS_guest_detach;
579,7 → 548,6
lev->l.guest_endcycle = TBS_guest_endcycle;
lev->l.guest_end = TBS_guest_end;
lev->l.guest_sleep = TBS_guest_sleep;
lev->l.guest_delay = TBS_guest_delay;
 
/* fill the TBS descriptor part */
 
590,7 → 558,7
 
NULL_TIMESPEC(&lev->lastdline);
 
qq_init(&lev->wait);
iq_init(&lev->wait, &freedesc, 0);
lev->activated = NIL;
 
lev->U = (MAX_BANDWIDTH / den) * num;
/shark/tags/rel_0_2/kernel/modules/dummy.c
20,11 → 20,11
 
/**
------------
CVS : $Id: dummy.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: dummy.c,v 1.3 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.3 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
This file contains the Dummy scheduling module
154,62 → 154,54
}
 
static void dummy_task_activate(LEVEL l, PID p)
{ kern_printf("Dummy1"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
{ kern_printf("Dummy1"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_insert(LEVEL l, PID p)
{ kern_printf("Dummy2"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
{ kern_printf("Dummy2"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_extract(LEVEL l, PID p)
{ kern_printf("Dummy3"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
{ kern_printf("Dummy3"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_endcycle(LEVEL l, PID p)
{ kern_printf("Dummy4"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
{ kern_printf("Dummy4"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_end(LEVEL l, PID p)
{ kern_printf("Dummy5"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
{ kern_printf("Dummy5"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_sleep(LEVEL l, PID p)
{ kern_printf("Dummy6"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
{ kern_printf("Dummy6"); kern_raise(XINVALID_DUMMY_OP,exec_shadow); }
 
static void dummy_task_delay(LEVEL l, PID p, TIME tickdelay)
{ kern_printf("Dummy7"); kern_raise(XUNVALID_DUMMY_OP,exec_shadow); }
 
static int dummy_guest_create(LEVEL l, PID p, TASK_MODEL *m)
{ kern_printf("Dummy8"); kern_raise(XUNVALID_GUEST,exec_shadow); return 0; }
{ kern_printf("Dummy8"); kern_raise(XINVALID_GUEST,exec_shadow); return 0; }
 
static void dummy_guest_detach(LEVEL l, PID p)
{ kern_printf("Dummy9"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummy9"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_dispatch(LEVEL l, PID p, int nostop)
{ kern_printf("Dummy0"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummy0"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_epilogue(LEVEL l, PID p)
{ kern_printf("Dummya"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummya"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_activate(LEVEL l, PID p)
{ kern_printf("Dummyb"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummyb"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_insert(LEVEL l, PID p)
{ kern_printf("Dummyc"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummyc"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_extract(LEVEL l, PID p)
{ kern_printf("Dummyd"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummyd"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_endcycle(LEVEL l, PID p)
{ kern_printf("Dummye"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummye"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_end(LEVEL l, PID p)
{ kern_printf("Dummyf"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummyf"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_sleep(LEVEL l, PID p)
{ kern_printf("Dummyg"); kern_raise(XUNVALID_GUEST,exec_shadow); }
{ kern_printf("Dummyg"); kern_raise(XINVALID_GUEST,exec_shadow); }
 
static void dummy_guest_delay(LEVEL l, PID p,DWORD tickdelay)
{ kern_printf("Dummyh"); kern_raise(XUNVALID_GUEST,exec_shadow); }
 
 
 
 
/*+ Dummy task must be present & cannot be killed; +*/
static TASK dummy()
{
298,7 → 290,6
lev->l.task_endcycle = dummy_task_endcycle;
lev->l.task_end = dummy_task_end;
lev->l.task_sleep = dummy_task_sleep;
lev->l.task_delay = dummy_task_delay;
 
lev->l.guest_create = dummy_guest_create;
lev->l.guest_detach = dummy_guest_detach;
310,7 → 301,6
lev->l.guest_endcycle = dummy_guest_endcycle;
lev->l.guest_end = dummy_guest_end;
lev->l.guest_sleep = dummy_guest_sleep;
lev->l.guest_delay = dummy_guest_delay;
 
/* the dummy process will be created at init_time.
see also dummy_level_accept_model,dummy_create */
/shark/tags/rel_0_2/kernel/modules/nop.c
20,11 → 20,11
 
/**
------------
CVS : $Id: nop.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: nop.c,v 1.2 2002-11-11 08:32:06 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-11-11 08:32:06 $
------------
 
Binary Semaphores. see nop.h for more details...
73,7 → 73,7
mutex_t structure */
typedef struct {
PID owner;
QQUEUE blocked;
IQUEUE blocked;
} NOP_mutex_t;
 
 
124,7 → 124,7
return (ENOMEM);
 
p->owner = NIL;
qq_init(&p->blocked);
iq_init(&p->blocked, &freedesc, 0);
 
m->mutexlevel = l;
m->opt = (void *)p;
192,7 → 192,7
 
/* we insert the task in the semaphore queue */
proc_table[exec_shadow].status = NOP_WAIT;
qq_insertlast(exec_shadow,&p->blocked);
iq_insertlast(exec_shadow,&p->blocked);
 
/* and finally we reschedule */
exec = exec_shadow = -1;
253,7 → 253,7
proc_table[exec_shadow].context = kern_context_save();
 
/* the mutex is mine, pop the first task to extract */
p->owner = qq_getfirst(&p->blocked);
p->owner = iq_getfirst(&p->blocked);
if (p->owner != NIL) {
l = proc_table[p->owner].task_level;
level_table[l]->task_insert(l,p->owner);
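
The unlock path above performs a direct ownership handoff: rather than waking every waiter, the first blocked task is popped and becomes the new owner, then it is re-inserted into its scheduling level; if the queue is empty the mutex simply becomes free (owner == NIL). A toy restatement of the rule, with hypothetical names:

#include <stdio.h>

#define NIL (-1)

typedef struct {
    int owner;       /* NIL when the mutex is free */
    int queue[4];    /* blocked tasks, queue[0] is the head */
    int n;           /* number of blocked tasks */
} toy_mutex_t;

/* direct handoff: the first waiter, if any, becomes the new owner */
static void toy_unlock(toy_mutex_t *m)
{
    int i;
    if (m->n == 0) { m->owner = NIL; return; }  /* nobody waits */
    m->owner = m->queue[0];
    for (i = 0; i < m->n - 1; i++)
        m->queue[i] = m->queue[i + 1];
    m->n--;
    /* here the kernel would task_insert() the new owner, as above */
}

int main(void)
{
    toy_mutex_t m = { 7, {4, 9, 0, 0}, 2 };  /* task 7 owns, 4 and 9 wait */
    toy_unlock(&m);
    printf("new owner: %d\n", m.owner);      /* prints 4 */
    return 0;
}
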
/shark/tags/rel_0_2/kernel/modules/trcudp.c
6,7 → 6,9
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
16,6 → 18,26
* http://shark.sssup.it
*/
 
/*
* Copyright (C) 2002 Paolo Gai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* CVS : $Id: trcudp.c,v 1.3 2002-10-28 10:11:38 pj Exp $
*/
 
#include <ll/sys/types.h>
#include <ll/stdlib.h>
 
23,73 → 45,217
#include <kernel/mem.h>
#include <kernel/log.h>
 
#include <drivers/udpip.h>
 
#include <trace/types.h>
#include <trace/trace.h>
#include <trace/queues.h>
 
#include <fs/fs.h>
//#define DEBUG_TRCUDP
 
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
#define TRCUDP_MAXEVENTS (1500/sizeof(trc_event_t))
//#define TRCUDP_MAXEVENTS 10
 
typedef struct TAGudp_queue_t {
UDP_ADDR addr;
trc_event_t evt;
} udp_queue_t;
/* Well... this file is very similar to trccirc.c! */
 
static trc_event_t *udp_get(udp_queue_t *queue)
typedef struct TAGtrcudp_queue_t {
/*+ size of the queue +*/
int size;
/*+ index of the next insertion into the queue +*/
int index;
/*+ index of the next item to write (if online_tracer activated) +*/
int windex;
/*+ number of events lost (if online_tracer activated) +*/
long hoops;
/*+ local and remote IP numbers +*/
UDP_ADDR local, remote;
/*+ unique number that identifies the queue +*/
int uniq;
/*+ =1 when the system shuts down +*/
int mustgodown;
TASK_MODEL *m;
/*+ dummy, needed for creating a valid packet (dirty trick ;-) +*/
short int dummy;
/*+ events table +*/
trc_event_t table[0];
} trcudp_queue_t;
 
static TASK online_tracer(trcudp_queue_t *queue)
{
return &queue->evt;
int s; /* the socket */
int newwindex; /* new write index after sending the packet */
int n; /* number of packets to send */
short int *pkt;
 
 
s = udp_bind(&queue->local, NULL);
for (;;) {
if (queue->index<queue->windex) {
if (queue->windex+TRCUDP_MAXEVENTS < queue->size) {
newwindex = queue->windex+TRCUDP_MAXEVENTS;
n = TRCUDP_MAXEVENTS;
} else {
newwindex = 0;
n = queue->size-queue->windex;
}
} else {
if (queue->windex+TRCUDP_MAXEVENTS < queue->index) {
newwindex = queue->windex+TRCUDP_MAXEVENTS;
n = TRCUDP_MAXEVENTS;
} else {
newwindex = queue->index;
n = queue->index-queue->windex;
}
}
if (n) {
/* set the number of events in the UDP packet. It works
because the event entry before windex is always empty, or
because we use the dummy field of the struct */
pkt = ((short int *)(queue->table+queue->windex))-1;
*pkt = (short int)n;
udp_sendto(s,(char *)pkt,
n*sizeof(trc_event_t)+2,&queue->remote);
#ifdef DEBUG_TRCUDP
printk(KERN_DEBUG "UDP: SEND %d events,"
" index %d windex %d new %d!!!\n",n,
queue->index, queue->windex, newwindex);
#endif
queue->windex = newwindex;
}
if (queue->mustgodown) {
if (queue->windex == queue->index)
break;
}
else
task_endcycle();
}
 
return NULL;
}
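
The index arithmetic in online_tracer() above sends at most TRCUDP_MAXEVENTS events per period and never lets a datagram cross the wrap point of the circular table: when the writer has wrapped (index < windex), the task first drains up to the end of the table and resets windex to 0. A self-contained check of that arithmetic, with assumed sizes (100 slots, 40 events per datagram):

#include <stdio.h>

#define SIZE  100   /* assumed queue->size */
#define MAXEV 40    /* assumed TRCUDP_MAXEVENTS */

/* same case analysis as online_tracer(): how many events to send and
   where the write window moves afterwards */
static int n_to_send(int index, int windex, int *newwindex)
{
    if (index < windex) {                       /* writer has wrapped */
        if (windex + MAXEV < SIZE) { *newwindex = windex + MAXEV; return MAXEV; }
        *newwindex = 0;                         /* drain up to the table end */
        return SIZE - windex;
    }
    if (windex + MAXEV < index) { *newwindex = windex + MAXEV; return MAXEV; }
    *newwindex = index;                         /* drain what is there */
    return index - windex;
}

int main(void)
{
    int nw, n = n_to_send(20, 90, &nw);
    printf("send %2d events, windex -> %d\n", n, nw);  /* send 10, windex -> 0 */
    n = n_to_send(20, nw, &nw);
    printf("send %2d events, windex -> %d\n", n, nw);  /* send 20, windex -> 20 */
    return 0;
}
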
 
static int udp_post(udp_queue_t *queue)
 
static trc_event_t *trcudp_get(trcudp_queue_t *queue)
{
//int s=0;
/* s ??? */
//udp_sendto(s,&queue->evt,sizeof(trc_event_t),&queue->addr);
if (queue->mustgodown)
return NULL;
 
if (queue->index==queue->size-1) {
if (queue->windex==0) {
queue->hoops++;
return NULL;
}
queue->index=0;
return &queue->table[queue->size-1];
}
if (queue->index+1==queue->windex) {
queue->hoops++;
return NULL;
}
return &queue->table[queue->index++];
}
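
trcudp_get() above manages the writer side of the same ring: taking a slot is refused, and counted in hoops as a lost event, whenever it would make the write index collide with the reader's windex, so one slot always stays free as a guard. The same rule in a runnable toy (SIZE 4, hence capacity 3):

#include <stdio.h>

#define SIZE 4   /* tiny ring: capacity is SIZE - 1 events */

static int windex = 0, index_ = 0, hoops = 0;

/* same rule as trcudp_get(): refuse a slot (counting one "hoop")
   whenever taking it would make the writer collide with the reader */
static int get_slot(void)
{
    if (index_ == SIZE - 1) {
        if (windex == 0) { hoops++; return -1; }
        index_ = 0;
        return SIZE - 1;
    }
    if (index_ + 1 == windex) { hoops++; return -1; }
    return index_++;
}

int main(void)
{
    int i;
    for (i = 0; i < 5; i++)
        printf("slot %d\n", get_slot());  /* 0, 1, 2, -1, -1 */
    printf("lost %d events\n", hoops);    /* lost 2 events */
    return 0;
}
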
 
static int trcudp_post(trcudp_queue_t *queue)
{
return 0;
}
 
static int udp_create(trc_queue_t *queue, TRC_UDP_PARMS *args)
static void trcudp_shutdown(trcudp_queue_t *queue);
 
static int trcudp_create(trc_queue_t *p, TRC_UDP_PARMS *args)
{
udp_queue_t *ptr;
trcudp_queue_t *queue;
 
if (args==NULL) return -1;
if (args==NULL) {
printk(KERN_ERR "trcudp_create: you must specify a non-NULL parameter!");
return -1;
}
ptr=(udp_queue_t*)kern_alloc(sizeof(udp_queue_t));
if (ptr==NULL) return -1;
queue->get=(trc_event_t*(*)(void*))udp_get;
queue->post=(int(*)(void*))udp_post;
queue->data=ptr;
queue=(trcudp_queue_t*)kern_alloc(sizeof(trcudp_queue_t)+
sizeof(trc_event_t)*args->size);
if (queue==NULL) {
printk(KERN_ERR "trcudp_create: error during memory allocation!");
return -1;
}
 
memcpy(&ptr->addr,&args->addr,sizeof(UDP_ADDR));
p->get=(trc_event_t*(*)(void*))trcudp_get;
p->post=(int(*)(void*))trcudp_post;
p->data=queue;
queue->size=args->size;
queue->windex=queue->index=0;
queue->hoops=0;
queue->local=args->local;
queue->remote=args->remote;
/* uniq initialized in trcudp_activate */
queue->mustgodown=0;
queue->m = args->model;
/* dummy unused */
/* AFTER exit because in that way we can hope to be back in text mode... */
sys_atrunlevel((void (*)(void *))trcudp_shutdown, (void *)queue, RUNLEVEL_AFTER_EXIT);
return 0;
}
 
static int udp_activate(udp_queue_t *queue)
static int trcudp_activate(trcudp_queue_t *queue, int uniq)
{
SOFT_TASK_MODEL model;
TASK_MODEL *m;
PID pid;
 
 
queue->uniq=uniq;
 
if (!queue->m) {
soft_task_default_model(model);
soft_task_def_system(model);
/* soft_task_def_notrace(model); Should we trace the tracer? */
soft_task_def_periodic(model);
soft_task_def_period(model,250000);
soft_task_def_met(model,10000);
soft_task_def_wcet(model,10000);
/* soft_task_def_nokill(model); NOOOOOOO!!!! */
soft_task_def_arg(model,queue);
m = (TASK_MODEL *)&model;
}
else {
m = queue->m;
task_def_arg(*m,queue);
}
 
pid=task_create("trcUDP",online_tracer,m,NULL);
if (pid==-1) {
printk(KERN_ERR "can't start tracer online trcudp trace task");
} else
task_activate(pid);
 
return 0;
}
 
static int udp_terminate(udp_queue_t *queue)
static int trcudp_terminate(trcudp_queue_t *queue)
{
queue->mustgodown = 1;
 
return 0;
}
 
static void trcudp_shutdown(trcudp_queue_t *queue)
{
printk(KERN_NOTICE "tracer: %li events lost into UDP queue %d",
queue->hoops, queue->uniq);
}
 
int trc_register_udp_queue(void)
{
int res;
res=trc_register_queuetype(TRC_UDP_QUEUE,
(int(*)(trc_queue_t*,void*))udp_create,
(int(*)(void*))udp_activate,
(int(*)(void*))udp_terminate
);
if (res!=0) printk(KERN_WARNING "can't register tracer udp queue");
(int(*)(trc_queue_t*,void*))trcudp_create,
(int(*)(void*,int))trcudp_activate,
(int(*)(void*))trcudp_terminate
);
if (res!=0) printk(KERN_WARNING "can't register tracer trcudp queue");
return res;
}
/shark/tags/rel_0_2/kernel/modules/cabs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: cabs.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: cabs.c,v 1.2 2002-10-28 07:55:54 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:54 $
------------
 
Date: 2/7/96
95,7 → 95,7
static int checkcab(CAB id)
{
if (id >= MAX_CAB) {
errno = ECAB_UNVALID_ID;
errno = ECAB_INVALID_ID;
return -1;
}
if (cabs[id].busy == TRUE) return TRUE;
117,7 → 117,7
}
cabs[MAX_CAB-1].next_cab_free = NIL;
cabs[MAX_CAB-1].busy = FALSE;
// for (i = CAB_UNVALID_MSG_NUM; i <= CAB_CLOSED; i++)
// for (i = CAB_INVALID_MSG_NUM; i <= CAB_CLOSED; i++)
// exc_set(i,cab_exception);
}
 
139,7 → 139,7
/* solleva l'eccezioni */
 
if (num_mes < 1) {
errno = ECAB_UNVALID_MSG_NUM;
errno = ECAB_INVALID_MSG_NUM;
kern_frestore(f);
return -1;
}
/shark/tags/rel_0_2/kernel/modules/srp.c
20,11 → 20,11
 
/**
------------
CVS : $Id: srp.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
CVS : $Id: srp.c,v 1.2 2002-10-28 07:55:55 pj Exp $
 
File: $File$
Revision: $Revision: 1.1.1.1 $
Last update: $Date: 2002-03-29 14:12:52 $
Revision: $Revision: 1.2 $
Last update: $Date: 2002-10-28 07:55:55 $
------------
 
Stack Resource Policy. see srp.h for general details...
595,7 → 595,7
// lev, mut->owner,
// mut->use[exec_shadow],
// lev->proc_preempt[exec_shadow].preempt,exec_shadow);
kern_raise(XSRP_UNVALID_LOCK, exec_shadow);
kern_raise(XSRP_INVALID_LOCK, exec_shadow);
kern_sti();
return (EINVAL);
}
/shark/tags/rel_0_2/kernel/modules/makefile
42,10 → 42,10
TRC_OBJ = trace.o \
trcdummy.o \
trcfixed.o \
trccirc.o
trccirc.o \
trcdfix.o \
trcudp.o
 
# trcudp.o
OBJS = $(SCHED_OBJ) $(APER_OBJ) $(RES_OBJ) $(TRC_OBJ)
 
include $(BASE)/config/lib.mk
/shark/tags/rel_0_2/kernel/modules/trace.c
38,11 → 38,11
*/
 
/*
* CVS : $Id: trace.c,v 1.1.1.1 2002-03-29 14:12:52 pj Exp $
* CVS : $Id: trace.c,v 1.2 2002-10-21 10:13:56 pj Exp $
*
* File: $File$
* Revision: $Revision: 1.1.1.1 $
* Last update: $Date: 2002-03-29 14:12:52 $
* Revision: $Revision: 1.2 $
* Last update: $Date: 2002-10-21 10:13:56 $
*/
 
#include <ll/sys/types.h>
58,6 → 58,7
 
#include <bits/limits.h>
 
/* maximum number of different queues where we want to log our events */
#define TRC_MAXQUEUES 5
 
/*
64,8 → 65,11
*
*/
 
/* this is the base path that is used as a prefix for all the
filenames that are passed to the tracer */
static char basepath[PATH_MAX];
 
/* used to create the name for a tracer file */
void trc_create_name(char *basename, int uniq, char *pathname)
{
if (uniq) sprintf(pathname,"%s/%s%i",basepath,basename,uniq);
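
For uniq != 0 the function builds "<basepath>/<basename><uniq>" (the uniq == 0 branch is cut by the hunk; presumably it omits the suffix). A trivial demonstration of the format, with an assumed basepath:

#include <stdio.h>

static char basepath[] = "/TMP";   /* assumed; really set via TRC_PARMS */

int main(void)
{
    char pathname[64];
    /* same format string as trc_create_name() when uniq != 0 */
    sprintf(pathname, "%s/%s%i", basepath, "fix", 3);
    printf("%s\n", pathname);      /* prints /TMP/fix3 */
    return 0;
}
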
76,27 → 80,42
*
*/
 
/* the flag used to discriminate if an event has to be traced or not */
#define FLAG_NOTRACE 0x01
 
typedef struct TAGtrc_evtinfo_t {
trc_queue_t *queue;
unsigned flags;
trc_queue_t *queue; /* the queue responsible for the logging of an event */
unsigned flags; /* if = FLAG_NOTRACE the event must not be logged */
} trc_evtinfo_t;
 
/* -- */
 
/* one entry for each event; this array says, for each event, which queue
to use and whether it must be logged */
trc_evtinfo_t eventstable[TRC_NUMEVENTS];
 
/* For each kind of queue (see include/tracer/queues.h) there is a set of
pointers to the functions that a queue should implement */
int (*createqueue[TRC_QUEUETYPESNUMBER])(trc_queue_t *, void *);
int (*activatequeue[TRC_QUEUETYPESNUMBER])(void *,int);
int (*terminatequeue[TRC_QUEUETYPESNUMBER])(void *);
 
/* for each queue registered in the system,
the functions used to get/post an event.
The elements of this table are initialized with calls to createqueue[type]()
(see include/trace/queues.h) */
trc_queue_t queuetable[TRC_MAXQUEUES];
 
/* initialized as a dummy queue, the default value of all the queues */
trc_queue_t queuesink;
 
/* number of registered queues in the system */
int numqueues;
 
/* -- */
 
/* The Dummy queue */
 
static trc_event_t *dummy_get(void *foo)
{
return NULL;
127,6 → 146,8
 
/* -- */
 
/* this function simply registers the functions that are used to
handle a queue */
int trc_register_queuetype(int queuetype,
int(*creat)(trc_queue_t *, void *),
int(*activate)(void *,int),
139,6 → 160,11
return 0;
}
 
/* this function registers a queue in the system.
It uses the type to access the queue-handling functions registered
with the previous function (trc_register_queuetype).
numqueues is incremented!
*/
int trc_create_queue(int queuetype, void *args)
{
int res;
186,20 → 212,28
printk(KERN_INFO "initializing tracer...");
/* all the queues are initialized to the dummy queue (sink!) */
for (i=0;i<TRC_QUEUETYPESNUMBER;i++) {
createqueue[i]=dummy_createqueue;
terminatequeue[i]=dummy_terminatequeue;
}
/* the sink queue is initialized */
dummy_createqueue(&queuesink,NULL);
/* no queues registered yet */
numqueues=0;
/* all the events are initialized to go to the sink queue */
for (i=0;i<TRC_NUMEVENTS;i++) {
eventstable[i].queue=&queuesink;
eventstable[i].flags=FLAG_NOTRACE;
}
/* this will end the tracer at shutdown */
i=sys_atrunlevel(trc_end,NULL,RUNLEVEL_SHUTDOWN);
 
/* initialize the parameters if not initialized */
{
TRC_PARMS m;
trc_default_parms(m);
212,10 → 246,13
trc_suspend=internal_trc_suspend;
trc_resume=internal_trc_resume;
/* start the tracer */
trc_resume();
return 0;
}
 
/* this function simply activates all the registered queues.
This is usually called from the init() task!!! */
int TRC_init_phase2(void)
{
int i;
224,6 → 261,8
return 0;
}
 
/* saves the current logevent function and sets it as
the internal_trc_logevent */
static int internal_trc_resume(void)
{
SYS_FLAGS f;
238,6 → 277,8
return ret;
}
 
/* restores the saved logevent function (initially, the logevent function is
a dummy function) */
static int internal_trc_suspend(void)
{
SYS_FLAGS f;
258,8 → 299,10
trc_queue_t *queue;
SYS_FLAGS f;
 
/* disables interrupts (this function can also be called from a task) */
f=kern_fsave();
 
/* check if the event has to be logged */
if (eventstable[event].flags&FLAG_NOTRACE) {
kern_frestore(f);
return;
266,6 → 309,7
}
queue=eventstable[event].queue;
/* gets a free event descriptor, fills it and posts it */
evt=queue->get(queue->data);
if (evt!=NULL) {
evt->event=event;
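
Every queue type in this revision (dummy, circular, fixed, dosfs-fixed, udp) exports the same two-step producer protocol used here: get() returns a pointer to a free event descriptor inside the queue's own storage, or NULL to drop the event; the caller fills the descriptor in place and then calls post(). A self-contained sketch of a producer against that interface, with simplified types (only the event field appears in the hunk; everything else is an assumption):

#include <stdio.h>

typedef struct { int event; } trc_event_t;

/* the two-function producer interface each tracer queue implements */
typedef struct {
    trc_event_t *(*get)(void *data);
    int (*post)(void *data);
    void *data;
} trc_queue_t;

/* a one-slot queue, enough to show the get/fill/post protocol */
static trc_event_t slot;
static int full = 0;
static trc_event_t *oneslot_get(void *d) { (void)d; return full ? NULL : &slot; }
static int oneslot_post(void *d) { (void)d; full = 1; return 0; }

static void logevent(trc_queue_t *q, int event)
{
    trc_event_t *evt = q->get(q->data);
    if (evt != NULL) {        /* NULL means: drop this event */
        evt->event = event;   /* fill the descriptor in place... */
        q->post(q->data);     /* ...then hand it back to the queue */
    }
}

int main(void)
{
    trc_queue_t q = { oneslot_get, oneslot_post, NULL };
    logevent(&q, 42);
    logevent(&q, 43);                         /* dropped: slot is full */
    printf("logged event %d\n", slot.event);  /* prints 42 */
    return 0;
}
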
283,6 → 327,10
*
*/
 
/* this set of functions can be used to enable or disable tracing of
single events and classes. They make use of the classtable structure,
which is used to discriminate the indexes occupied by each class */
 
int classtable[TRC_NUMCLASSES+1]={
TRC_F_TRACER,
TRC_F_SYSTEM,
353,21 → 401,29
{
int qf,qc;
int res;
 
/* initialize the trace */
res=TRC_init_phase1(NULL);
if (res) return res;
 
/* register two kinds of queues, fixed and circular */
res=trc_register_circular_queue();
if (res) return res;
res=trc_register_fixed_queue();
if (res) return res;
 
/* creates two queues:
a circular queue for the system events,
a fixed queue
*/
qc=trc_create_queue(TRC_CIRCULAR_QUEUE,NULL);
qf=trc_create_queue(TRC_FIXED_QUEUE,NULL);
if (qc==-1||qf==-1) return -97;
 
/* We want to trace all the system events */
res=trc_trace_class(TRC_CLASS_SYSTEM);
if (res) return res;
/* All the system events must be traced into the circular queue */
res=trc_assign_class_to_queue(TRC_CLASS_SYSTEM,qc);
if (res) return res;
 
/shark/tags/rel_0_2/kernel/modules/trcfixed.c
33,6 → 33,13
#include <fcntl.h>
#include <limits.h>
 
/* this file implements a fixed queue, which is simply an array that
is filled with events until it is full. After that, all further
events are discarded. */
 
 
 
 
typedef struct TAGfixed_queue_t {
int size;
int index;
39,9 → 46,13
char *filename;
int uniq;
trc_event_t table[0];
trc_event_t table[0];
/* Yes, 0!... the elements are allocated
in a dirty way into the kern_alloc into fixed_create */
} fixed_queue_t;
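
table[0] is the old GNU zero-length-array idiom the comment calls a dirty trick: one kern_alloc of sizeof(header) + n * sizeof(trc_event_t) (see fixed_create below) covers both the descriptor and its trailing event array. The same layout in portable C99, which would use a flexible array member instead:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int size;
    int index;
    int table[];   /* C99 flexible array member; the sources use table[0] */
} toy_queue_t;

int main(void)
{
    int n = 16;
    /* one allocation covers the header and the n trailing slots */
    toy_queue_t *q = malloc(sizeof(toy_queue_t) + n * sizeof(int));
    if (q == NULL) return 1;
    q->size = n;
    q->index = 0;
    q->table[q->index++] = 42;
    printf("first slot: %d (of %d)\n", q->table[0], q->size);
    free(q);
    return 0;
}
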
 
/* This function simply returns an event to fill (only if the fixed table
is not yet full) */
static trc_event_t *fixed_get(fixed_queue_t *queue)
{
if (queue->index>=queue->size) return NULL;
48,6 → 59,8
return &queue->table[queue->index++];
}
 
/* since get returns the correct event address,
the post function does nothing... */
static int fixed_post(fixed_queue_t *queue)
{
return 0;
60,6 → 73,7
{
fixed_queue_t *ptr;
 
/* initialize the default arguments for the fixed queue */
if (!once) {
/* well... this func is called when the system is not running! */
once=1;
67,11 → 81,12
}
if (args==NULL) args=&defaultargs;
/* allocate the fixed queue data structure plus the array of events */
ptr=(fixed_queue_t*)kern_alloc(sizeof(fixed_queue_t)+
sizeof(trc_event_t)*(args->size+1));
if (ptr==NULL) return -1;
 
/* set the current queue pointers and data */
queue->get=(trc_event_t*(*)(void*))fixed_get;
queue->post=(int(*)(void*))fixed_post;
queue->data=ptr;
/shark/tags/rel_0_2/kernel/modules/trcdfix.c
0,0 → 1,152
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
#include <ll/sys/types.h>
#include <ll/stdlib.h>
 
#include <kernel/func.h>
#include <kernel/mem.h>
#include <kernel/log.h>
 
#include <trace/types.h>
#include <trace/trace.h>
#include <trace/queues.h>
 
#include <ll/i386/x-dos.h>
 
/* this file implements a fixed queue, that is, simply an array that
is filled with events until it is full; after that, all further
events are discarded. It uses the DOSFS filesystem to write out the data.
 
This file is derived from trcfixed.c; a separate file is used because
including this code in trcfixed.c would have implied linking the whole
filesystem into every executable...
*/
 
 
 
 
typedef struct TAGfixed_queue_t {
int size;
int index;
char *filename;
int uniq;
trc_event_t table[0];
/* Yes, 0!... the elements are allocated
together with the structure by the kern_alloc call in dosfs_fixed_create */
} dosfs_fixed_queue_t;
 
/* This function simply returns an event to fill (only if the fixed table
is not yet full) */
static trc_event_t *dosfs_fixed_get(dosfs_fixed_queue_t *queue)
{
if (queue->index>=queue->size) return NULL;
return &queue->table[queue->index++];
}
 
/* since get returns the correct event address,
the post function does nothing... */
static int dosfs_fixed_post(dosfs_fixed_queue_t *queue)
{
return 0;
}
 
static TRC_FIXED_PARMS defaultargs;
static int once=0;
 
static void dosfs_fixed_flush(void *arg);
 
static int dosfs_fixed_create(trc_queue_t *queue, TRC_FIXED_PARMS *args)
{
dosfs_fixed_queue_t *ptr;
 
/* initialize the default arguments for the fixed queue */
if (!once) {
/* well... this func is called when the system is not running! */
once=1;
trc_fixed_default_parms(defaultargs);
}
if (args==NULL) args=&defaultargs;
/* allocate the fixed queue data structure plus the array of events */
ptr=(dosfs_fixed_queue_t*)kern_alloc(sizeof(dosfs_fixed_queue_t)+
sizeof(trc_event_t)*(args->size+1));
if (ptr==NULL) return -1;
 
/* set the current queue pointers and data */
queue->get=(trc_event_t*(*)(void*))dosfs_fixed_get;
queue->post=(int(*)(void*))dosfs_fixed_post;
queue->data=ptr;
 
ptr->size=args->size;
ptr->index=0;
ptr->filename=args->filename;
 
/* prepare for shutdown ;-) */
sys_atrunlevel(dosfs_fixed_flush, (void *)ptr, RUNLEVEL_AFTER_EXIT);
 
return 0;
}
 
static void dosfs_fixed_flush(void *arg)
{
DOS_FILE *f;
dosfs_fixed_queue_t *queue = (dosfs_fixed_queue_t *)arg;
char pathname[100]; /* this should be PATH_MAX, but we do not link the
filesystem, so that symbol is not defined */
 
if (queue->filename==NULL) trc_create_name("fix",queue->uniq,pathname);
else trc_create_name(queue->filename,0,pathname);
 
printk(KERN_DEBUG "tracer flush index= %d pathname=%s\n",
queue->index, pathname);
 
f = DOS_fopen(pathname,"w");
 
DOS_fwrite(queue->table,1,queue->index*sizeof(trc_event_t),f);
 
DOS_fclose(f);
 
}
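/* Illustrative sketch (hypothetical host-side reader; the file name and
   the process() consumer are assumptions): since the flush writes raw
   trc_event_t records back to back, a trace file can be read back with
   plain fread calls. */
#if 0
trc_event_t evt;
FILE *f = fopen("trace.fix", "rb");
while (fread(&evt, sizeof(evt), 1, f) == 1)
  process(&evt);
fclose(f);
#endif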
 
static int dosfs_fixed_activate(dosfs_fixed_queue_t *queue, int uniq)
{
queue->uniq=uniq;
return 0;
}
 
static int dosfs_fixed_terminate(dosfs_fixed_queue_t *queue)
{
return 0;
}
 
int trc_register_dosfs_fixed_queue(void)
{
int res;
res=trc_register_queuetype(TRC_DOSFS_FIXED_QUEUE,
(int(*)(trc_queue_t*,void*))dosfs_fixed_create,
(int(*)(void*,int))dosfs_fixed_activate,
(int(*)(void*))dosfs_fixed_terminate
);
if (res!=0) printk(KERN_WARNING "can't register tracer DOSFS fixed queue\n");
return res;
}
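/* A minimal usage sketch (not part of the original file): register the
   DOSFS queue type during phase 1, create a queue on it, and let
   dosfs_fixed_flush dump the table to disk at shutdown. */
#if 0
int q;
TRC_init_phase1(NULL);
trc_register_dosfs_fixed_queue();
q = trc_create_queue(TRC_DOSFS_FIXED_QUEUE, NULL); /* default parms */
if (q != -1)
  trc_assign_class_to_queue(TRC_CLASS_SYSTEM, q);
#endif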