Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 656 → Rev 657

/shark/trunk/kernel/activate.c
18,11 → 18,11
 
/**
------------
CVS : $Id: activate.c,v 1.7 2004-03-10 14:51:42 giacomo Exp $
CVS : $Id: activate.c,v 1.8 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:42 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
task_activate & group_activate
70,6 → 70,7
int task_activate(PID p)
{
LEVEL l; /* the level of the task p */
struct timespec t;
 
/* some controls on the task p */
if (p<0 || p>=MAX_PROC) {
81,6 → 82,8
return -1;
}
 
kern_gettime(&t);
 
/*+ if we are calling the runlevel functions the system is
into the global_context... we only have to call
the task_activate of the level +*/
91,7 → 94,7
proc_table[p].frozen_activations++;
else {
l = proc_table[p].task_level;
level_table[l]->public_activate(l,p);
level_table[l]->public_activate(l,p,&t);
}
kern_frestore(f);
return 0;
106,7 → 109,7
proc_table[p].frozen_activations++;
else {
l = proc_table[p].task_level;
level_table[l]->public_activate(l,p);
level_table[l]->public_activate(l,p,&t);
event_need_reschedule();
}
kern_frestore(f);
120,7 → 123,7
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[p].context,0);
l = proc_table[p].task_level;
level_table[l]->public_activate(l,p);
level_table[l]->public_activate(l,p,&t);
/* Preempt if necessary */
scheduler();
140,6 → 143,7
{
PID i; /* a counter */
register LEVEL l; /* a level value */
struct timespec t;
 
if (g == 0) {
errno = EINVALID_GROUP;
146,6 → 150,8
return -1;
}
 
kern_gettime(&t);
 
/*+ if we are calling the runlevel functions the system is
into the global_context... we only have to call
the task_activate of the level +*/
162,7 → 168,7
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[i].context,0);
l = proc_table[i].task_level;
level_table[l]->public_activate(l,i);
level_table[l]->public_activate(l,i,&t);
}
 
kern_frestore(f);
181,7 → 187,7
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[i].context,0);
l = proc_table[i].task_level;
level_table[l]->public_activate(l,i);
level_table[l]->public_activate(l,i,&t);
event_need_reschedule();
}
kern_frestore(f);
196,7 → 202,7
continue;
}
l = proc_table[i].task_level;
level_table[l]->public_activate(l,i);
level_table[l]->public_activate(l,i,&t);
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_activate,(unsigned short int)proc_table[i].context,0);
}
/shark/trunk/kernel/init.c
18,11 → 18,11
 
/**
------------
CVS : $Id: init.c,v 1.3 2003-04-16 17:18:15 giacomo Exp $
CVS : $Id: init.c,v 1.4 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2003-04-16 17:18:15 $
Revision: $Revision: 1.4 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
- Kernel module registration functions
226,7 → 226,7
(int (*)(LEVEL,PID)) level_return0, /* public_eligible */
(void (*)(LEVEL,PID, int)) level_excfunc, /* public_dispatch */
(void (*)(LEVEL,PID)) level_excfunc, /* public_epilogue */
(void (*)(LEVEL,PID)) level_excfunc, /* public_activate */
(void (*)(LEVEL,PID,struct timespec *))level_excfunc, /* public_activate */
(void (*)(LEVEL,PID)) level_excfunc, /* public_unblock */
(void (*)(LEVEL,PID)) level_excfunc, /* public_block */
(int (*)(LEVEL,PID,void *)) level_excfunc, /* public_message */
/shark/trunk/kernel/modules/edf.c
20,11 → 20,11
 
/**
------------
CVS : $Id: edf.c,v 1.13 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: edf.c,v 1.14 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.13 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.14 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the scheduling module EDF (Earliest Deadline First)
60,104 → 60,180
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
//#define EDF_DEBUG
#define edf_printf kern_printf
 
/*+ Status used in the level +*/
#define EDF_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define EDF_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define EDF_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define EDF_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define EDF_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
#ifdef EDF_DEBUG
/* Debug helper: format the current system time (read via sys_gettime)
   as "sec.usec" into a static buffer and return it.
   Uses a static buffer, so it is not reentrant and a later call
   overwrites the previous result — debug printing only. */
char *pnow() {
static char buf[40];
struct timespec t;
sys_gettime(&t);
sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
return buf;
}
/* Debug helper: format *t as "sec.usec" (e.g. "1.000500") and return
   a pointer to a static buffer. Not reentrant: a second call
   overwrites the previous result — debug printing only. */
char *ptime1(struct timespec *t) {
  static char buf[40];
  /* snprintf bounds the write (plain sprintf could overflow buf on
     pathological values); the cast keeps %ld correct even where
     time_t is not long; tv_nsec/1000 converts ns -> us */
  snprintf(buf, sizeof(buf), "%ld.%06ld", (long)t->tv_sec, t->tv_nsec/1000);
  return buf;
}
/* Debug helper: identical to ptime1 but with its own static buffer,
   so both can appear in a single printf call. Not reentrant. */
char *ptime2(struct timespec *t) {
  static char buf[40];
  /* bounded write; cast keeps %ld correct for any time_t width */
  snprintf(buf, sizeof(buf), "%ld.%06ld", (long)t->tv_sec, t->tv_nsec/1000);
  return buf;
}
#endif
 
/*+ flags +*/
#define EDF_FLAG_SPORADIC 1
#define EDF_FLAG_NORAISEEXC 2
#define EDF_FLAG_SLEEP 4
/* statuses used in the level */
#define EDF_READY MODULE_STATUS_BASE /* ready */
#define EDF_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define EDF_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define EDF_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */
 
/*+ the level redefinition for the Earliest Deadline First level +*/
typedef struct {
level_des l; /*+ the standard level descriptor +*/
/* task flags */
#define EDF_FLAG_SPORADIC 1 /* the task is sporadic */
#define EDF_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */
 
TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
stored in the priority field +*/
int deadline_timer[MAX_PROC];
/*+ The task deadline timers +*/
 
int flag[MAX_PROC];
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/
/* the level redefinition for the Earliest Deadline First level */
typedef struct {
level_des l; /* standard level descriptor */
IQUEUE ready; /* the ready queue */
int flags; /* level flags */
bandwidth_t U; /* used bandwidth */
 
IQUEUE ready; /*+ the ready queue +*/
int taskflags[MAX_PROC]; /* task flags */
TIME period[MAX_PROC]; /* task period */
TIME rdeadline[MAX_PROC]; /* task relative deadlines */
TIME offset[MAX_PROC]; /* task release offsets */
struct timespec release[MAX_PROC]; /* release time of the task */
struct timespec adeadline[MAX_PROC]; /* latest assigned deadline
(needed to correctly assign deadlines to queued activations) */
int dl_timer[MAX_PROC]; /* deadline overrun timer */
int eop_timer[MAX_PROC]; /* end of period timer */
int dl_miss[MAX_PROC]; /* deadline miss counter */
int wcet_miss[MAX_PROC]; /* WCET miss counter */
int nact[MAX_PROC]; /* number of pending periodic jobs */
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */
} EDF_level_des;
 
int flags; /*+ the init flags... +*/
 
bandwidth_t U; /*+ the used bandwidth +*/
static void EDF_timer_endperiod(void *par);
 
} EDF_level_des;
 
/* This function is called when a task misses its deadline */
 
/* Deadline-miss handler: fired by the dl_timer when task p is still
   pending past its absolute deadline. Depending on the level flags it
   either raises a fatal exception or only accounts the miss.
   par: the PID of the task, smuggled through the void* event arg. */
static void EDF_timer_deadline(void *par)
{
  PID p = (PID) par;
  EDF_level_des *lev =
    (EDF_level_des *)level_table[proc_table[p].task_level];

  /* log the miss for the tracer */
  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,
                  (unsigned short int)proc_table[p].context, 0);

  if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
    /* hard setting: a deadline miss kills the task */
    kern_raise(XDEADLINE_MISS, p);
  } else {
    /* soft setting: just count it (see EDF_get_dl_miss) */
    lev->dl_miss[p]++;
  }
}
 
 
/* Release (or queue) task, post deadline and endperiod timers.
The release time is stored in lev->release[p]. */
 
/* Release task p now, or queue the activation if the task is still
   busy. Assumes lev->release[p] already holds this job's release time
   and lev->adeadline[p] the next absolute deadline to assign.
   Called from EDF_timer_offset, EDF_timer_endperiod and
   EDF_public_activate. The order below (deadline timer first, then
   release/queue, then end-of-period timer) must be preserved. */
static void EDF_intern_release(PID p, EDF_level_des *lev)
{
struct timespec temp;

/* post deadline timer: fires at release + relative deadline
   (handler: EDF_timer_deadline) */
if (lev->flags & EDF_ENABLE_DL_CHECK) {
temp = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
lev->dl_timer[p] = kern_event_post(&temp,EDF_timer_deadline,(void *)p);
}

/* release or queue next job */
if (proc_table[p].status == EDF_IDLE) {
/* task is idle: assign deadline, insert task in the ready queue */
proc_table[p].status = EDF_READY;
*iq_query_timespec(p,&lev->ready) = lev->adeadline[p];
iq_timespec_insert(p,&lev->ready);
#ifdef EDF_DEBUG
edf_printf("At %s: releasing %s with deadline %s\n", pnow(),
proc_table[p].name, ptime1(&lev->adeadline[p]));
#endif
/* advance the assigned deadline, so queued activations get
   correctly spaced (later) deadlines */
ADDUSEC2TIMESPEC(lev->period[p], &lev->adeadline[p]);
/* a new ready task may preempt: ask for a reschedule */
event_need_reschedule();
} else {
/* still busy: queue the activation, served at end of period */
lev->nact[p]++;
}

/* advance the release time by one period */
ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
/* post end of period timer (handler: EDF_timer_endperiod) */
lev->eop_timer[p] = kern_event_post(&lev->release[p],
EDF_timer_endperiod,(void *)p);

TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
}
 
 
/* Release after an offset */
 
/* Offset timer handler: performs the first release of a task once its
   initial release offset has elapsed (see EDF_public_activate). */
static void EDF_timer_offset(void *par)
{
  PID task = (PID) par;
  EDF_level_des *level_descr =
    (EDF_level_des *)level_table[proc_table[task].task_level];

#ifdef EDF_DEBUG
  edf_printf("(EDF:Dl TIMER:%d)", task);
#endif

  EDF_intern_release(task, level_descr);
}
 
switch (proc_table[p].status) {
case EDF_ZOMBIE:
/* we finally put the task in the ready queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
 
case EDF_IDLE:
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
/* similar to EDF_task_activate */
temp = iq_query_timespec(p,&lev->ready);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
event_need_reschedule();
break;
/* This function is called at the end of the period */
 
case EDF_WAIT:
/* Without this, the task cannot be reactivated!!! */
proc_table[p].status = SLEEP;
static void EDF_timer_endperiod(void *par)
{
PID p = (PID) par;
EDF_level_des *lev;
lev = (EDF_level_des *)level_table[proc_table[p].task_level];
 
/* Reset the EDF_FLAG_SLEEP */
lev->flag[p] &= ~EDF_FLAG_SLEEP;
lev->eop_timer[p] = -1;
 
break;
if (proc_table[p].status == EDF_ZOMBIE) {
/* put the task in the FREE state */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
return;
}
 
default:
/* else, a deadline miss occurred!!! */
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
if (proc_table[p].status == EDF_WAIT) {
proc_table[p].status = SLEEP;
return;
}
if (!(lev->taskflags[p] & EDF_FLAG_SPORADIC)) {
/* if the task is periodic, rerelease it (now or later) */
EDF_intern_release(p, lev);
} else {
/* the sporadic task is still busy. mark it as late */
lev->taskflags[p] |= EDF_FLAG_SPOR_LATE;
}
}
 
 
/* This function is called when a guest task misses its deadline */
 
/* Deadline-miss handler for guest (JOB_TASK_MODEL) tasks: a guest
   deadline miss is always fatal — log it and raise the exception.
   par: the PID of the guest task. */
static void EDF_timer_guest_deadline(void *par)
{
PID p = (PID) par;

#ifdef EDF_DEBUG
edf_printf("(EDF:AAARRRGGGHHH!!!)");
#endif
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
191,57 → 267,68
if (m->level != 0 && m->level != l) return -1;
h = (HARD_TASK_MODEL *)m;
if (!h->wcet || !h->mit) return -1;
if (h->drel > h->mit) return -1; /* only D <= T supported */
 
if (!h->drel) {
lev->rdeadline[p] = h->mit;
} else {
lev->rdeadline[p] = h->drel;
}
 
/* check the free bandwidth... */
if (lev->flags & EDF_ENABLE_GUARANTEE) {
bandwidth_t b;
b = (MAX_BANDWIDTH / h->mit) * h->wcet;
b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;
 
/* really update lev->U, checking an overflow... */
if (MAX_BANDWIDTH - lev->U > b)
if (MAX_BANDWIDTH - lev->U > b) {
lev->U += b;
else
} else {
return -1;
}
}
 
/* now we know that m is a valid model */
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
lev->flags |= EDF_ENABLE_WCET_CHECK;
}
if (lev->flags & EDF_ENABLE_DL_EXCEPTION) {
lev->flags |= EDF_ENABLE_DL_CHECK;
}
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubCrt:%d)", p);
#endif
 
lev->period[p] = h->mit;
if (lev->rdeadline[p] == lev->period[p]) {
/* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
lev->rdeadline[p] = lev->period[p] - 1;
}
lev->flag[p] = 0;
lev->taskflags[p] = 0;
 
if (h->periodicity == APERIODIC)
lev->flag[p] |= EDF_FLAG_SPORADIC;
lev->taskflags[p] |= EDF_FLAG_SPORADIC;
lev->deadline_timer[p] = -1;
lev->dl_timer[p] = -1;
lev->eop_timer[p] = -1;
 
/* Enable wcet check */
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
proc_table[p].avail_time = h->wcet;
proc_table[p].wcet = h->wcet;
proc_table[p].control |= CONTROL_CAP;
proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
}
 
lev->offset[p] = h->offset;
 
NULL_TIMESPEC(&lev->release[p]);
 
return 0; /* OK, also if the task cannot be guaranteed... */
}
 
static void EDF_public_detach(LEVEL l, PID p)
{
/* the EDF level doesn't introduce any dynamically allocated new field.
we have only to decrement the allocated bandwidth */
 
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubDet:%d)", p);
#endif
 
if (lev->flags & EDF_ENABLE_GUARANTEE) {
lev->U -= (MAX_BANDWIDTH / lev->period[p]) * proc_table[p].wcet;
lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
248,14 → 335,6
/* Dispatch hook: the scheduler has chosen task p to run.
   We only remove it from the ready queue; its status was already set
   to EXE by scheduler(). The nostop argument is unused here. */
static void EDF_public_dispatch(LEVEL l, PID p, int nostop)
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);

#ifdef EDF_DEBUG
edf_printf("(EDF:PubDsp:%d)",p);
#endif

/* the task state is set EXE by the scheduler()
we extract the task from the ready queue
NB: we can't assume that p is the first task in the queue!!! */
iq_extract(p, &lev->ready);
}
 
263,65 → 342,67
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubEpi:%d)",p);
#endif
 
/* check if the wcet is finished... */
if ((lev->flags & EDF_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
/* if it is, raise a XWCET_VIOLATION exception */
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
kern_raise(XWCET_VIOLATION,p);
proc_table[p].status = EDF_WCET_VIOLATED;
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
if (lev->flags & EDF_ENABLE_WCET_EXCEPTION) {
kern_raise(XWCET_VIOLATION,p);
} else {
proc_table[p].control &= ~CONTROL_CAP;
lev->wcet_miss[p]++;
}
}
}
else {
/* the task has been preempted. it returns into the ready queue... */
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
}
 
/* the task returns to the ready queue */
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
 
}
 
static void EDF_public_activate(LEVEL l, PID p)
static void EDF_public_activate(LEVEL l, PID p, struct timespec *t)
{
struct timespec clocktime;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
struct timespec *temp;
 
#ifdef EDF_DEBUG
edf_printf("(EDF:PubAct:%d)", p);
#endif
kern_gettime(&clocktime);
 
if (lev->flag[p] & EDF_FLAG_SLEEP) {
lev->flag[p] &= ~EDF_FLAG_SLEEP;
if (!(lev->flag[p] & EDF_FLAG_SPORADIC))
proc_table[p].status = EDF_IDLE;
/* check if we are not in the SLEEP state */
if (proc_table[p].status != SLEEP) {
if (lev->flags & EDF_ENABLE_ACT_EXCEPTION) {
/* too frequent or wrongful activation: raise exception */
kern_raise(XACTIVATION,p);
} else {
/* skip the sporadic job, but increase a counter */
#ifdef EDF_DEBUG
edf_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
#endif
lev->nskip[p]++;
}
return;
}
 
if (proc_table[p].status == EDF_WAIT) {
kern_raise(XACTIVATION,p);
return;
}
/* Test if we are trying to activate a non sleeping task */
/* Ignore this; the task is already active */
if (proc_table[p].status != SLEEP &&
proc_table[p].status != EDF_WCET_VIOLATED)
return;
/* set the release time to the activation time + offset */
lev->release[p] = *t;
ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);
 
/* set the absolute deadline to the activation time + offset + rdeadline */
lev->adeadline[p] = lev->release[p];
ADDUSEC2TIMESPEC(lev->rdeadline[p], &lev->adeadline[p]);
 
/* see also EDF_timer_deadline */
temp = iq_query_timespec(p, &lev->ready);
kern_gettime(temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
/* Check if release > clocktime. If so, release it later,
otherwise release it now. */
 
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_IDLE;
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(temp,
EDF_timer_deadline,
(void *)p);
if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
/* release later */
kern_event_post(&lev->release[p],EDF_timer_offset,(void *)p);
} else {
/* release now */
EDF_intern_release(p, lev);
}
}
 
static void EDF_public_unblock(LEVEL l, PID p)
328,10 → 409,7
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* Similar to EDF_task_activate,
but we don't check in what state the task is */
 
/* Insert task in the coEDFect position */
/* Insert task in the correct position */
proc_table[p].status = EDF_READY;
iq_timespec_insert(p,&lev->ready);
}
353,53 → 431,86
{
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* task_message evaluation */
switch((long)(m)) {
 
/* task_endcycle */
case (long)(NULL):
 
#ifdef EDF_DEBUG
edf_printf("(EDF:EndCyc:%d)",p);
#endif
 
/* the task has terminated his job before it consume the wcet. All OK! */
if (!(lev->flag[p] & EDF_FLAG_SPORADIC) &&
!(lev->flag[p] & EDF_FLAG_SLEEP))
proc_table[p].status = EDF_IDLE;
else
proc_table[p].status = EDF_WAIT;
 
/* we reset the capacity counters... */
if (lev->flags & EDF_ENABLE_WCET_CHECK)
proc_table[p].avail_time = proc_table[p].wcet;
 
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
break;
 
/* task_disable */
case 1:
 
#ifdef EDF_DEBUG
edf_printf("(EDF:Dis:%d)",p);
#endif
 
/* Set the EDF_FLAG_SLEEP, in the next endcycle the task will
be set in EDF_WAIT */
lev->flag[p] |= EDF_FLAG_SLEEP;
 
/* If the task is EDF_IDLE, set to EDF_WAIT now */
if (proc_table[p].status == EDF_IDLE)
/* task_endcycle() */
case 0:
/* if there are no pending jobs */
if (lev->nact[p] == 0) {
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & EDF_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
proc_table[p].status = EDF_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~EDF_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
proc_table[p].status = EDF_IDLE;
}
} else {
/* we are late / there are pending jobs */
lev->nact[p]--;
/* compute and assign absolute deadline */
*iq_query_timespec(p,&lev->ready) = lev->adeadline[p];
iq_timespec_insert(p,&lev->ready);
/* increase assigned deadline */
ADDUSEC2TIMESPEC(lev->period[p], &lev->adeadline[p]);
#ifdef EDF_DEBUG
edf_printf("(Late) At %s: releasing %s with deadline %s\n",
pnow(),proc_table[p].name,ptime1(&lev->adeadline[p]));
#endif
}
break;
/* task_sleep() */
case 1:
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & EDF_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
proc_table[p].status = EDF_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~EDF_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
if (!(lev->nact[p] > 0)) {
/* we are on time. go to the EDF_WAIT state */
proc_table[p].status = EDF_WAIT;
} else {
/* we are late. delete pending activations and go to SLEEP */
lev->nact[p] = 0;
proc_table[p].status = SLEEP;
/* remove end of period timer */
if (lev->eop_timer[p] != -1) {
kern_event_delete(lev->eop_timer[p]);
lev->eop_timer[p] = -1;
}
}
}
break;
}
 
TRACER_LOGEVENT(FTrace_EVT_task_disable,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
break;
 
if (lev->flags & EDF_ENABLE_WCET_CHECK) {
proc_table[p].control |= CONTROL_CAP;
}
 
proc_table[p].avail_time = proc_table[p].wcet;
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
return 0;
 
}
406,10 → 517,22
 
static void EDF_public_end(LEVEL l, PID p)
{
proc_table[p].status = EDF_ZOMBIE;
EDF_level_des *lev = (EDF_level_des *)(level_table[l]);
 
/* When the deadline timer fire, it put the task descriptor in
the free queue, and free the allocated bandwidth... */
if (!(lev->taskflags[p] & EDF_FLAG_SPOR_LATE)) {
/* remove the deadline timer (if any) */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
proc_table[p].status = EDF_ZOMBIE;
} else {
/* no endperiod timer will be fired, free the task now! */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
static void EDF_private_insert(LEVEL l, PID p, TASK_MODEL *m)
429,18 → 552,13
iq_timespec_insert(p,&lev->ready);
proc_table[p].status = EDF_READY;
lev->deadline_timer[p] = -1;
lev->dl_timer[p] = -1;
 
lev->period[p] = job->period;
 
/* Set the deadline timer */
if (job->noraiseexc)
lev->flag[p] = EDF_FLAG_NORAISEEXC;
else {
lev->flag[p] = 0;
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
EDF_timer_guest_deadline,
(void *)p);
if (!job->noraiseexc) {
lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
EDF_timer_guest_deadline,(void *)p);
}
}
 
471,18 → 589,16
iq_extract(p, &lev->ready);
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
kern_event_delete(lev->deadline_timer[p]);
lev->deadline_timer[p] = NIL;
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
 
}
 
 
/* Registration functions */
 
/*+ Registration function:
int flags the init flags ... see edf.h +*/
/* Registration function:
int flags the init flags ... see edf.h */
LEVEL EDF_register_level(int flags)
{
LEVEL l; /* the level that we register */
520,9 → 636,14
 
/* fill the EDF descriptor part */
for(i=0; i<MAX_PROC; i++) {
lev->period[i] = 0;
lev->deadline_timer[i] = -1;
lev->flag[i] = 0;
lev->period[i] = 0;
lev->dl_timer[i] = -1;
lev->eop_timer[i] = -1;
lev->taskflags[i] = 0;
lev->dl_miss[i] = 0;
lev->wcet_miss[i] = 0;
lev->nact[i] = 0;
lev->nskip[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
539,3 → 660,35
return lev->U;
}
 
/* Return the number of pending (queued) activations of task p. */
int EDF_get_nact(PID p)
{
  EDF_level_des *lev =
    (EDF_level_des *)level_table[proc_table[p].task_level];
  return lev->nact[p];
}
 
/* Return how many deadline misses have been recorded for task p. */
int EDF_get_dl_miss(PID p)
{
  EDF_level_des *lev =
    (EDF_level_des *)level_table[proc_table[p].task_level];
  return lev->dl_miss[p];
}
 
/* Return how many WCET overruns have been recorded for task p. */
int EDF_get_wcet_miss(PID p)
{
  EDF_level_des *lev =
    (EDF_level_des *)level_table[proc_table[p].task_level];
  return lev->wcet_miss[p];
}
 
/* Return how many sporadic activations of task p were skipped. */
int EDF_get_nskip(PID p)
{
  EDF_level_des *lev =
    (EDF_level_des *)level_table[proc_table[p].task_level];
  return lev->nskip[p];
}
 
/shark/trunk/kernel/modules/posix.c
20,11 → 20,11
 
/**
------------
CVS : $Id: posix.c,v 1.8 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: posix.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module compatible with POSIX
210,7 → 210,7
proc_table[p].status = POSIX_READY;
}
 
static void POSIX_public_activate(LEVEL l, PID p)
static void POSIX_public_activate(LEVEL l, PID p, struct timespec *t)
{
POSIX_level_des *lev = (POSIX_level_des *)(level_table[l]);
 
311,7 → 311,7
if (p == NIL)
printk("\nPanic!!! can't create main task...\n");
 
POSIX_public_activate(lev,p);
POSIX_public_activate(lev,p,NULL);
}
 
 
/shark/trunk/kernel/modules/srp.c
20,11 → 20,11
 
/**
------------
CVS : $Id: srp.c,v 1.7 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: srp.c,v 1.8 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
Stack Resource Policy. see srp.h for general details...
707,8 → 707,10
 
/* activate the task if it was activated while in lobby list! */
if (task_unblock_activation(x)) {
struct timespec t;
LEVEL sl = proc_table[x].task_level;
level_table[sl]->public_activate(sl,x);
kern_gettime(&t);
level_table[sl]->public_activate(sl,x,&t);
// kern_printf("activate it!!!");
}
}
/shark/trunk/kernel/modules/rr2.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr2.c,v 1.7 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rr2.c,v 1.8 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RR2 (Round Robin) version 2
167,7 → 167,7
proc_table[p].status = RR2_READY;
}
 
static void RR2_public_activate(LEVEL l, PID p)
static void RR2_public_activate(LEVEL l, PID p, struct timespec *t)
{
RR2_level_des *lev = (RR2_level_des *)(level_table[l]);
 
265,7 → 265,7
if (p == NIL)
printk("\nPanic!!! can't create main task...\n");
 
RR2_public_activate(lev,p);
RR2_public_activate(lev,p,NULL);
}
 
 
/shark/trunk/kernel/modules/hardcbs.c
358,10 → 358,9
private_epilogue(lev->scheduling_level,p);
}
 
static void HCBS_public_activate(LEVEL l, PID p)
static void HCBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
HCBS_level_des *lev = (HCBS_level_des *)(level_table[l]);
struct timespec t;
 
if (lev->flag[p] & HCBS_SLEEP) {
lev->flag[p] &= ~HCBS_SLEEP;
375,10 → 374,8
return;
}
 
kern_gettime(&t);
HCBS_activation(lev, p, t);
 
HCBS_activation(lev, p, &t);
 
/* Set the reactivation timer */
if (!(lev->flag[p] & HCBS_APERIODIC))
{
386,7 → 383,7
the deadline may be != from actual_time + period
(if we call the task_activate after a task_sleep, and the
deadline was postponed a lot...) */
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &t);
TIMESPEC_ASSIGN(&lev->reactivation_time[p], t);
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
/shark/trunk/kernel/modules/ds.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ds.c,v 1.7 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: ds.c,v 1.8 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the aperiodic server DS (Deferrable Server)
278,7 → 278,7
}
}
 
static void DS_public_activate(LEVEL l, PID p)
static void DS_public_activate(LEVEL l, PID p, struct timespec *t)
{
DS_level_des *lev = (DS_level_des *)(level_table[l]);
 
/shark/trunk/kernel/modules/cbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: cbs.c,v 1.10 2004-03-10 14:51:43 giacomo Exp $
CVS : $Id: cbs.c,v 1.11 2004-05-17 15:03:51 anton Exp $
 
File: $File$
Revision: $Revision: 1.10 $
Last update: $Date: 2004-03-10 14:51:43 $
Revision: $Revision: 1.11 $
Last update: $Date: 2004-05-17 15:03:51 $
------------
 
This file contains the aperiodic server CBS (Constant Bandwidth Server)
404,10 → 404,9
private_epilogue(lev->scheduling_level,p);
}
 
static void CBS_public_activate(LEVEL l, PID p)
static void CBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
CBS_level_des *lev = (CBS_level_des *)(level_table[l]);
struct timespec t;
 
if (lev->flag[p] & CBS_SLEEP) {
lev->flag[p] &= ~CBS_SLEEP;
421,10 → 420,8
return;
}
 
kern_gettime(&t);
CBS_activation(lev, p, t);
 
CBS_activation(lev, p, &t);
 
/* Set the reactivation timer */
if (!(lev->flag[p] & CBS_APERIODIC))
{
432,7 → 429,7
the deadline may be != from actual_time + period
(if we call the task_activate after a task_sleep, and the
deadline was postponed a lot...) */
TIMESPEC_ASSIGN(&lev->reactivation_time[p], &t);
TIMESPEC_ASSIGN(&lev->reactivation_time[p], t);
ADDUSEC2TIMESPEC(lev->period[p], &lev->reactivation_time[p]);
// TIMESPEC_ASSIGN(&lev->reactivation_time[p], &lev->cbs_dline[p]);
lev->reactivation_timer[p] = kern_event_post(&lev->reactivation_time[p],
/shark/trunk/kernel/modules/rm.c
20,24 → 20,17
 
/**
------------
CVS : $Id: rm.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rm.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RM (Rate Monotonic)
This file contains the scheduling module RM (rate/deadline monotonic)
 
Read rm.h for further details.
 
This file is equal to EDF.c except for:
 
. EDF changed to RM :-)
. q_timespec_insert changed to q_insert
. proc_table[p].priority is also modified when we modify lev->period[p]
 
 
**/
 
/*
67,91 → 60,174
#include <kernel/descr.h>
#include <kernel/var.h>
#include <kernel/func.h>
 
#include <tracer.h>
 
/*+ Status used in the level +*/
#define RM_READY MODULE_STATUS_BASE /*+ - Ready status +*/
#define RM_WCET_VIOLATED MODULE_STATUS_BASE+2 /*+ when wcet is finished +*/
#define RM_WAIT MODULE_STATUS_BASE+3 /*+ to wait the deadline +*/
#define RM_IDLE MODULE_STATUS_BASE+4 /*+ to wait the deadline +*/
#define RM_ZOMBIE MODULE_STATUS_BASE+5 /*+ to wait the free time +*/
//#define RM_DEBUG
#define rm_printf kern_printf
 
/*+ flags +*/
#define RM_FLAG_SPORADIC 1
#define RM_FLAG_NORAISEEXC 2
#ifdef RM_DEBUG
/* some debug print functions */
/* Debug helper: format the current system time (read via sys_gettime)
   as "sec.usec" into a static buffer and return it.
   Uses a static buffer, so it is not reentrant and a later call
   overwrites the previous result — debug printing only. */
char *pnow() {
static char buf[40];
struct timespec t;
sys_gettime(&t);
sprintf(buf, "%ld.%06ld", t.tv_sec, t.tv_nsec/1000);
return buf;
}
/* Debug helper: format *t as "sec.usec" (e.g. "1.000500") and return
   a pointer to a static buffer. Not reentrant: a second call
   overwrites the previous result — debug printing only. */
char *ptime1(struct timespec *t) {
  static char buf[40];
  /* snprintf bounds the write (plain sprintf could overflow buf on
     pathological values); the cast keeps %ld correct even where
     time_t is not long; tv_nsec/1000 converts ns -> us */
  snprintf(buf, sizeof(buf), "%ld.%06ld", (long)t->tv_sec, t->tv_nsec/1000);
  return buf;
}
/* Debug helper: identical to ptime1 but with its own static buffer,
   so both can appear in a single printf call. Not reentrant. */
char *ptime2(struct timespec *t) {
  static char buf[40];
  /* bounded write; cast keeps %ld correct for any time_t width */
  snprintf(buf, sizeof(buf), "%ld.%06ld", (long)t->tv_sec, t->tv_nsec/1000);
  return buf;
}
#endif
 
/*+ the level redefinition for the Rate Monotonic +*/
/* NOTE(review): this span is corrupted diff residue -- TWO versions of
   RM_level_des are interleaved: an older one (period[]/deadline_timer[]/
   flag[]/IQUEUE after the macros) and a newer one (taskflags[]/rdeadline[]/
   offset[]/release[]/dl_timer[]/eop_timer[]/counters).  A second
   "typedef struct {" opens inside the first and BOTH "} RM_level_des;"
   terminators are present, so this cannot compile.  Keep only the newer
   (rev 657) definition -- TODO reconstruct from the repository. */
typedef struct {
level_des l; /*+ the standard level descriptor +*/
/* statuses used in the level */
#define RM_READY MODULE_STATUS_BASE /* ready */
#define RM_IDLE MODULE_STATUS_BASE+1 /* idle, waiting for offset/eop */
#define RM_WAIT MODULE_STATUS_BASE+2 /* to sleep, waiting for eop */
#define RM_ZOMBIE MODULE_STATUS_BASE+3 /* zombie, waiting for eop */

/* NOTE(review): fields below up to the inner "typedef struct {" belong
   to the OLD revision of the descriptor. */
TIME period[MAX_PROC]; /*+ The task periods; the deadlines are
stored in the priority field +*/
int deadline_timer[MAX_PROC];
/*+ The task deadline timers +*/
/* task flags */
#define RM_FLAG_SPORADIC 1 /* the task is sporadic */
#define RM_FLAG_SPOR_LATE 2 /* sporadic task with period overrun */

int flag[MAX_PROC];
/*+ used to manage the JOB_TASK_MODEL and the
periodicity +*/

IQUEUE ready; /*+ the ready queue +*/
/* the level redefinition for the Earliest Deadline First level */
/* NOTE(review): the inner typedef below is the NEW (rev 657) RM
   descriptor; the "Earliest Deadline First" comment above it appears
   to be a stray copy-paste from edf.c. */
typedef struct {
level_des l; /* standard level descriptor */
IQUEUE ready; /* the ready queue */
int flags; /* level flags */
bandwidth_t U; /* used bandwidth */

int flags; /*+ the init flags... +*/
int taskflags[MAX_PROC]; /* task flags */
TIME period[MAX_PROC]; /* task period */
TIME rdeadline[MAX_PROC]; /* task relative deadlines */
TIME offset[MAX_PROC]; /* task release offsets */
struct timespec release[MAX_PROC]; /* release time of the task */
int dl_timer[MAX_PROC]; /* deadline overrun timer */
int eop_timer[MAX_PROC]; /* end of period timer */
int dl_miss[MAX_PROC]; /* deadline miss counter */
int wcet_miss[MAX_PROC]; /* WCET miss counter */
int nact[MAX_PROC]; /* number of pending periodic jobs */
int nskip[MAX_PROC]; /* number of skipped sporadic jobs */
} RM_level_des;

/* NOTE(review): trailing fields/terminator of the OLD descriptor. */
bandwidth_t U; /*+ the used bandwidth +*/

} RM_level_des;
static void RM_timer_endperiod(void *par);
 
 
/* Deadline-miss handler, posted by RM_intern_release() when
   RM_ENABLE_DL_CHECK is set.  Fires when task p has not completed
   by its absolute deadline: logs the event, then either raises
   XDEADLINE_MISS (RM_ENABLE_DL_EXCEPTION) or just counts the miss
   so that RM_get_dl_miss() can report it. */
static void RM_timer_deadline(void *par)
{
  PID p = (PID) par;
  /* fixed: removed the unused 'struct timespec *temp' local and merged
     the declaration/assignment of 'lev' */
  RM_level_des *lev =
    (RM_level_des *)level_table[proc_table[p].task_level];

  TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);

  if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
    kern_raise(XDEADLINE_MISS,p);
  } else {
    /* soft handling: just account the miss */
    lev->dl_miss[p]++;
  }
}
 
 
/* Release (or queue) a job of task p and post the deadline and
   end-of-period timers.  The absolute release time must already be
   stored in lev->release[p]; on exit it is advanced by one period. */
static void RM_intern_release(PID p, RM_level_des *lev)
{
  struct timespec temp;

  /* post the deadline-miss timer */
  if (lev->flags & RM_ENABLE_DL_CHECK) {
    temp = lev->release[p];
    ADDUSEC2TIMESPEC(lev->rdeadline[p], &temp);
    lev->dl_timer[p] = kern_event_post(&temp,RM_timer_deadline,(void *)p);
  }

  /* release now, or queue the job if the task is still busy */
  if (proc_table[p].status == RM_IDLE) {
    /* set the (relative-deadline) priority and make the task ready */
    proc_table[p].status = RM_READY;
    *iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
    iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
    rm_printf("At %s: releasing %s\n", pnow(), proc_table[p].name);
#endif
    /* ask for a reschedule */
    event_need_reschedule();
  } else {
    /* task still running/queued: remember the pending activation */
    lev->nact[p]++;
  }

  /* advance the release time by one period */
  ADDUSEC2TIMESPEC(lev->period[p],&lev->release[p]);
  /* BUGFIX: the end-of-period timer handle was discarded, but
     RM_public_message() cancels lev->eop_timer[p] (initialized to -1
     and never assigned elsewhere) -- store the handle so the timer
     can actually be deleted. */
  lev->eop_timer[p] = kern_event_post(&lev->release[p],RM_timer_endperiod,(void *)p);

  TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
}
 
 
/* First release */

/* NOTE(review): the body below is corrupted diff residue.  The switch
   mixes the NEW RM_timer_offset() (the RM_ZOMBIE case plus the stray
   RM_intern_release() call) with the OLD RM_timer_deadline() (the
   RM_IDLE/RM_WAIT/default cases, which reference the removed field
   lev->deadline_timer[] and an undeclared variable 'temp').  There is
   also an unreachable statement after a 'break' and a premature '}'.
   Reconstruct this function from rev 657 of rm.c before building. */
static void RM_timer_offset(void *par)
{
PID p = (PID) par;
RM_level_des *lev;
lev = (RM_level_des *)level_table[proc_table[p].task_level];

switch (proc_table[p].status) {
case RM_ZOMBIE:
/* we finally put the task in the ready queue */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* and free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->period[p]) * proc_table[p].wcet;
break;
/* NOTE(review): unreachable after 'break', and the '}' below closes
   the switch prematurely -- merge artifact of the two revisions. */
RM_intern_release(p, lev);
}

/* NOTE(review): the case labels from here on belong to the OLD
   RM_timer_deadline() and fall outside any switch statement. */
case RM_IDLE:
/* tracer stuff */
TRACER_LOGEVENT(FTrace_EVT_task_timer,(unsigned short int)proc_table[p].context,(unsigned int)proc_table[p].task_level);
/* similar to RM_task_activate */
temp = iq_query_timespec(p, &lev->ready);
ADDUSEC2TIMESPEC(lev->period[p], temp);
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
lev->deadline_timer[p] = kern_event_post(temp,
RM_timer_deadline,
(void *)p);
//printk("(d%d idle priority set to %d)",p,proc_table[p].priority );
event_need_reschedule();
break;

case RM_WAIT:
/* Without this, the task cannot be reactivated!!! */
proc_table[p].status = SLEEP;
break;
/* This function is called at the end of the period */

default:
/* else, a deadline miss occurred!!! */
kern_printf("timer_deadline:AAARRRGGGHHH!!!");
kern_raise(XDEADLINE_MISS,p);
/* End-of-period handler, posted once per period by RM_intern_release().
   Recycles zombie tasks, re-releases periodic tasks, and wakes up
   sporadic tasks that are waiting for their end of period. */
static void RM_timer_endperiod(void *par)
{
  PID p = (PID) par;
  RM_level_des *lev =
    (RM_level_des *)level_table[proc_table[p].task_level];

  if (proc_table[p].status == RM_ZOMBIE) {
    /* the task terminated: recycle its descriptor ... */
    proc_table[p].status = FREE;
    iq_insertfirst(p,&freedesc);
    /* ... and give back the bandwidth it reserved */
    lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
    return;
  }

  if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
    /* sporadic: wake it up if it already finished its job,
       otherwise remember that it is running late */
    if (proc_table[p].status == RM_WAIT)
      proc_table[p].status = SLEEP;
    else
      lev->taskflags[p] |= RM_FLAG_SPOR_LATE;
  } else {
    /* periodic: release the next job (immediately or queued) */
    RM_intern_release(p, lev);
  }
}
 
 
/* This function is called when a guest task misses its deadline */

/* Guest (JOB_TASK_MODEL) deadline-miss handler, posted by the private
   insert path when the job does not suppress exceptions.  A guest
   deadline miss is always fatal: the event is traced and
   XDEADLINE_MISS is raised on the task. */
static void RM_timer_guest_deadline(void *par)
{
PID p = (PID) par;

/* NOTE(review): leftover debug shout on the kernel console;
   consider removing or downgrading it. */
kern_printf("AAARRRGGGHHH!!!");
TRACER_LOGEVENT(FTrace_EVT_task_deadline_miss,(unsigned short int)proc_table[p].context,0);
kern_raise(XDEADLINE_MISS,p);
}
 
159,7 → 235,6
/* Scheduler hook: the RM candidate is simply the task at the head of
   the priority-ordered ready queue (NIL if the queue is empty). */
static PID RM_public_scheduler(LEVEL l)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);
  PID candidate = iq_query_first(&lev->ready);
  return candidate;
}
 
179,7 → 254,6
static int RM_public_create(LEVEL l, PID p, TASK_MODEL *m)
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
HARD_TASK_MODEL *h;
 
if (m->pclass != HARD_PCLASS) return -1;
186,49 → 260,68
if (m->level != 0 && m->level != l) return -1;
h = (HARD_TASK_MODEL *)m;
if (!h->wcet || !h->mit) return -1;
if (h->drel > h->mit) return -1; /* only D <= T supported */
 
/* update the bandwidth... */
if (!h->drel) {
lev->rdeadline[p] = h->mit;
} else {
lev->rdeadline[p] = h->drel;
}
 
/* check the free bandwidth... */
if (lev->flags & RM_ENABLE_GUARANTEE) {
bandwidth_t b;
b = (MAX_BANDWIDTH / h->mit) * h->wcet;
b = (MAX_BANDWIDTH / lev->rdeadline[p]) * h->wcet;
 
/* really update lev->U, checking an overflow... */
if (MAX_BANDWIDTH - lev->U > b)
if (MAX_BANDWIDTH - lev->U > b) {
lev->U += b;
else
} else {
return -1;
}
}
 
/* now we know that m is a valid model */
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
lev->flags |= RM_ENABLE_WCET_CHECK;
}
if (lev->flags & RM_ENABLE_DL_EXCEPTION) {
lev->flags |= RM_ENABLE_DL_CHECK;
}
 
*iq_query_priority(p, &lev->ready) = lev->period[p] = h->mit;
lev->period[p] = h->mit;
if (lev->rdeadline[p] == lev->period[p]) {
/* Ensure that D <= T-eps to make dl_timer trigger before rel_timer */
lev->rdeadline[p] = lev->period[p] - 1;
}
lev->taskflags[p] = 0;
 
if (h->periodicity == APERIODIC)
lev->flag[p] = RM_FLAG_SPORADIC;
else
lev->flag[p] = 0;
lev->deadline_timer[p] = -1;
lev->taskflags[p] |= RM_FLAG_SPORADIC;
lev->dl_timer[p] = -1;
lev->eop_timer[p] = -1;
 
/* Enable wcet check */
if (lev->flags & RM_ENABLE_WCET_CHECK) {
proc_table[p].avail_time = h->wcet;
proc_table[p].wcet = h->wcet;
proc_table[p].control |= CONTROL_CAP;
proc_table[p].control |= CONTROL_CAP; /* turn on measurement */
}
 
lev->offset[p] = h->offset;
 
NULL_TIMESPEC(&lev->release[p]);
 
return 0; /* OK, also if the task cannot be guaranteed... */
}
 
/* Detach hook: the RM level allocates no dynamic per-task data, so we
   only have to give back the bandwidth reserved at creation time. */
static void RM_public_detach(LEVEL l, PID p)
{
  RM_level_des *lev = (RM_level_des *)(level_table[l]);

  if (lev->flags & RM_ENABLE_GUARANTEE) {
    /* BUGFIX: the block contained BOTH the old (period-based) and new
       (rdeadline-based) refund lines -- merged diff residue that gave
       the bandwidth back twice.  RM_public_create() charges U using
       the relative deadline, so only the rdeadline refund is kept. */
    lev->U -= (MAX_BANDWIDTH / lev->rdeadline[p]) * proc_table[p].wcet;
  }
}
 
235,12 → 328,6
/* Dispatch hook: scheduler() has already marked p as EXE; here we only
   remove it from the ready queue.  Note that p is not necessarily the
   first task in the queue, so an explicit extract is required. */
static void RM_public_dispatch(LEVEL l, PID p, int nostop)
{
  RM_level_des *level = (RM_level_des *)(level_table[l]);

  iq_extract(p, &level->ready);
}
 
248,51 → 335,63
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
// kern_printf("(epil %d)",p);
 
/* check if the wcet is finished... */
if ((lev->flags & RM_ENABLE_WCET_CHECK) && proc_table[p].avail_time <= 0) {
/* if it is, raise a XWCET_VIOLATION exception */
kern_raise(XWCET_VIOLATION,p);
proc_table[p].status = RM_WCET_VIOLATED;
if (lev->flags & RM_ENABLE_WCET_CHECK) {
if (proc_table[p].avail_time <= 0) {
TRACER_LOGEVENT(FTrace_EVT_task_wcet_violation,(unsigned short int)proc_table[p].context,0);
if (lev->flags & RM_ENABLE_WCET_EXCEPTION) {
kern_raise(XWCET_VIOLATION,p);
} else {
proc_table[p].control &= ~CONTROL_CAP;
lev->wcet_miss[p]++;
}
}
}
else {
/* the task has been preempted. it returns into the ready queue... */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
}
 
/* the task returns to the ready queue */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
 
}
 
static void RM_public_activate(LEVEL l, PID p)
static void RM_public_activate(LEVEL l, PID p, struct timespec *t)
{
struct timespec clocktime;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
struct timespec *temp;
 
if (proc_table[p].status == RM_WAIT) {
kern_raise(XACTIVATION,p);
kern_gettime(&clocktime);
 
/* check if we are not in the SLEEP state */
if (proc_table[p].status != SLEEP) {
if (lev->flags & RM_ENABLE_ACT_EXCEPTION) {
/* too frequent or wrongful activation: raise exception */
kern_raise(XACTIVATION,p);
} else {
/* skip the sporadic job, but increase a counter */
#ifdef RM_DEBUG
rm_printf("At %s: activation of %s skipped\n", pnow(), proc_table[p].name);
#endif
lev->nskip[p]++;
}
return;
}
/* set the release time to the activation time + offset */
lev->release[p] = *t;
ADDUSEC2TIMESPEC(lev->offset[p], &lev->release[p]);
 
/* Test if we are trying to activate a non sleeping task */
/* Ignore this; the task is already active */
if (proc_table[p].status != SLEEP &&
proc_table[p].status != RM_WCET_VIOLATED)
return;
/* Check if release > clocktime. If so, release it later,
otherwise release it now. */
 
proc_table[p].status = RM_IDLE;
 
/* see also RM_timer_deadline */
temp = iq_query_timespec(p, &lev->ready);
kern_gettime(temp);
ADDUSEC2TIMESPEC(lev->period[p], temp);
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
 
/* Set the deadline timer */
lev->deadline_timer[p] = kern_event_post(temp,
RM_timer_deadline,
(void *)p);
if (TIMESPEC_A_GT_B(&lev->release[p], &clocktime)) {
/* release later */
kern_event_post(&lev->release[p],RM_timer_offset,(void *)p);
} else {
/* release now */
RM_intern_release(p, lev);
}
}
 
static void RM_public_unblock(LEVEL l, PID p)
299,9 → 398,6
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* Similar to RM_task_activate,
but we don't check in what state the task is */
 
/* Insert task in the correct position */
proc_table[p].status = RM_READY;
iq_priority_insert(p,&lev->ready);
324,31 → 420,105
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* the task has terminated his job before it consume the wcet. All OK! */
if (lev->flag[p] & RM_FLAG_SPORADIC)
proc_table[p].status = RM_WAIT;
else /* pclass = sporadic_pclass */
proc_table[p].status = RM_IDLE;
switch((long)(m)) {
/* task_endcycle() */
case 0:
/* if there are no pending jobs */
if (lev->nact[p] == 0) {
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
proc_table[p].status = RM_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
proc_table[p].status = RM_IDLE;
}
} else {
/* we are late / there are pending jobs */
lev->nact[p]--;
*iq_query_priority(p,&lev->ready) = lev->rdeadline[p];
iq_priority_insert(p,&lev->ready);
#ifdef RM_DEBUG
rm_printf("(Late) At %s: releasing %s\n",
pnow(), proc_table[p].name);
#endif
}
break;
/* task_sleep() */
case 1:
/* remove deadline timer, if any */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
if (lev->taskflags[p] & RM_FLAG_SPORADIC) {
/* sporadic task */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
proc_table[p].status = RM_WAIT;
} else {
/* it's late, move it directly to SLEEP */
proc_table[p].status = SLEEP;
lev->taskflags[p] &= ~RM_FLAG_SPOR_LATE;
}
} else {
/* periodic task */
if (!(lev->nact[p] > 0)) {
/* we are on time. go to the RM_WAIT state */
proc_table[p].status = RM_WAIT;
} else {
/* we are late. delete pending activations and go to SLEEP */
lev->nact[p] = 0;
proc_table[p].status = SLEEP;
/* remove end of period timer */
if (lev->eop_timer[p] != -1) {
kern_event_delete(lev->eop_timer[p]);
lev->eop_timer[p] = -1;
}
}
}
break;
}
 
/* we reset the capacity counters... */
if (lev->flags & RM_ENABLE_WCET_CHECK)
proc_table[p].avail_time = proc_table[p].wcet;
 
if (lev->flags & RM_ENABLE_WCET_CHECK) {
proc_table[p].control |= CONTROL_CAP;
}
proc_table[p].avail_time = proc_table[p].wcet;
jet_update_endcycle(); /* Update the Jet data... */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
/* when the deadline timer fire, it recognize the situation and set
correctly all the stuffs (like reactivation, sleep, etc... ) */
TRACER_LOGEVENT(FTrace_EVT_task_end_cycle,(unsigned short int)proc_table[p].context,(unsigned int)l);
 
return 0;
 
}
 
static void RM_public_end(LEVEL l, PID p)
{
proc_table[p].status = RM_ZOMBIE;
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
/* When the deadline timer fire, it put the task descriptor in
the free queue, and free the allocated bandwidth... */
if (!(lev->taskflags[p] & RM_FLAG_SPOR_LATE)) {
/* remove the deadline timer (if any) */
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
proc_table[p].status = RM_ZOMBIE;
} else {
/* no endperiod timer will be fired, free the task now! */
proc_table[p].status = FREE;
iq_insertfirst(p,&freedesc);
/* free the allocated bandwidth */
lev->U -= (MAX_BANDWIDTH/lev->rdeadline[p]) * proc_table[p].wcet;
}
}
 
static void RM_private_insert(LEVEL l, PID p, TASK_MODEL *m)
363,22 → 533,21
 
job = (JOB_TASK_MODEL *)m;
 
*iq_query_timespec(p,&lev->ready) = job->deadline;
/* Insert task in the correct position */
*iq_query_timespec(p, &lev->ready) = job->deadline;
/* THIS IS QUESTIONABLE!! rel deadline? */
*iq_query_priority(p, &lev->ready) = lev->period[p] = job->period;
lev->deadline_timer[p] = -1;
 
/* Insert task in the correct position */
iq_priority_insert(p,&lev->ready);
proc_table[p].status = RM_READY;
lev->dl_timer[p] = -1;
 
if (job->noraiseexc)
lev->flag[p] = RM_FLAG_NORAISEEXC;
else {
lev->flag[p] = 0;
lev->deadline_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
RM_timer_guest_deadline,
(void *)p);
lev->period[p] = job->period;
 
if (!job->noraiseexc) {
lev->dl_timer[p] = kern_event_post(iq_query_timespec(p, &lev->ready),
RM_timer_guest_deadline,(void *)p);
}
}
 
405,26 → 574,21
{
RM_level_des *lev = (RM_level_des *)(level_table[l]);
 
//kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
if (proc_table[p].status == RM_READY)
{
iq_extract(p, &lev->ready);
//kern_printf("(g_end rdy extr)");
}
 
/* we remove the deadline timer, because the slice is finished */
if (lev->deadline_timer[p] != NIL) {
// kern_printf("RM_guest_end: dline timer %d\n",lev->deadline_timer[p]);
kern_event_delete(lev->deadline_timer[p]);
lev->deadline_timer[p] = NIL;
if (lev->dl_timer[p] != -1) {
kern_event_delete(lev->dl_timer[p]);
lev->dl_timer[p] = -1;
}
 
}
 
/* Registration functions */
 
/*+ Registration function:
int flags the init flags ... see rm.h +*/
 
/* Registration function:
int flags the init flags ... see rm.h */
LEVEL RM_register_level(int flags)
{
LEVEL l; /* the level that we register */
462,9 → 626,13
 
/* fill the RM descriptor part */
for(i=0; i<MAX_PROC; i++) {
lev->period[i] = 0;
lev->deadline_timer[i] = -1;
lev->flag[i] = 0;
lev->period[i] = 0;
lev->dl_timer[i] = -1;
lev->taskflags[i] = 0;
lev->dl_miss[i] = 0;
lev->wcet_miss[i] = 0;
lev->nact[i] = 0;
lev->nskip[i] = 0;
}
 
iq_init(&lev->ready, &freedesc, 0);
481,3 → 649,35
return lev->U;
}
 
/* Return the number of activations of task p still queued
   (pending jobs not yet released). */
int RM_get_nact(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->nact[p];
}
 
/* Return how many deadline misses task p has accumulated
   (counted by RM_timer_deadline when exceptions are disabled). */
int RM_get_dl_miss(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->dl_miss[p];
}
 
/* Return how many WCET overruns task p has accumulated
   (counted when WCET checking is on but exceptions are off). */
int RM_get_wcet_miss(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->wcet_miss[p];
}
 
/* Return how many activations of (sporadic) task p were skipped
   because they arrived too early. */
int RM_get_nskip(PID p)
{
  RM_level_des *lev =
    (RM_level_des *)(level_table[proc_table[p].task_level]);

  return lev->nskip[p];
}
 
/shark/trunk/kernel/modules/rrsoft.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rrsoft.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rrsoft.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RRSOFT (Round Robin)
257,7 → 257,7
proc_table[p].status = RRSOFT_READY;
}
 
static void RRSOFT_public_activate(LEVEL l, PID p)
static void RRSOFT_public_activate(LEVEL l, PID p, struct timespec *t)
{
RRSOFT_level_des *lev = (RRSOFT_level_des *)(level_table[l]);
 
372,7 → 372,7
if (p == NIL)
printk("\nPanic!!! can't create main task...\n");
 
RRSOFT_public_activate(lev,p);
RRSOFT_public_activate(lev,p,NULL);
}
 
 
/shark/trunk/kernel/modules/ps.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ps.c,v 1.7 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: ps.c,v 1.8 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the aperiodic server PS (Polling Server)
318,7 → 318,7
}
}
 
static void PS_public_activate(LEVEL l, PID p)
static void PS_public_activate(LEVEL l, PID p, struct timespec *t)
{
PS_level_des *lev = (PS_level_des *)(level_table[l]);
 
/shark/trunk/kernel/modules/rr.c
20,11 → 20,11
 
/**
------------
CVS : $Id: rr.c,v 1.8 2004-03-10 14:51:44 giacomo Exp $
CVS : $Id: rr.c,v 1.9 2004-05-17 15:03:52 anton Exp $
 
File: $File$
Revision: $Revision: 1.8 $
Last update: $Date: 2004-03-10 14:51:44 $
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-17 15:03:52 $
------------
 
This file contains the scheduling module RR (Round Robin)
191,7 → 191,7
#endif
}
 
static void RR_public_activate(LEVEL l, PID p)
static void RR_public_activate(LEVEL l, PID p, struct timespec *t)
{
RR_level_des *lev = (RR_level_des *)(level_table[l]);
 
294,7 → 294,7
if (p == NIL)
printk(KERN_EMERG "Panic!!! can't create main task... errno =%d\n",errno);
 
RR_public_activate(lev,p);
RR_public_activate(lev,p,NULL);
 
#ifdef RRDEBUG
rr_printf("(main created %d)",p);
/shark/trunk/kernel/modules/intdrive.c
183,7 → 183,7
}
 
static void INTDRIVE_public_activate(LEVEL l, PID p)
static void INTDRIVE_public_activate(LEVEL l, PID p, struct timespec *t)
{
 
INTDRIVE_level_des *lev = (INTDRIVE_level_des *)(level_table[l]);
/shark/trunk/kernel/modules/ss.c
20,11 → 20,11
 
/**
------------
CVS : $Id: ss.c,v 1.7 2004-03-10 14:51:45 giacomo Exp $
CVS : $Id: ss.c,v 1.8 2004-05-17 15:03:53 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:45 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:53 $
------------
 
This file contains the aperiodic Sporadic Server (SS).
751,10 → 751,9
}
}
 
static void SS_public_activate(LEVEL l, PID p)
static void SS_public_activate(LEVEL l, PID p, struct timespec *t)
{
SS_level_des *lev = (SS_level_des *)(level_table[l]);
struct timespec ty;
 
#ifdef DEBUG
kern_printf("SS_tacti ");
770,13 → 769,12
if (lev->server_active == SS_SERVER_NOTACTIVE) {
lev->server_active = SS_SERVER_ACTIVE;
/* set replenish time */
kern_gettime(&ty);
ADDUSEC2TIMESPEC(lev->period, &ty);
TIMESPEC_ASSIGN(&lev->lastdline, &ty);
ADDUSEC2TIMESPEC(lev->period, t);
TIMESPEC_ASSIGN(&lev->lastdline, t);
#ifdef DEBUG
kern_printf("RT=%d.%d ",ty.tv_sec,ty.tv_nsec);
kern_printf("RT=%d.%d ",t->tv_sec,t->tv_nsec);
#endif
kern_event_post(&ty, SS_replenish_timer, (void *) l);
kern_event_post(t, SS_replenish_timer, (void *) l);
}
}
lev->activated = p;
/shark/trunk/kernel/modules/tbs.c
20,11 → 20,11
 
/**
------------
CVS : $Id: tbs.c,v 1.7 2004-03-10 14:51:45 giacomo Exp $
CVS : $Id: tbs.c,v 1.8 2004-05-17 15:03:53 anton Exp $
 
File: $File$
Revision: $Revision: 1.7 $
Last update: $Date: 2004-03-10 14:51:45 $
Revision: $Revision: 1.8 $
Last update: $Date: 2004-05-17 15:03:53 $
------------
 
This file contains the aperiodic server TBS (Total Bandwidth Server)
252,17 → 252,15
private_epilogue(lev->scheduling_level,p);
}
 
static void TBS_public_activate(LEVEL l, PID p)
static void TBS_public_activate(LEVEL l, PID p, struct timespec *t)
{
TBS_level_des *lev = (TBS_level_des *)(level_table[l]);
struct timespec t;
 
if (proc_table[p].status == SLEEP ||
proc_table[p].status == TBS_WCET_VIOLATED) {
 
kern_gettime(&t);
if (TIMESPEC_A_GT_B(&t, &lev->lastdline))
TIMESPEC_ASSIGN(&lev->lastdline, &t );
if (TIMESPEC_A_GT_B(t, &lev->lastdline))
TIMESPEC_ASSIGN(&lev->lastdline, t );
 
 
if (lev->activated == NIL) {